Posted to commits@impala.apache.org by ta...@apache.org on 2017/02/06 21:11:05 UTC

[3/7] incubator-impala git commit: IMPALA-1670, IMPALA-4141: Support multiple partitions in ALTER TABLE ADD PARTITION

IMPALA-1670, IMPALA-4141: Support multiple partitions in ALTER TABLE
ADD PARTITION

Just like Hive, Impala should support multiple partitions in ALTER
TABLE ADD PARTITION statements. The syntax is as follows:

ALTER TABLE table_name ADD [IF NOT EXISTS]
    PARTITION partition_spec1 [location_spec1] [cache_spec1]
    PARTITION partition_spec2 [location_spec2] [cache_spec2]
    ...
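
For example, the following statement (an illustrative sketch; the
'sales' table partitioned by (year, month) is hypothetical) adds three
partitions at once, each with its own optional clauses:

    ALTER TABLE sales ADD IF NOT EXISTS
        PARTITION (year=2016, month=1)
        PARTITION (year=2016, month=2) LOCATION '/data/sales/y2016m2'
        PARTITION (year=2016, month=3) UNCACHED;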

The grammar was modified to handle the new syntax. A new PartitionDef
class captures the repeatable part of the statement; TPartitionDef is
the corresponding Thrift class.

AlterTableAddPartitionStmt and CatalogOpExecutor classes were also
modified to work with a list of partitions. Duplicate partition specs
are rejected in AlterTableAddPartitionStmt.analyze().

Added FE, E2E and integration tests.

Change-Id: Iddbc951f2931f488f7048c9780260f6b49100750
Reviewed-on: http://gerrit.cloudera.org:8080/4144
Reviewed-by: Dimitris Tsirogiannis <dt...@cloudera.com>
Tested-by: Impala Public Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/c452595b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/c452595b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/c452595b

Branch: refs/heads/master
Commit: c452595bff5840f86b35f44d677558d3a940ceab
Parents: 0f8ae35
Author: Attila Jeges <at...@cloudera.com>
Authored: Tue Aug 16 01:05:42 2016 -0700
Committer: Impala Public Jenkins <im...@gerrit.cloudera.org>
Committed: Sat Feb 4 01:47:23 2017 +0000

----------------------------------------------------------------------
 common/thrift/JniCatalog.thrift                 |  22 +-
 fe/src/main/cup/sql-parser.cup                  |  27 ++-
 .../analysis/AlterTableAddPartitionStmt.java    |  93 ++++----
 .../apache/impala/analysis/PartitionDef.java    | 111 +++++++++
 .../impala/analysis/PartitionKeyValue.java      |  12 +-
 .../apache/impala/analysis/PartitionSpec.java   |  30 ++-
 .../impala/analysis/PartitionSpecBase.java      |   1 +
 .../impala/service/CatalogOpExecutor.java       | 237 +++++++++++++------
 .../apache/impala/analysis/AnalyzeDDLTest.java  |  72 ++++++
 .../impala/analysis/AuthorizationTest.java      |  13 +-
 .../org/apache/impala/analysis/ParserTest.java  |  22 +-
 .../org/apache/impala/analysis/ToSqlTest.java   |  42 ++++
 .../queries/QueryTest/alter-table.test          | 104 ++++++++
 .../queries/QueryTest/grant_revoke.test         |  27 +++
 tests/common/impala_test_suite.py               |  44 ++++
 tests/metadata/test_hms_integration.py          | 169 ++++++++++---
 tests/metadata/test_refresh_partition.py        |  39 +--
 17 files changed, 858 insertions(+), 207 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index d224658..96b2e00 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -171,20 +171,28 @@ struct TAlterTableAddReplaceColsParams {
   2: required bool replace_existing_cols
 }
 
-// Parameters for ALTER TABLE ADD PARTITION commands
-struct TAlterTableAddPartitionParams {
+// Parameters for specifying a single partition in ALTER TABLE ADD PARTITION
+struct TPartitionDef {
   // The partition spec (list of keys and values) to add.
   1: required list<CatalogObjects.TPartitionKeyValue> partition_spec
 
-  // If true, no error is raised if a partition with the same spec already exists.
-  2: required bool if_not_exists
-
   // Optional HDFS storage location for the Partition. If not specified the
   // default storage location is used.
-  3: optional string location
+  2: optional string location
 
   // Optional caching operation to perform on the newly added partition.
-  4: optional THdfsCachingOp cache_op
+  3: optional THdfsCachingOp cache_op
+}
+
+// Parameters for ALTER TABLE ADD PARTITION commands
+struct TAlterTableAddPartitionParams {
+  // If 'if_not_exists' is true, no error is raised when a partition with the same spec
+  // already exists. If multiple partitions are specified, the statement will ignore
+  // those that exist and add the rest.
+  1: required bool if_not_exists
+
+  // The list of partitions to add
+  2: required list<TPartitionDef> partitions
 }
 
 enum TRangePartitionOperationType {

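To make the new layout concrete, here is a hedged sketch of how one
statement maps onto the structs above: 'if_not_exists' now applies once
to the whole statement, while 'location' and 'cache_op' travel with
each TPartitionDef (table name and path are illustrative, and the
resulting request is shown in pseudo-notation):

    ALTER TABLE t ADD IF NOT EXISTS
        PARTITION (year=2016) LOCATION '/data/y2016';
    -- Assumed resulting request:
    --   TAlterTableAddPartitionParams {
    --     if_not_exists = true,
    --     partitions = [TPartitionDef {
    --       partition_spec = [year=2016], location = '/data/y2016'}]}
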
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index 33f0591..adef592 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -392,6 +392,8 @@ nonterminal PartitionKeyValue partition_key_value;
 nonterminal PartitionKeyValue static_partition_key_value;
 nonterminal Qualifier union_op;
 
+nonterminal PartitionDef partition_def;
+nonterminal List<PartitionDef> partition_def_list;
 nonterminal AlterTableStmt alter_tbl_stmt;
 nonterminal StatementBase alter_view_stmt;
 nonterminal ComputeStatsStmt compute_stats_stmt;
@@ -914,16 +916,31 @@ opt_kw_role ::=
   | /* empty */
   ;
 
+partition_def ::=
+  partition_spec:partition location_val:location cache_op_val:cache_op
+  {: RESULT = new PartitionDef(partition, location, cache_op); :}
+  ;
+
+partition_def_list ::=
+  partition_def:item
+  {:
+    List<PartitionDef> list = Lists.newArrayList(item);
+    RESULT = list;
+  :}
+  | partition_def_list:list partition_def:item
+  {:
+    list.add(item);
+    RESULT = list;
+  :}
+  ;
+
 alter_tbl_stmt ::=
   KW_ALTER KW_TABLE table_name:table replace_existing_cols_val:replace KW_COLUMNS
   LPAREN column_def_list:col_defs RPAREN
   {: RESULT = new AlterTableAddReplaceColsStmt(table, col_defs, replace); :}
   | KW_ALTER KW_TABLE table_name:table KW_ADD if_not_exists_val:if_not_exists
-    partition_spec:partition location_val:location cache_op_val:cache_op
-  {:
-    RESULT = new AlterTableAddPartitionStmt(table, partition,
-        location, if_not_exists, cache_op);
-  :}
+    partition_def_list:partitions
+  {: RESULT = new AlterTableAddPartitionStmt(table, if_not_exists, partitions); :}
   | KW_ALTER KW_TABLE table_name:table KW_DROP opt_kw_column ident_or_default:col_name
   {: RESULT = new AlterTableDropColStmt(table, col_name); :}
   | KW_ALTER KW_TABLE table_name:table KW_ADD if_not_exists_val:if_not_exists

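The production places if_not_exists_val once, ahead of
partition_def_list, so IF NOT EXISTS cannot appear between PARTITION
clauses. Illustrative statements (table name hypothetical):

    -- Accepted: a single IF NOT EXISTS in front of the whole list.
    ALTER TABLE t ADD IF NOT EXISTS PARTITION (p=1) PARTITION (p=2);
    -- Syntax error: IF NOT EXISTS between partition definitions.
    ALTER TABLE t ADD PARTITION (p=1) IF NOT EXISTS PARTITION (p=2);
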
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
index b946436..151f245 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAddPartitionStmt.java
@@ -17,16 +17,20 @@
 
 package org.apache.impala.analysis;
 
-import org.apache.impala.authorization.Privilege;
-import org.apache.impala.catalog.HdfsTable;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.thrift.TAlterTableAddPartitionParams;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableType;
-import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.List;
+import java.util.Set;
 
 import com.google.common.base.Preconditions;
 
@@ -34,48 +38,44 @@ import com.google.common.base.Preconditions;
  * Represents an ALTER TABLE ADD PARTITION statement.
  */
 public class AlterTableAddPartitionStmt extends AlterTableStmt {
-  private final HdfsUri location_;
   private final boolean ifNotExists_;
-  private final PartitionSpec partitionSpec_;
-  private final HdfsCachingOp cacheOp_;
+  private final List<PartitionDef> partitions_;
 
   public AlterTableAddPartitionStmt(TableName tableName,
-      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
-      HdfsCachingOp cacheOp) {
+      boolean ifNotExists, List<PartitionDef> partitions) {
     super(tableName);
-    Preconditions.checkNotNull(partitionSpec);
-    location_ = location;
+    Preconditions.checkNotNull(partitions);
+    Preconditions.checkState(!partitions.isEmpty());
+    partitions_ = partitions;
+    // If 'ifNotExists' is true, no error is raised if a partition with the same spec
+    // already exists. If multiple partitions are specified, the statement will ignore
+    // those that exist and add the rest.
     ifNotExists_ = ifNotExists;
-    partitionSpec_ = partitionSpec;
-    partitionSpec_.setTableName(tableName);
-    cacheOp_ = cacheOp;
+    for (PartitionDef p: partitions_) {
+      p.setTableName(tableName);
+      if (!ifNotExists_) p.setPartitionShouldNotExist();
+    }
   }
 
   public boolean getIfNotExists() { return ifNotExists_; }
-  public HdfsUri getLocation() { return location_; }
 
   @Override
   public String toSql() {
-    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
-    sb.append(" ADD ");
-    if (ifNotExists_) {
-      sb.append("IF NOT EXISTS ");
-    }
-    sb.append(" " + partitionSpec_.toSql());
-    if (location_ != null) sb.append(String.format(" LOCATION '%s'", location_));
-    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
+    StringBuilder sb = new StringBuilder("ALTER TABLE ");
+    if (getDb() != null) sb.append(getDb() + ".");
+    sb.append(getTbl()).append(" ADD");
+    if (ifNotExists_) sb.append(" IF NOT EXISTS");
+    for (PartitionDef p: partitions_) sb.append(" " + p.toSql());
     return sb.toString();
   }
 
   @Override
   public TAlterTableParams toThrift() {
-    TAlterTableParams params = super.toThrift();
-    params.setAlter_type(TAlterTableType.ADD_PARTITION);
     TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
-    addPartParams.setPartition_spec(partitionSpec_.toThrift());
-    addPartParams.setLocation(location_ == null ? null : location_.toString());
     addPartParams.setIf_not_exists(ifNotExists_);
-    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
+    for (PartitionDef p: partitions_) addPartParams.addToPartitions(p.toThrift());
+    TAlterTableParams params = super.toThrift();
+    params.setAlter_type(TAlterTableType.ADD_PARTITION);
     params.setAdd_partition_params(addPartParams);
     return params;
   }
@@ -86,34 +86,21 @@ public class AlterTableAddPartitionStmt extends AlterTableStmt {
     Table table = getTargetTable();
     if (table instanceof KuduTable) {
       throw new AnalysisException("ALTER TABLE ADD PARTITION is not supported for " +
-          "Kudu tables: " + partitionSpec_.toSql());
+          "Kudu tables: " + table.getTableName());
     }
-    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
-    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
-    partitionSpec_.analyze(analyzer);
-    if (location_ != null) {
-      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    if (partitions_.size() > CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC) {
+      throw new AnalysisException(
+          String.format("One ALTER TABLE ADD PARTITION cannot add more than %d " +
+          "partitions.", CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC));
     }
+    Set<String> partitionSpecs = Sets.newHashSet();
+    for (PartitionDef p: partitions_) {
+      p.analyze(analyzer);
 
-    boolean shouldCache = false;
-    if (cacheOp_ != null) {
-      cacheOp_.analyze(analyzer);
-      shouldCache = cacheOp_.shouldCache();
-    } else if (table instanceof HdfsTable) {
-      shouldCache = ((HdfsTable)table).isMarkedCached();
-    }
-    if (shouldCache) {
-      if (!(table instanceof HdfsTable)) {
-        throw new AnalysisException("Caching must target a HDFS table: " +
-            table.getFullName());
-      }
-      HdfsTable hdfsTable = (HdfsTable)table;
-      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
-          (location_ == null && !hdfsTable.isLocationCacheable())) {
-        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
-            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
-            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
-            table.getFullName()));
+      // Make sure no duplicate partition specs are specified
+      if (!partitionSpecs.add(p.getPartitionSpec().toCanonicalString())) {
+        throw new AnalysisException(String.format("Duplicate partition spec: (%s)",
+            Joiner.on(", ").join(p.getPartitionSpec().getPartitionSpecKeyValues())));
       }
     }
   }

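analyze() detects duplicates via PartitionSpec.toCanonicalString(),
which orders keys by column name, so specs that differ only in key
order or case are rejected. A sketch, using the table from the FE
tests:

    ALTER TABLE functional.alltypes ADD
        PARTITION (year=2050, month=10)
        PARTITION (Month=10, YEAR=2050);
    -- Rejected at analysis: Duplicate partition spec: (month=10, year=2050)
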
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java b/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
new file mode 100644
index 0000000..6a1175d
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionDef.java
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.catalog.Table;
+import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+import org.apache.impala.thrift.TPartitionDef;
+
+/**
+ * Represents a partition definition used in ALTER TABLE ADD PARTITION consisting of
+ * partition key-value pairs and an optional location and optional caching options.
+ */
+public class PartitionDef implements ParseNode {
+  private final PartitionSpec partitionSpec_;
+  private final HdfsUri location_;
+  private final HdfsCachingOp cacheOp_;
+
+  public PartitionDef(PartitionSpec partitionSpec, HdfsUri location,
+      HdfsCachingOp cacheOp) {
+    Preconditions.checkNotNull(partitionSpec);
+    partitionSpec_ = partitionSpec;
+    location_ = location;
+    cacheOp_ = cacheOp;
+  }
+
+  public void setTableName(TableName tableName) {
+    partitionSpec_.setTableName(tableName);
+  }
+  public void setPartitionShouldNotExist() {
+    partitionSpec_.setPartitionShouldNotExist();
+  }
+
+  public HdfsUri getLocation() { return location_; }
+  public PartitionSpec getPartitionSpec() { return partitionSpec_; }
+
+  @Override
+  public String toSql() {
+    StringBuilder sb = new StringBuilder(partitionSpec_.toSql());
+    if (location_ != null) sb.append(String.format(" LOCATION '%s'", location_));
+    if (cacheOp_ != null) sb.append(" " + cacheOp_.toSql());
+    return sb.toString();
+  }
+
+  public TPartitionDef toThrift() {
+    TPartitionDef params = new TPartitionDef();
+    params.setPartition_spec(partitionSpec_.toThrift());
+    if (location_ != null) params.setLocation(location_.toString());
+    if (cacheOp_ != null) params.setCache_op(cacheOp_.toThrift());
+    return params;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
+    partitionSpec_.analyze(analyzer);
+
+    if (location_ != null) {
+      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
+    }
+
+    Table table;
+    try {
+      table = analyzer.getTable(partitionSpec_.getTableName(), Privilege.ALTER,
+          false);
+    } catch (TableLoadingException e) {
+      throw new AnalysisException(e.getMessage(), e);
+    }
+
+    Preconditions.checkState(table instanceof HdfsTable);
+    HdfsTable hdfsTable = (HdfsTable)table;
+
+    boolean shouldCache;
+    if (cacheOp_ != null) {
+      cacheOp_.analyze(analyzer);
+      shouldCache = cacheOp_.shouldCache();
+    } else {
+      shouldCache = hdfsTable.isMarkedCached();
+    }
+    if (shouldCache) {
+      if ((location_ != null && !FileSystemUtil.isPathCacheable(location_.getPath())) ||
+          (location_ == null && !hdfsTable.isLocationCacheable())) {
+        throw new AnalysisException(String.format("Location '%s' cannot be cached. " +
+            "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
+            (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
+            hdfsTable.getFullName()));
+      }
+    }
+  }
+}

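Each PartitionDef is analyzed on its own: a LOCATION URI requires ALL
privilege with read/write access, and a caching op must target a
cacheable location. A sketch of one statement mixing the per-partition
options (table, paths and pool name follow the E2E tests):

    ALTER TABLE t ADD
        PARTITION (i=1) LOCATION '/i1' CACHED IN 'testPool' WITH REPLICATION = 3
        PARTITION (i=2) LOCATION '/i2'
        PARTITION (i=3) UNCACHED;
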
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
index 4289108..bc387f0 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionKeyValue.java
@@ -17,8 +17,9 @@
 
 package org.apache.impala.analysis;
 
-import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
+import org.apache.impala.common.AnalysisException;
+import java.util.Comparator;
 
 /**
  * Representation of a single column:value element in the PARTITION (...) clause of an
@@ -85,4 +86,13 @@ public class PartitionKeyValue {
     }
     return literalValue.getStringValue();
   }
+
+  public static Comparator<PartitionKeyValue> getColNameComparator() {
+    return new Comparator<PartitionKeyValue>() {
+      @Override
+      public int compare(PartitionKeyValue t, PartitionKeyValue o) {
+        return t.colName_.compareTo(o.colName_);
+      }
+    };
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
index c131f84..2ea53c4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpec.java
@@ -17,8 +17,11 @@
 
 package org.apache.impala.analysis;
 
-import java.util.List;
-import java.util.Set;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.impala.catalog.Column;
@@ -26,11 +29,9 @@ import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.thrift.TPartitionKeyValue;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
 
 /**
  * Represents a partition spec - a collection of partition key/values.
@@ -46,9 +47,7 @@ public class PartitionSpec extends PartitionSpecBase {
     this.partitionSpec_ = ImmutableList.copyOf(partitionSpec);
   }
 
-  public List<PartitionKeyValue> getPartitionSpecKeyValues() {
-    return partitionSpec_;
-  }
+  public List<PartitionKeyValue> getPartitionSpecKeyValues() { return partitionSpec_; }
 
   public boolean partitionExists() {
     Preconditions.checkNotNull(partitionExists_);
@@ -149,4 +148,15 @@ public class PartitionSpec extends PartitionSpecBase {
     }
     return String.format("PARTITION (%s)", Joiner.on(", ").join(partitionSpecStr));
   }
+
+  /**
+   * Utility method that returns the concatenated string of key=value pairs ordered by
+   * key. Since analyze() ensures that there are no duplicate keys in partition specs,
+   * this method provides a uniquely comparable string representation for this object.
+   */
+  public String toCanonicalString() {
+    List<PartitionKeyValue> sortedPartitionSpec = Lists.newArrayList(partitionSpec_);
+    Collections.sort(sortedPartitionSpec, PartitionKeyValue.getColNameComparator());
+    return Joiner.on(",").join(sortedPartitionSpec);
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
index e76936e..5ead2d8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
+++ b/fe/src/main/java/org/apache/impala/analysis/PartitionSpecBase.java
@@ -40,6 +40,7 @@ public abstract class PartitionSpecBase implements ParseNode {
   public String getTbl() { return tableName_.getTbl(); }
 
   public void setTableName(TableName tableName) {this.tableName_ = tableName; }
+  public TableName getTableName() { return tableName_; }
 
   // The value Hive is configured to use for NULL partition key values.
   // Set during analysis.

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 76571cc..c11f990 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -17,7 +17,14 @@
 
 package org.apache.impala.service;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -27,6 +34,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -129,6 +137,7 @@ import org.apache.impala.thrift.TGrantRevokeRoleParams;
 import org.apache.impala.thrift.THdfsCachingOp;
 import org.apache.impala.thrift.THdfsFileFormat;
 import org.apache.impala.thrift.TPartitionKeyValue;
+import org.apache.impala.thrift.TPartitionDef;
 import org.apache.impala.thrift.TPartitionStats;
 import org.apache.impala.thrift.TPrivilege;
 import org.apache.impala.thrift.TResetMetadataRequest;
@@ -147,12 +156,6 @@ import org.apache.impala.util.HdfsCachingUtil;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
 /**
  * Class used to execute Catalog Operations, including DDL and refresh/invalidate
  * metadata requests. Acts as a bridge between the Thrift catalog operation requests
@@ -234,7 +237,9 @@ public class CatalogOpExecutor {
 
   // The maximum number of partitions to update in one Hive Metastore RPC.
   // Used when persisting the results of COMPUTE STATS statements.
-  private final static short MAX_PARTITION_UPDATES_PER_RPC = 500;
+  // It is also used as an upper limit for the number of partitions allowed in one ADD
+  // PARTITION statement.
+  public final static short MAX_PARTITION_UPDATES_PER_RPC = 500;
 
   public CatalogOpExecutor(CatalogServiceCatalog catalog) {
     catalog_ = catalog;
@@ -378,6 +383,8 @@ public class CatalogOpExecutor {
           catalog_.getLock().writeLock().unlock();
         }
       }
+
+      Table refreshedTable = null;
       // Get a new catalog version to assign to the table being altered.
       long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
       boolean reloadMetadata = true;
@@ -396,15 +403,10 @@ public class CatalogOpExecutor {
           reloadTableSchema = true;
           break;
         case ADD_PARTITION:
-          TAlterTableAddPartitionParams addPartParams =
-              params.getAdd_partition_params();
-          // Create and add HdfsPartition object to the corresponding HdfsTable and
-          // load its block metadata. Get the new table object with an updated catalog
-          // version. If the partition already exists in Hive and "IfNotExists" is
-          // true, then return without populating the response object.
-          Table refreshedTable = alterTableAddPartition(tbl,
-              addPartParams.getPartition_spec(), addPartParams.isIf_not_exists(),
-              addPartParams.getLocation(), addPartParams.getCache_op());
+          // Create and add HdfsPartition objects to the corresponding HdfsTable and load
+          // their block metadata. Get the new table object with an updated catalog
+          // version.
+          refreshedTable = alterTableAddPartitions(tbl, params.getAdd_partition_params());
           if (refreshedTable != null) {
             refreshedTable.setCatalogVersion(newCatalogVersion);
             addTableToCatalogUpdate(refreshedTable, response.result);
@@ -1905,38 +1907,145 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Adds a new partition to the given table in Hive. Also creates and adds
-   * a new HdfsPartition to the corresponding HdfsTable.
-   * If cacheOp is not null, the partition's location will be cached according
-   * to the cacheOp. If cacheOp is null, the new partition will inherit the
-   * the caching properties of the parent table.
-   * Returns null if the partition already exists in Hive and "IfNotExists"
-   * is true. Otherwise, returns the table object with an updated catalog version.
+   * Adds new partitions to the given table in HMS. Also creates and adds new
+   * HdfsPartitions to the corresponding HdfsTable. Returns the table object with an
+   * updated catalog version or null if the table is not altered because all the
+   * partitions already exist and IF NOT EXISTS is specified.
+   * If IF NOT EXISTS is not used and there is a conflict with the partitions that already
+   * exist in HMS or catalog cache, then:
+   * - HMS and catalog cache are left intact, and
+   * - ImpalaRuntimeException is thrown.
+   * If IF NOT EXISTS is used, conflicts are handled as follows:
+   * 1. If a partition exists in catalog cache, ignore it.
+   * 2. If a partition exists in HMS but not in catalog cache, reload partition from HMS.
+   * Caching directives are only applied to new partitions that were absent from both the
+   * catalog cache and the HMS.
    */
-  private Table alterTableAddPartition(Table tbl, List<TPartitionKeyValue> partitionSpec,
-      boolean ifNotExists, String location, THdfsCachingOp cacheOp)
-      throws ImpalaException {
+  private Table alterTableAddPartitions(Table tbl,
+      TAlterTableAddPartitionParams addPartParams) throws ImpalaException {
     Preconditions.checkState(tbl.getLock().isHeldByCurrentThread());
-    TableName tableName = tbl.getTableName();
-    if (ifNotExists && catalog_.containsHdfsPartition(tableName.getDb(),
-        tableName.getTbl(), partitionSpec)) {
-      LOG.trace(String.format("Skipping partition creation because (%s) already exists" +
-          " and ifNotExists is true.", Joiner.on(", ").join(partitionSpec)));
-      return null;
-    }
 
-    org.apache.hadoop.hive.metastore.api.Partition partition = null;
-    Table result = null;
-    List<Long> cacheIds = null;
+    TableName tableName = tbl.getTableName();
     org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
-    Long parentTblCacheDirId =
-        HdfsCachingUtil.getCacheDirectiveId(msTbl.getParameters());
+    boolean ifNotExists = addPartParams.isIf_not_exists();
+    List<Partition> hmsPartitionsToAdd = Lists.newArrayList();
+    Map<List<String>, THdfsCachingOp> partitionCachingOpMap = Maps.newHashMap();
+    for (TPartitionDef partParams: addPartParams.getPartitions()) {
+      List<TPartitionKeyValue> partitionSpec = partParams.getPartition_spec();
+      if (catalog_.containsHdfsPartition(tableName.getDb(), tableName.getTbl(),
+          partitionSpec)) {
+        String partitionSpecStr = Joiner.on(", ").join(partitionSpec);
+        if (!ifNotExists) {
+          throw new ImpalaRuntimeException(String.format("Partition already " +
+              "exists: (%s)", partitionSpecStr));
+        }
+        LOG.trace(String.format("Skipping partition creation because (%s) already " +
+            "exists and IF NOT EXISTS was specified.", partitionSpecStr));
+        continue;
+      }
+
+      Partition hmsPartition = createHmsPartition(partitionSpec, msTbl, tableName,
+          partParams.getLocation());
+      hmsPartitionsToAdd.add(hmsPartition);
+
+      THdfsCachingOp cacheOp = partParams.getCache_op();
+      if (cacheOp != null) partitionCachingOpMap.put(hmsPartition.getValues(), cacheOp);
+    }
 
-    partition = createHmsPartition(partitionSpec, msTbl, tableName, location);
+    if (hmsPartitionsToAdd.isEmpty()) return null;
 
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      // Add the new partition.
-      partition = msClient.getHiveClient().add_partition(partition);
+      // Add partitions in bulk
+      List<Partition> addedHmsPartitions = null;
+      try {
+        addedHmsPartitions = msClient.getHiveClient().add_partitions(hmsPartitionsToAdd,
+            ifNotExists, true);
+      } catch (TException e) {
+        throw new ImpalaRuntimeException(
+            String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partitions"), e);
+      }
+
+      // Handle HDFS cache. This is done in a separate round because we have to apply
+      // caching only to newly added partitions.
+      alterTableCachePartitions(msTbl, msClient, tableName, addedHmsPartitions,
+          partitionCachingOpMap);
+
+      // If 'ifNotExists' is true, add_partitions() may fail to add all the partitions to
+      // HMS because some of them may already exist there. In that case, we load into
+      // the catalog those partitions that already exist in HMS but aren't in the
+      // catalog yet.
+      if (hmsPartitionsToAdd.size() != addedHmsPartitions.size()) {
+        List<Partition> difference = computeDifference(hmsPartitionsToAdd,
+            addedHmsPartitions);
+        addedHmsPartitions.addAll(
+            getPartitionsFromHms(msTbl, msClient, tableName, difference));
+      }
+
+      for (Partition partition: addedHmsPartitions) {
+        // Create and add the HdfsPartition to catalog. Return the table object with an
+        // updated catalog version.
+        addHdfsPartition(tbl, partition);
+      }
+      return tbl;
+    }
+  }
+
+  /**
+   * Returns the list of Partition objects from 'aList' that cannot be found in 'bList'.
+   * Partition objects are distinguished by partition values only.
+   */
+  private List<Partition> computeDifference(List<Partition> aList,
+      List<Partition> bList) {
+    Set<List<String>> bSet = Sets.newHashSet();
+    for (Partition b: bList) bSet.add(b.getValues());
+
+    List<Partition> diffList = Lists.newArrayList();
+    for (Partition a: aList) {
+      if (!bSet.contains(a.getValues())) diffList.add(a);
+    }
+    return diffList;
+  }
+
+  /**
+   * Returns a list of partitions retrieved from HMS, one for each element of
+   * 'hmsPartitions'.
+   */
+  private List<Partition> getPartitionsFromHms(
+      org.apache.hadoop.hive.metastore.api.Table msTbl, MetaStoreClient msClient,
+      TableName tableName, List<Partition> hmsPartitions)
+      throws ImpalaException {
+    List<String> partitionCols = Lists.newArrayList();
+    for (FieldSchema fs: msTbl.getPartitionKeys()) partitionCols.add(fs.getName());
+
+    List<String> partitionNames = Lists.newArrayListWithCapacity(hmsPartitions.size());
+    for (Partition part: hmsPartitions) {
+      String partName = org.apache.hadoop.hive.common.FileUtils.makePartName(
+          partitionCols, part.getValues());
+      partitionNames.add(partName);
+    }
+    try {
+      return msClient.getHiveClient().getPartitionsByNames(tableName.getDb(),
+          tableName.getTbl(), partitionNames);
+    } catch (TException e) {
+      throw new ImpalaRuntimeException("Metadata inconsistency has occurred. Please run "
+          + "'invalidate metadata <tablename>' to resolve the problem.", e);
+    }
+  }
+
+  /**
+   * Applies HDFS caching ops on 'hmsPartitions' and updates their metadata in Hive
+   * Metastore.
+   * 'partitionCachingOpMap' maps partitions (identified by their partition values) to
+   * their corresponding HDFS caching ops.
+   */
+  private void alterTableCachePartitions(org.apache.hadoop.hive.metastore.api.Table msTbl,
+      MetaStoreClient msClient, TableName tableName, List<Partition> hmsPartitions,
+      Map<List<String>, THdfsCachingOp> partitionCachingOpMap)
+      throws ImpalaException {
+    // Handle HDFS cache
+    List<Long> cacheIds = Lists.newArrayList();
+    List<Partition> hmsPartitionsToCache = Lists.newArrayList();
+    Long parentTblCacheDirId = HdfsCachingUtil.getCacheDirectiveId(msTbl.getParameters());
+    for (Partition partition: hmsPartitions) {
+      THdfsCachingOp cacheOp = partitionCachingOpMap.get(partition.getValues());
       String cachePoolName = null;
       Short replication = null;
       if (cacheOp == null && parentTblCacheDirId != null) {
@@ -1964,28 +2073,16 @@ public class CatalogOpExecutor {
       if (cachePoolName != null) {
         long id = HdfsCachingUtil.submitCachePartitionDirective(partition,
             cachePoolName, replication);
-        cacheIds = Lists.<Long>newArrayList(id);
-        // Update the partition metadata to include the cache directive id.
-        msClient.getHiveClient().alter_partition(partition.getDbName(),
-            partition.getTableName(), partition);
-      }
-      updateLastDdlTime(msTbl, msClient);
-    } catch (AlreadyExistsException e) {
-      if (!ifNotExists) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partition"), e);
+        cacheIds.add(id);
+        hmsPartitionsToCache.add(partition);
       }
-      LOG.trace(String.format("Ignoring '%s' when adding partition to %s because" +
-          " ifNotExists is true.", e, tableName));
-    } catch (TException e) {
-      throw new ImpalaRuntimeException(
-          String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partition"), e);
     }
-    if (cacheIds != null) catalog_.watchCacheDirs(cacheIds, tableName.toThrift());
-    // Return the table object with an updated catalog version after creating the
-    // partition.
-    result = addHdfsPartition(tbl, partition);
-    return result;
+
+    // Update the partition metadata to include the cache directive id.
+    if (!cacheIds.isEmpty()) {
+      applyAlterHmsPartitions(msTbl, msClient, tableName, hmsPartitionsToCache);
+      catalog_.watchCacheDirs(cacheIds, tableName.toThrift());
+    }
   }
 
   /**
@@ -2699,15 +2796,21 @@ public class CatalogOpExecutor {
   private void applyAlterPartition(Table tbl, HdfsPartition partition)
       throws ImpalaException {
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      TableName tableName = tbl.getTableName();
-      msClient.getHiveClient().alter_partition(
-          tableName.getDb(), tableName.getTbl(), partition.toHmsPartition());
-      org.apache.hadoop.hive.metastore.api.Table msTbl =
-          tbl.getMetaStoreTable().deepCopy();
+      applyAlterHmsPartitions(tbl.getMetaStoreTable().deepCopy(), msClient,
+          tbl.getTableName(), Arrays.asList(partition.toHmsPartition()));
+    }
+  }
+
+  private void applyAlterHmsPartitions(org.apache.hadoop.hive.metastore.api.Table msTbl,
+      MetaStoreClient msClient, TableName tableName, List<Partition> hmsPartitions)
+      throws ImpalaException {
+    try {
+      msClient.getHiveClient().alter_partitions(tableName.getDb(), tableName.getTbl(),
+          hmsPartitions);
       updateLastDdlTime(msTbl, msClient);
     } catch (TException e) {
       throw new ImpalaRuntimeException(
-          String.format(HMS_RPC_ERROR_FORMAT_STR, "alter_partition"), e);
+          String.format(HMS_RPC_ERROR_FORMAT_STR, "alter_partitions"), e);
     }
   }
 

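The net behavior of alterTableAddPartitions(), sketched as statements
(assuming partition p=1 already exists and p=2 does not):

    -- Without IF NOT EXISTS the statement fails with
    -- ImpalaRuntimeException and HMS and the catalog cache are left
    -- intact:
    ALTER TABLE t ADD PARTITION (p=1) PARTITION (p=2);
    -- With IF NOT EXISTS, p=1 is skipped and p=2 is added through a
    -- single bulk add_partitions() call:
    ALTER TABLE t ADD IF NOT EXISTS PARTITION (p=1) PARTITION (p=2);
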
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index 5a08f98..29c59e8 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -44,6 +44,7 @@ import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.FrontendTestBase;
 import org.apache.impala.common.RuntimeEnv;
+import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.testutil.TestUtils;
 import org.apache.impala.thrift.TDescribeTableParams;
 import org.apache.impala.util.MetaStoreUtil;
@@ -260,6 +261,77 @@ public class AnalyzeDDLTest extends FrontendTestBase {
   }
 
   @Test
+  public void TestAlterTableAddMultiplePartitions() {
+    for (String cl: new String[]{"if not exists", ""}) {
+      // Add multiple partitions.
+      AnalyzesOk("alter table functional.alltypes add " + cl +
+          " partition(year=2050, month=10)" +
+          " partition(year=2050, month=11)" +
+          " partition(year=2050, month=12)");
+      // Duplicate partition specifications.
+      AnalysisError("alter table functional.alltypes add " + cl +
+          " partition(year=2050, month=10)" +
+          " partition(year=2050, month=11)" +
+          " partition(Month=10, YEAR=2050)",
+          "Duplicate partition spec: (month=10, year=2050)");
+
+      // Multiple partitions with locations and caching.
+      AnalyzesOk("alter table functional.alltypes add " + cl +
+          " partition(year=2050, month=10) location" +
+          " '/test-warehouse/alltypes/y2050m10' cached in 'testPool'" +
+          " partition(year=2050, month=11) location" +
+          " 'hdfs://localhost:20500/test-warehouse/alltypes/y2050m11'" +
+          " cached in 'testPool' with replication = 7" +
+          " partition(year=2050, month=12) location" +
+          " 'file:///test-warehouse/alltypes/y2050m12' uncached");
+      // One of the partitions points to an invalid URI.
+      AnalysisError("alter table functional.alltypes add " + cl +
+          " partition(year=2050, month=10) location" +
+          " '/test-warehouse/alltypes/y2050m10' cached in 'testPool'" +
+          " partition(year=2050, month=11) location" +
+          " 'hdfs://localhost:20500/test-warehouse/alltypes/y2050m11'" +
+          " cached in 'testPool' with replication = 7" +
+          " partition(year=2050, month=12) location" +
+          " 'fil:///test-warehouse/alltypes/y2050m12' uncached",
+          "No FileSystem for scheme: fil");
+      // One of the partitions is cached in a non-existent pool.
+      AnalysisError("alter table functional.alltypes add " + cl +
+          " partition(year=2050, month=10) location" +
+          " '/test-warehouse/alltypes/y2050m10' cached in 'testPool'" +
+          " partition(year=2050, month=11) location" +
+          " 'hdfs://localhost:20500/test-warehouse/alltypes/y2050m11'" +
+          " cached in 'nonExistentTestPool' with replication = 7" +
+          " partition(year=2050, month=12) location" +
+          " 'file:///test-warehouse/alltypes/y2050m12' uncached",
+          "The specified cache pool does not exist: nonExistentTestPool");
+    }
+
+    // Test the limit for the number of partitions
+    StringBuilder stmt = new StringBuilder("alter table functional.alltypes add");
+    int year;
+    int month;
+    for (int i = 0; i < CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC; ++i) {
+      year = i/12 + 2050;
+      month = i%12 + 1;
+      stmt.append(String.format(" partition(year=%d, month=%d)", year, month));
+    }
+    AnalyzesOk(stmt.toString());
+    // Over the limit by one partition
+    year = CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC/12 + 2050;
+    month = CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC%12 + 1;
+    stmt.append(String.format(" partition(year=%d, month=%d)", year, month));
+    AnalysisError(stmt.toString(),
+        String.format("One ALTER TABLE ADD PARTITION cannot add more than %d partitions.",
+        CatalogOpExecutor.MAX_PARTITION_UPDATES_PER_RPC));
+
+    // If 'IF NOT EXISTS' is not used, ALTER TABLE ADD PARTITION cannot add a preexisting
+    // partition to a table.
+    AnalysisError("alter table functional.alltypes add partition(year=2050, month=1)" +
+        "partition(year=2010, month=1) partition(year=2050, month=2)",
+        "Partition spec already exists: (year=2010, month=1)");
+  }
+
+  @Test
   public void TestAlterTableAddReplaceColumns() throws AnalysisException {
     AnalyzesOk("alter table functional.alltypes add columns (new_col int)");
     AnalyzesOk("alter table functional.alltypes add columns (c1 string comment 'hi')");

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 9821694..25ec45c 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -1219,7 +1219,6 @@ public class AuthorizationTest {
     AuthzOk("ALTER TABLE functional_seq_snap.alltypes SET CACHED IN 'testPool'");
     AuthzOk("ALTER TABLE functional_seq_snap.alltypes RECOVER PARTITIONS");
 
-
     // Alter table and set location to a path the user does not have access to.
     AuthzError("ALTER TABLE functional_seq_snap.alltypes SET LOCATION " +
         "'hdfs://localhost:20500/test-warehouse/no_access'",
@@ -1234,6 +1233,18 @@ public class AuthorizationTest {
         "User '%s' does not have privileges to access: " +
         "hdfs://localhost:20500/test-warehouse/no_access");
 
+    // Add multiple partitions. User has access to location path.
+    AuthzOk("ALTER TABLE functional_seq_snap.alltypes ADD " +
+        "PARTITION(year=2011, month=1) " +
+        "PARTITION(year=2011, month=2) " +
+        "LOCATION 'hdfs://localhost:20500/test-warehouse/new_table'");
+    // For one new partition, the location is set to a path the user does not have
+    // access to.
+    AuthzError("ALTER TABLE functional_seq_snap.alltypes ADD " +
+        "PARTITION(year=2011, month=3) " +
+        "PARTITION(year=2011, month=4) LOCATION '/test-warehouse/no_access'",
+        "User '%s' does not have privileges to access: " +
+        "hdfs://localhost:20500/test-warehouse/no_access");
+
     // Different filesystem, user has permission to base path.
     AuthzError("ALTER TABLE functional_seq_snap.alltypes SET LOCATION " +
         "'hdfs://localhost:20510/test-warehouse/new_table'",

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 2b37bf3..14cd036 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -2046,12 +2046,25 @@ public class ParserTest extends FrontendTestBase {
     ParsesOk("ALTER TABLE Foo ADD PARTITION (i=NULL, j=2, k=NULL)");
     ParsesOk("ALTER TABLE Foo ADD PARTITION (i=abc, j=(5*8+10), k=!true and false)");
 
+    // Multiple partition specs
+    ParsesOk("ALTER TABLE Foo ADD PARTITION (i=1, s='one') " +
+        "PARTITION (i=2, s='two') PARTITION (i=3, s='three')");
+    ParsesOk("ALTER TABLE TestDb.Foo ADD PARTITION (i=1, s='one') LOCATION 'a/b' " +
+        "PARTITION (i=2, s='two') LOCATION 'c/d' " +
+        "PARTITION (i=3, s='three') " +
+        "PARTITION (i=4, s='four') LOCATION 'e/f'");
+    ParsesOk("ALTER TABLE TestDb.Foo ADD IF NOT EXISTS " +
+        "PARTITION (i=1, s='one') " +
+        "PARTITION (i=2, s='two') LOCATION 'c/d'");
+    ParserError("ALTER TABLE TestDb.Foo ADD " +
+        "PARTITION (i=1, s='one') " +
+        "IF NOT EXISTS PARTITION (i=2, s='two') LOCATION 'c/d'");
+
     // Location needs to be a string literal
     ParserError("ALTER TABLE TestDb.Foo ADD PARTITION (i=1, s='Hello') LOCATION a/b");
 
     // Caching ops
     ParsesOk("ALTER TABLE Foo ADD PARTITION (j=2) CACHED IN 'pool'");
-    ParsesOk("ALTER TABLE Foo ADD PARTITION (j=2) CACHED IN 'pool'");
     ParserError("ALTER TABLE Foo ADD PARTITION (j=2) CACHED 'pool'");
     ParserError("ALTER TABLE Foo ADD PARTITION (j=2) CACHED IN");
     ParserError("ALTER TABLE Foo ADD PARTITION (j=2) CACHED");
@@ -2066,6 +2079,13 @@ public class ParserTest extends FrontendTestBase {
     ParserError("ALTER TABLE Foo ADD PARTITION (j=2) CACHED IN 'pool' LOCATION 'a/b'");
     ParserError("ALTER TABLE Foo ADD PARTITION (j=2) UNCACHED LOCATION 'a/b'");
 
+    // Multiple partition specs with caching ops
+    ParsesOk("ALTER TABLE Foo ADD PARTITION (j=2) CACHED IN 'pool' " +
+        "PARTITION (j=3) UNCACHED " +
+        "PARTITION (j=4) CACHED IN 'pool' WITH replication = 3 " +
+        "PARTITION (j=5) LOCATION 'a/b' CACHED IN 'pool' " +
+        "PARTITION (j=5) LOCATION 'c/d' CACHED IN 'pool' with replication = 3");
+
     ParserError("ALTER TABLE Foo ADD IF EXISTS PARTITION (i=1, s='Hello')");
     ParserError("ALTER TABLE TestDb.Foo ADD (i=1, s='Hello')");
     ParserError("ALTER TABLE TestDb.Foo ADD (i=1)");

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
index 2b52a68..98b55f9 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ToSqlTest.java
@@ -1048,6 +1048,48 @@ public class ToSqlTest extends FrontendTestBase {
   }
 
   @Test
+  public void alterTableAddPartitionTest() {
+    // Add partition
+    testToSql(
+        "alter table functional.alltypes add partition (year=2050, month=1)",
+        "ALTER TABLE functional.alltypes ADD PARTITION (year=2050, month=1)");
+    // Add multiple partitions
+    testToSql(
+        "alter table functional.alltypes add partition (year=2050, month=1) " +
+        "partition (year=2050, month=2)",
+        "ALTER TABLE functional.alltypes ADD PARTITION (year=2050, month=1) " +
+        "PARTITION (year=2050, month=2)");
+    // with IF NOT EXISTS
+    testToSql(
+        "alter table functional.alltypes add if not exists " +
+        "partition (year=2050, month=1) " +
+        "partition (year=2050, month=2)",
+        "ALTER TABLE functional.alltypes ADD IF NOT EXISTS " +
+        "PARTITION (year=2050, month=1) " +
+        "PARTITION (year=2050, month=2)");
+    // with location
+    testToSql(
+        "alter table functional.alltypes add if not exists " +
+        "partition (year=2050, month=1) location 'hdfs://localhost:20500/y2050m1' " +
+        "partition (year=2050, month=2) location '/y2050m2'",
+        "ALTER TABLE functional.alltypes ADD IF NOT EXISTS "+
+        "PARTITION (year=2050, month=1) LOCATION 'hdfs://localhost:20500/y2050m1' " +
+        "PARTITION (year=2050, month=2) LOCATION 'hdfs://localhost:20500/y2050m2'");
+    // and caching
+    testToSql(
+        "alter table functional.alltypes add if not exists " +
+        "partition (year=2050, month=1) location 'hdfs://localhost:20500/y2050m1' " +
+        "cached in 'testPool' with replication=3 " +
+        "partition (year=2050, month=2) location '/y2050m2' " +
+        "uncached",
+        "ALTER TABLE functional.alltypes ADD IF NOT EXISTS "+
+        "PARTITION (year=2050, month=1) LOCATION 'hdfs://localhost:20500/y2050m1' " +
+        "CACHED IN 'testPool' WITH REPLICATION = 3 " +
+        "PARTITION (year=2050, month=2) LOCATION 'hdfs://localhost:20500/y2050m2' " +
+        "UNCACHED");
+  }
+
+  @Test
   public void testAnalyticExprs() {
     testToSql(
         "select sum(int_col) over (partition by id order by tinyint_col "

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
index 3230b9e..e067bc1 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
@@ -1003,3 +1003,107 @@ select * from i4155_alter;
 ---- TYPES
 INT, STRING
 ====
+---- QUERY
+# IMPALA-1670: Support adding multiple partitions in ALTER TABLE ADD PARTITION
+create table i1670A_alter (s string) partitioned by (i integer);
+alter table i1670A_alter add
+partition (i=1) location '/i1' cached in 'testPool' with replication=3
+partition (i=2) location '/i2'
+partition (i=3) uncached;
+show partitions i1670A_alter;
+---- RESULTS
+'1',-1,0,'0B','0B','3','TEXT','false',regex:.*/i1
+'2',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i2
+'3',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i=3
+'Total',-1,0,'0B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+# IMPALA-1670: Set up i1670C_alter table for the next test case.
+create table i1670C_alter (s string) partitioned by (i integer);
+alter table i1670C_alter add
+partition (i=2) location '/i2A' cached in 'testPool' with replication=2
+partition (i=4) location '/i4A' uncached;
+show partitions i1670C_alter;
+---- RESULTS
+'2',-1,0,'0B','0B','2','TEXT','false',regex:.*/i2A
+'4',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i4A
+'Total',-1,0,'0B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+# IMPALA-1670: If 'IF NOT EXISTS' is used, ALTER TABLE ADD PARTITION works with
+# preexisting partitions. Location and caching options of existing partitions are
+# not modified.
+alter table i1670C_alter add if not exists
+partition (i=1) location '/i1B'
+partition (i=2) location '/i2B' uncached
+partition (i=3) location '/i3B' cached in 'testPool' with replication=3
+partition (i=4) location '/i4B' cached in 'testPool' with replication=4;
+show partitions i1670C_alter;
+---- RESULTS
+'1',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i1B
+'2',-1,0,'0B','0B','2','TEXT','false',regex:.*/i2A
+'3',-1,0,'0B','0B','3','TEXT','false',regex:.*/i3B
+'4',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i4A
+'Total',-1,0,'0B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+# IMPALA-1670: Partitions without an explicit CACHED IN/UNCACHED clause inherit
+# caching options from the parent table.
+create table i1670D_alter (s string) partitioned by (i integer)
+cached in 'testPool' with replication=7;
+alter table i1670D_alter add
+partition (i=1) cached in 'testPool' with replication=5
+partition (i=2)
+partition (i=3) uncached
+partition (i=4);
+show partitions i1670D_alter;
+---- RESULTS
+'1',-1,0,'0B','0B','5','TEXT','false',regex:.*/i=1
+'2',-1,0,'0B','0B','7','TEXT','false',regex:.*/i=2
+'3',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/i=3
+'4',-1,0,'0B','0B','7','TEXT','false',regex:.*/i=4
+'Total',-1,0,'0B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+# IMPALA-1670: After INVALIDATE METADATA, Impala can access previously added
+# partitions and partition data.
+create table i1670E_alter (a int) partitioned by (x int);
+alter table i1670E_alter add partition (x=1)
+partition (x=2) uncached
+partition (x=3) location '/x3' cached in 'testPool' with replication=7;
+insert into i1670E_alter partition(x=1) values (1), (2), (3);
+insert into i1670E_alter partition(x=2) values (1), (2), (3), (4);
+insert into i1670E_alter partition(x=3) values (1);
+invalidate metadata i1670E_alter;
+====
+---- QUERY
+show partitions i1670E_alter;
+---- RESULTS
+'1',-1,1,regex:.*,'NOT CACHED','NOT CACHED','TEXT','false',regex:.*/x=1
+'2',-1,1,regex:.*,'NOT CACHED','NOT CACHED','TEXT','false',regex:.*/x=2
+'3',-1,1,regex:.*,regex:.*,'7','TEXT','false',regex:.*/x3
+'Total',-1,3,regex:.*,regex:.*,'','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
+select x, a from i1670E_alter order by x, a;
+---- RESULTS
+1,1
+1,2
+1,3
+2,1
+2,2
+2,3
+2,4
+3,1
+---- TYPES
+INT, INT
+====

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test b/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
index fe340c2..8beb04f 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
@@ -234,10 +234,37 @@ show tables in grant_rev_db
 STRING
 ====
 ---- QUERY
+# IMPALA-1670: User does not have privileges to access URI when adding partitions
+create table grant_rev_db.test_tbl_partitioned(i int) partitioned by (j int);
+alter table grant_rev_db.test_tbl_partitioned add
+partition (j=1)
+partition (j=2) location '$FILESYSTEM_PREFIX/test-warehouse/grant_rev_test_prt';
+---- CATCH
+does not have privileges to access: $NAMENODE/test-warehouse/grant_rev_test_prt
+====
+---- QUERY
+grant all on uri '$FILESYSTEM_PREFIX/test-warehouse/grant_rev_test_prt'
+to grant_revoke_test_ALL_URI;
+====
+---- QUERY
+# Should now have privileges to add partitions
+alter table grant_rev_db.test_tbl_partitioned add
+partition (j=1)
+partition (j=2) location '$FILESYSTEM_PREFIX/test-warehouse/grant_rev_test_prt';
+show partitions grant_rev_db.test_tbl_partitioned;
+---- RESULTS
+'1',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false',regex:.*/j=1
+'2',-1,0,'0B','NOT CACHED','NOT CACHED','TEXT','false','$NAMENODE/test-warehouse/grant_rev_test_prt'
+'Total',-1,0,'0B','0B','','','',''
+---- TYPES
+STRING, BIGINT, BIGINT, STRING, STRING, STRING, STRING, STRING, STRING
+====
+---- QUERY
 show grant role grant_revoke_test_ALL_URI
 ---- RESULTS
 'URI','','','','$NAMENODE/test-warehouse/grant_rev_test_tbl2','ALL',FALSE,regex:.+
 'URI','','','','$NAMENODE/test-warehouse/GRANT_REV_TEST_TBL3','ALL',FALSE,regex:.+
+'URI','','','','$NAMENODE/test-warehouse/grant_rev_test_prt','ALL',FALSE,regex:.+
 ---- LABELS
 scope, database, table, column, uri, privilege, grant_option, create_time
 ---- TYPES

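The failure above is all-or-nothing: if any LOCATION URI in the statement is
unauthorized, the whole ALTER TABLE is rejected and no partitions are added,
which is why the identical statement succeeds for both j=1 and j=2 once the URI
is granted. The same check as a Python sketch (get_impala_partition_info() is
added to ImpalaTestSuite later in this commit; the path is illustrative):

  tbl = 'grant_rev_db.test_tbl_partitioned'
  try:
    self.client.execute(
        "alter table %s add partition (j=1) "
        "partition (j=2) location '/test-warehouse/grant_rev_test_prt'" % tbl)
  except Exception as e:
    assert 'does not have privileges to access' in str(e)
  else:
    assert False, 'expected an authorization error'
  # The failed statement added nothing, not even the unproblematic j=1.
  assert [] == self.get_impala_partition_info(tbl, 'j')
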
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/tests/common/impala_test_suite.py
----------------------------------------------------------------------
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index e3b98a4..e90c5ac 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -197,6 +197,41 @@ class ImpalaTestSuite(BaseTestSuite):
       except Exception as e:
         LOG.info('Unexpected exception when executing ' + query_str + ' : ' + str(e))
 
+  def get_impala_partition_info(self, table_name, *include_fields):
+    """
+    Find information about the partitions of a table, as returned by a SHOW
+    PARTITIONS statement. Return a list that contains one tuple for each
+    partition; the trailing 'Total' row is dropped.
+
+    If 'include_fields' is not specified, the tuples contain all the fields
+    returned by SHOW PARTITIONS. Otherwise, return only those fields whose names
+    are listed in 'include_fields'. Field names are compared case-insensitively.
+    """
+    exec_result = self.client.execute('show partitions %s' % table_name)
+    field_schemas = exec_result.schema.fieldSchemas
+    fields_dict = {}
+    for idx, fs in enumerate(field_schemas):
+      fields_dict[fs.name.lower()] = idx
+
+    rows = exec_result.get_data().split('\n')
+    rows.pop()  # Drop the trailing 'Total' row.
+    fields_idx = []
+    for fn in include_fields:
+      fn = fn.lower()
+      assert fn in fields_dict, 'Invalid field: %s' % fn
+      fields_idx.append(fields_dict[fn])
+
+    result = []
+    for row in rows:
+      fields = row.split('\t')
+      if not fields_idx:
+        result_fields = fields
+      else:
+        result_fields = []
+        for i in fields_idx:
+          result_fields.append(fields[i])
+      result.append(tuple(result_fields))
+    return result
+
   def __verify_exceptions(self, expected_strs, actual_str, use_db):
     """
     Verifies that at least one of the strings in 'expected_str' is a substring of the
@@ -574,6 +609,15 @@ class ImpalaTestSuite(BaseTestSuite):
       raise RuntimeError(stderr)
     return stdout
 
+  def hive_partition_names(self, table_name):
+    """Find the names of the partitions of a table, as Hive sees them.
+
+    The return format is a list of strings. Each string names one partition,
+    with one 'column=value' component per partition column, in a format like
+    'column1=7/column2=8'.
+    """
+    return self.run_stmt_in_hive(
+        'show partitions %s' % table_name).split('\n')[1:-1]
+
   @classmethod
   def create_table_info_dimension(cls, exploration_strategy):
     # If the user has specified a specific set of table formats to run against, then

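These two helpers centralize partition inspection that several suites previously
implemented on their own (see the removals in the files below). A usage sketch,
assuming a table partitioned by (y int, z int) that holds a single partition
y=333/z=5309; 'db.tbl' is illustrative:

  # One tuple per partition (the 'Total' row is excluded), restricted to the
  # requested fields; names are matched case-insensitively against the
  # SHOW PARTITIONS result schema.
  assert [('333', '5309')] == self.get_impala_partition_info('db.tbl', 'y', 'z')

  # With no field list, each tuple carries every SHOW PARTITIONS column.
  all_fields = self.get_impala_partition_info('db.tbl')

  # The Hive-side helper returns 'col=value' path components instead.
  assert ['y=333/z=5309'] == self.hive_partition_names('db.tbl')
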
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/tests/metadata/test_hms_integration.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_hms_integration.py b/tests/metadata/test_hms_integration.py
index 8dca2f6..05bacfc 100644
--- a/tests/metadata/test_hms_integration.py
+++ b/tests/metadata/test_hms_integration.py
@@ -233,31 +233,6 @@ class TestHmsIntegration(ImpalaTestSuite):
       result[stat_names[i]] = stat_values[i]
     return result
 
-  def impala_partition_names(self, table_name):
-    """Find the names of the partitions of a table, as Impala sees them.
-
-    The return format is a list of lists of strings. Each string represents
-    a partition value of a given column.
-    """
-    rows = self.client.execute('show partitions %s' %
-                               table_name).get_data().split('\n')
-    rows.pop()
-    result = []
-    for row in rows:
-      fields = row.split('\t')
-      name = fields[0:-8]
-      result.append(name)
-    return result
-
-  def hive_partition_names(self, table_name):
-    """Find the names of the partitions of a table, as Hive sees them.
-
-    The return format is a list of strings. Each string represents a partition
-    value of a given column in a format like 'column1=7/column2=8'.
-    """
-    return self.run_stmt_in_hive(
-        'show partitions %s' % table_name).split('\n')[1:-1]
-
   def impala_columns(self, table_name):
     """
     Returns a dict with column names as the keys and dicts of type and comments
@@ -290,14 +265,22 @@ class TestHmsIntegration(ImpalaTestSuite):
                     for i in range(0, 16)])
 
   def assert_sql_error(self, engine, command, *strs_in_error):
-    reached_unreachable = False
+    """
+    Passes 'command' to the 'engine' callable (e.g. the execute method of a
+    BeeswaxConnection object) and makes sure that it raises an exception.
+    It also verifies that the string representation of the exception contains
+    all the strings listed in 'strs_in_error'.
+
+    If the call doesn't raise an exception, or the exception doesn't contain
+    one of the strings in 'strs_in_error', an AssertionError is raised.
+    """
+
     try:
       engine(command)
-      reached_unreachable = True
     except Exception as e:
       for str_in_error in strs_in_error:
         assert str_in_error in str(e)
-    if reached_unreachable:
+    else:
       assert False, '%s should have triggered an error containing %s' % (
           command, strs_in_error)
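
The try/except/else rewrite drops the unreachable-flag bookkeeping: the else
branch runs only when engine(command) did not raise. A usage sketch, borrowing
the error substring from the overlapping-partitions test added below; the table
name is illustrative:

  # Fails the test unless the ALTER raises an error whose text contains
  # every listed substring.
  self.assert_sql_error(
      self.client.execute,
      'alter table db.tbl add partition (x=2)',  # x=2 already exists in HMS
      'Partition already exists')
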
 
@@ -352,7 +335,7 @@ class TestHmsIntegration(ImpalaTestSuite):
             table_name)
         self.client.execute('compute incremental stats %s' % table_name)
         # Impala can see the partition's name
-        assert [['333', '5309']] == self.impala_partition_names(table_name)
+        assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
         # Impala's compute stats didn't alter Hive's knowledge of the partition
         assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
     self.add_hive_partition_table_stats_helper(vector, DbWrapper, TableWrapper)
@@ -393,7 +376,7 @@ class TestHmsIntegration(ImpalaTestSuite):
         self.client.execute(
             'insert into table %s partition (y=42, z=867) values (2)'
             % table_name)
-        assert [['42', '867']] == self.impala_partition_names(table_name)
+        assert [('42', '867')] == self.get_impala_partition_info(table_name, 'y', 'z')
         assert ['y=42/z=867'] == self.hive_partition_names(table_name)
 
   @pytest.mark.execute_serially
@@ -447,7 +430,7 @@ class TestHmsIntegration(ImpalaTestSuite):
 
       with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
                                    '(x int) partitioned by (y int)') as table_name:
-        assert [] == self.impala_partition_names(table_name)
+        assert [] == self.get_impala_partition_info(table_name, 'y')
         self.run_stmt_in_hive(
             'insert into table %s partition (y=33) values (44)'
             % table_name)
@@ -691,3 +674,127 @@ class TestHmsIntegration(ImpalaTestSuite):
         self.assert_sql_error(self.client.execute,
                               'describe %s' % table_name,
                               'Could not resolve path')
+
+  @pytest.mark.execute_serially
+  def test_add_overlapping_partitions(self, vector):
+    """
+    IMPALA-1670, IMPALA-4141: Test interoperability with Hive when adding overlapping
+    partitions to a table
+    """
+    with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
+      # Create a table in Impala.
+      with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
+                                   '(a int) partitioned by (x int)') as table_name:
+        # Trigger metadata load. No partitions exist yet in Impala.
+        assert [] == self.get_impala_partition_info(table_name, 'x')
+
+        # Add partition in Hive.
+        self.run_stmt_in_hive("alter table %s add partition (x=2)" % table_name)
+        # Impala is not aware of the new partition.
+        assert [] == self.get_impala_partition_info(table_name, 'x')
+
+        # Try to add partitions with caching in Impala, one of them (x=2) exists in HMS.
+        self.assert_sql_error(self.client.execute,
+            "alter table %s add partition (x=1) uncached "
+            "partition (x=2) cached in 'testPool' with replication=2 "
+            "partition (x=3) cached in 'testPool' with replication=3" % table_name,
+            "Partition already exists")
+        # No partitions were added in Impala.
+        assert [] == self.get_impala_partition_info(table_name, 'x')
+
+        # It should succeed with IF NOT EXISTS.
+        self.client.execute("alter table %s add if not exists partition (x=1) uncached "
+            "partition (x=2) cached in 'testPool' with replication=2 "
+            "partition (x=3) cached in 'testPool' with replication=3" % table_name)
+
+        # Hive sees all the partitions.
+        assert ['x=1', 'x=2', 'x=3'] == self.hive_partition_names(table_name)
+
+        # Impala sees the partition that already existed in HMS (x=2) and the
+        # newly added partitions (x=1) and (x=3).
+        # Caching has been applied only to the newly added partitions (x=1) and
+        # (x=3); the preexisting partition (x=2) was not modified.
+        partitions = self.get_impala_partition_info(table_name, 'x', 'Bytes Cached',
+            'Cache Replication')
+        assert [('1', 'NOT CACHED', 'NOT CACHED'),
+            ('2', 'NOT CACHED', 'NOT CACHED'),
+            ('3', '0B', '3')] == partitions
+
+        # Try to add a location to a partition that is already in the catalog
+        # cache (x=1).
+        self.client.execute("alter table %s add if not exists "
+            "partition (x=1) location '/_X_1'" % table_name)
+        # The (x=1) partition's location hasn't changed.
+        (x1_value, x1_location) = self.get_impala_partition_info(table_name, 'x',
+            'Location')[0]
+        assert '1' == x1_value
+        assert x1_location.endswith("/x=1")
+
+  @pytest.mark.execute_serially
+  def test_add_preexisting_partitions_with_data(self, vector):
+    """
+    IMPALA-1670, IMPALA-4141: After adding partitions that already exist in HMS,
+    Impala can access the partition data.
+    """
+    with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
+      # Create a table in Impala.
+      with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
+                                   '(a int) partitioned by (x int)') as table_name:
+        # Trigger metadata load. No partitions exist yet in Impala.
+        assert [] == self.get_impala_partition_info(table_name, 'x')
+
+        # Add partitions in Hive.
+        self.run_stmt_in_hive("alter table %s add partition (x=1) "
+            "partition (x=2) "
+            "partition (x=3)" % table_name)
+        # Insert rows in Hive
+        self.run_stmt_in_hive("insert into %s partition(x=1) values (1), (2), (3)"
+            % table_name)
+        self.run_stmt_in_hive("insert into %s partition(x=2) values (1), (2), (3), (4)"
+            % table_name)
+        self.run_stmt_in_hive("insert into %s partition(x=3) values (1)"
+            % table_name)
+        # No partitions exist yet in Impala.
+        assert [] == self.get_impala_partition_info(table_name, 'x')
+
+        # Add the same partitions in Impala with IF NOT EXISTS.
+        self.client.execute("alter table %s add if not exists partition (x=1) "\
+            "partition (x=2) "
+            "partition (x=3)" % table_name)
+        # Impala sees the partitions
+        assert [('1',), ('2',), ('3',)] == self.get_impala_partition_info(table_name, 'x')
+        # Data exists in Impala
+        assert ['1\t1', '1\t2', '1\t3',
+            '2\t1', '2\t2', '2\t3', '2\t4',
+            '3\t1'] == \
+            self.client.execute('select x, a from %s order by x, a' %
+            table_name).get_data().split('\n')
+
+  @pytest.mark.execute_serially
+  def test_impala_partitions_accessible_in_hive(self, vector):
+    """
+    IMPALA-1670, IMPALA-4141: Partitions added in Impala are accessible through Hive
+    """
+    with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
+      # Create a table in Impala.
+      with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
+                                   '(a int) partitioned by (x int)') as table_name:
+        # Add partitions in Impala.
+        self.client.execute("alter table %s add partition (x=1) "
+            "partition (x=2) "
+            "partition (x=3)" % table_name)
+        # Insert rows in Impala
+        self.client.execute("insert into %s partition(x=1) values (1), (2), (3)"
+            % table_name)
+        self.client.execute("insert into %s partition(x=2) values (1), (2), (3), (4)"
+            % table_name)
+        self.client.execute("insert into %s partition(x=3) values (1)"
+            % table_name)
+
+        # Hive sees the partitions
+        assert ['x=1', 'x=2', 'x=3'] == self.hive_partition_names(table_name)
+        # Data exists in Hive
+        data = self.run_stmt_in_hive('select x, a from %s order by x, a' % table_name)
+        assert ['x,a',
+            '1,1', '1,2', '1,3',
+            '2,1', '2,2', '2,3', '2,4',
+            '3,1'] == data.strip().split('\n')

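One subtlety in test_add_overlapping_partitions above is worth restating: IF NOT
EXISTS skips partitions that already exist in HMS entirely, cache clauses
included, so cache ops take effect only on the partitions the statement actually
creates. Condensed as a sketch under the same fixtures ('table_name' exists and
x=2 was added via Hive):

  self.client.execute(
      "alter table %s add if not exists "
      "partition (x=1) uncached "
      "partition (x=2) cached in 'testPool' with replication=2 "
      "partition (x=3) cached in 'testPool' with replication=3" % table_name)
  # x=2 keeps its HMS state; only the newly created x=3 is cached.
  assert [('1', 'NOT CACHED'), ('2', 'NOT CACHED'), ('3', '3')] == \
      self.get_impala_partition_info(table_name, 'x', 'Cache Replication')
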
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/c452595b/tests/metadata/test_refresh_partition.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_refresh_partition.py b/tests/metadata/test_refresh_partition.py
index 2489c81..4602ebc 100644
--- a/tests/metadata/test_refresh_partition.py
+++ b/tests/metadata/test_refresh_partition.py
@@ -43,29 +43,6 @@ class TestRefreshPartition(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
         create_uncompressed_text_dimension(cls.get_workload()))
 
-  def impala_partition_names(self, table_name):
-    """
-    Find the names of the partitions of a table, as Impala sees them.
-    The return format is a list of lists of strings. Each string represents
-    a partition value of a given column.
-    """
-    rows = self.client.execute('show partitions %s' %
-                               table_name).get_data().split('\n')
-    """
-    According to the output of 'show partitions' query, the first (n-8)
-    columns are the columns on which the table is partitioned
-    """
-    return [row.split('\t')[0:-8] for row in rows[:-1]]
-
-  def hive_partition_names(self, table_name):
-    """
-    Find the names of the partitions of a table, as Hive sees them.
-    The return format is a list of strings. Each string represents a partition
-    value of a given column in a format like 'column1=7/column2=8'.
-    """
-    return self.run_stmt_in_hive(
-        'show partitions %s' % table_name).split('\n')[1:-1]
-
   def test_add_hive_partition_and_refresh(self, vector, unique_database):
     """
     Partition added in Hive can be viewed in Impala after refreshing
@@ -75,14 +52,14 @@ class TestRefreshPartition(ImpalaTestSuite):
     self.client.execute(
         'create table %s (x int) partitioned by (y int, z int)' %
         table_name)
-    assert [] == self.impala_partition_names(table_name)
+    assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
     self.run_stmt_in_hive(
         'alter table %s add partition (y=333, z=5309)' % table_name)
     # Make sure Impala can't see the partition yet
-    assert [] == self.impala_partition_names(table_name)
+    assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
     self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
     # Impala can see the partition
-    assert [['333', '5309']] == self.impala_partition_names(table_name)
+    assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     # Impala's refresh didn't alter Hive's knowledge of the partition
     assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
 
@@ -97,14 +74,14 @@ class TestRefreshPartition(ImpalaTestSuite):
         table_name)
     self.client.execute(
         'alter table %s add partition (y=333, z=5309)' % table_name)
-    assert [['333', '5309']] == self.impala_partition_names(table_name)
+    assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     self.run_stmt_in_hive(
         'alter table %s drop partition (y=333, z=5309)' % table_name)
     # Make sure Impala can still see the partition
-    assert [['333', '5309']] == self.impala_partition_names(table_name)
+    assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
     # Impala can see the partition is not there anymore
-    assert [] == self.impala_partition_names(table_name)
+    assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
     # Impala's refresh didn't alter Hive's knowledge of the partition
     assert [] == self.hive_partition_names(table_name)
 
@@ -142,10 +119,10 @@ class TestRefreshPartition(ImpalaTestSuite):
         table_name)
     self.client.execute(
         'alter table %s add partition (y=333, z=5309)' % table_name)
-    assert [['333', '5309']] == self.impala_partition_names(table_name)
+    assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
     self.client.execute('refresh %s partition (y=71, z=8857)' % table_name)
-    assert [['333', '5309']] == self.impala_partition_names(table_name)
+    assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
     assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
 
   def test_remove_data_and_refresh(self, vector, unique_database):