Posted to commits@hive.apache.org by ha...@apache.org on 2014/01/16 23:01:54 UTC

svn commit: r1558928 [7/8] - in /hive/trunk: metastore/if/ metastore/src/gen/thrift/gen-cpp/ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen/thrift/gen-php/metastore/ metastore/src/gen/thrift/gen-py/hive_me...

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Jan 16 22:01:53 2014
@@ -57,6 +57,8 @@ import org.apache.hadoop.hive.common.cli
 import org.apache.hadoop.hive.common.metrics.Metrics;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
@@ -622,9 +624,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("create_database", success, ex);
@@ -816,9 +816,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("drop_database", success, ex);
@@ -837,9 +835,7 @@ public class HiveMetaStore extends Thrif
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_databases", ret != null, ex);
@@ -859,9 +855,7 @@ public class HiveMetaStore extends Thrif
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_all_databases", ret != null, ex);
@@ -907,9 +901,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("create_type", success, ex);
@@ -935,9 +927,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_type", ret != null, ex);
@@ -985,9 +975,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("drop_type", success, ex);
@@ -1139,9 +1127,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("create_table", success, ex, tbl.getTableName());
@@ -1358,9 +1344,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("drop_table", success, ex, name);
@@ -1401,9 +1385,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_table", t != null, ex, name);
@@ -1451,9 +1433,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof UnknownDBException) {
           throw (UnknownDBException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_multi_table", tables != null, ex, join(names, ","));
@@ -1485,9 +1465,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof UnknownDBException) {
           throw (UnknownDBException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_table_names_by_filter", tables != null, ex, join(tables, ","));
@@ -1631,9 +1609,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("append_partition", ret != null, ex, tableName);
@@ -1641,37 +1617,125 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
-    private int add_partitions_core(final RawStore ms, final List<Partition> parts)
-        throws MetaException, InvalidObjectException, AlreadyExistsException {
-      String db = parts.get(0).getDbName();
-      String tblName = parts.get(0).getTableName();
-      logInfo("add_partitions : db=" + db + " tbl=" + tblName);
+    private static class PartValEqWrapper {
+      Partition partition;
+
+      public PartValEqWrapper(Partition partition) {
+        this.partition = partition;
+      }
 
+      @Override
+      public int hashCode() {
+        return partition.isSetValues() ? partition.getValues().hashCode() : 0;
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null || !(obj instanceof PartValEqWrapper)) return false;
+        Partition p1 = this.partition, p2 = ((PartValEqWrapper)obj).partition;
+        if (!p1.isSetValues() || !p2.isSetValues()) return p1.isSetValues() == p2.isSetValues();
+        if (p1.getValues().size() != p2.getValues().size()) return false;
+        for (int i = 0; i < p1.getValues().size(); ++i) {
+          String v1 = p1.getValues().get(i), v2 = p2.getValues().get(i);
+          if (v1 == null ? v2 != null : !v1.equals(v2)) return false;
+        }
+        return true;
+      }
+    }
+
+    private List<Partition> add_partitions_core(
+        RawStore ms, String dbName, String tblName, List<Partition> parts, boolean ifNotExists)
+            throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
+      logInfo("add_partitions");
       boolean success = false;
-      Map<Partition, Boolean> addedPartitions = new HashMap<Partition, Boolean>();
+      // Ensures that the list doesn't have dups, and keeps track of directories we have created.
+      Map<PartValEqWrapper, Boolean> addedPartitions = new HashMap<PartValEqWrapper, Boolean>();
+      List<Partition> result = new ArrayList<Partition>();
+      List<Partition> existingParts = null;
       try {
         ms.openTransaction();
+        Table tbl = ms.getTable(dbName, tblName);
+        if (tbl == null) {
+          throw new InvalidObjectException("Unable to add partitions because "
+              + "database or table " + dbName + "." + tblName + " does not exist");
+        }
         for (Partition part : parts) {
-          // No environment context is passed.
-          Entry<Partition, Boolean> e = add_partition_core_notxn(ms, part, null);
-          addedPartitions.put(e.getKey(), e.getValue());
+          if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
+            throw new MetaException("Partition does not belong to target table "
+                + dbName + "." + tblName + ": " + part);
+          }
+          boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
+          if (!shouldAdd) {
+            if (existingParts == null) {
+              existingParts = new ArrayList<Partition>();
+            }
+            existingParts.add(part);
+            LOG.info("Not adding partition " + part + " as it already exists");
+            continue;
+          }
+          boolean madeDir = createLocationForAddedPartition(tbl, part);
+          if (addedPartitions.put(new PartValEqWrapper(part), madeDir) != null) {
+            // Technically, for ifNotExists case, we could insert one and discard the other
+            // because the first one now "exists", but it seems better to report the problem
+            // upstream as such a command doesn't make sense.
+            throw new MetaException("Duplicate partitions in the list: " + part);
+          }
+          initializeAddedPartition(tbl, part, madeDir);
+          result.add(part);
         }
-        success = ms.commitTransaction();
+        if (!result.isEmpty()) {
+          success = ms.addPartitions(dbName, tblName, result);
+        } else {
+          success = true;
+        }
+        success = success && ms.commitTransaction();
       } finally {
         if (!success) {
           ms.rollbackTransaction();
-          for (Entry<Partition, Boolean> e : addedPartitions.entrySet()) {
+          for (Entry<PartValEqWrapper, Boolean> e : addedPartitions.entrySet()) {
             if (e.getValue()) {
-              wh.deleteDir(new Path(e.getKey().getSd().getLocation()), true);
+              wh.deleteDir(new Path(e.getKey().partition.getSd().getLocation()), true);
               // we just created this directory - it's not a case of pre-creation, so we nuke
             }
           }
+          for (Partition part : parts) {
+            fireMetaStoreAddPartitionEvent(ms, part, null, success);
+          }
+        } else {
+          for (Partition part : result) {
+            fireMetaStoreAddPartitionEvent(ms, part, null, success);
+          }
+          if (existingParts != null) {
+            // The request succeeded, but these partitions were not added because they already existed.
+            for (Partition part : existingParts) {
+              fireMetaStoreAddPartitionEvent(ms, part, null, false);
+            }
+          }
         }
-        for (Partition part : parts) {
-          fireMetaStoreAddPartitionEvent(ms, part, null, success);
+      }
+      return result;
+    }
+
+    @Override
+    public AddPartitionsResult add_partitions_req(AddPartitionsRequest request)
+        throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+      AddPartitionsResult result = new AddPartitionsResult();
+      if (request.getParts().isEmpty()) {
+        return result;
+      }
+      try {
+        List<Partition> parts = add_partitions_core(getMS(), request.getDbName(),
+            request.getTblName(), request.getParts(), request.isIfNotExists());
+        if (request.isNeedResult()) {
+          result.setPartitions(parts);
         }
+      } catch (TException te) {
+        throw te;
+      } catch (Exception e) {
+        throw newMetaException(e);
       }
-      return parts.size();
+      return result;
     }
 
     public int add_partitions(final List<Partition> parts) throws MetaException,
@@ -1684,7 +1748,10 @@ public class HiveMetaStore extends Thrif
       Integer ret = null;
       Exception ex = null;
       try {
-        ret = add_partitions_core(getMS(), parts);
+        // Old API assumed all partitions belong to the same table; keep the same assumption
+        ret = add_partitions_core(getMS(), parts.get(0).getDbName(),
+            parts.get(0).getTableName(), parts, false).size();
+        assert ret == parts.size();
       } catch (Exception e) {
         ex = e;
         if (e instanceof MetaException) {
@@ -1694,9 +1761,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         String tableName = parts.get(0).getTableName();
@@ -1705,149 +1770,131 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
+    private boolean startAddPartition(
+        RawStore ms, Partition part, boolean ifNotExists) throws MetaException, TException {
+      firePreEvent(new PreAddPartitionEvent(part, this));
+      MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
+          partitionValidationPattern);
+      boolean doesExist = ms.doesPartitionExist(
+          part.getDbName(), part.getTableName(), part.getValues());
+      if (doesExist && !ifNotExists) {
+        throw new AlreadyExistsException("Partition already exists: " + part);
+      }
+      return !doesExist;
+    }
+
     /**
-     * An implementation of add_partition_core that does not commit
-     * transaction or rollback transaction as part of its operation
-     * - it is assumed that will be tended to from outside this call
-     *
-     * @param ms
-     * @param part
-     * @param envContext
-     *          parameters passed by the client
-     * @return
-     * @throws InvalidObjectException
-     * @throws AlreadyExistsException
-     * @throws MetaException
+     * Handles the location for a partition being created.
+     * @param tbl Table.
+     * @param part Partition.
+     * @return Whether the partition SD location is set to a newly created directory.
      */
-    private Entry<Partition, Boolean> add_partition_core_notxn(
-        final RawStore ms, final Partition part,
-        final EnvironmentContext envContext)
-        throws InvalidObjectException, AlreadyExistsException, MetaException {
-      boolean success = false, madeDir = false;
+    private boolean createLocationForAddedPartition(
+        final Table tbl, final Partition part) throws MetaException {
       Path partLocation = null;
-      Table tbl = null;
-      try {
-        firePreEvent(new PreAddPartitionEvent(part, this));
-
-        MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
-            partitionValidationPattern);
-
-        Partition old_part = null;
-        try {
-          old_part = ms.getPartition(part.getDbName(), part
-              .getTableName(), part.getValues());
-        } catch (NoSuchObjectException e) {
-          // this means there is no existing partition
-          old_part = null;
-        }
-        if (old_part != null) {
-          throw new AlreadyExistsException("Partition already exists:" + part);
-        }
-        tbl = ms.getTable(part.getDbName(), part.getTableName());
-        if (tbl == null) {
-          throw new InvalidObjectException(
-              "Unable to add partition because table or database do not exist");
-        }
+      String partLocationStr = null;
+      if (part.getSd() != null) {
+        partLocationStr = part.getSd().getLocation();
+      }
 
-        String partLocationStr = null;
-        if (part.getSd() != null) {
-          partLocationStr = part.getSd().getLocation();
+      if (partLocationStr == null || partLocationStr.isEmpty()) {
+        // set default location if not specified and this is
+        // a physical table partition (not a view)
+        if (tbl.getSd().getLocation() != null) {
+          partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+              .makePartName(tbl.getPartitionKeys(), part.getValues()));
         }
-
-        if (partLocationStr == null || partLocationStr.isEmpty()) {
-          // set default location if not specified and this is
-          // a physical table partition (not a view)
-          if (tbl.getSd().getLocation() != null) {
-            partLocation = new Path(tbl.getSd().getLocation(), Warehouse
-                .makePartName(tbl.getPartitionKeys(), part.getValues()));
-          }
-
-        } else {
-          if (tbl.getSd().getLocation() == null) {
-            throw new MetaException(
-                "Cannot specify location for a view partition");
-          }
-          partLocation = wh.getDnsPath(new Path(partLocationStr));
+      } else {
+        if (tbl.getSd().getLocation() == null) {
+          throw new MetaException("Cannot specify location for a view partition");
         }
+        partLocation = wh.getDnsPath(new Path(partLocationStr));
+      }
 
-        if (partLocation != null) {
-          part.getSd().setLocation(partLocation.toString());
-
+      boolean result = false;
+      if (partLocation != null) {
+        part.getSd().setLocation(partLocation.toString());
 
-          // Check to see if the directory already exists before calling
-          // mkdirs() because if the file system is read-only, mkdirs will
-          // throw an exception even if the directory already exists.
-          if (!wh.isDir(partLocation)) {
-            if (!wh.mkdirs(partLocation)) {
-              throw new MetaException(partLocation
-                  + " is not a directory or unable to create one");
-            }
-            madeDir = true;
+        // Check to see if the directory already exists before calling
+        // mkdirs() because if the file system is read-only, mkdirs will
+        // throw an exception even if the directory already exists.
+        if (!wh.isDir(partLocation)) {
+          if (!wh.mkdirs(partLocation)) {
+            throw new MetaException(partLocation
+                + " is not a directory or unable to create one");
           }
+          result = true;
         }
+      }
+      return result;
+    }
 
-        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
-            !MetaStoreUtils.isView(tbl)) {
-          MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir);
-        }
+    private void initializeAddedPartition(
+        final Table tbl, final Partition part, boolean madeDir) throws MetaException {
+      if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
+          !MetaStoreUtils.isView(tbl)) {
+        MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir);
+      }
 
-        // set create time
-        long time = System.currentTimeMillis() / 1000;
-        part.setCreateTime((int) time);
-        if (part.getParameters() == null ||
-            part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
-          part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
-        }
-
-        // Inherit table properties into partition properties.
-        Map<String, String> tblParams = tbl.getParameters();
-        String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
-        // Default value is empty string in which case no properties will be inherited.
-        // * implies all properties needs to be inherited
-        Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
-        if (inheritKeys.contains("*")) {
-          inheritKeys = tblParams.keySet();
-        }
-
-        for (String key : inheritKeys) {
-          String paramVal = tblParams.get(key);
-          if (null != paramVal) { // add the property only if it exists in table properties
-            part.putToParameters(key, paramVal);
-          }
-        }
+      // set create time
+      long time = System.currentTimeMillis() / 1000;
+      part.setCreateTime((int) time);
+      if (part.getParameters() == null ||
+          part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
+        part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+      }
 
-        success = ms.addPartition(part);
+      // Inherit table properties into partition properties.
+      Map<String, String> tblParams = tbl.getParameters();
+      String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim();
+      // The default value is an empty string, in which case no properties are inherited.
+      // "*" implies all properties need to be inherited.
+      Set<String> inheritKeys = new HashSet<String>(Arrays.asList(inheritProps.split(",")));
+      if (inheritKeys.contains("*")) {
+        inheritKeys = tblParams.keySet();
+      }
 
-      } finally {
-        if (!success) {
-          if (madeDir) {
-            wh.deleteDir(partLocation, true);
-          }
+      for (String key : inheritKeys) {
+        String paramVal = tblParams.get(key);
+        if (null != paramVal) { // add the property only if it exists in table properties
+          part.putToParameters(key, paramVal);
         }
       }
-      Map<Partition, Boolean> returnVal = new HashMap<Partition, Boolean>();
-      returnVal.put(part, madeDir);
-      return returnVal.entrySet().iterator().next();
     }
 
     private Partition add_partition_core(final RawStore ms,
         final Partition part, final EnvironmentContext envContext)
-        throws InvalidObjectException, AlreadyExistsException, MetaException {
+        throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
       boolean success = false;
       Partition retPtn = null;
       try {
         ms.openTransaction();
-        retPtn = add_partition_core_notxn(ms, part, envContext).getKey();
+        Table tbl = ms.getTable(part.getDbName(), part.getTableName());
+        if (tbl == null) {
+          throw new InvalidObjectException(
+              "Unable to add partition because table or database do not exist");
+        }
+        boolean shouldAdd = startAddPartition(ms, part, false);
+        assert shouldAdd; // startAddPartition would have thrown if the partition already existed
+        boolean madeDir = createLocationForAddedPartition(tbl, part);
+        try {
+          initializeAddedPartition(tbl, part, madeDir);
+          success = ms.addPartition(part);
+        } finally {
+          if (!success && madeDir) {
+            wh.deleteDir(new Path(part.getSd().getLocation()), true);
+          }
+        }
         // we proceed only if we'd actually succeeded anyway, otherwise,
         // we'd have thrown an exception
-        success = ms.commitTransaction();
+        success = success && ms.commitTransaction();
       } finally {
         if (!success) {
           ms.rollbackTransaction();
         }
         fireMetaStoreAddPartitionEvent(ms, part, envContext, success);
       }
-      return retPtn;
+      return part;
     }
 
     private void fireMetaStoreAddPartitionEvent(final RawStore ms,
@@ -1888,9 +1935,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof AlreadyExistsException) {
           throw (AlreadyExistsException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("add_partition", ret != null, ex, part != null ?  part.getTableName(): null);
@@ -2091,9 +2136,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_partition", ret != null, ex, tbl_name);
@@ -2141,9 +2184,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof NoSuchObjectException) {
           throw (NoSuchObjectException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_partitions", ret != null, ex, tbl_name);
@@ -2190,9 +2231,7 @@ public class HiveMetaStore extends Thrif
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_partition_names", ret != null, ex, tbl_name);
@@ -2272,9 +2311,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("alter_partition", oldPart != null, ex, tbl_name);
@@ -2342,9 +2379,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("alter_partition", oldParts != null, ex, tbl_name);
@@ -2382,9 +2417,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof InvalidOperationException) {
           throw (InvalidOperationException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("alter_index", success, ex, base_table_name);
@@ -2444,9 +2477,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof InvalidOperationException) {
           throw (InvalidOperationException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("alter_table", success, ex, name);
@@ -2466,9 +2497,7 @@ public class HiveMetaStore extends Thrif
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_tables", ret != null, ex);
@@ -2488,9 +2517,7 @@ public class HiveMetaStore extends Thrif
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_all_tables", ret != null, ex);
@@ -2535,9 +2562,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof MetaException) {
           throw (MetaException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_fields", ret != null, ex, tableName);
@@ -2752,9 +2777,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("append_partition_by_name", ret != null, ex, tbl_name);
@@ -2914,9 +2937,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         String tableName = indexTable != null ? indexTable.getTableName() : null;
@@ -3114,9 +3135,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_index_names", ret != null, ex, tblName);
@@ -3382,9 +3401,7 @@ public class HiveMetaStore extends Thrif
       } else if (e instanceof TException) {
         throw (TException) e;
       } else {
-        MetaException me = new MetaException(e.toString());
-        me.initCause(e);
-        throw me;
+        throw newMetaException(e);
       }
     }
 
@@ -3926,9 +3943,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("cancel_delegation_token", success, ex);
@@ -3953,9 +3968,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("renew_delegation_token", ret != null, ex);
@@ -3987,9 +4000,7 @@ public class HiveMetaStore extends Thrif
         } else if (e instanceof TException) {
           throw (TException) e;
         } else {
-          MetaException me = new MetaException(e.toString());
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       } finally {
         endFunction("get_delegation_token", ret != null, ex);
@@ -4033,9 +4044,7 @@ public class HiveMetaStore extends Thrif
         } else if (original instanceof MetaException) {
           throw (MetaException) original;
         } else {
-          MetaException me = new MetaException(original.toString());
-          me.initCause(original);
-          throw me;
+          throw newMetaException(original);
         }
       } finally {
                 endFunction("markPartitionForEvent", tbl != null, ex, tbl_name);
@@ -4069,9 +4078,7 @@ public class HiveMetaStore extends Thrif
         } else if (original instanceof MetaException) {
           throw (MetaException) original;
         } else {
-          MetaException me = new MetaException(original.toString());
-          me.initCause(original);
-          throw me;
+          throw newMetaException(original);
         }
       } finally {
                 endFunction("isPartitionMarkedForEvent", ret != null, ex, tbl_name);
@@ -4106,14 +4113,18 @@ public class HiveMetaStore extends Thrif
           throw (MetaException)e;
         } else {
           ex = e;
-          MetaException me = new MetaException();
-          me.initCause(e);
-          throw me;
+          throw newMetaException(e);
         }
       }
       endFunction("partition_name_has_valid_characters", true, null);
       return ret;
     }
+
+    private static MetaException newMetaException(Exception e) {
+      MetaException me = new MetaException(e.toString());
+      me.initCause(e);
+      return me;
+    }
   }
 
   public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {

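The PartValEqWrapper introduced above compares partitions by their value
list only, so two Partition objects that differ in storage descriptor or
parameters but share the same values map to the same key in addedPartitions
and trip the duplicate check. A minimal sketch of that contract
(illustrative only, since PartValEqWrapper is a private inner class;
Partition is the Thrift-generated
org.apache.hadoop.hive.metastore.api.Partition):

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class PartValEqDemo {
    public static void main(String[] args) {
      Partition p1 = new Partition();
      p1.setValues(Arrays.asList("2014", "01"));
      Partition p2 = new Partition();
      p2.setValues(Arrays.asList("2014", "01"));
      p2.putToParameters("note", "same values, different metadata");
      // Values-only equality: both partitions wrap to equal keys, so the
      // second put() into addedPartitions returns a previous value and
      // add_partitions_core throws "Duplicate partitions in the list".
      System.out.println(p1.getValues().equals(p2.getValues())); // true
    }
  }
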
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Thu Jan 16 22:01:53 2014
@@ -32,10 +32,12 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import javax.security.auth.login.LoginException;
 
@@ -43,6 +45,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
@@ -392,6 +396,21 @@ public class HiveMetaStoreClient impleme
     return client.add_partitions(new_parts);
   }
 
+  @Override
+  public List<Partition> add_partitions(
+      List<Partition> parts, boolean ifNotExists, boolean needResults)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+    if (parts.isEmpty()) {
+      return needResults ? new ArrayList<Partition>() : null;
+    }
+    Partition part = parts.get(0);
+    AddPartitionsRequest req = new AddPartitionsRequest(
+        part.getDbName(), part.getTableName(), parts, ifNotExists);
+    req.setNeedResult(needResults);
+    AddPartitionsResult result = client.add_partitions_req(req);
+    return needResults ? result.getPartitions() : null;
+  }
+
   /**
    * @param table_name
    * @param db_name

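The new client overload above collapses the whole list into a single
add_partitions_req round trip. A hedged usage sketch (assumes a connected
HiveMetaStoreClient named client and two fully populated Partition objects
p1 and p2 for the same table; the variable names are illustrative):

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.Partition;

  // With ifNotExists == true, partitions that already exist are skipped on
  // the server instead of raising AlreadyExistsException; with needResults
  // == false the call returns null and the server skips sending the added
  // partitions back.
  List<Partition> added = client.add_partitions(
      Arrays.asList(p1, p2), true /* ifNotExists */, true /* needResults */);
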
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Thu Jan 16 22:01:53 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -330,8 +331,19 @@ public interface IMetaStoreClient {
    *           Thrift exception
    */
   public int add_partitions(List<Partition> partitions)
-      throws InvalidObjectException, AlreadyExistsException, MetaException,
-      TException;
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+
+  /**
+   * Add partitions to the table.
+   *
+   * @param partitions The partitions to add
+   * @param ifNotExists only add partitions if they don't exist
+   * @param needResults Whether the results are needed
+   * @return the partitions that were added, or null if !needResults
+   */
+  public List<Partition> add_partitions(
+      List<Partition> partitions, boolean ifNotExists, boolean needResults)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
 
   /**
    * @param tblName

Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Jan 16 22:01:53 2014
@@ -1173,6 +1173,58 @@ public class ObjectStore implements RawS
             .getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories());
   }
 
+  @Override
+  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+      throws InvalidObjectException, MetaException {
+    boolean success = false;
+    openTransaction();
+    try {
+      List<MTablePrivilege> tabGrants = null;
+      List<MTableColumnPrivilege> tabColumnGrants = null;
+      MTable table = this.getMTable(dbName, tblName);
+      if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
+        tabGrants = this.listAllTableGrants(dbName, tblName);
+        tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
+      }
+      List<Object> toPersist = new ArrayList<Object>();
+      for (Partition part : parts) {
+        if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
+          throw new MetaException("Partition does not belong to target table "
+              + dbName + "." + tblName + ": " + part);
+        }
+        MPartition mpart = convertToMPart(part, true);
+        toPersist.add(mpart);
+        int now = (int)(System.currentTimeMillis()/1000);
+        if (tabGrants != null) {
+          for (MTablePrivilege tab: tabGrants) {
+            toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(),
+                tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
+                tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
+          }
+        }
+
+        if (tabColumnGrants != null) {
+          for (MTableColumnPrivilege col : tabColumnGrants) {
+            toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(),
+                col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
+                now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
+          }
+        }
+      }
+      if (toPersist.size() > 0) {
+        pm.makePersistentAll(toPersist);
+      }
+
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
+  @Override
   public boolean addPartition(Partition part) throws InvalidObjectException,
       MetaException {
     boolean success = false;
@@ -5979,6 +6031,40 @@ public class ObjectStore implements RawS
     }
   }
 
+  @Override
+  public boolean doesPartitionExist(String dbName, String tableName, List<String> partVals)
+      throws MetaException {
+    boolean success = false;
+    try {
+      openTransaction();
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
+
+      // TODO: this could also be passed from upper layer; or this method should filter the list.
+      MTable mtbl = getMTable(dbName, tableName);
+      if (mtbl == null) {
+        success = commitTransaction();
+        return false;
+      }
+
+      Query query = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where table.tableName == t1 && table.database.name == t2 && partitionName == t3");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
+      query.setUnique(true);
+      query.setResult("partitionName");
+      String name = Warehouse.makePartName(
+          convertToFieldSchemas(mtbl.getPartitionKeys()), partVals);
+      String result = (String)query.execute(tableName, dbName, name);
+      success = commitTransaction();
+      return result != null;
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+  }
+
   /** Add this to code to debug lexer if needed. DebugTokenStream may also be added here. */
   private void debugLexer(CommonTokenStream stream, FilterLexer lexer) {
     try {

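doesPartitionExist above deliberately avoids materializing an MPartition:
it selects only partitionName and compares it to the canonical name that
Warehouse.makePartName builds from the partition keys and values. Roughly
(a sketch; partitionKeys is assumed to be the table's List<FieldSchema>
with keys ds and hr):

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.Warehouse;

  // Partition names are stored as "key1=val1/key2=val2" (values escaped as
  // needed), so the existence check reduces to one unique-result lookup.
  String name = Warehouse.makePartName(partitionKeys, Arrays.asList("2014-01-16", "23"));
  // name is "ds=2014-01-16/hr=23"
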
Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original)
+++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Thu Jan 16 22:01:53 2014
@@ -123,9 +123,15 @@ public interface RawStore extends Config
   public abstract boolean addPartition(Partition part)
       throws InvalidObjectException, MetaException;
 
+  public abstract boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+      throws InvalidObjectException, MetaException;
+
   public abstract Partition getPartition(String dbName, String tableName,
       List<String> part_vals) throws MetaException, NoSuchObjectException;
 
+  public abstract boolean doesPartitionExist(String dbName, String tableName,
+      List<String> part_vals) throws MetaException, NoSuchObjectException;
+
   public abstract boolean dropPartition(String dbName, String tableName,
       List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
       InvalidInputException;

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java Thu Jan 16 22:01:53 2014
@@ -584,4 +584,16 @@ public class DummyRawStoreControlledComm
 
   }
 
+  @Override
+  public boolean doesPartitionExist(String dbName, String tableName,
+      List<String> partVals) throws MetaException, NoSuchObjectException {
+    return objectStore.doesPartitionExist(dbName, tableName, partVals);
+  }
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+      throws InvalidObjectException, MetaException {
+    return objectStore.addPartitions(dbName, tblName, parts);
+  }
+
 }

Modified: hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java (original)
+++ hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java Thu Jan 16 22:01:53 2014
@@ -614,6 +614,18 @@ public class DummyRawStoreForJdoConnecti
   @Override
   public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
   }
+
+  @Override
+  public boolean doesPartitionExist(String dbName, String tableName,
+      List<String> partVals) throws MetaException, NoSuchObjectException {
+    return false;
+  }
+
+  @Override
+  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+      throws InvalidObjectException, MetaException {
+    return false;
+  }
 }
 
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Jan 16 22:01:53 2014
@@ -301,7 +301,7 @@ public class DDLTask extends Task<DDLWor
 
       AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
       if (addPartitionDesc != null) {
-        return addPartition(db, addPartitionDesc);
+        return addPartitions(db, addPartitionDesc);
       }
 
       RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
@@ -942,61 +942,20 @@ public class DDLTask extends Task<DDLWor
   }
 
   /**
-   * Add a partition to a table.
+   * Add partitions to a table.
    *
    * @param db
    *          Database to add the partition to.
    * @param addPartitionDesc
-   *          Add this partition.
+   *          Add these partitions.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
    * @throws HiveException
    */
-  private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
-
-    Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
-
-    // If the add partition was created with IF NOT EXISTS, then we should
-    // not throw an error if the specified part does exist.
-    Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
-    if (checkPart != null && addPartitionDesc.getIfNotExists()) {
-      return 0;
+  private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
+    List<Partition> parts = db.createPartitions(addPartitionDesc);
+    for (Partition part : parts) {
+      work.getOutputs().add(new WriteEntity(part));
     }
-
-
-
-    if (addPartitionDesc.getLocation() == null) {
-      db.createPartition(tbl, addPartitionDesc.getPartSpec(), null,
-          addPartitionDesc.getPartParams(),
-                    addPartitionDesc.getInputFormat(),
-                    addPartitionDesc.getOutputFormat(),
-                    addPartitionDesc.getNumBuckets(),
-                    addPartitionDesc.getCols(),
-                    addPartitionDesc.getSerializationLib(),
-                    addPartitionDesc.getSerdeParams(),
-                    addPartitionDesc.getBucketCols(),
-                    addPartitionDesc.getSortCols());
-
-    } else {
-      if (tbl.isView()) {
-        throw new HiveException("LOCATION clause illegal for view partition");
-      }
-      // set partition path relative to table
-      db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
-                    .getPath(), addPartitionDesc.getLocation()), addPartitionDesc.getPartParams(),
-                    addPartitionDesc.getInputFormat(),
-                    addPartitionDesc.getOutputFormat(),
-                    addPartitionDesc.getNumBuckets(),
-                    addPartitionDesc.getCols(),
-                    addPartitionDesc.getSerializationLib(),
-                    addPartitionDesc.getSerdeParams(),
-                    addPartitionDesc.getBucketCols(),
-                    addPartitionDesc.getSortCols());
-    }
-
-    Partition part = db
-        .getPartition(tbl, addPartitionDesc.getPartSpec(), false);
-    work.getOutputs().add(new WriteEntity(part));
-
     return 0;
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu Jan 16 22:01:53 2014
@@ -2464,21 +2464,6 @@ public final class Utilities {
     jobConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypesString);
   }
 
-  public static void validatePartSpecColumnNames(Table tbl, Map<String, String> partSpec)
-      throws SemanticException {
-
-    List<FieldSchema> parts = tbl.getPartitionKeys();
-    Set<String> partCols = new HashSet<String>(parts.size());
-    for (FieldSchema col : parts) {
-      partCols.add(col.getName());
-    }
-    for (String col : partSpec.keySet()) {
-      if (!partCols.contains(col)) {
-        throw new SemanticException(ErrorMsg.NONEXISTPARTCOL.getMsg(col));
-      }
-    }
-  }
-
   public static String suffix = ".hashtable";
 
   public static String generatePath(String baseURI, String dumpFilePrefix,

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Thu Jan 16 22:01:53 2014
@@ -85,6 +85,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
+import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -1469,92 +1470,72 @@ private void constructOneLBLocationMap(F
    * @throws HiveException
    *           if table doesn't exist or partition already exists
    */
-  public Partition createPartition(Table tbl, Map<String, String> partSpec)
-      throws HiveException {
-    return createPartition(tbl, partSpec, null, null, null, null, -1,
-        null, null, null, null, null);
-  }
-
-  /**
-   * Creates a partition
-   *
-   * @param tbl
-   *          table for which partition needs to be created
-   * @param partSpec
-   *          partition keys and their values
-   * @param location
-   *          location of this partition
-   * @param partParams
-   *          partition parameters
-   * @param inputFormat the inputformat class
-   * @param outputFormat the outputformat class
-   * @param numBuckets the number of buckets
-   * @param cols the column schema
-   * @param serializationLib the serde class
-   * @param serdeParams the serde parameters
-   * @param bucketCols the bucketing columns
-   * @param sortCols sort columns and order
-   *
-   * @return created partition object
-   * @throws HiveException
-   *           if table doesn't exist or partition already exists
-   */
-  public Partition createPartition(Table tbl, Map<String, String> partSpec,
-      Path location, Map<String, String> partParams, String inputFormat, String outputFormat,
-      int numBuckets, List<FieldSchema> cols,
-      String serializationLib, Map<String, String> serdeParams,
-      List<String> bucketCols, List<Order> sortCols) throws HiveException {
-
-    org.apache.hadoop.hive.metastore.api.Partition partition = null;
-
-    for (FieldSchema field : tbl.getPartCols()) {
-      String val = partSpec.get(field.getName());
-      if (val == null || val.length() == 0) {
-        throw new HiveException("add partition: Value for key "
-            + field.getName() + " is null or empty");
-      }
+  public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
+    try {
+      return new Partition(tbl, getMSC().add_partition(
+          Partition.createMetaPartitionObject(tbl, partSpec, null)));
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
     }
+  }
 
-    try {
-      Partition tmpPart = new Partition(tbl, partSpec, location);
-      // No need to clear DDL_TIME in parameters since we know it's
-      // not populated on construction.
-      org.apache.hadoop.hive.metastore.api.Partition inPart
-        = tmpPart.getTPartition();
-      if (partParams != null) {
-        inPart.setParameters(partParams);
-      }
-      if (inputFormat != null) {
-        inPart.getSd().setInputFormat(inputFormat);
-      }
-      if (outputFormat != null) {
-        inPart.getSd().setOutputFormat(outputFormat);
-      }
-      if (numBuckets != -1) {
-        inPart.getSd().setNumBuckets(numBuckets);
-      }
-      if (cols != null) {
-        inPart.getSd().setCols(cols);
-      }
-      if (serializationLib != null) {
-          inPart.getSd().getSerdeInfo().setSerializationLib(serializationLib);
-      }
-      if (serdeParams != null) {
-        inPart.getSd().getSerdeInfo().setParameters(serdeParams);
-      }
-      if (bucketCols != null) {
-        inPart.getSd().setBucketCols(bucketCols);
-      }
-      if (sortCols != null) {
-        inPart.getSd().setSortCols(sortCols);
+  public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
+    Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
+    int size = addPartitionDesc.getPartitionCount();
+    List<org.apache.hadoop.hive.metastore.api.Partition> in =
+        new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
+    for (int i = 0; i < size; ++i) {
+      in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i)));
+    }
+    List<Partition> out = new ArrayList<Partition>();
+    try {
+      // TODO: normally, the result is not necessary; might make sense to pass false
+      for (org.apache.hadoop.hive.metastore.api.Partition outPart
+          : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
+        out.add(new Partition(tbl, outPart));
       }
-      partition = getMSC().add_partition(inPart);
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
     }
+    return out;
+  }
 
-    return new Partition(tbl, partition);
+  private static org.apache.hadoop.hive.metastore.api.Partition convertAddSpecToMetaPartition(
+      Table tbl, AddPartitionDesc.OnePartitionDesc addSpec) throws HiveException {
+    Path location = addSpec.getLocation() != null
+        ? new Path(tbl.getPath(), addSpec.getLocation()) : null;
+    org.apache.hadoop.hive.metastore.api.Partition part =
+        Partition.createMetaPartitionObject(tbl, addSpec.getPartSpec(), location);
+    if (addSpec.getPartParams() != null) {
+      part.setParameters(addSpec.getPartParams());
+    }
+    if (addSpec.getInputFormat() != null) {
+      part.getSd().setInputFormat(addSpec.getInputFormat());
+    }
+    if (addSpec.getOutputFormat() != null) {
+      part.getSd().setOutputFormat(addSpec.getOutputFormat());
+    }
+    if (addSpec.getNumBuckets() != -1) {
+      part.getSd().setNumBuckets(addSpec.getNumBuckets());
+    }
+    if (addSpec.getCols() != null) {
+      part.getSd().setCols(addSpec.getCols());
+    }
+    if (addSpec.getSerializationLib() != null) {
+      part.getSd().getSerdeInfo().setSerializationLib(addSpec.getSerializationLib());
+    }
+    if (addSpec.getSerdeParams() != null) {
+      part.getSd().getSerdeInfo().setParameters(addSpec.getSerdeParams());
+    }
+    if (addSpec.getBucketCols() != null) {
+      part.getSd().setBucketCols(addSpec.getBucketCols());
+    }
+    if (addSpec.getSortCols() != null) {
+      part.getSd().setSortCols(addSpec.getSortCols());
+    }
+    return part;
   }
 
   public Partition getPartition(Table tbl, Map<String, String> partSpec,
@@ -1590,15 +1571,13 @@ private void constructOneLBLocationMap(F
    */
   public Partition getPartition(Table tbl, Map<String, String> partSpec,
       boolean forceCreate, String partPath, boolean inheritTableSpecs) throws HiveException {
-    if (!tbl.isValidSpec(partSpec)) {
-      throw new HiveException("Invalid partition: " + partSpec);
-    }
+    tbl.validatePartColumnNames(partSpec, true);
     List<String> pvals = new ArrayList<String>();
     for (FieldSchema field : tbl.getPartCols()) {
       String val = partSpec.get(field.getName());
       // enable dynamic partitioning
-      if (val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)
-          || val.length() == 0) {
+      if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING))
+          || (val != null && val.length() == 0)) {
         throw new HiveException("get partition: Value for key "
             + field.getName() + " is null or empty");
       } else if (val != null){

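For illustration only (not part of the patch): a minimal sketch of driving the new
batched API above. AddPartitionDesc, addPartition(), and Hive.createPartitions()
come from this change; the Hive.get() entry point, the HiveConf setup, and the
database/table/partition names are assumptions for the example.

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

    public class AddPartitionsSketch {
      public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf()); // assumes a configured metastore
        // Table "t" in db "default", partitioned by (ds string); names are illustrative.
        AddPartitionDesc desc = new AddPartitionDesc("default", "t", true /* ifNotExists */);
        desc.addPartition(Collections.singletonMap("ds", "2014-01-16"), null /* default location */);
        // Locations are resolved relative to the table path by convertAddSpecToMetaPartition.
        desc.addPartition(Collections.singletonMap("ds", "2014-01-17"), "ds=2014-01-17");
        // One metastore round trip for the whole batch, instead of one add_partition per spec.
        List<Partition> created = db.createPartitions(desc);
        System.out.println("added " + created.size() + " partitions");
      }
    }
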
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Thu Jan 16 22:01:53 2014
@@ -118,50 +118,53 @@ public class Partition implements Serial
    * @throws HiveException
    *           Thrown if we could not create the partition.
    */
-  public Partition(Table tbl, Map<String, String> partSpec, Path location)
-      throws HiveException {
+  public Partition(Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
+    initialize(tbl, createMetaPartitionObject(tbl, partSpec, location));
+  }
 
+  public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
+      Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
     List<String> pvals = new ArrayList<String>();
     for (FieldSchema field : tbl.getPartCols()) {
       String val = partSpec.get(field.getName());
-      if (val == null) {
-        throw new HiveException(
-            "partition spec is invalid. field.getName() does not exist in input.");
+      if (val == null || val.isEmpty()) {
+        throw new HiveException("partition spec is invalid; field "
+            + field.getName() + " does not exist or is empty");
       }
       pvals.add(val);
     }
 
-    org.apache.hadoop.hive.metastore.api.Partition tpart = new org.apache.hadoop.hive.metastore.api.Partition();
+    org.apache.hadoop.hive.metastore.api.Partition tpart =
+        new org.apache.hadoop.hive.metastore.api.Partition();
     tpart.setDbName(tbl.getDbName());
     tpart.setTableName(tbl.getTableName());
     tpart.setValues(pvals);
 
-    if (tbl.isView()) {
-      initialize(tbl, tpart);
-      return;
+    if (!tbl.isView()) {
+      tpart.setSd(cloneSd(tbl));
+      tpart.getSd().setLocation((location != null) ? location.toString() : null);
     }
+    return tpart;
+  }
 
+  /**
+   * Deep-copies the table's StorageDescriptor by round-tripping it through Thrift
+   * serialization; elsewhere we clone such objects via XML or Kryo. Extracted into
+   * a method so it can be reused outside the Partition constructor.
+   */
+  public static StorageDescriptor cloneSd(Table tbl) throws HiveException {
     StorageDescriptor sd = new StorageDescriptor();
     try {
       // replace with THRIFT-138
       TMemoryBuffer buffer = new TMemoryBuffer(1024);
       TBinaryProtocol prot = new TBinaryProtocol(buffer);
       tbl.getTTable().getSd().write(prot);
-
       sd.read(prot);
     } catch (TException e) {
       LOG.error("Could not create a copy of StorageDescription");
       throw new HiveException("Could not create a copy of StorageDescription",e);
     }
-
-    tpart.setSd(sd);
-    if (location != null) {
-      tpart.getSd().setLocation(location.toString());
-    } else {
-      tpart.getSd().setLocation(null);
-    }
-
-    initialize(tbl, tpart);
+    return sd;
   }
 
   /**

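A standalone sketch of the serialize/deserialize round trip that cloneSd() relies
on, assuming only the Thrift-generated metastore classes; the wrapper class and
method names are invented for the example.

    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public final class ThriftCloneSketch {
      /** Deep-copies a Thrift struct by writing it to a memory buffer and reading it back. */
      public static StorageDescriptor deepCopy(StorageDescriptor src) throws TException {
        TMemoryBuffer buffer = new TMemoryBuffer(1024);   // initial size; the buffer grows
        TBinaryProtocol prot = new TBinaryProtocol(buffer);
        src.write(prot);                                  // serialize the source
        StorageDescriptor copy = new StorageDescriptor();
        copy.read(prot);                                  // deserialize into a fresh instance
        return copy;
      }
    }

Thrift-generated classes also carry a copy constructor and a deepCopy() method; the
buffer round trip shown here works for any generated struct and avoids reflection.
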
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Thu Jan 16 22:01:53 2014
@@ -23,11 +23,13 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,10 +47,12 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -357,35 +361,34 @@ public class Table implements Serializab
     return outputFormatClass;
   }
 
-  final public boolean isValidSpec(Map<String, String> spec)
-      throws HiveException {
-
-    // TODO - types need to be checked.
+  final public void validatePartColumnNames(
+      Map<String, String> spec, boolean shouldBeFull) throws SemanticException {
     List<FieldSchema> partCols = tTable.getPartitionKeys();
     if (partCols == null || (partCols.size() == 0)) {
       if (spec != null) {
-        throw new HiveException(
-            "table is not partitioned but partition spec exists: " + spec);
-      } else {
-        return true;
+        throw new SemanticException("table is not partitioned but partition spec exists: " + spec);
       }
-    }
-
-    if ((spec == null) || (spec.size() != partCols.size())) {
-      throw new HiveException(
-          "table is partitioned but partition spec is not specified or"
-          + " does not fully match table partitioning: "
-          + spec);
-    }
-
-    for (FieldSchema field : partCols) {
-      if (spec.get(field.getName()) == null) {
-        throw new HiveException(field.getName()
-            + " not found in table's partition spec: " + spec);
+      return;
+    } else if (spec == null) {
+      if (shouldBeFull) {
+        throw new SemanticException("table is partitioned but partition spec is not specified");
       }
+      return;
+    }
+    int columnsFound = 0;
+    for (FieldSchema fs : partCols) {
+      if (spec.containsKey(fs.getName())) {
+        ++columnsFound;
+      }
+      if (columnsFound == spec.size()) break;
+    }
+    if (columnsFound < spec.size()) {
+      throw new SemanticException("Partition spec " + spec + " contains non-partition columns");
+    }
+    if (shouldBeFull && (spec.size() != partCols.size())) {
+      throw new SemanticException("partition spec " + spec
+          + " doesn't contain all (" + partCols.size() + ") partition columns");
     }
-
-    return true;
   }
 
   public void setProperty(String name, String value) {

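To make the new contract concrete, a sketch of how validatePartColumnNames()
behaves for a table partitioned by (ds string, hr string); the spec() helper and
the column names are invented for the example, and each throwing call would have
to run on its own.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class ValidateSpecSketch {
      // Tiny illustrative helper: builds an ordered spec from key/value pairs.
      static Map<String, String> spec(String... kv) {
        Map<String, String> m = new LinkedHashMap<String, String>();
        for (int i = 0; i < kv.length; i += 2) {
          m.put(kv[i], kv[i + 1]);
        }
        return m;
      }

      static void demo(Table tbl) throws SemanticException {
        tbl.validatePartColumnNames(spec("ds", "2014-01-16", "hr", "00"), true); // full spec: ok
        tbl.validatePartColumnNames(spec("hr", "00"), false); // partial spec allowed: ok
        tbl.validatePartColumnNames(spec("hr", "00"), true);  // throws: full spec required
        tbl.validatePartColumnNames(spec("dt", "x"), false);  // throws: dt is not a partition column
      }
    }
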
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Thu Jan 16 22:01:53 2014
@@ -786,7 +786,7 @@ public abstract class BaseSemanticAnalyz
         }
 
         // check if the columns, as well as value types in the partition() clause are valid
-        validatePartSpec(tableHandle, tmpPartSpec, ast, conf);
+        validatePartSpec(tableHandle, tmpPartSpec, ast, conf, false);
 
         List<FieldSchema> parts = tableHandle.getPartitionKeys();
         partSpec = new LinkedHashMap<String, String>(partspec.getChildCount());
@@ -1188,8 +1188,8 @@ public abstract class BaseSemanticAnalyz
   }
 
   public static void validatePartSpec(Table tbl, Map<String, String> partSpec,
-      ASTNode astNode, HiveConf conf) throws SemanticException {
-    Utilities.validatePartSpecColumnNames(tbl, partSpec);
+      ASTNode astNode, HiveConf conf, boolean shouldBeFull) throws SemanticException {
+    tbl.validatePartColumnNames(partSpec, shouldBeFull);
 
     if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) {
       return;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Thu Jan 16 22:01:53 2014
@@ -2511,32 +2511,30 @@ public class DDLSemanticAnalyzer extends
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
     inputs.add(new ReadEntity(tab));
 
-    List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
-
     int numCh = ast.getChildCount();
     int start = ifNotExists ? 2 : 1;
 
     String currentLocation = null;
     Map<String, String> currentPart = null;
+    // Parser has done some verification, so the order of tokens doesn't need to be verified here.
+    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists);
     for (int num = start; num < numCh; num++) {
       ASTNode child = (ASTNode) ast.getChild(num);
       switch (child.getToken().getType()) {
       case HiveParser.TOK_PARTSPEC:
         if (currentPart != null) {
-          Partition partition = getPartitionForOutput(tab, currentPart);
-          if (partition == null || !ifNotExists) {
-            AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
-              tab.getDbName(), tblName, currentPart,
-              currentLocation, ifNotExists, expectView);
-            partitionDescs.add(addPartitionDesc);
-          }
+          addPartitionDesc.addPartition(currentPart, currentLocation);
           currentLocation = null;
         }
         currentPart = getPartSpec(child);
-        validatePartSpec(tab, currentPart, (ASTNode)child, conf);
+        validatePartitionValues(currentPart); // validate reserved values
+        validatePartSpec(tab, currentPart, (ASTNode)child, conf, true);
         break;
       case HiveParser.TOK_PARTITIONLOCATION:
         // if location specified, set in partition
+        if (isView) {
+          throw new SemanticException("LOCATION clause illegal for view partition");
+        }
         currentLocation = unescapeSQLString(child.getChild(0).getText());
         break;
       default:
@@ -2546,47 +2544,25 @@ public class DDLSemanticAnalyzer extends
 
     // add the last one
     if (currentPart != null) {
-      Partition partition = getPartitionForOutput(tab, currentPart);
-      if (partition == null || !ifNotExists) {
-        AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
-          tab.getDbName(), tblName, currentPart,
-          currentLocation, ifNotExists, expectView);
-        partitionDescs.add(addPartitionDesc);
-      }
+      addPartitionDesc.addPartition(currentPart, currentLocation);
     }
 
-    if (partitionDescs.isEmpty()) {
+    if (addPartitionDesc.getPartitionCount() == 0) {
       // nothing to do
       return;
     }
 
-    for (AddPartitionDesc addPartitionDesc : partitionDescs) {
-      try {
-        tab.isValidSpec(addPartitionDesc.getPartSpec());
-      } catch (HiveException ex) {
-        throw new SemanticException(ErrorMsg.INVALID_PARTITION_SPEC.getMsg(ex.getMessage()));
-      }
-      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
-          addPartitionDesc), conf));
-    }
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf));
 
     if (isView) {
-      // Compile internal query to capture underlying table partition
-      // dependencies
+      // Compile internal query to capture underlying table partition dependencies
       StringBuilder cmd = new StringBuilder();
       cmd.append("SELECT * FROM ");
       cmd.append(HiveUtils.unparseIdentifier(tblName));
       cmd.append(" WHERE ");
       boolean firstOr = true;
-      for (AddPartitionDesc partitionDesc : partitionDescs) {
-        // Perform this check early so that we get a better error message.
-        try {
-          // Note that isValidSpec throws an exception (it never
-          // actually returns false).
-          tab.isValidSpec(partitionDesc.getPartSpec());
-        } catch (HiveException ex) {
-          throw new SemanticException(ErrorMsg.INVALID_PARTITION_SPEC.getMsg(ex.getMessage()));
-        }
+      for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
+        AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i);
         if (firstOr) {
           firstOr = false;
         } else {
@@ -2594,8 +2570,7 @@ public class DDLSemanticAnalyzer extends
         }
         boolean firstAnd = true;
         cmd.append("(");
-        for (Map.Entry<String, String> entry : partitionDesc.getPartSpec().entrySet())
-        {
+        for (Map.Entry<String, String> entry : partitionDesc.getPartSpec().entrySet()) {
           if (firstAnd) {
             firstAnd = false;
           } else {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Thu Jan 16 22:01:53 2014
@@ -129,9 +129,11 @@ public class ImportSemanticAnalyzer exte
         }
         List<Partition> partitions = rv.getValue();
         for (Partition partition : partitions) {
-          AddPartitionDesc partDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
+          // TODO: this should not create one AddPartitionDesc per partition
+          AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
               EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
               partition.getSd().getLocation(), partition.getParameters());
+          AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
           partDesc.setInputFormat(partition.getSd().getInputFormat());
           partDesc.setOutputFormat(partition.getSd().getOutputFormat());
           partDesc.setNumBuckets(partition.getSd().getNumBuckets());
@@ -142,7 +144,7 @@ public class ImportSemanticAnalyzer exte
           partDesc.setSortCols(partition.getSd().getSortCols());
           partDesc.setLocation(new Path(fromPath,
               Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
-          partitionDescs.add(partDesc);
+          partitionDescs.add(partsDesc);
         }
       } catch (IOException e) {
         throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
@@ -188,7 +190,7 @@ public class ImportSemanticAnalyzer exte
             for (Iterator<AddPartitionDesc> partnIter = partitionDescs
                   .listIterator(); partnIter.hasNext();) {
               AddPartitionDesc addPartitionDesc = partnIter.next();
-              if (!found && addPartitionDesc.getPartSpec().equals(partSpec)) {
+              if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(partSpec)) {
                 found = true;
               } else {
                 partnIter.remove();
@@ -221,12 +223,12 @@ public class ImportSemanticAnalyzer exte
         if (table.isPartitioned()) {
           LOG.debug("table partitioned");
           for (AddPartitionDesc addPartitionDesc : partitionDescs) {
-            if (db.getPartition(table, addPartitionDesc.getPartSpec(), false) == null) {
+            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
+            if (db.getPartition(table, partSpec, false) == null) {
               rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
             } else {
               throw new SemanticException(
-                  ErrorMsg.PARTITION_EXISTS
-                      .getMsg(partSpecToString(addPartitionDesc.getPartSpec())));
+                  ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
             }
           }
         } else {
@@ -297,33 +299,34 @@ public class ImportSemanticAnalyzer exte
   private Task<?> addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc tblDesc,
       Table table, Warehouse wh,
       AddPartitionDesc addPartitionDesc) throws MetaException, IOException, HiveException {
+    AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
     if (tblDesc.isExternal() && tblDesc.getLocation() == null) {
       LOG.debug("Importing in-place: adding AddPart for partition "
-          + partSpecToString(addPartitionDesc.getPartSpec()));
+          + partSpecToString(partSpec.getPartSpec()));
       // addPartitionDesc already has the right partition location
       Task<?> addPartTask = TaskFactory.get(new DDLWork(getInputs(),
           getOutputs(), addPartitionDesc), conf);
       return addPartTask;
     } else {
-      String srcLocation = addPartitionDesc.getLocation();
+      String srcLocation = partSpec.getLocation();
       Path tgtPath = null;
       if (tblDesc.getLocation() == null) {
         if (table.getDataLocation() != null) {
           tgtPath = new Path(table.getDataLocation().toString(),
-              Warehouse.makePartPath(addPartitionDesc.getPartSpec()));
+              Warehouse.makePartPath(partSpec.getPartSpec()));
         } else {
           tgtPath = new Path(wh.getTablePath(
               db.getDatabaseCurrent(), tblDesc.getTableName()),
-              Warehouse.makePartPath(addPartitionDesc.getPartSpec()));
+              Warehouse.makePartPath(partSpec.getPartSpec()));
         }
       } else {
         tgtPath = new Path(tblDesc.getLocation(),
-            Warehouse.makePartPath(addPartitionDesc.getPartSpec()));
+            Warehouse.makePartPath(partSpec.getPartSpec()));
       }
       checkTargetLocationEmpty(fs, tgtPath);
-      addPartitionDesc.setLocation(tgtPath.toString());
+      partSpec.setLocation(tgtPath.toString());
       LOG.debug("adding dependent CopyWork/AddPart/MoveWork for partition "
-          + partSpecToString(addPartitionDesc.getPartSpec())
+          + partSpecToString(partSpec.getPartSpec())
           + " with source location: " + srcLocation);
       Path tmpPath = ctx.getExternalTmpPath(fromURI);
       Task<?> copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation),
@@ -332,7 +335,7 @@ public class ImportSemanticAnalyzer exte
           getOutputs(), addPartitionDesc), conf);
       LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath,
           Utilities.getTableDesc(table),
-          addPartitionDesc.getPartSpec(), true);
+          partSpec.getPartSpec(), true);
       loadTableWork.setInheritTableSpecs(false);
       Task<?> loadPartTask = TaskFactory.get(new MoveWork(
           getInputs(), getOutputs(), loadTableWork, null, false),

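The import path above still carries exactly one partition per descriptor (hence
the TODO), reached through getPartition(0). A condensed sketch of that accessor
pattern follows; the method name and parameter list are assumptions for the
example, and the five-argument constructor is the one this patch deprecates.

    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

    public class ImportPartitionSketch {
      @SuppressWarnings("deprecation") // the single-partition ctor is deprecated by this change
      static AddPartitionDesc describeImportedPartition(String dbName, String tableName,
          Map<String, String> partSpec, Map<String, String> params,
          StorageDescriptor sd, Path target) {
        AddPartitionDesc partsDesc =
            new AddPartitionDesc(dbName, tableName, partSpec, sd.getLocation(), params);
        // Per-partition fields now live on the inner OnePartitionDesc.
        AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
        partDesc.setInputFormat(sd.getInputFormat());
        partDesc.setOutputFormat(sd.getOutputFormat());
        partDesc.setNumBuckets(sd.getNumBuckets());
        partDesc.setLocation(target.toString()); // override source location, as addSinglePartition does
        return partsDesc;
      }
    }
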
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Thu Jan 16 22:01:53 2014
@@ -1020,7 +1020,7 @@ public class SemanticAnalyzer extends Ba
           } catch (HiveException e) {
             LOG.info("Error while getting metadata : ", e);
           }
-          validatePartSpec(table, partition, (ASTNode)tab, conf);
+          validatePartSpec(table, partition, (ASTNode)tab, conf, false);
         }
         skipRecursion = false;
         break;
@@ -5306,7 +5306,7 @@ public class SemanticAnalyzer extends Ba
         }
         dpCtx = qbm.getDPCtx(dest);
         if (dpCtx == null) {
-          Utilities.validatePartSpecColumnNames(dest_tab, partSpec);
+          dest_tab.validatePartColumnNames(partSpec, false);
           dpCtx = new DynamicPartitionCtx(dest_tab, partSpec,
               conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME),
               conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java?rev=1558928&r1=1558927&r2=1558928&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java Thu Jan 16 22:01:53 2014
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -26,27 +27,131 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.Order;
 
 /**
- * Contains the information needed to add a partition.
+ * Contains the information needed to add one or more partitions.
  */
 public class AddPartitionDesc extends DDLDesc implements Serializable {
 
+  public static class OnePartitionDesc {
+    public OnePartitionDesc() {}
+
+    OnePartitionDesc(
+        Map<String, String> partSpec, String location, Map<String, String> params) {
+      this(partSpec, location);
+      this.partParams = params;
+    }
+
+    OnePartitionDesc(Map<String, String> partSpec, String location) {
+      this.partSpec = partSpec;
+      this.location = location;
+    }
+
+    Map<String, String> partSpec;
+    Map<String, String> partParams;
+    String location;
+    String inputFormat = null;
+    String outputFormat = null;
+    int numBuckets = -1;
+    List<FieldSchema> cols = null;
+    String serializationLib = null;
+    Map<String, String> serdeParams = null;
+    List<String> bucketCols = null;
+    List<Order> sortCols = null;
+
+    public Map<String, String> getPartSpec() {
+      return partSpec;
+    }
+
+    /**
+     * @return location of partition in relation to table
+     */
+    public String getLocation() {
+      return location;
+    }
+
+    public void setLocation(String location) {
+      this.location = location;
+    }
+
+    public Map<String, String> getPartParams() {
+      return partParams;
+    }
+
+    public void setPartParams(Map<String, String> partParams) {
+      this.partParams = partParams;
+    }
+
+    public int getNumBuckets() {
+      return numBuckets;
+    }
+
+    public void setNumBuckets(int numBuckets) {
+      this.numBuckets = numBuckets;
+    }
+
+    public List<FieldSchema> getCols() {
+      return cols;
+    }
+
+    public void setCols(List<FieldSchema> cols) {
+      this.cols = cols;
+    }
+
+    public String getSerializationLib() {
+      return serializationLib;
+    }
+
+    public void setSerializationLib(String serializationLib) {
+      this.serializationLib = serializationLib;
+    }
+
+    public Map<String, String> getSerdeParams() {
+      return serdeParams;
+    }
+
+    public void setSerdeParams(Map<String, String> serdeParams) {
+      this.serdeParams = serdeParams;
+    }
+
+    public List<String> getBucketCols() {
+      return bucketCols;
+    }
+
+    public void setBucketCols(List<String> bucketCols) {
+      this.bucketCols = bucketCols;
+    }
+
+    public List<Order> getSortCols() {
+      return sortCols;
+    }
+
+    public void setSortCols(List<Order> sortCols) {
+      this.sortCols = sortCols;
+    }
+
+    public String getInputFormat() {
+      return inputFormat;
+    }
+
+    public void setInputFormat(String inputFormat) {
+      this.inputFormat = inputFormat;
+    }
+
+    public String getOutputFormat() {
+      return outputFormat;
+    }
+
+    public void setOutputFormat(String outputFormat) {
+      this.outputFormat = outputFormat;
+    }
+  }
+
   private static final long serialVersionUID = 1L;
 
   String tableName;
   String dbName;
-  String location;
   boolean ifNotExists;
-  boolean expectView;
-  LinkedHashMap<String, String> partSpec;
-  Map<String, String> partParams;
-  String inputFormat = null;
-  String outputFormat = null;
-  int numBuckets = -1;
-  List<FieldSchema> cols = null;
-  String serializationLib = null;
-  Map<String, String> serdeParams = null;
-  List<String> bucketCols = null;
-  List<Order> sortCols = null;
+  List<OnePartitionDesc> partitions = null;
 
   /**
    * For serialization only.
@@ -54,7 +159,16 @@ public class AddPartitionDesc extends DD
   public AddPartitionDesc() {
   }
 
+  public AddPartitionDesc(
+      String dbName, String tableName, boolean ifNotExists) {
+    super();
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.ifNotExists = ifNotExists;
+  }
+
   /**
+   * Legacy single-partition constructor, retained for ImportSemanticAnalyzer.
    * @param dbName
    *          database to add to.
    * @param tableName
@@ -66,36 +180,26 @@ public class AddPartitionDesc extends DD
    * @param params
    *          partition parameters.
    */
+  @Deprecated
   public AddPartitionDesc(String dbName, String tableName,
       Map<String, String> partSpec, String location, Map<String, String> params) {
-    this(dbName, tableName, partSpec, location, true, false);
-    this.partParams = params;
-  }
-
-  /**
-   * @param dbName
-   *          database to add to.
-   * @param tableName
-   *          table to add to.
-   * @param partSpec
-   *          partition specification.
-   * @param location
-   *          partition location, relative to table location.
-   * @param ifNotExists
-   *          if true, the partition is only added if it doesn't exist
-   * @param expectView
-   *          true for ALTER VIEW, false for ALTER TABLE
-   */
-  public AddPartitionDesc(String dbName, String tableName,
-      Map<String, String> partSpec, String location, boolean ifNotExists,
-      boolean expectView) {
     super();
     this.dbName = dbName;
     this.tableName = tableName;
-    this.partSpec = new LinkedHashMap<String,String>(partSpec);
-    this.location = location;
-    this.ifNotExists = ifNotExists;
-    this.expectView = expectView;
+    this.ifNotExists = true;
+    addPartition(partSpec, location, params);
+  }
+
+  public void addPartition(Map<String, String> partSpec, String location) {
+    addPartition(partSpec, location, null);
+  }
+
+  private void addPartition(
+      Map<String, String> partSpec, String location, Map<String, String> params) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<OnePartitionDesc>();
+    }
+    this.partitions.add(new OnePartitionDesc(partSpec, location, params));
   }
 
   /**
@@ -132,42 +236,39 @@ public class AddPartitionDesc extends DD
    * @return location of partition in relation to table
    */
   @Explain(displayName = "Location")
-  public String getLocation() {
-    return location;
-  }
-
-  /**
-   * @param location
-   *          location of partition in relation to table
-   */
-  public void setLocation(String location) {
-    this.location = location;
-  }
-
-  /**
-   * @return partition specification.
-   */
-  public LinkedHashMap<String, String> getPartSpec() {
-    return partSpec;
+  public String getLocationForExplain() {
+    if (this.partitions == null || this.partitions.isEmpty()) return "<no partition>";
+    boolean isFirst = true;
+    StringBuilder sb = new StringBuilder();
+    for (OnePartitionDesc desc : this.partitions) {
+      if (!isFirst) {
+        sb.append(", ");
+      }
+      isFirst = false;
+      sb.append(desc.location);
+    }
+    return sb.toString();
   }
 
   @Explain(displayName = "Spec")
-  public String getPartSpecString() {
-    return partSpec.toString();
-  }
-
-  /**
-   * @param partSpec
-   *          partition specification
-   */
-  public void setPartSpec(LinkedHashMap<String, String> partSpec) {
-    this.partSpec = partSpec;
+  public String getPartSpecStringForExplain() {
+    if (this.partitions == null || this.partitions.isEmpty()) return "<no partition>";
+    boolean isFirst = true;
+    StringBuilder sb = new StringBuilder();
+    for (OnePartitionDesc desc : this.partitions) {
+      if (!isFirst) {
+        sb.append(", ");
+      }
+      isFirst = false;
+      sb.append(desc.partSpec.toString());
+    }
+    return sb.toString();
   }
 
   /**
    * @return if the partition should only be added if it doesn't exist already
    */
-  public boolean getIfNotExists() {
+  public boolean isIfNotExists() {
     return this.ifNotExists;
   }
 
@@ -179,98 +280,11 @@ public class AddPartitionDesc extends DD
     this.ifNotExists = ifNotExists;
   }
 
-  /**
-   * @return partition parameters.
-   */
-  public Map<String, String> getPartParams() {
-    return partParams;
-  }
-
-  /**
-   * @param partParams
-   *          partition parameters
-   */
-
-  public void setPartParams(Map<String, String> partParams) {
-    this.partParams = partParams;
-  }
-
-  public int getNumBuckets() {
-    return numBuckets;
-  }
-
-  public void setNumBuckets(int numBuckets) {
-    this.numBuckets = numBuckets;
-  }
-
-  public List<FieldSchema> getCols() {
-    return cols;
-  }
-
-  public void setCols(List<FieldSchema> cols) {
-    this.cols = cols;
-  }
-
-  public String getSerializationLib() {
-    return serializationLib;
-  }
-
-  public void setSerializationLib(String serializationLib) {
-    this.serializationLib = serializationLib;
-  }
-
-  public Map<String, String> getSerdeParams() {
-    return serdeParams;
-  }
-
-  public void setSerdeParams(Map<String, String> serdeParams) {
-    this.serdeParams = serdeParams;
-  }
-
-  public List<String> getBucketCols() {
-    return bucketCols;
-  }
-
-  public void setBucketCols(List<String> bucketCols) {
-    this.bucketCols = bucketCols;
-  }
-
-  public List<Order> getSortCols() {
-    return sortCols;
-  }
-
-  public void setSortCols(List<Order> sortCols) {
-    this.sortCols = sortCols;
-  }
-
-  public String getInputFormat() {
-    return inputFormat;
+  public int getPartitionCount() {
+    return this.partitions.size();
   }
 
-  public void setInputFormat(String inputFormat) {
-    this.inputFormat = inputFormat;
-  }
-
-  public String getOutputFormat() {
-    return outputFormat;
-  }
-
-  public void setOutputFormat(String outputFormat) {
-    this.outputFormat = outputFormat;
-  }
-
-  /*
-   * @return whether to expect a view being altered
-   */
-  public boolean getExpectView() {
-    return expectView;
-  }
-
-  /**
-   * @param expectView
-   *          set whether to expect a view being altered
-   */
-  public void setExpectView(boolean expectView) {
-    this.expectView = expectView;
+  public OnePartitionDesc getPartition(int i) {
+    return this.partitions.get(i);
   }
 }
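
A quick sketch of what the new EXPLAIN accessors produce for a two-partition
batch; the table name and values are invented, and the exact strings in the
comments follow from Map.toString() and StringBuilder.append() of a null String.

    import java.util.Collections;
    import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

    public class ExplainStringsSketch {
      public static void main(String[] args) {
        AddPartitionDesc desc = new AddPartitionDesc("default", "t", true /* ifNotExists */);
        desc.addPartition(Collections.singletonMap("ds", "1"), null);
        desc.addPartition(Collections.singletonMap("ds", "2"), "/custom");
        // The whole batch now surfaces in a single EXPLAIN row:
        System.out.println(desc.getPartSpecStringForExplain()); // {ds=1}, {ds=2}
        System.out.println(desc.getLocationForExplain());       // null, /custom
      }
    }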