Posted to commits@hive.apache.org by ga...@apache.org on 2014/12/05 22:05:54 UTC

svn commit: r1643433 - in /hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata: CheckJDOException.java Hive.java

Author: gates
Date: Fri Dec  5 21:05:54 2014
New Revision: 1643433

URL: http://svn.apache.org/r1643433
Log:
HIVE-8797 Simultaneous dynamic inserts can result in "partition already exists" error (Alan Gates, reviewed by Thejas Nair)
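
The failure mode is easiest to see as two writers racing to create the same
partition: both sessions observe that the partition is missing, both call
appendPartition(), and the loser fails the whole insert with "partition
already exists" even though its data could still be landed. The patch below
makes the loser fall back to altering the partition the winner created. The
following sketch is illustrative only; the class name and the synchronized
set standing in for the metastore are invented for the example.

// Illustrative sketch: two writers racing to create one partition. The
// loser treats "already exists" as a cue to alter rather than fail.
public class ConcurrentAppendSketch {
  private final java.util.Set<String> partitions =
      java.util.Collections.synchronizedSet(new java.util.HashSet<String>());

  // Stand-in for getMSC().appendPartition(...): fails if already present.
  void appendPartition(String partSpec) throws Exception {
    if (!partitions.add(partSpec)) {
      throw new Exception("partition already exists: " + partSpec);
    }
  }

  void createOrAlter(String partSpec) {
    try {
      appendPartition(partSpec);   // both writers may reach this point
    } catch (Exception alreadyExists) {
      alterPartition(partSpec);    // loser falls back to altering it
    }
  }

  void alterPartition(String partSpec) { /* update location, SerDe, ... */ }

  public static void main(String[] args) throws InterruptedException {
    final ConcurrentAppendSketch s = new ConcurrentAppendSketch();
    Runnable insert = new Runnable() {
      public void run() { s.createOrAlter("ds=2014-12-05"); }
    };
    Thread a = new Thread(insert), b = new Thread(insert);
    a.start(); b.start();
    a.join(); b.join();   // with the fallback, both inserts succeed
  }
}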

Added:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java?rev=1643433&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java Fri Dec  5 21:05:54 2014
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import javax.jdo.JDODataStoreException;
+
+/**
+ * Check whether a given exception is a javax.jdo.JDODataStoreException
+ */
+public class CheckJDOException {
+
+  public static boolean isJDODataStoreException(Exception e) {
+    return (e instanceof JDODataStoreException);
+  }
+
+}
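
A minimal usage sketch for the utility, assuming placeholder helpers
(metastoreCall, recoverFromRace) that are not part of this patch: the point,
as the comment in the Hive.java hunk below notes, is that the caller's catch
block never names javax.jdo types, so HCatalog clients do not pick up a
direct JDO dependency from Hive's partition-handling code.

import org.apache.hadoop.hive.ql.metadata.CheckJDOException;

public class JdoCheckUsage {
  static void createPartitionSafely() throws Exception {
    try {
      metastoreCall();
    } catch (Exception e) {
      // The instanceof test lives in CheckJDOException, so this file
      // never imports javax.jdo types directly.
      if (CheckJDOException.isJDODataStoreException(e)) {
        recoverFromRace();   // fetch the existing partition and alter it
      } else {
        throw e;
      }
    }
  }

  static void metastoreCall() throws Exception { /* e.g. appendPartition */ }
  static void recoverFromRace() { /* getPartitionWithAuthInfo + alter */ }
}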

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1643433&r1=1643432&r2=1643433&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Fri Dec  5 21:05:54 2014
@@ -116,6 +116,7 @@ import org.apache.thrift.TException;
 
 import com.google.common.collect.Sets;
 
+
 /**
  * This class has functions that implement meta data/DDL operations using calls
  * to the metastore.
@@ -1740,31 +1741,34 @@ private void constructOneLBLocationMap(F
         if (tpart == null) {
           LOG.debug("creating partition for table " + tbl.getTableName()
                     + " with partition spec : " + partSpec);
-          tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
+          try {
+            tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
+          } catch (AlreadyExistsException aee) {
+            LOG.debug("Caught already exists exception, trying to alter partition instead");
+            tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(),
+              tbl.getTableName(), pvals, getUserName(), getGroupNames());
+            alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
+          } catch (Exception e) {
+            if (CheckJDOException.isJDODataStoreException(e)) {
+              // Using utility method above, so that JDODataStoreException doesn't
+              // have to be used here. This helps avoid adding jdo dependency for
+              // hcatalog client uses
+              LOG.debug("Caught JDO exception, trying to alter partition instead");
+              tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(),
+                tbl.getTableName(), pvals, getUserName(), getGroupNames());
+              if (tpart == null) {
+                // This means the exception was caused by something other than a race condition
+                // in creating the partition, since the partition still doesn't exist.
+                throw e;
+              }
+              alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
+            } else {
+              throw e;
+            }
+          }
         }
         else {
-          LOG.debug("altering partition for table " + tbl.getTableName()
-                    + " with partition spec : " + partSpec);
-          if (inheritTableSpecs) {
-            tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
-            tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
-            tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
-            tpart.getSd().getSerdeInfo().setParameters(
-                tbl.getTTable().getSd().getSerdeInfo().getParameters());
-            tpart.getSd().setBucketCols(tbl.getBucketCols());
-            tpart.getSd().setNumBuckets(tbl.getNumBuckets());
-            tpart.getSd().setSortCols(tbl.getSortCols());
-          }
-          if (partPath == null || partPath.trim().equals("")) {
-            throw new HiveException("new partition path should not be null or empty.");
-          }
-          tpart.getSd().setLocation(partPath);
-          tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,"true");
-          String fullName = tbl.getTableName();
-          if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
-            fullName = tbl.getDbName() + "." + tbl.getTableName();
-          }
-          alterPartition(fullName, new Partition(tbl, tpart));
+          alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
         }
       }
       if (tpart == null) {
@@ -1777,6 +1781,35 @@ private void constructOneLBLocationMap(F
     return new Partition(tbl, tpart);
   }
 
+  private void alterPartitionSpec(Table tbl,
+                                  Map<String, String> partSpec,
+                                  org.apache.hadoop.hive.metastore.api.Partition tpart,
+                                  boolean inheritTableSpecs,
+                                  String partPath) throws HiveException, InvalidOperationException {
+    LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
+        + partSpec);
+    if (inheritTableSpecs) {
+      tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
+      tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
+      tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
+      tpart.getSd().getSerdeInfo().setParameters(
+          tbl.getTTable().getSd().getSerdeInfo().getParameters());
+      tpart.getSd().setBucketCols(tbl.getBucketCols());
+      tpart.getSd().setNumBuckets(tbl.getNumBuckets());
+      tpart.getSd().setSortCols(tbl.getSortCols());
+    }
+    if (partPath == null || partPath.trim().equals("")) {
+      throw new HiveException("new partition path should not be null or empty.");
+    }
+    tpart.getSd().setLocation(partPath);
+    tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,"true");
+    String fullName = tbl.getTableName();
+    if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
+      fullName = tbl.getDbName() + "." + tbl.getTableName();
+    }
+    alterPartition(fullName, new Partition(tbl, tpart));
+  }
+
   public boolean dropPartition(String tblName, List<String> part_vals, boolean deleteData)
       throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
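
Condensed, the new create-or-alter flow in Hive.java above amounts to the
sketch below. The Msc stub and the helper names are invented for
illustration; only the control flow mirrors the committed code, in
particular the null recheck that distinguishes a lost race from a genuine
metastore failure.

import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.ql.metadata.CheckJDOException;

public class GetPartitionFlowSketch {
  interface Msc {
    Partition fetch() throws Exception;    // getPartitionWithAuthInfo(...)
    Partition append() throws Exception;   // appendPartition(...)
  }

  Partition createOrAlter(Msc msc) throws Exception {
    Partition tpart = msc.fetch();
    if (tpart != null) {
      alterPartitionSpec(tpart);           // partition already exists
      return tpart;
    }
    try {
      return msc.append();                 // usual path: create it fresh
    } catch (AlreadyExistsException aee) {
      tpart = msc.fetch();                 // lost the race; adopt the winner's
    } catch (Exception e) {
      if (!CheckJDOException.isJDODataStoreException(e)) {
        throw e;                           // unrelated failure
      }
      tpart = msc.fetch();
      if (tpart == null) {
        throw e;   // partition still absent, so this was not a race
      }
    }
    alterPartitionSpec(tpart);
    return tpart;
  }

  void alterPartitionSpec(Partition tpart) { /* see alterPartitionSpec above */ }
}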