Posted to commits@hive.apache.org by vi...@apache.org on 2021/10/29 17:21:44 UTC

[hive] branch branch-3.1 updated: HIVE-24741 backport to 3.1 (Nilesh Salian, reviewed by Vihang Karajgaonkar)

This is an automated email from the ASF dual-hosted git repository.

vihangk1 pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new dd7ffab  HIVE-24741 backport to 3.1 (Nilesh Salian, reviewed by Vihang Karajgaonkar)
dd7ffab is described below

commit dd7ffab75f550446e5d2d49ce91e03e428a16c3a
Author: Neelesh Srinivas Salian <ns...@users.noreply.github.com>
AuthorDate: Fri Oct 29 10:21:30 2021 -0700

    HIVE-24741 backport to 3.1 (Nilesh Salian, reviewed by Vihang Karajgaonkar)
---
 .../apache/hadoop/hive/ql/metadata/TestHive.java   | 57 +++++++++++++++++++++
 .../apache/hadoop/hive/metastore/ObjectStore.java  | 59 +++++++++++++++++-----
 2 files changed, 104 insertions(+), 12 deletions(-)
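
In summary: ObjectStore.listPartitionsPsWithAuth now short-circuits to the directSQL-backed getPartitions() call whenever partition-level privileges are not requested and every value in the partial partition spec is empty, i.e. the caller is effectively listing all partitions; it also throws NoSuchObjectException when the table does not exist and narrows the exception handling around the transaction. TestHive gains assertions covering partial partition specs. A standalone sketch of the new eligibility predicate follows the diff below.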

diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index a24b642..81418de 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -677,6 +677,63 @@ public class TestHive extends TestCase {
         System.err.println(StringUtils.stringifyException(e));
        assertTrue("Unable to create partition for table: " + tableName, false);
       }
+
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-08");
+      part_spec.put("hr", "13");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-08");
+      part_spec.put("hr", "14");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-07");
+      part_spec.put("hr", "12");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-07");
+      part_spec.put("hr", "13");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+
+      Map<String, String> partialSpec = new HashMap<>();
+      partialSpec.put("ds", "2008-04-07");
+      assertEquals(2, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("ds", "2008-04-08");
+      assertEquals(3, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("hr", "13");
+      assertEquals(2, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      assertEquals(5, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("hr", "14");
+      assertEquals(1, hm.getPartitions(tbl, partialSpec).size());
+
       hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
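
The assertions above pin down the expected counts for hm.getPartitions(tbl, partialSpec) across full, partial, and empty specs: with the four partitions created here plus the one created earlier in the test, ds=2008-04-07 matches 2, ds=2008-04-08 matches 3, hr=13 matches 2, hr=14 matches 1, and an empty spec matches all 5. The metastore-side change these counts exercise follows.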
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 6bdae6c..f32e497 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -231,6 +231,7 @@ import org.slf4j.LoggerFactory;
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.MetricRegistry;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -3177,6 +3178,27 @@ public class ObjectStore implements RawStore, Configurable {
     return (Collection) query.executeWithArray(dbName, catName, tableName, partNameMatcher);
   }
 
+  /**
+   * If all the values in partVals are empty strings, the caller is effectively
+   * requesting all the partitions, so we can attempt to use a directSQL
+   * equivalent API, which is considerably faster.
+   * @param partVals The partition values used to filter the partitions.
+   * @return true only when partVals is non-empty and contains only empty strings,
+   * otherwise false. Callers that need partition-level privileges must not take
+   * the directSQL path, since it does not support them.
+   */
+  private boolean canTryDirectSQL(List<String> partVals) {
+    if (partVals.isEmpty()) {
+      return false;
+    }
+    for (String val : partVals) {
+      if (val != null && !val.isEmpty()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   @Override
   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
@@ -3187,24 +3209,37 @@ public class ObjectStore implements RawStore, Configurable {
 
     try {
       openTransaction();
-      LOG.debug("executing listPartitionNamesPsWithAuth");
-      Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name,
-          part_vals, max_parts, null, queryWrapper);
+
       MTable mtbl = getMTable(catName, db_name, tbl_name);
+      if (mtbl == null) {
+        throw new NoSuchObjectException(db_name + "." + tbl_name + " table not found");
+      }
+      boolean getauth = null != userName && null != groupNames &&
+          "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"));
+      if (!getauth && canTryDirectSQL(part_vals)) {
+        LOG.debug(
+            "Redirecting to directSQL enabled API: db: {} tbl: {} partVals: {}",
+            db_name, tbl_name, Joiner.on(',').join(part_vals));
+        return getPartitions(catName, db_name, tbl_name, -1);
+      }
+      LOG.debug("executing listPartitionNamesPsWithAuth");
+      Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, part_vals,
+          max_parts, null, queryWrapper);
       for (Object o : parts) {
         Partition part = convertToPart((MPartition) o);
-        //set auth privileges
-        if (null != userName && null != groupNames &&
-            "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
-          String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
-              .getPartitionKeys()), part.getValues());
-          PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name,
-              tbl_name, partName, userName, groupNames);
-          part.setPrivileges(partAuth);
-        }
+        // set auth privileges
+        String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
+            .getPartitionKeys()), part.getValues());
+        PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name,
+            tbl_name, partName, userName, groupNames);
+        part.setPrivileges(partAuth);
         partitions.add(part);
       }
       success = commitTransaction();
+    } catch (InvalidObjectException | NoSuchObjectException | MetaException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new MetaException(e.getMessage());
     } finally {
       rollbackAndCleanup(success, queryWrapper);
     }