Posted to commits@hive.apache.org by vi...@apache.org on 2020/07/14 19:45:23 UTC

[hive] branch master updated: HIVE-23767: Pass ValidWriteIdList in get_partition* API requests (Kishen Das, reviewed by Vihang Karajgaonkar and Peter Vary)

This is an automated email from the ASF dual-hosted git repository.

vihangk1 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 6b3b000  HIVE-23767: Pass ValidWriteIdList in get_partition* API requests (Kishen Das, reviewed by Vihang Karajgaonkar and Peter Vary)
6b3b000 is described below

commit 6b3b000c5741fc852a76c87625578c37339bf874
Author: Kishen Das <ki...@cloudera.com>
AuthorDate: Tue Jul 14 12:06:41 2020 -0700

    HIVE-23767: Pass ValidWriteIdList in get_partition* API requests (Kishen Das, reviewed by Vihang Karajgaonkar and Peter Vary)
---
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   | 91 ++++++++++++++++++----
 .../ql/metadata/SessionHiveMetaStoreClient.java    | 48 +++++++++++-
 .../hadoop/hive/metastore/TestMetastoreExpr.java   |  3 +-
 ...HiveMetastoreClientListPartitionsTempTable.java | 27 ++++---
 .../ql/parse/TestUpdateDeleteSemanticAnalyzer.java |  5 ++
 .../hadoop/hive/metastore/HiveMetaStoreClient.java | 45 ++++++++++-
 .../hadoop/hive/metastore/IMetaStoreClient.java    | 16 +++-
 .../hadoop/hive/metastore/HiveMetaStore.java       | 11 ++-
 .../metastore/HiveMetaStoreClientPreCatalog.java   | 13 +++-
 9 files changed, 219 insertions(+), 40 deletions(-)
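
In short, this change moves several get_partition* client calls from positional arguments to Thrift request objects, so that the serialized ValidWriteIdList of the current transaction can ride along and the server side can take the caller's transactional snapshot into account. A minimal sketch of the new call shape, using only types that appear in the diff below (the db/table names and the writeIds value are illustrative, not part of the commit):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
    import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;

    public class RequestStyleExample {
      static List<String> partitionNames(IMetaStoreClient msc, String writeIds)
          throws Exception {
        // Old shape: msc.listPartitionNames("default", "acid_tbl", partVals, (short) -1)
        GetPartitionNamesPsRequest req = new GetPartitionNamesPsRequest();
        req.setDbName("default");
        req.setTblName("acid_tbl");
        req.setPartValues(Arrays.asList("2020", "07"));
        req.setMaxParts((short) -1);
        req.setValidWriteIdList(writeIds); // serialized snapshot, or null
        GetPartitionNamesPsResponse res = msc.listPartitionNamesRequest(req);
        return res.getNames();
      }
    }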

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 14eec31..e17086f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -134,6 +135,10 @@ import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -148,6 +153,7 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
 import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -1460,6 +1466,24 @@ public class Hive {
   }
 
   /**
+   * Get the ValidWriteIdList for the current transaction.
+   * This fetches the ValidWriteIdList from the metastore for the given table if the
+   * transaction manager has an open transaction.
+   *
+   * @param dbName database the table belongs to
+   * @param tableName name of the table
+   * @return the table's ValidWriteIdList, or null if no transaction is open
+   * @throws LockException
+   */
+  private ValidWriteIdList getValidWriteIdList(String dbName, String tableName) throws LockException {
+    ValidWriteIdList validWriteIdList = null;
+    long txnId = SessionState.get().getTxnMgr() != null ? SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
+    if (txnId > 0) {
+      validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName);
+    }
+    return validWriteIdList;
+  }
+
+  /**
    * Get all table names for the current database.
    * @return List of table names
    * @throws HiveException
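
The helper above resolves a snapshot only when the session's transaction manager has an open transaction; the string form of that snapshot is what the new request fields carry. A hedged, self-contained illustration of such a snapshot (the constructor arguments and printed format are assumptions based on the common ValidWriteIdList classes, with made-up values):

    import java.util.BitSet;
    import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;

    public class WriteIdSnapshot {
      public static void main(String[] args) {
        // Table name, invalid (open/aborted) write ids, aborted bit set, and
        // high-water mark; here: everything up to write id 5 is visible.
        ValidWriteIdList ids =
            new ValidReaderWriteIdList("default.acid_tbl", new long[0], new BitSet(), 5L);
        System.out.println(ids.writeToString());    // e.g. "default.acid_tbl:5:..."
        System.out.println(ids.isWriteIdValid(3L)); // true
      }
    }
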
@@ -3550,11 +3574,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
-  public List<String> getPartitionNames(String tblName, short max) throws HiveException {
-    String[] names = Utilities.getDbTableName(tblName);
-    return getPartitionNames(names[0], names[1], max);
-  }
-
   public List<String> getPartitionNames(String dbName, String tblName, short max)
       throws HiveException {
     List<String> names = null;
@@ -3580,7 +3599,17 @@ private void constructOneLBLocationMap(FileStatus fSta,
     List<String> pvals = MetaStoreUtils.getPvals(t.getPartCols(), partSpec);
 
     try {
-      names = getMSC().listPartitionNames(dbName, tblName, pvals, max);
+      GetPartitionNamesPsRequest req = new GetPartitionNamesPsRequest();
+      req.setTblName(tblName);
+      req.setDbName(dbName);
+      req.setPartValues(pvals);
+      req.setMaxParts(max);
+      if (AcidUtils.isTransactionalTable(t)) {
+        ValidWriteIdList validWriteIdList = getValidWriteIdList(dbName, tblName);
+        req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
+      }
+      GetPartitionNamesPsResponse res = getMSC().listPartitionNamesRequest(req);
+      names = res.getNames();
     } catch (NoSuchObjectException nsoe) {
       // this means no partition exists for the given partition spec
       // key value pairs - thrift cannot handle null return values, hence
@@ -3603,8 +3632,22 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
     try {
       String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
-      names = getMSC().listPartitionNames(tbl.getCatalogName(), tbl.getDbName(),
-          tbl.getTableName(), defaultPartitionName, exprBytes, order, maxParts);
+      PartitionsByExprRequest req =
+          new PartitionsByExprRequest(tbl.getDbName(), tbl.getTableName(), ByteBuffer.wrap(exprBytes));
+      if (defaultPartitionName != null) {
+        req.setDefaultPartitionName(defaultPartitionName);
+      }
+      if (maxParts >= 0) {
+        req.setMaxParts(maxParts);
+      }
+      req.setOrder(order);
+      req.setCatName(tbl.getCatalogName());
+      if (AcidUtils.isTransactionalTable(tbl)) {
+        ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
+        req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
+      }
+      names = getMSC().listPartitionNames(req);
+
     } catch (NoSuchObjectException nsoe) {
       return Lists.newArrayList();
     } catch (Exception e) {
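
For expression-based name listing, the default partition name, limit, ordering, catalog and write-id snapshot now travel together in one PartitionsByExprRequest. A sketch of a caller building such a request (literal values are illustrative; exprBytes must be a Kryo-serialized predicate, as the tests below produce with SerializationUtilities.serializeExpressionToKryo):

    import java.nio.ByteBuffer;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;

    public class NamesByExpr {
      static List<String> names(IMetaStoreClient msc, byte[] exprBytes, String writeIds)
          throws Exception {
        PartitionsByExprRequest req = new PartitionsByExprRequest(
            "default", "acid_tbl", ByteBuffer.wrap(exprBytes));
        req.setDefaultPartitionName("__HIVE_DEFAULT_PARTITION__"); // assumed default
        req.setCatName("hive");            // assumed default catalog name
        req.setValidWriteIdList(writeIds); // null for non-transactional tables
        // maxParts left unset = no limit; req.setOrder(...) may request ordering
        return msc.listPartitionNames(req);
      }
    }
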
@@ -3625,8 +3668,19 @@ private void constructOneLBLocationMap(FileStatus fSta,
     if (tbl.isPartitioned()) {
       List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
       try {
-        tParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
-            (short) -1, getUserName(), getGroupNames());
+        GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest();
+        req.setTblName(tbl.getTableName());
+        req.setDbName(tbl.getDbName());
+        req.setUserName(getUserName());
+        req.setMaxParts((short) -1);
+        req.setGroupNames(getGroupNames());
+        if (AcidUtils.isTransactionalTable(tbl)) {
+          ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
+          req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
+        }
+        GetPartitionsPsWithAuthResponse res = getMSC().listPartitionsWithAuthInfoRequest(req);
+        tParts = res.getPartitions();
+
       } catch (Exception e) {
         LOG.error(StringUtils.stringifyException(e));
         throw new HiveException(e);
@@ -3911,20 +3965,27 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param tbl The table containing the partitions.
    * @param expr A serialized expression for partition predicates.
    * @param conf Hive config.
-   * @param result the resulting list of partitions
+   * @param partitions the resulting list of partitions
    * @return whether the resulting list contains partitions which may or may not match the expr
    */
   public boolean getPartitionsByExpr(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf,
-      List<Partition> result) throws HiveException, TException {
-    assert result != null;
+      List<Partition> partitions) throws HiveException, TException {
+
+    Preconditions.checkNotNull(partitions);
     byte[] exprBytes = SerializationUtilities.serializeExpressionToKryo(expr);
     String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
     List<org.apache.hadoop.hive.metastore.api.PartitionSpec> msParts =
         new ArrayList<>();
+    ValidWriteIdList validWriteIdList = null;
+    if (AcidUtils.isTransactionalTable(tbl)) {
+      validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
+    }
     boolean hasUnknownParts = getMSC().listPartitionsSpecByExpr(tbl.getDbName(),
-        tbl.getTableName(), exprBytes, defaultPartitionName, (short)-1, msParts);
-    result.addAll(convertFromPartSpec(msParts.iterator(), tbl));
+        tbl.getTableName(), exprBytes, defaultPartitionName, (short)-1, msParts,
+        validWriteIdList != null ? validWriteIdList.toString() : null);
+    partitions.addAll(convertFromPartSpec(msParts.iterator(), tbl));
     return hasUnknownParts;
+
   }
 
   /**
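
A caller's view of the updated getPartitionsByExpr, which now resolves the snapshot internally before delegating to listPartitionsSpecByExpr (a sketch only; the hive/tbl/expr arguments are assumed to come from the surrounding query context):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

    public class PruneByExpr {
      static List<Partition> prune(Hive hive, Table tbl, ExprNodeGenericFuncDesc expr,
          HiveConf conf) throws Exception {
        List<Partition> partitions = new ArrayList<>();
        // A true result means the list may contain partitions that only "maybe"
        // match, so the caller re-evaluates the predicate client-side.
        boolean hasUnknown = hive.getPartitionsByExpr(tbl, expr, conf, partitions);
        return partitions;
      }
    }
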
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 4e77bd4..ed0f141 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -55,6 +55,8 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -68,6 +70,7 @@ import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
@@ -76,6 +79,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.utils.FilterUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
@@ -1099,6 +1103,24 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     List<Partition> partitions = tt.listPartitionsWithAuthInfo(userName, groupNames);
     return getPartitionsForMaxParts(tableName, partitions, maxParts);
   }
+  
+  @Override
+  public GetPartitionsPsWithAuthResponse listPartitionsWithAuthInfoRequest(
+      GetPartitionsPsWithAuthRequest req)
+      throws MetaException, TException, NoSuchObjectException {
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(req.getDbName(),
+        req.getTblName());
+    if (table == null) {
+      return super.listPartitionsWithAuthInfoRequest(req);
+    }
+    TempTable tt = getPartitionedTempTable(table);
+    List<Partition> partitions = tt
+        .listPartitionsWithAuthInfo(req.getUserName(), req.getGroupNames());
+    GetPartitionsPsWithAuthResponse response = new GetPartitionsPsWithAuthResponse();
+    response.setPartitions(
+        getPartitionsForMaxParts(req.getTblName(), partitions, req.getMaxParts()));
+    return response;
+  }
 
   @Override
   public List<String> listPartitionNames(String catName, String dbName, String tblName,
@@ -1135,6 +1157,26 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   }
 
   @Override
+  public List<String> listPartitionNames(PartitionsByExprRequest request)
+      throws MetaException, TException, NoSuchObjectException {
+
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(request.getDbName(), request.getTblName());
+    if (table == null) {
+      return super.listPartitionNames(request.getCatName(), request.getDbName(), request.getTblName(),
+          request.getMaxParts());
+    }
+    TempTable tt = getPartitionedTempTable(table);
+    List<Partition> partitions = tt.listPartitions();
+    List<String> result = new ArrayList<>();
+    int numParts = (request.getMaxParts() < 0 || request.getMaxParts() > partitions.size())
+        ? partitions.size() : request.getMaxParts();
+    for (int i = 0; i < numParts; i++) {
+      result.add(makePartName(table.getPartitionKeys(), partitions.get(i).getValues()));
+    }
+    Collections.sort(result);
+    return result;
+  }
+
+  @Override
   public List<Partition> listPartitions(String catName, String dbName, String tblName, int maxParts)
       throws TException {
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tblName);
@@ -1182,10 +1224,12 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
 
   @Override
   public boolean listPartitionsSpecByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<PartitionSpec> result) throws TException {
+      String defaultPartitionName, short maxParts, List<PartitionSpec> result,
+      String validWriteIdList) throws TException {
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tblName);
     if (table == null) {
-      return super.listPartitionsSpecByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
+      return super.listPartitionsSpecByExpr(catName, dbName, tblName, expr,
+          defaultPartitionName, maxParts, result, validWriteIdList);
     }
     assert result != null;
 
diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
index ebbbfa6..7160b9f 100644
--- a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
+++ b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
@@ -186,7 +186,8 @@ public class TestMetastoreExpr {
     // check with partition spec as well
     List<PartitionSpec> partSpec = new ArrayList<>();
     client.listPartitionsSpecByExpr(dbName, tblName,
-        SerializationUtilities.serializeExpressionToKryo(expr), null, (short)-1, partSpec);
+        SerializationUtilities.serializeExpressionToKryo(expr),
+        null, (short)-1, partSpec, null);
     int partSpecSize = 0;
     if(!partSpec.isEmpty()) {
       partSpecSize = partSpec.iterator().next().getSharedSDPartitionSpec().getPartitionsSize();
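
The trailing null threaded through these tests is the new validWriteIdList parameter; null means "no snapshot", which preserves the pre-change behavior. A caller inside an open transaction would pass the serialized list instead, roughly like this (db/table names illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.common.ValidWriteIdList;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PartitionSpec;

    public class SpecByExprCall {
      static boolean call(IMetaStoreClient client, byte[] exprBytes,
          ValidWriteIdList writeIds) throws Exception {
        List<PartitionSpec> result = new ArrayList<>();
        // The final argument is the new validWriteIdList; null = no snapshot.
        return client.listPartitionsSpecByExpr("default", "acid_tbl", exprBytes,
            null /* defaultPartitionName */, (short) -1, result,
            writeIds == null ? null : writeIds.toString());
      }
    }
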
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
index 43c3023..f6e7136 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
@@ -245,7 +245,8 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
     createTable4PartColsParts(getClient());
     TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
     getClient().listPartitionsByExpr(DB_NAME, TABLE_NAME, SerializationUtilities.serializeExpressionToKryo(
-        e.strCol("yyyy").val("2017").pred("=", 2).build()), null, (short)-1, null);
+        e.strCol("yyyy").val("2017").pred("=", 2).build()), null,
+        (short)-1, null);
   }
 
   @Test
@@ -333,7 +334,8 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
     createTable4PartColsParts(getClient());
     TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME, SerializationUtilities.serializeExpressionToKryo(
-        e.strCol("yyyy").val("2017").pred("=", 2).build()), null, (short)-1, null);
+        e.strCol("yyyy").val("2017").pred("=", 2).build()), null,
+        (short)-1, null, null);
   }
 
 
@@ -344,7 +346,8 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
     TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
     List<PartitionSpec> result = new ArrayList<>();
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME, SerializationUtilities.serializeExpressionToKryo(
-        e.strCol("yyyy").val("2017").pred(">=", 2).build()), null, (short)3, result);
+        e.strCol("yyyy").val("2017").pred(">=", 2).build()),
+        null, (short)3, result, null);
     assertEquals(3, result.iterator().next().getSharedSDPartitionSpec().getPartitionsSize());
   }
 
@@ -354,7 +357,8 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
     TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
     List<PartitionSpec> result = new ArrayList<>();
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME, SerializationUtilities.serializeExpressionToKryo(
-        e.strCol("yyyy").val("2017").pred(">=", 2).build()), null, (short)100, result);
+        e.strCol("yyyy").val("2017").pred(">=", 2).build()),
+        null, (short)100, result, null);
     assertEquals(4, result.iterator().next().getSharedSDPartitionSpec().getPartitionsSize());
   }
 
@@ -362,38 +366,38 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
   public void testListPartitionsSpecByExprNoDb() throws Exception {
     getClient().dropDatabase(DB_NAME);
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME, new byte[] {'f', 'o', 'o'},
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   @Test(expected = MetaException.class)
   public void testListPartitionsSpecByExprNoTbl() throws Exception {
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME, new byte[] {'f', 'o', 'o'},
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   @Test(expected = MetaException.class)
   public void testListPartitionsSpecByExprEmptyDbName() throws Exception {
     getClient().listPartitionsSpecByExpr("", TABLE_NAME, new byte[] {'f', 'o', 'o'},
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   @Test(expected = MetaException.class)
   public void testListPartitionsSpecByExprEmptyTblName() throws Exception {
     createTable3PartCols1Part(getClient());
     getClient().listPartitionsSpecByExpr(DB_NAME, "", new byte[] {'f', 'o', 'o'},
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   @Test(expected = MetaException.class)
   public void testListPartitionsSpecByExprNullDbName() throws Exception {
     getClient().listPartitionsSpecByExpr(null, TABLE_NAME, new byte[] {'f', 'o', 'o'},
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   @Test(expected = MetaException.class)
   public void testListPartitionsSpecByExprNullTblName() throws Exception {
     getClient().listPartitionsSpecByExpr(DB_NAME, null, new byte[] {'f', 'o', 'o' },
-        null, (short)-1, new ArrayList<>());
+        null, (short)-1, new ArrayList<>(), null);
   }
 
   private void checkExprPartitionSpec(int numParts, ExprNodeGenericFuncDesc expr) throws Exception {
@@ -404,7 +408,8 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
     // check with partition spec as well
     List<PartitionSpec> partSpec = new ArrayList<>();
     getClient().listPartitionsSpecByExpr(DB_NAME, TABLE_NAME,
-        SerializationUtilities.serializeExpressionToKryo(expr), null, (short)-1, partSpec);
+        SerializationUtilities.serializeExpressionToKryo(expr), null,
+        (short)-1, partSpec, null);
     int partSpecSize = 0;
     if(!partSpec.isEmpty()) {
       partSpecSize = partSpec.iterator().next().getSharedSDPartitionSpec().getPartitionsSize();
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
index 3204669..a0cd60f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.hive.ql.parse;
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
@@ -232,6 +235,8 @@ public class TestUpdateDeleteSemanticAnalyzer {
     conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
     conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
     conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
+    conf.set(ValidTxnList.VALID_TXNS_KEY,
+        new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE).writeToString());
     TxnDbUtil.prepDb(conf);
   }
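
Seeding VALID_TXNS_KEY is needed because the new request paths resolve the write-id snapshot from the session's transaction state, which presumes a valid-transaction list in the conf (this reading of the test change is an inference). What the seeded value encodes, as a standalone sketch:

    import java.util.BitSet;
    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class TxnListSeed {
      public static void main(String[] args) {
        // Same arguments as the test: no open/aborted txns, high-water mark 1000,
        // and (assumed) min open txn id of Long.MAX_VALUE, i.e. none open --
        // effectively "everything committed", which is what a unit test wants.
        ValidTxnList txns =
            new ValidReadTxnList(new long[0], new BitSet(), 1000L, Long.MAX_VALUE);
        System.out.println(txns.writeToString()); // serialized form stored in the conf
        System.out.println(txns.isTxnValid(42L)); // true: below HWM, not open/aborted
      }
    }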
 
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 8041bc5..4a45913 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1846,6 +1846,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (req.getValidWriteIdList() == null) {
       req.setValidWriteIdList(getValidWriteIdList(TableName.getDbTable(req.getDbName(), req.getTblName())));
     }
+    if (req.getCatName() == null) {
+      req.setCatName(getDefaultCatalog(conf));
+    }
+    req.setMaxParts(shrinkMaxtoShort(req.getMaxParts()));
     GetPartitionsPsWithAuthResponse res = client.get_partitions_ps_with_auth_req(req);
     List<Partition> parts = deepCopyPartitions(
         FilterUtils.filterPartitionsIfEnabled(isClientFilterEnabled, filterHook, res.getPartitions()));
@@ -1932,6 +1936,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     assert result != null;
     PartitionsByExprRequest req = new PartitionsByExprRequest(
         db_name, tbl_name, ByteBuffer.wrap(expr));
+    if (catName == null) {
+      req.setCatName(getDefaultCatalog(conf));
+    } else {
+      req.setCatName(catName);
+    }
     if (default_partition_name != null) {
       req.setDefaultPartitionName(default_partition_name);
     }
@@ -1961,15 +1970,15 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   @Override
   public boolean listPartitionsSpecByExpr(String dbName, String tblName,
-      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result)
+      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
       throws TException {
     return listPartitionsSpecByExpr(getDefaultCatalog(conf), dbName, tblName, expr, defaultPartName,
-        maxParts, result);
+        maxParts, result, validWriteIdList);
   }
 
   @Override
   public boolean listPartitionsSpecByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<PartitionSpec> result)
+      String defaultPartitionName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
       throws TException {
     assert result != null;
     PartitionsByExprRequest req = new PartitionsByExprRequest(
@@ -1980,6 +1989,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (maxParts >= 0) {
       req.setMaxParts(maxParts);
     }
+    req.setValidWriteIdList(validWriteIdList);
     PartitionsSpecByExprResult r;
     try {
       r = client.get_partitions_spec_by_expr(req);
@@ -2155,6 +2165,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     req.setCapabilities(version);
     req.setGetColumnStats(getColumnStats);
     req.setValidWriteIdList(getValidWriteIdList(TableName.getDbTable(dbName, tableName)));
+
     if (getColumnStats) {
       req.setEngine(engine);
     }
@@ -2414,6 +2425,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (req.getValidWriteIdList() == null) {
       req.setValidWriteIdList(getValidWriteIdList(TableName.getDbTable(req.getDbName(), req.getTblName())));
     }
+    if (req.getCatName() == null) {
+      req.setCatName(getDefaultCatalog(conf));
+    }
     GetPartitionNamesPsResponse res = client.get_partition_names_ps_req(req);
     List<String> partNames = FilterUtils.filterPartitionNamesIfEnabled(
             isClientFilterEnabled, filterHook, getDefaultCatalog(conf), req.getDbName(),
@@ -2463,12 +2477,23 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (order != null) {
       req.setOrder(order);
     }
-    req.setCatName(catName);
+    if (catName == null) {
+      req.setCatName(getDefaultCatalog(conf));
+    } else {
+      req.setCatName(catName);
+    }
     return FilterUtils.filterPartitionNamesIfEnabled(isClientFilterEnabled, filterHook, catName,
         dbName, tblName, client.get_partition_names_req(req));
   }
 
   @Override
+  public List<String> listPartitionNames(PartitionsByExprRequest req)
+      throws MetaException, TException, NoSuchObjectException {
+    return FilterUtils.filterPartitionNamesIfEnabled(isClientFilterEnabled, filterHook, req.getCatName(),
+        req.getDbName(), req.getTblName(), client.get_partition_names_req(req));
+  }
+
+  @Override
   public int getNumPartitionsByFilter(String db_name, String tbl_name,
                                       String filter) throws TException {
     return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter);
@@ -4175,6 +4200,18 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     return client.get_serde(new GetSerdeRequest(serDeName));
   }
 
+  /**
+   * Returns the ValidWriteIdList to attach to HMS get_* API requests when the caller
+   * does not pass one explicitly as a method argument. The list is looked up under the
+   * VALID_TABLES_WRITEIDS_KEY key. Since VALID_TABLES_WRITEIDS_KEY is only set during the
+   * lock acquisition phase after query compilation
+   * (DriverTxnHandler.acquireLocks -> recordValidWriteIds -> setValidWriteIds),
+   * this fallback only covers get_* calls issued after compilation.
+   *
+   * @param fullTableName fully qualified table name ("db.table")
+   * @return the serialized ValidWriteIdList, or null if none has been recorded
+   */
   private String getValidWriteIdList(String fullTableName) {
     if (conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null) {
       return null;
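
The fallback above reads the per-query snapshot that DriverTxnHandler records in the conf. A sketch of that lookup (assuming the ValidTxnWriteIdList helper parses the conf value; the table name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;

    public class WriteIdLookup {
      static String forTable(Configuration conf, String fullTableName) {
        String all = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
        if (all == null) {
          return null; // nothing recorded yet, e.g. before lock acquisition
        }
        ValidWriteIdList ids =
            new ValidTxnWriteIdList(all).getTableValidWriteIdList(fullTableName);
        return ids == null ? null : ids.toString();
      }
    }
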
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index dbc6d14..1b17a41 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -670,6 +670,7 @@ public interface IMetaStoreClient {
    */
   Table getTable(String dbName, String tableName, boolean getColumnStats, String engine) throws MetaException,
           TException, NoSuchObjectException;
+
   /**
    * Get a table object.
    * @param catName catalog the table is in.
@@ -1253,6 +1254,17 @@ public interface IMetaStoreClient {
       throws MetaException, TException, NoSuchObjectException;
 
   /**
+   * Get the partition names matching the request's filter expression, returned in the
+   * requested order if one is specified.
+   * @param request request
+   * @return list of matching partition names.
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error.
+   * @throws NoSuchObjectException  no such table.
+   */
+  List<String> listPartitionNames(PartitionsByExprRequest request)
+      throws MetaException, TException, NoSuchObjectException;
+
+  /**
    * Get a list of partition values
    * @param request request
   * @return response
@@ -1378,7 +1390,7 @@ public interface IMetaStoreClient {
    * @throws TException thrift transport error or error executing the filter.
    */
   boolean listPartitionsSpecByExpr(String dbName, String tblName,
-      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result)
+      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
           throws TException;
 
   /**
@@ -1396,7 +1408,7 @@ public interface IMetaStoreClient {
    * @throws TException thrift transport error or error executing the filter.
    */
   boolean listPartitionsSpecByExpr(String catName, String dbName, String tblName,
-      byte[] expr, String defaultPartitionName, short maxParts, List<PartitionSpec> result)
+      byte[] expr, String defaultPartitionName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
       throws TException;
 
   /**
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ee0f22e..a9175ce 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -6262,8 +6262,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(GetPartitionsPsWithAuthRequest req)
             throws MetaException, NoSuchObjectException, TException {
       String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), req.getDbName(), conf);
-      List<Partition> partitions = get_partitions_ps_with_auth(dbName, req.getTblName(),
-              req.getPartVals(), req.getMaxParts(), req.getUserName(), req.getGroupNames());
+      List<Partition> partitions = null;
+      if (req.getPartVals() == null) {
+        partitions = get_partitions_with_auth(dbName, req.getTblName(), req.getMaxParts(), req.getUserName(),
+            req.getGroupNames());
+      } else {
+        partitions =
+            get_partitions_ps_with_auth(dbName, req.getTblName(), req.getPartVals(), req.getMaxParts(),
+                req.getUserName(), req.getGroupNames());
+      }
       GetPartitionsPsWithAuthResponse res = new GetPartitionsPsWithAuthResponse();
       res.setPartitions(partitions);
       return res;
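
With the server-side branch above, one request type serves both listing modes. A sketch of the client's side of that contract (values illustrative):

    import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;

    public class AuthRequestModes {
      static GetPartitionsPsWithAuthRequest allPartitions() {
        GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest();
        req.setDbName("default");
        req.setTblName("acid_tbl");
        req.setMaxParts((short) -1);
        req.setUserName("hive"); // authorization is evaluated for this user
        // Leaving the partVals field unset now means "all partitions": the
        // server falls back to get_partitions_with_auth instead of the
        // partition-values variant.
        return req;
      }
    }
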
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index d7b5a09..97a9507 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1377,15 +1377,15 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   @Override
   public boolean listPartitionsSpecByExpr(String dbName, String tblName,
-      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result)
+      byte[] expr, String defaultPartName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
       throws TException {
     return listPartitionsSpecByExpr(getDefaultCatalog(conf), dbName, tblName, expr, defaultPartName,
-        maxParts, result);
+        maxParts, result, validWriteIdList);
   }
 
   @Override
   public boolean listPartitionsSpecByExpr(String catName, String dbName, String tblName, byte[] expr,
-      String defaultPartitionName, short maxParts, List<PartitionSpec> result)
+      String defaultPartitionName, short maxParts, List<PartitionSpec> result, String validWriteIdList)
       throws TException {
     assert result != null;
     PartitionsByExprRequest req = new PartitionsByExprRequest(dbName, tblName, ByteBuffer.wrap(expr));
@@ -1395,6 +1395,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     if (maxParts >= 0) {
       req.setMaxParts(maxParts);
     }
+    req.setValidWriteIdList(validWriteIdList);
     PartitionsSpecByExprResult r;
     try {
       r = client.get_partitions_spec_by_expr(req);
@@ -1487,6 +1488,12 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   }
 
   @Override
+  public List<String> listPartitionNames(PartitionsByExprRequest request)
+      throws MetaException, TException, NoSuchObjectException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
       throws MetaException, TException, NoSuchObjectException {
     return client.get_partition_values(request);