This is an automated email from the ASF dual-hosted git repository.

sankarh pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-3 by this push:
     new 47c2b6b07b3 HIVE-27219: Backport HIVE-24741: get_partitions_ps_with_auth performance can be improved when requesting all the partitions (Vihang Karajgaonkar, reviewed by Naveen Gangam)
47c2b6b07b3 is described below

commit 47c2b6b07b32390153d9c9175d760e381ed20965
Author: apoorvaagg <[email protected]>
AuthorDate: Fri May 26 23:13:24 2023 +0530

    HIVE-27219: Backport HIVE-24741: get_partitions_ps_with_auth performance can be improved when requesting all the partitions (Vihang Karajgaonkar, reviewed by Naveen Gangam)
    
    Signed-off-by: Sankar Hariappan <[email protected]>
    Closes (#4202)
---
 .../apache/hadoop/hive/ql/metadata/TestHive.java   | 57 ++++++++++++++++++++++
 .../apache/hadoop/hive/metastore/ObjectStore.java  | 56 +++++++++++++++++----
 2 files changed, 103 insertions(+), 10 deletions(-)
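
The change in a nutshell: listPartitionsPsWithAuth now detects the "give me
every partition" case (all supplied partition values are empty strings) and,
when no partition-level privileges are requested, redirects to the
directSQL-enabled getPartitions API instead of walking JDO query results. A
minimal sketch of the client-side call pattern that benefits, assuming a
reachable metastore and a table partitioned on (ds, hr); the database, table
name, and connection setup below are illustrative, not part of the patch:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class ListAllPartitionsExample {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        // An empty string for every partition column means "no filter", so
        // this asks for all partitions of default.srcpart; with this patch
        // the server answers via the faster directSQL path.
        List<Partition> parts = client.listPartitionsWithAuthInfo(
            "default", "srcpart", Arrays.asList("", ""), (short) -1, null, null);
        System.out.println("partition count: " + parts.size());
      }
    }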

diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index a24b6423bae..81418de1f20 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -677,6 +677,63 @@ public class TestHive extends TestCase {
         System.err.println(StringUtils.stringifyException(e));
         assertTrue("Unable to create partition for table: " + tableName, false);
       }
+
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-08");
+      part_spec.put("hr", "13");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-08");
+      part_spec.put("hr", "14");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-07");
+      part_spec.put("hr", "12");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+      part_spec.clear();
+      part_spec.put("ds", "2008-04-07");
+      part_spec.put("hr", "13");
+      try {
+        hm.createPartition(tbl, part_spec);
+      } catch (HiveException e) {
+        System.err.println(StringUtils.stringifyException(e));
+        assertTrue("Unable to create partition for table: " + tableName, false);
+      }
+
+      Map<String, String> partialSpec = new HashMap<>();
+      partialSpec.put("ds", "2008-04-07");
+      assertEquals(2, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("ds", "2008-04-08");
+      assertEquals(3, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("hr", "13");
+      assertEquals(2, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      assertEquals(5, hm.getPartitions(tbl, partialSpec).size());
+
+      partialSpec = new HashMap<>();
+      partialSpec.put("hr", "14");
+      assertEquals(1, hm.getPartitions(tbl, partialSpec).size());
+
       hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
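
A note on how the Map-based partial specs in the test above reach the
metastore: before the Thrift call, Hive expands the spec to one value per
partition column, in column order, with the empty string standing in for any
unspecified column. An empty map therefore becomes ["", ""], which is exactly
the shape the new canTryDirectSQL() check below looks for. A self-contained
sketch of that expansion; toPvals is a hypothetical stand-in for Hive's
internal conversion, not an API from the patch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PartialSpecToPvals {
      // One value per partition column, in column order; "" means "any value".
      static List<String> toPvals(List<String> partCols, Map<String, String> spec) {
        List<String> pvals = new ArrayList<>();
        for (String col : partCols) {
          String v = spec.get(col);
          pvals.add(v == null ? "" : v);
        }
        return pvals;
      }

      public static void main(String[] args) {
        List<String> cols = Arrays.asList("ds", "hr");
        Map<String, String> spec = new HashMap<>();
        System.out.println(toPvals(cols, spec));  // [, ] i.e. ["", ""] -> all partitions
        spec.put("hr", "13");
        System.out.println(toPvals(cols, spec));  // [, 13] i.e. ["", "13"]
      }
    }
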
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 458518278be..4f02e7b8325 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -213,6 +213,7 @@ import org.slf4j.LoggerFactory;
 import com.codahale.metrics.Counter;
 import com.codahale.metrics.MetricRegistry;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -2977,6 +2978,27 @@ public class ObjectStore implements RawStore, Configurable {
     return (Collection) query.executeWithArray(dbName, catName, tableName, partNameMatcher);
   }
 
+  /**
+   * If all the values in partVals are empty strings, the request is effectively
+   * asking for all the partitions, so we can attempt to use the directSQL-backed
+   * getPartitions API, which is considerably faster.
+   * @param partVals The partition values used to filter the partitions.
+   * @return true only when partVals is non-empty and contains only empty strings,
+   * otherwise false. Callers must still avoid the directSQL path when partition-level
+   * privileges are requested, since directSQL does not support them.
+   */
+  private boolean canTryDirectSQL(List<String> partVals) {
+    if (partVals.isEmpty()) {
+      return false;
+    }
+    for (String val : partVals) {
+      if (val != null && !val.isEmpty()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   @Override
   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
@@ -2987,24 +3009,38 @@ public class ObjectStore implements RawStore, Configurable {
 
     try {
       openTransaction();
-      LOG.debug("executing listPartitionNamesPsWithAuth");
-      Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name,
-          part_vals, max_parts, null, queryWrapper);
+
       MTable mtbl = getMTable(catName, db_name, tbl_name);
+      if (mtbl == null) {
+        throw new NoSuchObjectException(db_name + "." + tbl_name + " table not found");
+      }
+      boolean getauth = null != userName && null != groupNames &&
+          "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"));
+      if (!getauth && canTryDirectSQL(part_vals)) {
+        LOG.debug(
+            "Redirecting to directSQL enabled API: db: {} tbl: {} partVals: {}",
+            db_name, tbl_name, Joiner.on(',').join(part_vals));
+        return getPartitions(catName, db_name, tbl_name, -1);
+      }
+      LOG.debug("executing listPartitionNamesPsWithAuth");
+      Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, part_vals,
+          max_parts, null, queryWrapper);
       for (Object o : parts) {
         Partition part = convertToPart((MPartition) o);
-        //set auth privileges
-        if (null != userName && null != groupNames &&
-            "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
-          String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
-              .getPartitionKeys()), part.getValues());
-          PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name,
-              tbl_name, partName, userName, groupNames);
+        if (getauth) {
+          // set auth privileges
+          String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl.getPartitionKeys()), part.getValues());
+          PrincipalPrivilegeSet partAuth =
+              getPartitionPrivilegeSet(catName, db_name, tbl_name, partName, userName, groupNames);
           part.setPrivileges(partAuth);
         }
         partitions.add(part);
       }
       success = commitTransaction();
+    } catch (InvalidObjectException | NoSuchObjectException | MetaException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new MetaException(e.getMessage());
     } finally {
       rollbackAndCleanup(success, queryWrapper);
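
Putting the server-side guard together: the directSQL shortcut fires only when
no partition-level auth is in play and every supplied partition value is empty;
anything else falls through to the original JDO path, and the new catch clauses
rethrow the known metastore exceptions while wrapping anything unexpected in a
MetaException. The predicate below mirrors canTryDirectSQL() from the hunk
above with a few sample inputs; the class wrapper is illustrative only:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class DirectSqlGuardExample {
      // True only for a non-empty list whose entries are all null or empty,
      // i.e. a request for every partition of the table.
      static boolean canTryDirectSQL(List<String> partVals) {
        if (partVals.isEmpty()) {
          return false;
        }
        for (String val : partVals) {
          if (val != null && !val.isEmpty()) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(canTryDirectSQL(Arrays.asList("", "")));           // true
        System.out.println(canTryDirectSQL(Arrays.asList("2008-04-08", ""))); // false
        System.out.println(canTryDirectSQL(Collections.emptyList()));         // false
      }
    }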
     }
