This is an automated email from the ASF dual-hosted git repository.

ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 3c53faa  HIVE-25178 [addendum]: Enable 
hive.load.dynamic.partitions.scan.specific.partitions by default
3c53faa is described below

commit 3c53faafe230eb7b611d3dfb5057a81bfd1e96d1
Author: Sourabh Goyal <[email protected]>
AuthorDate: Thu Feb 17 10:49:24 2022 -0800

    HIVE-25178 [addendum]: Enable 
hive.load.dynamic.partitions.scan.specific.partitions by default
    
    Enabling the property hive.load.dynamic.partitions.scan.specific.partitions 
by default; otherwise a
    user sees significant performance degradation in insert/insert overwrite 
queries.
---
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 2 +-
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java  | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 28f309c..8879da4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4398,7 +4398,7 @@ public class HiveConf extends Configuration {
     
HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT("hive.load.dynamic.partitions.thread",
 15,
         new  SizeValidator(1L, true, 1024L, true),
         "Number of threads used to load dynamic partitions."),
-    
HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS("hive.load.dynamic.partitions.scan.specific.partitions",
 false,
+    
HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS("hive.load.dynamic.partitions.scan.specific.partitions",
 true,
         "For the dynamic partitioned tables, scan only the specific partitions 
using the name from the list"),
     // If this is set all move tasks at the end of a multi-insert query will 
only begin once all
     // outputs are ready
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 80541fe..b504299 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -31,6 +31,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE;
 
 import static 
org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
+import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS;
 import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE;
 import static 
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -3070,8 +3071,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
     List<Callable<Partition>> tasks = Lists.newLinkedList();
 
     boolean fetchPartitionInfo = true;
-    final boolean scanPartitionsByName = conf.getBoolean(
-        
ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS.varname, false);
+    final boolean scanPartitionsByName =
+        HiveConf.getBoolVar(conf, 
HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS);
 
     // ACID table can be a bigger change. Filed HIVE-25817 for an appropriate 
fix for ACID tables
     // For now, for ACID tables, skip getting all partitions for a table from 
HMS (since that
@@ -3087,7 +3088,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
       List<Partition> partitions = Hive.get().getPartitionsByNames(tbl, 
partitionNames);
       for(Partition partition : partitions) {
-        LOG.info("HMS partition spec: {}", partition.getSpec());
+        LOG.debug("HMS partition spec: {}", partition.getSpec());
         partitionDetailsMap.entrySet().parallelStream()
             .filter(entry -> 
entry.getValue().fullSpec.equals(partition.getSpec()))
             .findAny().ifPresent(entry -> {

Reply via email to