This is an automated email from the ASF dual-hosted git repository.

prasanthj pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new a354bed  Revert "HIVE-19875: increase LLAP IO queue size for perf (Prasanth Jayachandran reviewed by Sergey Shelukhin)"
a354bed is described below

commit a354beddd6463fad2fcd4fe22643f65c1f1ef50f
Author: Prasanth Jayachandran <prasan...@apache.org>
AuthorDate: Thu Apr 4 11:36:03 2019 -0700

    Revert "HIVE-19875: increase LLAP IO queue size for perf (Prasanth Jayachandran reviewed by Sergey Shelukhin)"
    
    This reverts commit 5909c6efec69b0c27fe1681a15186e58777fce21.
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |  2 +-
 .../hive/llap/io/api/impl/LlapRecordReader.java    | 25 ++++------------------
 2 files changed, 5 insertions(+), 22 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7aaa3a9..b81c47d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3848,7 +3848,7 @@ public class HiveConf extends Configuration {
         "MR LineRecordRedader into LLAP cache, if this feature is enabled. Safety flag."),
     LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
         "Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
-    LLAP_IO_VRB_QUEUE_LIMIT_BASE("hive.llap.io.vrb.queue.limit.base", 50000,
+    LLAP_IO_VRB_QUEUE_LIMIT_BASE("hive.llap.io.vrb.queue.limit.base", 10000,
         "The default queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
         "slower than the IO. The actual queue size is set per fragment, and is adjusted down\n" +
         "from the base, depending on the schema."),
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index f83fffe..6897336 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.io.NullWritable;
@@ -164,9 +163,7 @@ class LlapRecordReader
 
     int queueLimitBase = getQueueVar(ConfVars.LLAP_IO_VRB_QUEUE_LIMIT_BASE, job, daemonConf);
     int queueLimitMin =  getQueueVar(ConfVars.LLAP_IO_VRB_QUEUE_LIMIT_MIN, job, daemonConf);
-    final boolean decimal64Support = HiveConf.getVar(job, ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED)
-      .equalsIgnoreCase("decimal_64");
-    int limit = determineQueueLimit(queueLimitBase, queueLimitMin, rbCtx.getRowColumnTypeInfos(), decimal64Support);
+    int limit = determineQueueLimit(queueLimitBase, queueLimitMin, rbCtx.getRowColumnTypeInfos());
     LOG.info("Queue limit for LlapRecordReader is " + limit);
     this.queue = new LinkedBlockingQueue<>(limit);
 
@@ -202,14 +199,14 @@ class LlapRecordReader
   private static final int COL_WEIGHT_COMPLEX = 16, COL_WEIGHT_HIVEDECIMAL = 4,
       COL_WEIGHT_STRING = 8;
   private static int determineQueueLimit(
-    int queueLimitBase, int queueLimitMin, TypeInfo[] typeInfos, final boolean decimal64Support) {
+      int queueLimitBase, int queueLimitMin, TypeInfo[] typeInfos) {
     // If the values are equal, the queue limit is fixed.
     if (queueLimitBase == queueLimitMin) return queueLimitBase;
     // If there are no columns (projection only join?) just assume no weight.
     if (typeInfos == null || typeInfos.length == 0) return queueLimitBase;
     double totalWeight = 0;
     for (TypeInfo ti : typeInfos) {
-      int colWeight;
+      int colWeight = 1;
       if (ti.getCategory() != Category.PRIMITIVE) {
         colWeight = COL_WEIGHT_COMPLEX;
       } else {
@@ -220,22 +217,8 @@ class LlapRecordReader
         case VARCHAR:
         case STRING:
           colWeight = COL_WEIGHT_STRING;
-          break;
         case DECIMAL:
-          boolean useDecimal64 = false;
-          if (ti instanceof DecimalTypeInfo) {
-            DecimalTypeInfo dti = (DecimalTypeInfo) ti;
-            if (dti.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION && decimal64Support) {
-              useDecimal64 = true;
-            }
-          }
-          // decimal_64 column vectors gets the same weight as long column vectors
-          if (useDecimal64) {
-            colWeight = 1;
-          } else {
-            colWeight = COL_WEIGHT_HIVEDECIMAL;
-          }
-          break;
+          colWeight = COL_WEIGHT_HIVEDECIMAL;
         default:
           colWeight = 1;
         }

Reply via email to