[
https://issues.apache.org/jira/browse/HIVE-23688?focusedWorklogId=636829&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-636829
]
ASF GitHub Bot logged work on HIVE-23688:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 11/Aug/21 09:21
Start Date: 11/Aug/21 09:21
Worklog Time Spent: 10m
Work Description: maheshk114 commented on a change in pull request #2479:
URL: https://github.com/apache/hive/pull/2479#discussion_r686623069
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -129,21 +131,16 @@ private boolean fetchNextValue(PrimitiveObjectInspector.PrimitiveCategory catego
private void addElement(ListColumnVector lcv, List<Object> elements, PrimitiveObjectInspector.PrimitiveCategory category, int index) throws IOException {
lcv.offsets[index] = elements.size();
- // Return directly if last value is null
- if (definitionLevel < maxDefLevel) {
- lcv.isNull[index] = true;
- lcv.lengths[index] = 0;
- // fetch the data from parquet data page for next call
- fetchNextValue(category);
- return;
- }
-
do {
// add all data for an element in ListColumnVector, get out the loop if there is no data or the data is for new element
+ if (definitionLevel < maxDefLevel) {
+ lcv.lengths[index] = 0;
+ lcv.isNull[index] = true;
+ lcv.noNulls = false;
+ }
elements.add(lastValue);
} while (fetchNextValue(category) && (repetitionLevel != 0));
- lcv.isNull[index] = false;
lcv.lengths[index] = elements.size() - lcv.offsets[index];
Review comment:
lcv.lengths[index] is overwritten: inside the loop it is set to 0 under some condition, but this line then recomputes it unconditionally after the loop.
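For illustration, a minimal sketch of the loop with that dead store removed (hypothetical; based only on the hunk above, not the actual patch):
{code:java}
// The in-loop assignment lcv.lengths[index] = 0 is a dead store, because
// the line after the loop recomputes lcv.lengths[index] unconditionally.
lcv.offsets[index] = elements.size();
do {
  if (definitionLevel < maxDefLevel) {
    lcv.isNull[index] = true;   // flag the null; no need to touch lengths
    lcv.noNulls = false;
  }
  elements.add(lastValue);
} while (fetchNextValue(category) && (repetitionLevel != 0));
// Single source of truth for this entry's element count.
lcv.lengths[index] = elements.size() - lcv.offsets[index];
{code}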
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -129,21 +131,16 @@ private boolean fetchNextValue(PrimitiveObjectInspector.PrimitiveCategory catego
private void addElement(ListColumnVector lcv, List<Object> elements, PrimitiveObjectInspector.PrimitiveCategory category, int index) throws IOException {
lcv.offsets[index] = elements.size();
- // Return directly if last value is null
- if (definitionLevel < maxDefLevel) {
- lcv.isNull[index] = true;
- lcv.lengths[index] = 0;
- // fetch the data from parquet data page for next call
- fetchNextValue(category);
- return;
- }
-
do {
// add all data for an element in ListColumnVector, get out the loop if there is no data or the data is for new element
+ if (definitionLevel < maxDefLevel) {
+ lcv.lengths[index] = 0;
+ lcv.isNull[index] = true;
Review comment:
Why does this have to be done inside the loop?
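A minimal sketch of what hoisting the flag writes would look like, assuming definitionLevel is refreshed by each fetchNextValue() call and therefore must still be inspected per iteration (hypothetical, not the patch):
{code:java}
// definitionLevel changes on every fetchNextValue(), so the comparison has
// to stay in the loop; the flag writes, however, are idempotent and can be
// collapsed into a single assignment after the loop.
boolean sawNull = false;
do {
  if (definitionLevel < maxDefLevel) {
    sawNull = true;             // remember that a null element was seen
  }
  elements.add(lastValue);
} while (fetchNextValue(category) && (repetitionLevel != 0));
if (sawNull) {
  lcv.isNull[index] = true;     // set the vector flags exactly once
  lcv.noNulls = false;
}
{code}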
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -193,59 +190,59 @@ private List decodeDictionaryIds(PrimitiveObjectInspector.PrimitiveCategory cate
case SHORT:
resultList = new ArrayList<Integer>(total);
for (int i = 0; i < total; ++i) {
- resultList.add(dictionary.readInteger(intList.get(i)));
+ resultList.add(intList.get(i) == null ? null : dictionary.readInteger(intList.get(i)));
}
break;
case DATE:
case INTERVAL_YEAR_MONTH:
case LONG:
resultList = new ArrayList<Long>(total);
for (int i = 0; i < total; ++i) {
- resultList.add(dictionary.readLong(intList.get(i)));
+ resultList.add(intList.get(i) == null ? null : dictionary.readLong(intList.get(i)));
}
break;
case BOOLEAN:
resultList = new ArrayList<Long>(total);
for (int i = 0; i < total; ++i) {
- resultList.add(dictionary.readBoolean(intList.get(i)) ? 1 : 0);
+ resultList.add(intList.get(i) == null ? null : dictionary.readBoolean(intList.get(i)));
Review comment:
Here the boolean returned by readBoolean is added directly instead of being mapped to 1 or 0, which changes the element type in the result list.
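A sketch of keeping the previous 1/0 encoding while still passing nulls through (hypothetical fix, not the actual patch):
{code:java}
case BOOLEAN:
  resultList = new ArrayList<Long>(total);
  for (int i = 0; i < total; ++i) {
    Integer dictId = intList.get(i);
    // null dictionary id -> null element; otherwise keep the 1/0 encoding
    resultList.add(dictId == null ? null : (dictionary.readBoolean(dictId) ? 1L : 0L));
  }
  break;
{code}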
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -129,21 +131,16 @@ private boolean fetchNextValue(PrimitiveObjectInspector.PrimitiveCategory catego
private void addElement(ListColumnVector lcv, List<Object> elements, PrimitiveObjectInspector.PrimitiveCategory category, int index) throws IOException {
lcv.offsets[index] = elements.size();
- // Return directly if last value is null
- if (definitionLevel < maxDefLevel) {
- lcv.isNull[index] = true;
- lcv.lengths[index] = 0;
- // fetch the data from parquet data page for next call
- fetchNextValue(category);
- return;
- }
-
do {
// add all data for an element in ListColumnVector, get out the loop if there is no data or the data is for new element
+ if (definitionLevel < maxDefLevel) {
Review comment:
In fetchNextValue, a value is considered null when definitionLevel != maxDefLevel, but in this function the check is definitionLevel < maxDefLevel; the two conditions should be consistent.
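One way to keep the two call sites consistent would be a shared predicate, sketched below (hypothetical helper; definitionLevel and maxDefLevel are the fields used in the hunk):
{code:java}
// Hypothetical helper so fetchNextValue() and addElement() cannot drift:
// Parquet encodes a null value as a definition level below the column
// path's maximum definition level.
private boolean isNullValue() {
  return definitionLevel < maxDefLevel;
}
{code}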
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -479,6 +501,9 @@ private boolean compareBytesColumnVector(BytesColumnVector cv1, BytesColumnVecto
int length2 = cv2.vector.length;
if (length1 == length2) {
for (int i = 0; i < length1; i++) {
+ if (cv1.vector[i] == null && cv2.vector[i] == null) {
+ continue;
Review comment:
Why not check the lengths first and return false on a mismatch?
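A sketch of the stricter per-element check the comment seems to ask for (hypothetical; variable names follow the hunk):
{code:java}
for (int i = 0; i < length1; i++) {
  byte[] b1 = cv1.vector[i];
  byte[] b2 = cv2.vector[i];
  if (b1 == null && b2 == null) {
    continue;                 // both null: equal for this slot
  }
  if (b1 == null || b2 == null || b1.length != b2.length) {
    return false;             // null/non-null mismatch or length mismatch
  }
  // ... existing byte-wise comparison of b1 and b2 ...
}
{code}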
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -129,21 +131,16 @@ private boolean fetchNextValue(PrimitiveObjectInspector.PrimitiveCategory catego
private void addElement(ListColumnVector lcv, List<Object> elements, PrimitiveObjectInspector.PrimitiveCategory category, int index) throws IOException {
lcv.offsets[index] = elements.size();
- // Return directly if last value is null
Review comment:
I think we should make this simpler: addElement should only add the element; fetchNextValue can be done by the caller in its loop.
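A minimal sketch of that separation, with the fetch loop owned by the caller (hypothetical restructuring, not the actual patch):
{code:java}
// Caller drives the I/O; addElement() only records one value.
lcv.offsets[index] = elements.size();
do {
  addElement(lcv, elements, index);   // only appends lastValue, no fetching
} while (fetchNextValue(category) && (repetitionLevel != 0));
lcv.lengths[index] = elements.size() - lcv.offsets[index];
{code}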
##########
File path: ql/src/test/queries/clientpositive/parquet_map_null_vectorization.q
##########
@@ -0,0 +1,20 @@
+set hive.mapred.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+DROP TABLE parquet_map_type;
+
+
+CREATE TABLE parquet_map_type (
+id int,
+stringMap map<string, string>
Review comment:
Create a table with all data types and check the null handling for each. You may refer to update_all_types.q.
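A sketch of what such an expanded test could look like (hypothetical table and column names, modeled loosely on update_all_types.q; not the actual test added by the patch):
{code:sql}
-- Cover map values of several primitive types, each containing a null.
CREATE TABLE parquet_map_type_all (
  id int,
  stringMap map<string, string>,
  intMap map<string, int>,
  doubleMap map<string, double>,
  decimalMap map<string, decimal(10,2)>
) STORED AS PARQUET;

INSERT INTO parquet_map_type_all SELECT
  1,
  MAP('k1', CAST(NULL AS string), 'k2', 'bar'),
  MAP('k1', CAST(NULL AS int), 'k2', 2),
  MAP('k1', CAST(NULL AS double), 'k2', 2.0),
  MAP('k1', CAST(NULL AS decimal(10,2)), 'k2', 2.34);

SELECT id, stringMap['k1'], intMap['k1'], doubleMap['k1'], decimalMap['k1']
FROM parquet_map_type_all;
{code}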
##########
File path:
ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
##########
@@ -323,7 +341,11 @@ private void fillColumnVector(PrimitiveObjectInspector.PrimitiveCategory categor
int scale = logicalType.getScale();
lcv.child = new DecimalColumnVector(total, precision, scale);
for (int i = 0; i < valueList.size(); i++) {
- ((DecimalColumnVector) lcv.child).vector[i].set(((List<byte[]>) valueList).get(i), scale);
+ if (valueList.get(i) == null) {
+ lcv.child.isNull[i] = true;
Review comment:
Now that the value itself is no longer set to null, compareDecimalColumnVector has to be modified to check isNull; the same applies to the other types.
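A sketch of an isNull-aware comparison body (hypothetical; assumes cv1 and cv2 are DecimalColumnVector and values are compared via HiveDecimalWritable equality):
{code:java}
// Once nulls are flagged via isNull[] instead of null values, equality
// must consult the flags before touching the values.
if (cv1.vector.length != cv2.vector.length) {
  return false;
}
for (int i = 0; i < cv1.vector.length; i++) {
  if (cv1.isNull[i] != cv2.isNull[i]) {
    return false;               // one side null, the other not
  }
  if (cv1.isNull[i]) {
    continue;                   // both null: skip the value comparison
  }
  if (!cv1.vector[i].equals(cv2.vector[i])) {
    return false;
  }
}
return true;
{code}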
Issue Time Tracking
-------------------
Worklog Id: (was: 636829)
Time Spent: 1.5h (was: 1h 20m)
> Vectorization: ArrayIndexOutOfBoundsException for map type column which
> includes null value
> -------------------------------------------------------------------------------------------
>
> Key: HIVE-23688
> URL: https://issues.apache.org/jira/browse/HIVE-23688
> Project: Hive
> Issue Type: Bug
> Components: Parquet, storage-api, Vectorization
> Affects Versions: All Versions
> Reporter: 范宜臻
> Assignee: László Bodor
> Priority: Critical
> Labels: pull-request-available
> Fix For: 3.0.0, 4.0.0
>
> Attachments: HIVE-23688.patch
>
> Time Spent: 1.5h
> Remaining Estimate: 0h
>
> {color:#de350b}start{color} and {color:#de350b}length{color} are empty arrays
> in MapColumnVector.values (BytesColumnVector) when the map values contain
> {color:#de350b}null{color}.
> Reproduce on the master branch:
> {code:java}
> set hive.vectorized.execution.enabled=true;
> CREATE TABLE parquet_map_type (id int, stringMap map<string, string>)
> stored as parquet;
> insert overwrite table parquet_map_type
> SELECT 1, MAP('k1', null, 'k2', 'bar');
> select id, stringMap['k1'] from parquet_map_type group by 1,2;
> {code}
> query explain:
> {code:java}
> Stage-0
> Fetch Operator
> limit:-1
> Stage-1
> Reducer 2 vectorized
> File Output Operator [FS_12]
> Group By Operator [GBY_11] (rows=5 width=2)
> Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
> <-Map 1 [SIMPLE_EDGE] vectorized
> SHUFFLE [RS_10]
> PartitionCols:_col0, _col1
> Group By Operator [GBY_9] (rows=10 width=2)
> Output:["_col0","_col1"],keys:_col0, _col1
> Select Operator [SEL_8] (rows=10 width=2)
> Output:["_col0","_col1"]
> TableScan [TS_0] (rows=10 width=2)
>   temp@parquet_map_type_fyz,parquet_map_type_fyz,Tbl:COMPLETE,Col:NONE,Output:["id","stringmap"]
> {code}
> runtime error:
> {code:java}
> Vertex failed, vertexName=Map 1, vertexId=vertex_1592040015150_0001_3_00, diagnostics=[Task failed, taskId=task_1592040015150_0001_3_00_000000, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_1592040015150_0001_3_00_000000_0:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:296)
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.run(TezProcessor.java:250)
>     at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.run(LogicalIOProcessorRuntimeTask.java:374)
>     at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:73)
>     at org.apache.tez.runtime.task.TaskRunner2Callable$1.run(TaskRunner2Callable.java:61)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1682)
>     at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:61)
>     at org.apache.tez.runtime.task.TaskRunner2Callable.callInternal(TaskRunner2Callable.java:37)
>     at org.apache.tez.common.CallableWithNdc.call(CallableWithNdc.java:36)
>     at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:108)
>     at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:41)
>     at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:77)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:748)
> Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.processRow(MapRecordSource.java:101)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.pushRecord(MapRecordSource.java:76)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor.run(MapRecordProcessor.java:403)
>     at org.apache.hadoop.hive.ql.exec.tez.TezProcessor.initializeAndRunProcessor(TezProcessor.java:267)
>     ... 16 more
> Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
>     at org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.process(VectorMapOperator.java:970)
>     at org.apache.hadoop.hive.ql.exec.tez.MapRecordSource.processRow(MapRecordSource.java:92)
>     ... 19 more
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Error evaluating id
>     at org.apache.hadoop.hive.ql.exec.vector.VectorSelectOperator.process(VectorSelectOperator.java:149)
>     at org.apache.hadoop.hive.ql.exec.Operator.vectorForward(Operator.java:918)
>     at org.apache.hadoop.hive.ql.exec.TableScanOperator.process(TableScanOperator.java:172)
>     at org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.deliverVectorizedRowBatch(VectorMapOperator.java:809)
>     at org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator.process(VectorMapOperator.java:842)
>     ... 20 more
> Caused by: java.lang.ArrayIndexOutOfBoundsException: 0
>     at org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector.setElement(BytesColumnVector.java:506)
>     at org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFMapIndexBaseScalar.evaluate(VectorUDFMapIndexBaseScalar.java:83)
>     at org.apache.hadoop.hive.ql.exec.vector.VectorSelectOperator.process(VectorSelectOperator.java:146)
>     ... 24 more
> {code}