[ 
https://issues.apache.org/jira/browse/HIVE-10598?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14694789#comment-14694789
 ] 

Matt McCline commented on HIVE-10598:
-------------------------------------


Looked at a bunch of the (TestCliDriver) failures.

A fair number of the diffs are "Execution mode: vectorized" differences, which are expected because the patch 
turns on vectorization.

Command Failures:
{code}
bucketsortoptimize_insert_2
•       INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, 
concat(a.value, b.value)  FROM test_table1 a JOIN test_table2 b  ON a.key = 
b.key WHERE a.ds is not null and b.ds = '1'
skewjoin
•       FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE 
TABLE dest_j1 SELECT src1.key, src2.value
windowing_decimal
•       select p_mfgr, p_retailprice,  first_value(p_retailprice) 
over(partition by p_mfgr order by p_retailprice) , sum(p_retailprice) 
over(partition by p_mfgr order by p_retailprice) from part_dec
list_bucket_dml_2
•       select count(1) from list_bucketing_static_part where key < '51'
parquet_partitioned
•       SELECT part, COUNT(0) FROM parquet_partitioned GROUP BY part
windowing_streaming
•       select *  from ( select p_mfgr, rank() over(partition by p_mfgr order 
by p_name) r from part) a  where r < 4 
windowing_udaf2
•       select sum(key) over (), mysum(key) over () from src limit 1
cbo_rp_subq_in
•       -- non agg, non corr, windowing select p_mfgr, p_name, avg(p_size)  
from part  group by p_mfgr, p_name having p_name in    (select 
first_value(p_name) over(partition by p_mfgr order by p_size) from part) order 
by p_mfgr
cbo_subq_in
•       -- non agg, non corr, windowing select p_mfgr, p_name, avg(p_size)  
from part  group by p_mfgr, p_name having p_name in    (select 
first_value(p_name) over(partition by p_mfgr order by p_size) from part) order 
by p_mfgr
rcfile_lazydecompress
•       SELECT key, count(1) FROM rcfileTableLazyDecompress where key > 238 
group by key ORDER BY key ASC
windowing_multipartitioning
•       select s, rank() over (partition by s order by si), sum(b) over 
(partition by s order by si) from over10k limit 100
windowing_rank
•       select s, rank() over (partition by f order by t) from over10k limit 100
avro_date
•       SELECT d, COUNT(d) FROM avro_date GROUP BY d
auto_sortmerge_join_1
•       select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
mapjoin1
•       SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on 
a.key=b.key AND true limit 10
cbo_windowing
•       select count(c_int) over() from cbo_t1
parquet_types
•       SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM 
parquet_types
auto_sortmerge_join_9
•       select count(*) from (   select a.key as key, a.value as val1, b.value 
as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1
ptf_rcfile
cp_mj_rc
•       SELECT /*+ MAPJOIN(six) */ six.*, two.k1 from src_six_columns six join 
src_two_columns two on (six.k3=two.k1)
auto_sortmerge_join_3
•       select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
{code}

Wrong Results:
{code}
•       orc_empty_strings
•       smb_mapjoin_2
•       partition_wise_fileformat7
•       vectorization_2
•       timestamp_3
•       partition_multilevels
•       cbo_views
•       vectorized_parquet
•       udtf_json_tuple
•       bucketcontext_3
•       date_3
•       bucketcontext_1
•       auto_sortmerge_join_13
•       truncate_column
•       vector_decimal_1
•       bucketcontext_2
•       ppd_union_view
•       str_to_map
•       bucketsortoptimize_insert_6
•       groupby_sort_10
•       bucketcontext_5
•       date_1
•       bucketcontext_4
•       cbo_rp_views
•       smb_mapjoin_11
•       bucketcontext_8
•       vector_binary_join_groupby
•       bucketcontext_7
{code}

> Vectorization borks when column is added to table.
> --------------------------------------------------
>
>                 Key: HIVE-10598
>                 URL: https://issues.apache.org/jira/browse/HIVE-10598
>             Project: Hive
>          Issue Type: Bug
>          Components: Vectorization
>            Reporter: Mithun Radhakrishnan
>            Assignee: Matt McCline
>         Attachments: HIVE-10598.01.patch, HIVE-10598.02.patch
>
>
> Consider the following table definition:
> {code:sql}
> create table foobar ( foo string, bar string ) partitioned by (dt string) 
> stored as orc;
> alter table foobar add partition( dt='20150101' ) ;
> {code}
> Say the partition has the following data:
> {noformat}
> 1     one     20150101
> 2     two     20150101
> 3     three   20150101
> {noformat}
> If a new column is added to the table-schema (and the partition continues to 
> have the old schema), vectorized read from the old partitions fail thus:
> {code:sql}
> alter table foobar add columns( goo string );
> select count(1) from foobar;
> {code}
> {code:title=stacktrace}
> java.lang.Exception: java.lang.RuntimeException: Error creating a batch
>       at 
> org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
>       at 
> org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:522)
> Caused by: java.lang.RuntimeException: Error creating a batch
>       at 
> org.apache.hadoop.hive.ql.io.orc.VectorizedOrcInputFormat$VectorizedOrcRecordReader.createValue(VectorizedOrcInputFormat.java:114)
>       at 
> org.apache.hadoop.hive.ql.io.orc.VectorizedOrcInputFormat$VectorizedOrcRecordReader.createValue(VectorizedOrcInputFormat.java:52)
>       at 
> org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.createValue(CombineHiveRecordReader.java:84)
>       at 
> org.apache.hadoop.hive.ql.io.CombineHiveRecordReader.createValue(CombineHiveRecordReader.java:42)
>       at 
> org.apache.hadoop.hive.shims.HadoopShimsSecure$CombineFileRecordReader.createValue(HadoopShimsSecure.java:156)
>       at 
> org.apache.hadoop.mapred.MapTask$TrackedRecordReader.createValue(MapTask.java:180)
>       at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:50)
>       at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:450)
>       at org.apache.hadoop.mapred.MapTask.run(MapTask.java:343)
>       at 
> org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:243)
>       at 
> java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
>       at java.util.concurrent.FutureTask.run(FutureTask.java:262)
>       at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>       at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>       at java.lang.Thread.run(Thread.java:744)
> Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: No type entry 
> found for column 3 in map {4=Long}
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.addScratchColumnsToBatch(VectorizedRowBatchCtx.java:632)
>       at 
> org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.createVectorizedRowBatch(VectorizedRowBatchCtx.java:343)
>       at 
> org.apache.hadoop.hive.ql.io.orc.VectorizedOrcInputFormat$VectorizedOrcRecordReader.createValue(VectorizedOrcInputFormat.java:112)
>       ... 14 more
> {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

Reply via email to