[
https://issues.apache.org/jira/browse/HIVE-27898?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17819169#comment-17819169
]
Butao Zhang commented on HIVE-27898:
------------------------------------
*I have tested the two cases — with a Tez task and without one (fetch task) — and both work fine.*
// set conf in beeline:
set hive.fetch.task.conversion=none; // force launching a Tez task instead of a fetch task
set hive.explain.user=false;
{code:java}
0: jdbc:hive2://127.0.0.1:10004/default> select * from (select * from
testdb.test_data_02 limit 10) s1
----------------------------------------------------------------------------------------------
VERTICES MODE STATUS TOTAL COMPLETED RUNNING PENDING
FAILED KILLED
----------------------------------------------------------------------------------------------
Map 1 .......... container SUCCEEDED 1 1 0 0
0 0
----------------------------------------------------------------------------------------------
VERTICES: 01/01 [==========================>>] 100% ELAPSED TIME: 21.55 s
----------------------------------------------------------------------------------------------
INFO : Completed executing
command(queryId=hive_20240221181020_b4a5968f-3594-4b1a-bc24-1b968cf6a993); Time
taken: 6.12 seconds
+--------+----------+
| s1.id | s1.name |
+--------+----------+
| 1 | a |
| 2 | b |
+--------+----------+
2 rows selected (6.442 seconds)
0: jdbc:hive2://127.0.0.1:10000/default> explain select * from (select * from
testdb.test_data_02 limit 10) s1;
+----------------------------------------------------+
| Explain |
+----------------------------------------------------+
| STAGE DEPENDENCIES: |
| Stage-1 is a root stage |
| Stage-0 depends on stages: Stage-1 |
| |
| STAGE PLANS: |
| Stage: Stage-1 |
| Tez |
| DagId: hive_20240221181118_d4e74ca2-cc3b-426c-830a-ee32fd3801b4:4 |
| DagName: hive_20240221181118_d4e74ca2-cc3b-426c-830a-ee32fd3801b4:4 |
| Vertices: |
| Map 1 |
| Map Operator Tree: |
| TableScan |
| alias: test_data_02 |
| Statistics: Num rows: 2 Data size: 736 Basic stats:
COMPLETE Column stats: NONE |
| Limit |
| Number of rows: 10 |
| Statistics: Num rows: 2 Data size: 736 Basic stats:
COMPLETE Column stats: NONE |
| Select Operator |
| expressions: id (type: string), name (type: string) |
| outputColumnNames: _col0, _col1 |
| Statistics: Num rows: 2 Data size: 736 Basic stats:
COMPLETE Column stats: NONE |
| File Output Operator |
| compressed: false |
| Statistics: Num rows: 2 Data size: 736 Basic stats:
COMPLETE Column stats: NONE |
| table: |
| input format:
org.apache.hadoop.mapred.SequenceFileInputFormat |
| output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat |
| serde:
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe |
| Execution mode: vectorized |
| |
| Stage: Stage-0 |
| Fetch Operator |
| limit: 10 |
| Processor Tree: |
| ListSink |
| |
+----------------------------------------------------+
37 rows selected (0.294 seconds) {code}
> HIVE4 can't use ICEBERG table in subqueries
> -------------------------------------------
>
> Key: HIVE-27898
> URL: https://issues.apache.org/jira/browse/HIVE-27898
> Project: Hive
> Issue Type: Improvement
> Components: Iceberg integration
> Affects Versions: 4.0.0-beta-1
> Reporter: yongzhi.shao
> Priority: Critical
>
> Currently, we found that when using HIVE4-BETA1 version, if we use ICEBERG
> table in the subquery, we can't get any data in the end.
> I have used HIVE3-TEZ for cross validation and HIVE3 does not have this
> problem when querying ICEBERG.
> {code:java}
> --spark3.4.1+iceberg 1.4.2
> CREATE TABLE datacenter.dwd.b_std_trade (
> uni_order_id STRING,
> data_from BIGINT,
> partner STRING,
> plat_code STRING,
> order_id STRING,
> uni_shop_id STRING,
> uni_id STRING,
> guide_id STRING,
> shop_id STRING,
> plat_account STRING,
> total_fee DOUBLE,
> item_discount_fee DOUBLE,
> trade_discount_fee DOUBLE,
> adjust_fee DOUBLE,
> post_fee DOUBLE,
> discount_rate DOUBLE,
> payment_no_postfee DOUBLE,
> payment DOUBLE,
> pay_time STRING,
> product_num BIGINT,
> order_status STRING,
> is_refund STRING,
> refund_fee DOUBLE,
> insert_time STRING,
> created STRING,
> endtime STRING,
> modified STRING,
> trade_type STRING,
> receiver_name STRING,
> receiver_country STRING,
> receiver_state STRING,
> receiver_city STRING,
> receiver_district STRING,
> receiver_town STRING,
> receiver_address STRING,
> receiver_mobile STRING,
> trade_source STRING,
> delivery_type STRING,
> consign_time STRING,
> orders_num BIGINT,
> is_presale BIGINT,
> presale_status STRING,
> first_fee_paytime STRING,
> last_fee_paytime STRING,
> first_paid_fee DOUBLE,
> tenant STRING,
> tidb_modified STRING,
> step_paid_fee DOUBLE,
> seller_flag STRING,
> is_used_store_card BIGINT,
> store_card_used DOUBLE,
> store_card_basic_used DOUBLE,
> store_card_expand_used DOUBLE,
> order_promotion_num BIGINT,
> item_promotion_num BIGINT,
> buyer_remark STRING,
> seller_remark STRING,
> trade_business_type STRING)
> USING iceberg
> PARTITIONED BY (uni_shop_id, truncate(4, created))
> LOCATION '/iceberg-catalog/warehouse/dwd/b_std_trade'
> TBLPROPERTIES (
> 'current-snapshot-id' = '7217819472703702905',
> 'format' = 'iceberg/orc',
> 'format-version' = '1',
> 'hive.stored-as' = 'iceberg',
> 'read.orc.vectorization.enabled' = 'true',
> 'sort-order' = 'uni_shop_id ASC NULLS FIRST, created ASC NULLS FIRST',
> 'write.distribution-mode' = 'hash',
> 'write.format.default' = 'orc',
> 'write.metadata.delete-after-commit.enabled' = 'true',
> 'write.metadata.previous-versions-max' = '3',
> 'write.orc.bloom.filter.columns' = 'order_id',
> 'write.orc.compression-codec' = 'zstd')
> --hive-iceberg
> CREATE EXTERNAL TABLE iceberg_dwd.b_std_trade
> STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
> LOCATION 'hdfs://xxxx/iceberg-catalog/warehouse/dwd/b_std_trade'
> TBLPROPERTIES
> ('iceberg.catalog'='location_based_table','engine.hive.enabled'='true');
> select * from iceberg_dwd.b_std_trade
> where uni_shop_id = 'TEST|11111' limit 10 --10 rows
> select *
> from (
> select * from iceberg_dwd.b_std_trade
> where uni_shop_id = 'TEST|11111' limit 10
> ) t1; --10 rows
> select uni_shop_id
> from (
> select * from iceberg_dwd.b_std_trade
> where uni_shop_id = 'TEST|11111' limit 10
> ) t1; --0 rows
> select uni_shop_id
> from (
> select uni_shop_id as uni_shop_id from iceberg_dwd.b_std_trade
> where uni_shop_id = 'TEST|11111' limit 10
> ) t1; --0 rows
> --hive-orc
> select uni_shop_id
> from (
> select * from iceberg_dwd.trade_test
> where uni_shop_id = 'TEST|11111' limit 10
> ) t1; --10 ROWS{code}
>
--
This message was sent by Atlassian Jira
(v8.20.10#820010)