This is an automated email from the ASF dual-hosted git repository.
dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 82e2d617d45 HIVE-28346: Make ALTER CHANGE COLUMN more efficient with many partitions (#6126)
82e2d617d45 is described below
commit 82e2d617d45791a3c6031e82f679965e36729007
Author: dengzh <[email protected]>
AuthorDate: Tue Oct 21 09:09:13 2025 +0800
HIVE-28346: Make ALTER CHANGE COLUMN more efficient with many partitions (#6126)
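
For context, the subject line refers to Hive DDL of this shape, run against a
table with many partitions. A minimal HiveQL sketch follows; the table, column,
and partition names are hypothetical, not taken from this patch:

    -- Illustrative partitioned table.
    CREATE TABLE sales (id INT, amount DOUBLE) PARTITIONED BY (dt STRING);
    -- Widen a column and cascade the change to all partition schemas; with
    -- many partitions, the metastore-side work for this statement is what
    -- this commit aims to reduce.
    ALTER TABLE sales CHANGE COLUMN id id BIGINT CASCADE;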
---
.../llap/schema_evol_orc_nonvec_part.q.out | 12 +-
...schema_evol_orc_nonvec_part_all_primitive.q.out | 18 +-
...vol_orc_nonvec_part_all_primitive_llap_io.q.out | 18 +-
.../llap/schema_evol_orc_nonvec_part_llap_io.q.out | 12 +-
.../llap/schema_evol_orc_nonvec_table.q.out | 6 +-
.../schema_evol_orc_nonvec_table_llap_io.q.out | 6 +-
.../llap/schema_evol_orc_vec_part.q.out | 12 +-
.../schema_evol_orc_vec_part_all_primitive.q.out | 18 +-
...a_evol_orc_vec_part_all_primitive_llap_io.q.out | 18 +-
.../llap/schema_evol_orc_vec_table.q.out | 6 +-
.../llap/schema_evol_orc_vec_table_llap_io.q.out | 6 +-
.../llap/schema_evol_text_nonvec_part.q.out | 12 +-
...chema_evol_text_nonvec_part_all_primitive.q.out | 18 +-
...ol_text_nonvec_part_all_primitive_llap_io.q.out | 18 +-
.../schema_evol_text_nonvec_part_llap_io.q.out | 12 +-
.../llap/schema_evol_text_nonvec_table.q.out | 6 +-
.../schema_evol_text_nonvec_table_llap_io.q.out | 6 +-
.../llap/schema_evol_text_vec_part.q.out | 12 +-
..._evol_text_vec_part_all_primitive_llap_io.q.out | 18 +-
.../llap/schema_evol_text_vec_table.q.out | 6 +-
.../llap/schema_evol_text_vec_table_llap_io.q.out | 6 +-
.../llap/schema_evol_text_vecrow_part.q.out | 12 +-
...chema_evol_text_vecrow_part_all_primitive.q.out | 18 +-
...ol_text_vecrow_part_all_primitive_llap_io.q.out | 18 +-
.../schema_evol_text_vecrow_part_llap_io.q.out | 12 +-
.../llap/schema_evol_text_vecrow_table.q.out | 6 +-
.../schema_evol_text_vecrow_table_llap_io.q.out | 6 +-
.../hadoop/hive/metastore/HiveAlterHandler.java | 224 +++++----------------
.../hadoop/hive/metastore/cache/CachedStore.java | 48 ++---
.../hive/metastore/utils/MetaStoreServerUtils.java | 17 +-
.../hive/metastore/TestHiveAlterHandler.java | 29 +--
.../metastore/utils/TestMetaStoreServerUtils.java | 14 +-
32 files changed, 251 insertions(+), 399 deletions(-)
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out
index c8ff2c06115..25e20ec8587 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out
@@ -953,14 +953,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_numeric_group_string_group_floating_string_group_n7
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5
(type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)),
c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12
(type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type:
varchar(7)), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1488,14 +1488,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n7
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: float), c2 (type: double), c3 (type: double), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out
index 42abb6bd385..e57fa6b8e5d 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out
@@ -272,14 +272,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n6
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint),
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type:
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20
(type: tinyint), c21 (t [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44,
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54,
_col55
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -505,14 +505,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n6
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)),
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type:
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)),
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16
(type: float), c17 (type: float), c18 [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -660,14 +660,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n6
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type:
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp),
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type:
timestamp), c12 (type: timestamp), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive_llap_io.q.out
index 2620d7d5dd8..824141e82c3 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive_llap_io.q.out
@@ -272,14 +272,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n5
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint),
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type:
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20
(type: tinyint), c21 (t [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44,
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54,
_col55
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -506,14 +506,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n5
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)),
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type:
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)),
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16
(type: float), c17 (type: float), c18 [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -662,14 +662,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n5
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type:
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp),
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type:
timestamp), c12 (type: timestamp), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_llap_io.q.out
index b69594b616e..3001eb7d1da 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_llap_io.q.out
@@ -958,14 +958,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_numeric_group_string_group_floating_string_group_n2
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5
(type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)),
c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12
(type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type:
varchar(7)), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1496,14 +1496,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n2
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: float), c2 (type: double), c3 (type: double), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out
index c0f920fe096..a2c1ad6d5f4 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out
@@ -904,14 +904,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: table_change_numeric_group_string_group_floating_string_group_n10
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), c1 (type: string), c2
(type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6
(type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)),
c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13
(type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type:
string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table_llap_io.q.out
index 3fde0f0dce1..a3e4eb00303 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table_llap_io.q.out
@@ -908,14 +908,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: table_change_numeric_group_string_group_floating_string_group_n7
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), c1 (type: string), c2
(type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6
(type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)),
c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13
(type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type:
string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
index eb3f6d08cb2..02df5deb9d0 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
@@ -1083,7 +1083,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_numeric_group_string_group_floating_string_group_n4
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50),
6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50),
11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7),
15:c15:varchar(7), 16:b:string, 17:part:int,
18:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
19:ROW__IS__DELETED:boolean]
@@ -1094,13 +1094,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1696,7 +1696,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n4
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:float, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int,
6:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
7:ROW__IS__DELETED:boolean]
@@ -1707,13 +1707,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 5, 1, 2, 3, 4]
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
index f6dde0e087f..8e097343a8f 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
@@ -272,7 +272,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n0
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:boolean, 2:c2:boolean, 3:c3:boolean, 4:c4:boolean, 5:c5:boolean,
6:c6:boolean, 7:c7:boolean, 8:c8:boolean, 9:c9:boolean, 10:c10:tinyint,
11:c11:tinyint, 12:c12:tinyint, 13:c13:tinyint, 14:c14:tinyint, 15:c15:tinyint,
16:c16:tinyint, 17:c17:tinyint, 18:c18:tinyint, 19:c19:tinyint, 20:c20:tinyint,
21:c21:smallint, 22:c22:smallint, 23:c23:smallint, 24:c24:smallint,
25:c25:smallint, 26:c26:smallint, 27:c27:smallint, 28 [...]
@@ -283,13 +283,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 55, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 51, 52, 53, 54]
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -531,7 +531,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n0
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:decimal(38,18), 2:c2:decimal(38,18), 3:c3:decimal(38,18),
4:c4:decimal(38,18), 5:c5:decimal(38,18), 6:c6:decimal(38,18),
7:c7:decimal(38,18), 8:c8:decimal(38,18), 9:c9:decimal(38,18),
10:c10:decimal(38,18), 11:c11:decimal(38,18), 12:c12:float, 13:c13:float,
14:c14:float, 15:c15:float, 16:c16:float, 17:c17:float, 18:c18:float,
19:c19:float, 20:c20:float, 21:c21:float, 22:c22:float, 23:c23:double,
24:c24:double, 25:c [...]
@@ -542,13 +542,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 35, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34]
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -712,7 +712,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n0
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:timestamp, 2:c2:timestamp, 3:c3:timestamp, 4:c4:timestamp, 5:c5:timestamp,
6:c6:timestamp, 7:c7:timestamp, 8:c8:timestamp, 9:c9:timestamp,
10:c10:timestamp, 11:c11:timestamp, 12:c12:timestamp, 13:b:string, 14:part:int,
15:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
16:ROW__IS__DELETED:boolean]
@@ -723,13 +723,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 14, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13]
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
index 3155010afba..33f73f9d688 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
@@ -272,7 +272,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n4
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:boolean, 2:c2:boolean, 3:c3:boolean, 4:c4:boolean, 5:c5:boolean,
6:c6:boolean, 7:c7:boolean, 8:c8:boolean, 9:c9:boolean, 10:c10:tinyint,
11:c11:tinyint, 12:c12:tinyint, 13:c13:tinyint, 14:c14:tinyint, 15:c15:tinyint,
16:c16:tinyint, 17:c17:tinyint, 18:c18:tinyint, 19:c19:tinyint, 20:c20:tinyint,
21:c21:smallint, 22:c22:smallint, 23:c23:smallint, 24:c24:smallint,
25:c25:smallint, 26:c26:smallint, 27:c27:smallint, 28 [...]
@@ -283,13 +283,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 55, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 50, 51, 52, 53, 54]
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -532,7 +532,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n4
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:decimal(38,18), 2:c2:decimal(38,18), 3:c3:decimal(38,18),
4:c4:decimal(38,18), 5:c5:decimal(38,18), 6:c6:decimal(38,18),
7:c7:decimal(38,18), 8:c8:decimal(38,18), 9:c9:decimal(38,18),
10:c10:decimal(38,18), 11:c11:decimal(38,18), 12:c12:float, 13:c13:float,
14:c14:float, 15:c15:float, 16:c16:float, 17:c17:float, 18:c18:float,
19:c19:float, 20:c20:float, 21:c21:float, 22:c22:float, 23:c23:double,
24:c24:double, 25:c [...]
@@ -543,13 +543,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 35, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34]
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -714,7 +714,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n4
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:timestamp, 2:c2:timestamp, 3:c3:timestamp, 4:c4:timestamp, 5:c5:timestamp,
6:c6:timestamp, 7:c7:timestamp, 8:c8:timestamp, 9:c9:timestamp,
10:c10:timestamp, 11:c11:timestamp, 12:c12:timestamp, 13:b:string, 14:part:int,
15:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
16:ROW__IS__DELETED:boolean]
@@ -725,13 +725,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 14, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13]
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
index f36d104bcc1..c579377f813 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
@@ -1004,7 +1004,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: table_change_numeric_group_string_group_floating_string_group_n3
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50),
6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50),
11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7),
15:c15:varchar(7), 16:b:string,
17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
18:ROW__IS__DELETED:boolean]
@@ -1015,13 +1015,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
index 7afc8f27812..68a6922cd42 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
@@ -1008,7 +1008,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: table_change_numeric_group_string_group_floating_string_group_n5
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:insert_num:int,
1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50),
6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50),
11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7),
15:c15:varchar(7), 16:b:string,
17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>,
18:ROW__IS__DELETED:boolean]
@@ -1019,13 +1019,13 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part.q.out
index fa84297addf..7006e21bece 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part.q.out
@@ -953,14 +953,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_numeric_group_string_group_floating_string_group_n8
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5
(type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)),
c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12
(type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type:
varchar(7)), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1488,14 +1488,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n8
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: float), c2 (type: double), c3 (type: double), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
index c1a9dc9a146..382a659567f 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
@@ -272,14 +272,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n2
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint),
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type:
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20
(type: tinyint), c21 (t [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44,
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54,
_col55
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -505,14 +505,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n2
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)),
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type:
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)),
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16
(type: float), c17 (type: float), c18 [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -660,14 +660,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n2
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type:
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp),
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type:
timestamp), c12 (type: timestamp), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive_llap_io.q.out
index 5949b45466e..18ff6b59527 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive_llap_io.q.out
@@ -323,14 +323,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_boolean_to_bigint_n1
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint),
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type:
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20
(type: tinyint), c21 (t [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44,
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54,
_col55
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -608,14 +608,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_decimal_to_double_n1
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)),
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type:
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)),
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16
(type: float), c17 (type: float), c18 [...]
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24,
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34,
_col35
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -815,14 +815,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_various_various_timestamp_n1
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type:
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp),
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type:
timestamp), c12 (type: timestamp), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_llap_io.q.out
index 59acc6c82e4..b59ab0ccf66 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_llap_io.q.out
@@ -958,14 +958,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_numeric_group_string_group_floating_string_group_n1
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5
(type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)),
c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12
(type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type:
varchar(7)), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16, _col17
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1496,14 +1496,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n1
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), part (type: int), c1
(type: float), c2 (type: double), c3 (type: double), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
index 842e9791311..65fe46683f2 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
@@ -904,14 +904,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: table_change_numeric_group_string_group_floating_string_group_n9
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), c1 (type: string), c2
(type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6
(type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)),
c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13
(type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type:
string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table_llap_io.q.out
index 605d4862b99..f4a097b7334 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table_llap_io.q.out
@@ -908,14 +908,14 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias:
table_change_numeric_group_string_group_floating_string_group_n2
- Statistics: Num rows: 6 Data size: 10884 Basic stats:
COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats:
COMPLETE Column stats: PARTIAL
Select Operator
expressions: insert_num (type: int), c1 (type: string), c2
(type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6
(type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)),
c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13
(type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type:
string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14,
_col15, _col16
- Statistics: Num rows: 6 Data size: 10884 Basic stats:
COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats:
COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 6 Data size: 10884 Basic stats:
COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 6 Data size: 11988 Basic stats:
COMPLETE Column stats: PARTIAL
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
index 2a7b615f9f5..94871c681e4 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
@@ -1083,7 +1083,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_numeric_group_string_group_floating_string_group_n10
-                Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:part:int, 18:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 19:ROW__IS__DELETED:boolean]
@@ -1094,13 +1094,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1696,7 +1696,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n10
-                Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:float, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 7:ROW__IS__DELETED:boolean]
@@ -1707,13 +1707,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 5, 1, 2, 3, 4]
-                  Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
index 28fb24874c1..5f9aae47572 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
@@ -349,14 +349,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_boolean_to_bigint_n3
-                Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (t [...]
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55
-                  Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -665,14 +665,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_decimal_to_double_n3
-                Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 [...]
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-                  Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -903,14 +903,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_timestamp_n3
-                Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: timestamp), c12 (type: timestamp), b (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                  Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
index 5b9873f05e6..a62e6a01708 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table.q.out
@@ -1004,7 +1004,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: table_change_numeric_group_string_group_floating_string_group_n4
-                Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 18:ROW__IS__DELETED:boolean]
@@ -1015,13 +1015,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
index df4a23121fe..49c2d406aa7 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
@@ -1271,7 +1271,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: table_change_numeric_group_string_group_floating_string_group_n1
-                Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 18:ROW__IS__DELETED:boolean]
@@ -1282,13 +1282,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
index 37e793f64ab..03e3646b186 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
@@ -1083,7 +1083,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_numeric_group_string_group_floating_string_group_n11
-                Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:part:int, 18:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 19:ROW__IS__DELETED:boolean]
@@ -1094,13 +1094,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1696,7 +1696,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_lower_to_higher_numeric_group_decimal_to_float_n11
-                Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:float, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 7:ROW__IS__DELETED:boolean]
@@ -1707,13 +1707,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 5, 1, 2, 3, 4]
-                  Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
index 76eb7dabcff..69edadb88d0 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive.q.out
@@ -272,7 +272,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_boolean_to_bigint_n7
-                Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:boolean, 2:c2:boolean, 3:c3:boolean, 4:c4:boolean, 5:c5:boolean, 6:c6:boolean, 7:c7:boolean, 8:c8:boolean, 9:c9:boolean, 10:c10:tinyint, 11:c11:tinyint, 12:c12:tinyint, 13:c13:tinyint, 14:c14:tinyint, 15:c15:tinyint, 16:c16:tinyint, 17:c17:tinyint, 18:c18:tinyint, 19:c19:tinyint, 20:c20:tinyint, 21:c21:smallint, 22:c22:smallint, 23:c23:smallint, 24:c24:smallint, 25:c25:smallint, 26:c26:smallint, 27:c27:smallint, 28 [...]
@@ -283,13 +283,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 55, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                  Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -531,7 +531,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_decimal_to_double_n7
-                Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:decimal(38,18), 2:c2:decimal(38,18), 3:c3:decimal(38,18), 4:c4:decimal(38,18), 5:c5:decimal(38,18), 6:c6:decimal(38,18), 7:c7:decimal(38,18), 8:c8:decimal(38,18), 9:c9:decimal(38,18), 10:c10:decimal(38,18), 11:c11:decimal(38,18), 12:c12:float, 13:c13:float, 14:c14:float, 15:c15:float, 16:c16:float, 17:c17:float, 18:c18:float, 19:c19:float, 20:c20:float, 21:c21:float, 22:c22:float, 23:c23:double, 24:c24:double, 25:c [...]
@@ -542,13 +542,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 35, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                  Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -712,7 +712,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_timestamp_n7
-                Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:timestamp, 2:c2:timestamp, 3:c3:timestamp, 4:c4:timestamp, 5:c5:timestamp, 6:c6:timestamp, 7:c7:timestamp, 8:c8:timestamp, 9:c9:timestamp, 10:c10:timestamp, 11:c11:timestamp, 12:c12:timestamp, 13:b:string, 14:part:int, 15:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 16:ROW__IS__DELETED:boolean]
@@ -723,13 +723,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 14, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-                  Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive_llap_io.q.out
index 0d1b0da0d0c..06b29b26193 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_all_primitive_llap_io.q.out
@@ -349,14 +349,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_boolean_to_bigint
-                Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (t [...]
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55
-                  Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 10 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -644,14 +644,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_decimal_to_double
-                Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 [...]
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-                  Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -849,14 +849,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_various_various_timestamp
-                Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: timestamp), c12 (type: timestamp), b (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                  Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 840 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_llap_io.q.out
index bfbe3f1efdf..d6e672194b6 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part_llap_io.q.out
@@ -1363,14 +1363,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_numeric_group_string_group_floating_string_group
-                Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                  Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2096,14 +2096,14 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: part_change_lower_to_higher_numeric_group_decimal_to_float
-                Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                 Select Operator
                   expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                  Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
index 5aab1f7ee0d..1c1dde61400 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table.q.out
@@ -1004,7 +1004,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: table_change_numeric_group_string_group_floating_string_group_n8
-                Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 18:ROW__IS__DELETED:boolean]
@@ -1015,13 +1015,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table_llap_io.q.out
index 80afaa6ab09..061e0ee9b17 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_table_llap_io.q.out
@@ -1387,7 +1387,7 @@ STAGE PLANS:
           Map Operator Tree:
               TableScan
                 alias: table_change_numeric_group_string_group_floating_string_group_n11
-                Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                 TableScan Vectorization:
                     native: true
                     vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 18:ROW__IS__DELETED:boolean]
@@ -1398,13 +1398,13 @@ STAGE PLANS:
                       className: VectorSelectOperator
                       native: true
                       projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-                  Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                   File Output Operator
                     compressed: false
                     File Sink Vectorization:
                         className: VectorFileSinkOperator
                         native: false
-                    Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 11988 Basic stats: COMPLETE Column stats: PARTIAL
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index d78dd9ab8ce..36af8abc04d 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -41,9 +41,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -63,6 +60,7 @@
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -74,6 +72,7 @@
 import static org.apache.hadoop.hive.metastore.HiveMetaHook.ALTERLOCATION;
 import static org.apache.hadoop.hive.metastore.HiveMetaHook.ALTER_TABLE_OPERATION_TYPE;
 import static org.apache.hadoop.hive.metastore.HiveMetaStoreClient.RENAME_PARTITION_MAKE_COPY;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.findStaleColumns;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
@@ -261,8 +260,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
       boolean isRenameIcebergTable =
           rename && MetaStoreUtils.isIcebergTable(newt.getParameters());
-      List<ColumnStatistics> columnStatistics = getColumnStats(msdb, oldt);
-      columnStatistics = deleteTableColumnStats(msdb, oldt, newt, columnStatistics);
+      deleteTableColumnStats(msdb, oldt, newt);
       if (!isRenameIcebergTable &&
           (replDataLocationChanged || renamedManagedTable || renamedTranslatedToExternalTable ||
@@ -423,6 +421,7 @@ public List<Void> run(List<Partition> input) throws Exception {
             Table table = oldt;
             int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+            Map<List<String>, List<List<String>>> changedColsToPartNames = new HashMap<>();
             Batchable.runBatched(partitionBatchSize, parts, new Batchable<Partition, Void>() {
               @Override
               public List<Void> run(List<Partition> input) throws Exception {
@@ -432,9 +431,16 @@ public List<Void> run(List<Partition> input) throws Exception {
                   Partition oldPart = new Partition(part);
                   List<FieldSchema> oldCols = part.getSd().getCols();
                   part.getSd().setCols(newt.getSd().getCols());
-                  List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catalogName, databaseName,
-                      tableName, part.getValues(), oldCols, table, part, null, null);
-                  assert (colStats.isEmpty());
+                  List<String> deletedCols = new ArrayList<>();
+                  updateOrGetPartitionColumnStats(msdb, catalogName, databaseName,
+                      tableName, part.getValues(), oldCols, table, part, deletedCols);
+                  if (!deletedCols.isEmpty()) {
+                    changedColsToPartNames.compute(deletedCols, (k, v) -> {
+                      if (v == null) v = new ArrayList<>();
+                      v.add(part.getValues());
+                      return v;
+                    });
+                  }
                   if (!cascade) {
                     // update changed properties (stats)
                     oldPart.setParameters(part.getParameters());
@@ -447,6 +453,14 @@ public List<Void> run(List<Partition> input) throws Exception {
                 return Collections.emptyList();
               }
             });
+
+            for (Map.Entry<List<String>, List<List<String>>> entry : changedColsToPartNames.entrySet()) {
+              List<String> partNames = new ArrayList<>();
+              for (List<String> part_vals : entry.getValue()) {
+                partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part_vals));
+              }
+              msdb.deletePartitionColumnStatistics(catalogName, databaseName, tableName, partNames, entry.getKey(), null);
+            }
           } else {
             // clear all column stats to prevent incorrect behaviour in case the same column is reintroduced
             msdb.deleteAllPartitionColumnStatistics(
@@ -461,9 +475,6 @@ public List<Void> run(List<Partition> input) throws Exception {
           }
         }
       }
-      //HIVE-26504: Table columns stats may exist even for partitioned tables, so it must be updated in all cases
-      updateTableColumnStats(msdb, newt, writeIdList, columnStatistics);
-
      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
        txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_TABLE,
@@ -598,7 +609,7 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
       // PartitionView does not have SD. We do not need update its column stats
       if (oldPart.getSd() != null) {
         updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(),
-            oldPart.getSd().getCols(), tbl, new_part, null, null);
+            oldPart.getSd().getCols(), tbl, new_part, null);
       }
       Deadline.checkTimeout();
       msdb.alterPartition(
@@ -754,24 +765,10 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str
             new_part, tbl, wh, false, true, environmentContext, false);
       }
-      String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
-      List<ColumnStatistics> multiColumnStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
-          oldPart.getSd().getCols(), tbl, new_part, null, null);
-      msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds);
-      if (!multiColumnStats.isEmpty()) {
-        for (ColumnStatistics cs : multiColumnStats) {
-          cs.getStatsDesc().setPartName(newPartName);
-          try {
-            msdb.updatePartitionColumnStatistics(tbl, mTable, cs, new_part.getValues(),
-                validWriteIds, new_part.getWriteId());
-          } catch (InvalidInputException iie) {
-            throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
-          } catch (NoSuchObjectException nsoe) {
-            // It is ok, ignore
-          }
-        }
-      }
+      updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
+          oldPart.getSd().getCols(), tbl, new_part, null);
+      msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds);
      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
        MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION,
@@ -886,7 +883,7 @@ public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final
         // PartitionView does not have SD and we do not need to update its column stats
         if (oldTmpPart.getSd() != null) {
           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(),
-              oldTmpPart.getSd().getCols(), tbl, tmpPart, null, null);
+              oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
         }
       }
@@ -1031,61 +1028,23 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
         defaultNewPath.toUri().getPath());
   }
-  public static List<ColumnStatistics> getColumnStats(RawStore msdb, Table oldTable)
-      throws NoSuchObjectException, MetaException {
-    String catName = normalizeIdentifier(oldTable.isSetCatName()
-        ? oldTable.getCatName()
-        : getDefaultCatalog(msdb.getConf()));
-    String dbName = oldTable.getDbName().toLowerCase();
-    String tableName = normalizeIdentifier(oldTable.getTableName());
-    List<String> columnNames = oldTable.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
-    return msdb.getTableColumnStatistics(catName, dbName, tableName, columnNames);
-  }
-
   @VisibleForTesting
-  public static List<ColumnStatistics> deleteTableColumnStats(RawStore msdb, Table oldTable, Table newTable, List<ColumnStatistics> multiColStats)
+  public void deleteTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
       throws InvalidObjectException, MetaException {
-    List<ColumnStatistics> newMultiColStats = new ArrayList<>();
     try {
       String catName = normalizeIdentifier(oldTable.isSetCatName()
           ? oldTable.getCatName()
           : getDefaultCatalog(msdb.getConf()));
       String dbName = oldTable.getDbName().toLowerCase();
       String tableName = normalizeIdentifier(oldTable.getTableName());
-      String newDbName = newTable.getDbName().toLowerCase();
-      String newTableName = normalizeIdentifier(newTable.getTableName());
-      List<FieldSchema> oldTableCols = oldTable.getSd().getCols();
-      List<FieldSchema> newTableCols = newTable.getSd().getCols();
-
-      boolean nameChanged = !newDbName.equals(dbName) || !newTableName.equals(tableName);
-
-      if ((nameChanged || !MetaStoreServerUtils.columnsIncludedByNameType(oldTableCols, newTableCols)) &&
+      List<String> staleColumns = findStaleColumns(oldTable.getSd().getCols(), newTable.getSd().getCols());
+      if (!staleColumns.isEmpty() &&
          // Don't bother in the case of ACID conversion.
          TxnUtils.isAcidTable(oldTable) == TxnUtils.isAcidTable(newTable)) {
-        for (ColumnStatistics colStats : multiColStats) {
-          List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
-          List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
-
-          if (statsObjs != null) {
-            for (ColumnStatisticsObj statsObj : statsObjs) {
-              boolean found = newTableCols.stream().anyMatch(c -> statsObj.getColName().equalsIgnoreCase(c.getName()) &&
-                  statsObj.getColType().equalsIgnoreCase(c.getType()));
-              if (nameChanged || !found) {
-                msdb.deleteTableColumnStatistics(catName, oldTable.getDbName().toLowerCase(),
-                    normalizeIdentifier(oldTable.getTableName()), statsObj.getColName(), colStats.getEngine());
-              }
-              if (found) {
-                newStatsObjs.add(statsObj);
-              }
-            }
-            StatsSetupConst.removeColumnStatsState(newTable.getParameters(),
-                statsObjs.stream().map(ColumnStatisticsObj::getColName).collect(Collectors.toList()));
-          }
-          ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
-          statsDesc.setDbName(newDbName);
-          statsDesc.setTableName(newTableName);
-          colStats.setStatsObj(newStatsObjs);
-          newMultiColStats.add(colStats);
+        msdb.deleteTableColumnStatistics(catName, dbName, tableName, staleColumns, null);
+        Map<String, String> parameters = newTable.getParameters();
+        if (parameters != null && parameters.containsKey(StatsSetupConst.COLUMN_STATS_ACCURATE)) {
+          StatsSetupConst.removeColumnStatsState(parameters, staleColumns);
        }
      }
    } catch (NoSuchObjectException nsoe) {
@@ -1094,120 +1053,33 @@ public static List<ColumnStatistics> deleteTableColumnStats(RawStore msdb, Table
      //should not happen since the input were verified before passed in
      throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
    }
-    return newMultiColStats;
-  }
-
-  @VisibleForTesting
-  public void updateTableColumnStats(RawStore msdb, Table newTable, String validWriteIds, List<ColumnStatistics> columnStatistics)
-      throws MetaException, InvalidObjectException {
-    Deadline.checkTimeout();
-    // Change to new table and append stats for the new table
-    for (ColumnStatistics colStats : columnStatistics) {
-      try {
-        msdb.updateTableColumnStatistics(colStats, validWriteIds, newTable.getWriteId());
-      } catch (NoSuchObjectException nsoe) {
-        LOG.debug("Could not find db entry." + nsoe);
-      } catch (InvalidInputException e) {
-        //should not happen since the input were verified before passed in
-        throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
-      }
-    }
  }
-  public static List<ColumnStatisticsObj> filterColumnStatsForTableColumns(List<FieldSchema> columns, ColumnStatistics colStats) {
-    return colStats.getStatsObj()
-        .stream()
-        .filter(o -> columns
-            .stream()
-            .anyMatch(column -> o.getColName().equalsIgnoreCase(column.getName()) && o.getColType().equalsIgnoreCase(column.getType())))
-        .collect(Collectors.toList());
-  }
-
-  public static List<ColumnStatistics> updateOrGetPartitionColumnStats(
+  public static void updateOrGetPartitionColumnStats(
      RawStore msdb, String catName, String dbname, String tblname, List<String> partVals,
-      List<FieldSchema> oldCols, Table table, Partition part, List<FieldSchema> newCols, List<String> deletedCols)
+      List<FieldSchema> oldCols, Table table, Partition part, List<String> deletedCols)
      throws MetaException, InvalidObjectException {
-    List<ColumnStatistics> newPartsColStats = new ArrayList<>();
-    boolean updateColumnStats = true;
    try {
-      // if newCols are not specified, use default ones.
-      if (newCols == null) {
-        newCols = part.getSd() == null ? new ArrayList<>() : part.getSd().getCols();
-      }
+      List<FieldSchema> newCols = part.getSd().getCols();
      String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
-      String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
-      boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
-          || !oldPartName.equals(newPartName);
-
-      // do not need to update column stats if alter partition is not for rename or changing existing columns
-      if (!rename && MetaStoreServerUtils.columnsIncludedByNameType(oldCols, newCols)) {
-        return newPartsColStats;
+      // do not need to update column stats if existing columns haven't been changed
+      List<String> staleColumns = findStaleColumns(oldCols, newCols);
+      if (staleColumns.isEmpty()) {
+        return;
      }
-      List<String> oldColNames = new ArrayList<>(oldCols.size());
-      for (FieldSchema oldCol : oldCols) {
-        oldColNames.add(oldCol.getName());
+      if (deletedCols == null) {
+        msdb.deletePartitionColumnStatistics(catName, dbname, tblname, Lists.newArrayList(oldPartName), staleColumns, null);
+      } else {
+        deletedCols.addAll(staleColumns);
      }
-      List<String> oldPartNames = Lists.newArrayList(oldPartName);
-      // TODO: doesn't take txn stats into account. This method can only remove stats.
-      List<List<ColumnStatistics>> multiPartsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
-          oldPartNames, oldColNames);
-      for (List<ColumnStatistics> partsColStats : multiPartsColStats) {
-        assert (partsColStats.size() <= 1);
-
-        // for out para, this value is initialized by caller.
-        if (deletedCols == null) {
-          deletedCols = new ArrayList<>();
-        } else {
-          // in case deletedCols is provided by caller, stats will be updated by caller.
-          updateColumnStats = false;
-        }
-        for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop
-          List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
-          List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
-          for (ColumnStatisticsObj statsObj : statsObjs) {
-            boolean found = false;
-            for (FieldSchema newCol : newCols) {
-              if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
-                  && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
-                found = true;
-                break;
-              }
-            }
-            Deadline.checkTimeout();
-            if (found) {
-              if (rename) {
-                if (updateColumnStats) {
-                  msdb.deletePartitionColumnStatistics(catName, dbname, tblname,
-                      partColStats.getStatsDesc().getPartName(), partVals, statsObj.getColName(),
-                      partColStats.getEngine());
-                } else {
-                  deletedCols.add(statsObj.getColName());
-                }
-                newStatsObjs.add(statsObj);
-              }
-            } else {
-              if (updateColumnStats) {
-                msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
-                    partVals, statsObj.getColName(), partColStats.getEngine());
-              }
-              deletedCols.add(statsObj.getColName());
-            }
-          }
-          if (updateColumnStats) {
-            StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
-          }
-          if (!newStatsObjs.isEmpty()) {
-            partColStats.setStatsObj(newStatsObjs);
-            newPartsColStats.add(partColStats);
-          }
-        }
+      Map<String, String> parameters = part.getParameters();
+      if (parameters != null && parameters.containsKey(StatsSetupConst.COLUMN_STATS_ACCURATE)) {
+        StatsSetupConst.removeColumnStatsState(parameters, staleColumns);
      }
    } catch (NoSuchObjectException nsoe) {
      // ignore this exception, actually this exception won't be thrown from getPartitionColumnStatistics
    } catch (InvalidInputException iie) {
      throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
    }
-
-    return newPartsColStats;
  }
}
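
[Editor's note] The core of the HiveAlterHandler change above: instead of reading back and rewriting per-partition ColumnStatistics objects, the handler only computes which columns went stale, groups partitions that share the same stale-column list, and issues one deletePartitionColumnStatistics call per group. A minimal, self-contained sketch of that grouping step, outside the metastore codebase; StaleStatsBatcher and the plain String partition names are hypothetical stand-ins for the changedColsToPartNames map and Warehouse.makePartName output:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class StaleStatsBatcher {
      /**
       * Groups partition names by the exact list of stale columns. List's
       * equals/hashCode are element-wise, so partitions whose stale-column
       * lists match collapse onto one key and, later, onto one delete call.
       */
      static Map<List<String>, List<String>> groupByStaleColumns(Map<String, List<String>> staleColsByPart) {
        Map<List<String>, List<String>> groups = new HashMap<>();
        for (Map.Entry<String, List<String>> e : staleColsByPart.entrySet()) {
          groups.computeIfAbsent(e.getValue(), k -> new ArrayList<>()).add(e.getKey());
        }
        return groups;
      }

      public static void main(String[] args) {
        Map<String, List<String>> staleColsByPart = Map.of(
            "part=1", List.of("c1"),
            "part=2", List.of("c1"),
            "part=3", List.of("c1", "c2"));
        // Two groups -> two metastore calls instead of one per partition (or
        // one per partition per column, as before the patch).
        groupByStaleColumns(staleColsByPart)
            .forEach((cols, parts) -> System.out.println(cols + " -> " + parts));
      }
    }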
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index eb25f7e4452..4d11854ab5d 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -27,6 +27,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
 import java.util.Stack;
@@ -82,6 +83,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.findStaleColumns;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
@@ -209,51 +211,43 @@ private void initSharedCache(Configuration conf) {
     return sharedCache;
   }
-  private static ColumnStatistics updateStatsForAlterPart(RawStore rawStore, Table before, String catalogName,
+  private static void updateStatsForAlterPart(RawStore rawStore, Table before, String catalogName,
       String dbName, String tableName, Partition part) throws Exception {
     List<String> deletedCols = new ArrayList<>();
-    List<ColumnStatistics> multiColumnStats = HiveAlterHandler
-        .updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, tableName, part.getValues(),
-            part.getSd().getCols(), before, part, null, deletedCols);
-    if (multiColumnStats.size() > 1) {
-      throw new RuntimeException("CachedStore can only be enabled for Hive engine");
+    // if this is a table rename, evict all cached column stats
+    boolean rename = !Objects.equals(before.getDbName(), dbName) ||
+        !Objects.equals(before.getTableName(), tableName);
+    if (rename) {
+      deletedCols = part.getSd().getCols().stream().map(FieldSchema::getName).toList();
+    } else {
+      HiveAlterHandler
+          .updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, tableName, part.getValues(),
+              part.getSd().getCols(), before, part, deletedCols);
     }
-    ColumnStatistics colStats = multiColumnStats.isEmpty() ? null : multiColumnStats.get(0);
     for (String column : deletedCols) {
       sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, part.getValues(), column);
     }
-    if (colStats != null) {
-      sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, part.getWriteId(), part.getValues(),
-          part.getParameters(), colStats.getStatsObj());
-    }
-    return colStats;
   }
   private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, Table tblAfter, String catalogName,
       String dbName, String tableName) throws Exception {
-    ColumnStatistics colStats = null;
     if (tblBefore.isSetPartitionKeys()) {
       List<Partition> parts = sharedCache.listCachedPartitions(catalogName, dbName, tableName, -1);
       for (Partition part : parts) {
-        colStats = updateStatsForAlterPart(rawStore, tblBefore, catalogName, dbName, tableName, part);
+        updateStatsForAlterPart(rawStore, tblBefore, catalogName, dbName, tableName, part);
       }
     }
     rawStore.alterTable(catalogName, dbName, tblBefore.getTableName(), tblAfter, null);
-    Set<String> deletedCols = new HashSet<>();
-    List<ColumnStatistics> multiColumnStats = HiveAlterHandler.getColumnStats(rawStore, tblBefore);
-    multiColumnStats.forEach(cs ->
-        deletedCols.addAll(HiveAlterHandler.filterColumnStatsForTableColumns(tblBefore.getSd().getCols(), cs)
-            .stream().map(ColumnStatisticsObj::getColName).collect(Collectors.toList())));
-
-    if (multiColumnStats.size() > 1) {
-      throw new RuntimeException("CachedStore can only be enabled for Hive engine");
-    }
-    List<ColumnStatisticsObj> statisticsObjs = multiColumnStats.isEmpty() ? null : multiColumnStats.get(0).getStatsObj();
-    if (colStats != null) {
-      sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, tblAfter.getWriteId(), statisticsObjs,
-          tblAfter.getParameters());
+    // if this is a table rename, evict all cached column stats
+    boolean rename = !Objects.equals(tblBefore.getDbName(), tblAfter.getDbName()) ||
+        !Objects.equals(tblBefore.getTableName(), tblAfter.getTableName());
+    final List<String> deletedCols;
+    if (rename) {
+      deletedCols = tblBefore.getSd().getCols().stream().map(FieldSchema::getName).toList();
+    } else {
+      deletedCols = findStaleColumns(tblBefore.getSd().getCols(), tblAfter.getSd().getCols());
    }
    for (String column : deletedCols) {
      sharedCache.removeTableColStatsFromCache(catalogName, dbName, tableName, column);
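
[Editor's note] In the CachedStore path the same helper is reused, with one extra case: on a database or table rename every cached stat is stale, so the full old column list is evicted instead of calling back into the handler. A hedged sketch of that decision; SharedCacheLike and evictStale are hypothetical stand-ins for the shared-cache API used above:

    import java.util.List;
    import java.util.Objects;

    class CacheEvictionSketch {
      interface SharedCacheLike {
        void removeTableColStatsFromCache(String cat, String db, String tbl, String col);
      }

      /**
       * Drops cached table column stats after ALTER TABLE: everything on a
       * rename, otherwise only the columns reported stale. Objects.equals
       * tolerates null names, which a plain equals() call would not.
       */
      static void evictStale(SharedCacheLike cache, String cat,
          String beforeDb, String beforeTbl, String afterDb, String afterTbl,
          List<String> beforeCols, List<String> staleCols) {
        boolean rename = !Objects.equals(beforeDb, afterDb) || !Objects.equals(beforeTbl, afterTbl);
        List<String> toEvict = rename ? beforeCols : staleCols;
        for (String col : toEvict) {
          cache.removeTableColStatsFromCache(cat, afterDb, afterTbl, col);
        }
      }
    }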
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
index 77e93f838d5..6606634a116 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
@@ -685,28 +685,25 @@ public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionItera
     return true;
   }
-  /*
+  /**
    * This method checks whether the new column list includes all the old columns with the same name and
    * type. The column comment does not count.
+   *
+   * @return A list of the old columns that do not appear in the new column list
   */
-  public static boolean columnsIncludedByNameType(List<FieldSchema> oldCols,
-      List<FieldSchema> newCols) {
-    if (oldCols.size() > newCols.size()) {
-      return false;
-    }
-
+  public static List<String> findStaleColumns(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
    Map<String, String> columnNameTypePairMap = new HashMap<>(newCols.size());
    for (FieldSchema newCol : newCols) {
      columnNameTypePairMap.put(newCol.getName().toLowerCase(), newCol.getType());
    }
+    List<String> changedCols = new ArrayList<>();
    for (final FieldSchema oldCol : oldCols) {
      if (!columnNameTypePairMap.containsKey(oldCol.getName())
          || !columnNameTypePairMap.get(oldCol.getName()).equalsIgnoreCase(oldCol.getType())) {
-        return false;
+        changedCols.add(oldCol.getName());
      }
    }
-
-    return true;
+    return changedCols;
  }
  /** Duplicates AcidUtils; used in a couple places in metastore. */
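
[Editor's note] findStaleColumns is the pivot of the whole patch: it replaces the boolean columnsIncludedByNameType with the concrete list of columns whose stats must go. A self-contained sketch of its contract, assuming column names reach it already lowercased (the metastore normalizes identifiers upstream, which is presumably why only the new-column names are lowercased in the map above); Col is a hypothetical stand-in for FieldSchema:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class FindStaleColumnsDemo {
      record Col(String name, String type) {}

      /** "Stale" means: missing from newCols, or present with a different type. */
      static List<String> findStaleColumns(List<Col> oldCols, List<Col> newCols) {
        Map<String, String> byName = new HashMap<>();
        for (Col c : newCols) {
          byName.put(c.name().toLowerCase(), c.type());
        }
        List<String> stale = new ArrayList<>();
        for (Col c : oldCols) {
          String newType = byName.get(c.name());
          if (newType == null || !newType.equalsIgnoreCase(c.type())) {
            stale.add(c.name());
          }
        }
        return stale;
      }

      public static void main(String[] args) {
        List<Col> oldCols = List.of(new Col("c1", "int"), new Col("c2", "string"));
        List<Col> newCols = List.of(new Col("c1", "bigint"), new Col("c2", "string"));
        // Prints [c1]: c1 changed type, so only its stats must be dropped.
        System.out.println(findStaleColumns(oldCols, newCols));
      }
    }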
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index 4d92c5c7fc5..40bf21ad19e 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@ -27,7 +27,6 @@
 import org.mockito.Mockito;
 import java.util.Arrays;
-import java.util.List;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -56,14 +55,13 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb
newTable.setSd(newSd);
RawStore msdb = Mockito.mock(RawStore.class);
-    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).updateTableColumnStatistics(
-        Mockito.any(), Mockito.eq(null), Mockito.anyLong());
+    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).deleteTableColumnStatistics(
+        Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyList(), Mockito.anyString());
HiveAlterHandler handler = new HiveAlterHandler();
handler.setConf(conf);
Deadline.registerIfNot(100_000);
Deadline.startTimer("updateTableColumnStats");
-    List<ColumnStatistics> colstats = handler.deleteTableColumnStats(msdb, oldTable, newTable, handler.getColumnStats(msdb, oldTable));
-    handler.updateTableColumnStats(msdb, newTable, null, colstats);
+    handler.deleteTableColumnStats(msdb, oldTable, newTable);
}
@Test
@@ -90,17 +88,9 @@ public void testAlterTableDelColUpdateStats() throws Exception {
handler.setConf(conf);
Deadline.registerIfNot(100_000);
Deadline.startTimer("updateTableColumnStats");
-    try {
-      List<ColumnStatistics> colstats = handler.deleteTableColumnStats(msdb, oldTable, newTable, handler.getColumnStats(msdb, oldTable));
-      handler.updateTableColumnStats(msdb, newTable, null, colstats);
-    } catch (Throwable t) {
-      System.err.println(t);
-      t.printStackTrace(System.err);
-      throw t;
-    }
-    Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
-        getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
-    );
+    handler.deleteTableColumnStats(msdb, oldTable, newTable);
+    Mockito.verify(msdb, Mockito.times(1)).deleteTableColumnStatistics(
+        getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col4"), null);
}
@Test
@@ -123,14 +113,13 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali
newTable.setSd(newSd);
RawStore msdb = Mockito.mock(RawStore.class);
-    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).updateTableColumnStatistics(
-        Mockito.any(), Mockito.eq(null), Mockito.anyLong());
+    Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).deleteTableColumnStatistics(
+        Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyList(), Mockito.anyString());
HiveAlterHandler handler = new HiveAlterHandler();
handler.setConf(conf);
Deadline.registerIfNot(100_000);
Deadline.startTimer("updateTableColumnStats");
-    List<ColumnStatistics> colstats = handler.deleteTableColumnStats(msdb, oldTable, newTable, handler.getColumnStats(msdb, oldTable));
-    handler.updateTableColumnStats(msdb, newTable, null, colstats);
+    handler.deleteTableColumnStats(msdb, oldTable, newTable);
}
}
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreServerUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreServerUtils.java
index bb22300b335..dcc84219899 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreServerUtils.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestMetaStoreServerUtils.java
@@ -167,13 +167,13 @@ public void testcolumnsIncludedByNameType() {
FieldSchema col1a = new FieldSchema("col1", "string", "col1 but with a
different comment");
FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1), Arrays.asList(col1)));
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1), Arrays.asList(col1a)));
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1, col2), Arrays.asList(col1, col2)));
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1, col2), Arrays.asList(col2, col1)));
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1, col2), Arrays.asList(col1, col2, col3)));
-    Assert.assertTrue(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1, col2), Arrays.asList(col3, col2, col1)));
-    Assert.assertFalse(MetaStoreServerUtils.columnsIncludedByNameType(Arrays.asList(col1, col2), Arrays.asList(col1)));
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1), Arrays.asList(col1)).isEmpty());
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1), Arrays.asList(col1a)).isEmpty());
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1, col2), Arrays.asList(col1, col2)).isEmpty());
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1, col2), Arrays.asList(col2, col1)).isEmpty());
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1, col2), Arrays.asList(col1, col2, col3)).isEmpty());
+    Assert.assertTrue(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1, col2), Arrays.asList(col3, col2, col1)).isEmpty());
+    Assert.assertFalse(MetaStoreServerUtils.findStaleColumns(Arrays.asList(col1, col2), Arrays.asList(col1)).isEmpty());
}
/**