This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 047cf9ff4 IMPALA-13954: Validate num inserted rows via NumModifiedRows 
counter
047cf9ff4 is described below

commit 047cf9ff4d3c68c32be0965fbc3cb0c245d3b3b1
Author: Riza Suminto <[email protected]>
AuthorDate: Thu Apr 10 17:37:58 2025 -0700

    IMPALA-13954: Validate num inserted rows via NumModifiedRows counter
    
    This patch changes the way tests validate the number of inserted rows
    from checking the beeswax-specific result to checking the
    NumModifiedRows counter from the query profile.
    
    Remove skipping over the hs2 protocol in test_chars.py and refactor
    test_date_queries.py a bit to reduce test skipping. Added HS2_TYPES in
    tests that require it and fixed some flake8 issues.
    
    Testing:
    Run and pass all affected tests.
    
    Change-Id: I96eae9967298f75b2c9e4d0662fcd4a62bf5fffc
    Reviewed-on: http://gerrit.cloudera.org:8080/22770
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Riza Suminto <[email protected]>
---
 .../queries/QueryTest/alter-table.test             | 12 ++---
 .../queries/QueryTest/chars-tmp-tables.test        | 20 ++++++--
 .../chars-values-stmt-lossy-char-padding.test      | 10 ++--
 .../chars-values-stmt-no-lossy-char-padding.test   | 38 +++++++++------
 .../queries/QueryTest/create-table-like-table.test |  4 +-
 .../queries/QueryTest/create-table.test            |  4 +-
 .../queries/QueryTest/date-fileformat-support.test |  4 +-
 .../QueryTest/decimal-insert-overflow-exprs.test   | 12 ++---
 .../queries/QueryTest/delimited-text.test          |  8 ++--
 .../queries/QueryTest/grant_revoke.test            |  4 +-
 .../queries/QueryTest/hbase-inserts.test           | 56 +++++++++++-----------
 .../queries/QueryTest/hdfs-caching.test            |  4 +-
 .../queries/QueryTest/iceberg-insert.test          | 44 ++++++++---------
 .../functional-query/queries/QueryTest/insert.test | 28 +++++------
 .../queries/QueryTest/insert_null.test             |  8 ++--
 .../queries/QueryTest/insert_overwrite.test        | 12 ++---
 .../queries/QueryTest/insert_permutation.test      | 36 +++++++-------
 .../QueryTest/parquet-error-propagation-race.test  |  4 +-
 .../tpcds-insert/queries/expr-insert.test          |  4 +-
 .../workloads/tpch/queries/insert_parquet.test     |  8 ++--
 tests/query_test/test_chars.py                     | 21 ++++----
 tests/query_test/test_date_queries.py              | 23 ++++++---
 22 files changed, 196 insertions(+), 168 deletions(-)

diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test 
b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
index 0818abb37..09d28ac20 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
@@ -177,8 +177,8 @@ string,string,string
 ---- QUERY
 # Should be able to read/write using the new column types
 insert overwrite table t2 select 1, '50', 2 from functional.alltypes limit 2
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select * from t2
@@ -752,8 +752,8 @@ CREATE TABLE imp1016 (string1 string)
 ====
 ---- QUERY
 INSERT INTO imp1016 VALUES ('test')
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 ALTER TABLE imp1016 ADD COLUMNS (string2 string)
@@ -813,8 +813,8 @@ CREATE TABLE imp1016Large (string1 string)
 # There is a codepath that operates on chunks of 1024 tuples, inserting
 # more than 1024 tuples
 INSERT INTO imp1016Large SELECT 'test' FROM functional.alltypes LIMIT 2000
----- RESULTS
-: 2000
+---- RUNTIME_PROFILE
+NumModifiedRows: 2000
 ====
 ---- QUERY
 ALTER TABLE imp1016Large ADD COLUMNS (string2 string)
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/chars-tmp-tables.test 
b/testdata/workloads/functional-query/queries/QueryTest/chars-tmp-tables.test
index 684095110..14bc81995 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/chars-tmp-tables.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/chars-tmp-tables.test
@@ -6,8 +6,8 @@ create table test_char_tmp (c char(5))
 ====
 ---- QUERY
 insert into test_char_tmp select cast("hello" as char(5))
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from test_char_tmp
@@ -74,6 +74,8 @@ insert into test_varchar_tmp values (cast("hello" as 
varchar(5)))
 select * from test_varchar_tmp
 ---- TYPES
 string
+---- HS2_TYPES
+varchar
 ---- RESULTS
 'hello'
 ====
@@ -95,6 +97,8 @@ insert into test_varchar_tmp values (cast("hel" as 
varchar(4)))
 select * from test_varchar_tmp
 ---- TYPES
 string
+---- HS2_TYPES
+varchar
 ---- RESULTS
 'hello'
 'hel'
@@ -113,6 +117,8 @@ cast("123456" as varchar(5)))
 select cshort, clong, vc from allchars
 ---- TYPES
 char,char,string
+---- HS2_TYPES
+char,char,varchar
 ---- RESULTS
 '12345','123456                                                                
                                                                      ','12345'
 ====
@@ -130,6 +136,8 @@ cast("123456" as varchar(5)))
 select cshort, clong, vc from allchars_par
 ---- TYPES
 char,char,string
+---- HS2_TYPES
+char,char,varchar
 ---- RESULTS
 '12345','123456                                                                
                                                                      ','12345'
 ====
@@ -144,6 +152,8 @@ insert into char_parts (csp, clp, vcp, vc) select cs, cl, 
vc, vc from functional
 select csp, clp, vcp from char_parts where csp != cast('dne' as char(5)) order 
by csp
 ---- TYPES
 char, char, string
+---- HS2_TYPES
+char,char,varchar
 ---- RESULTS
 
'1aaaa','1bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb','1cccc'
 
'2aaaa','2bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb','2cccccc'
@@ -163,6 +173,8 @@ vcp=cast('myvar' as varchar(32))) select cast('val' as 
varchar(32));
 select csp, clp, vcp from char_parts where csp = cast('foo' as char(5))
 ---- TYPES
 char, char, string
+---- HS2_TYPES
+char,char,varchar
 ---- RESULTS
 'foo  
','01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789','myvar'
 ====
@@ -203,8 +215,8 @@ test_char_nulls ( c20 char(20),
 insert into test_char_nulls
 values (NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL),
        (NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 # Regression test for IMPALA-1339
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-lossy-char-padding.test
 
b/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-lossy-char-padding.test
index fc78bc32d..8b3a90ceb 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-lossy-char-padding.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-lossy-char-padding.test
@@ -4,8 +4,8 @@ insert overwrite test_char_values_string_col_tmp values
 (cast("1" as char(1))),
 (cast("12" as char(2))),
 (cast("123" as char(3)));
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select length(s) from test_char_values_string_col_tmp;
@@ -20,13 +20,15 @@ int
 insert overwrite test_char_values_mixed_cols_tmp values
 (cast("1" as char(1)), cast("str" as char(3)), cast("vchar" as char(5))),
 (cast("10" as char(4)), cast("str_2" as char(5)), cast("vchar_2" as char(7)))
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select c, s, v from test_char_values_mixed_cols_tmp;
 ---- TYPES
 char,string,string
+---- HS2_TYPES
+char,string,varchar
 ---- RESULTS
 '1    ','str  ','vchar  '
 '10   ','str_2','vchar_2'
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-no-lossy-char-padding.test
 
b/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-no-lossy-char-padding.test
index cd53170dd..4f92faf62 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-no-lossy-char-padding.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/chars-values-stmt-no-lossy-char-padding.test
@@ -9,8 +9,8 @@ insert overwrite test_char_values_string_col_tmp values
 (cast("1" as char(1))),
 (cast("12" as char(2))),
 (cast("123" as char(3)));
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select length(s) from test_char_values_string_col_tmp;
@@ -30,13 +30,15 @@ create table test_char_values_varchar_col_tmp (v 
varchar(5));
 insert overwrite test_char_values_varchar_col_tmp values
 (cast("1" as char(1))),
 (cast("12" as char(2)));
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select v from test_char_values_varchar_col_tmp;
 ---- TYPES
 string
+---- HS2_TYPES
+varchar
 ---- RESULTS
 '1'
 '12'
@@ -58,8 +60,8 @@ create table test_char_values_char_col_tmp (c char(5));
 insert overwrite test_char_values_char_col_tmp values
 (cast("1" as char(1))),
 (cast("12" as char(2)));
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select c from test_char_values_char_col_tmp;
@@ -78,13 +80,15 @@ create table test_char_values_mixed_cols_tmp (c char(5), s 
string, v varchar(8))
 insert overwrite test_char_values_mixed_cols_tmp values
 (cast("1" as char(1)), cast("str" as char(3)), cast("vchar" as char(5))),
 (cast("10" as char(4)), cast("str_2" as char(5)), cast("vchar_2" as char(7)))
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select c, s, v from test_char_values_mixed_cols_tmp;
 ---- TYPES
 char,string,string
+---- HS2_TYPES
+char,string,varchar
 ---- RESULTS
 '1    ','str','vchar'
 '10   ','str_2','vchar_2'
@@ -98,13 +102,15 @@ create table 
test_char_values_mixed_cols_different_order_tmp (s string, c char(5
 insert overwrite test_char_values_mixed_cols_different_order_tmp values
 (cast("str" as char(3)), cast("1" as char(1)), cast("vchar" as char(5))),
 (cast("str_2" as char(5)), cast("10" as char(4)), cast("vchar_2" as char(7)))
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select s, c, v from test_char_values_mixed_cols_different_order_tmp;
 ---- TYPES
 string,char,string
+---- HS2_TYPES
+string,char,varchar
 ---- RESULTS
 'str','1    ','vchar'
 'str_2','10   ','vchar_2'
@@ -115,13 +121,15 @@ string,char,string
 insert overwrite test_char_values_mixed_cols_different_order_tmp values
 (cast("str" as char(3)), cast("1" as char(1)), cast("vchar" as char(5))),
 (cast("str" as char(3)), cast("10" as char(4)), cast("vchar_2" as char(7)))
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select s, c, v from test_char_values_mixed_cols_different_order_tmp;
 ---- TYPES
 string,char,string
+---- HS2_TYPES
+string,char,varchar
 ---- RESULTS
 'str','1    ','vchar'
 'str','10   ','vchar_2'
@@ -133,13 +141,15 @@ insert overwrite 
test_char_values_mixed_cols_different_order_tmp values
 (cast("str" as char(3)), cast("1" as char(1)), cast("vchar" as char(5))),
 (cast("str_1" as char(5)), cast("1" as char(1)), cast("vchar" as char(5))),
 ("str_2", cast("10" as char(4)), cast("vchar_2" as char(7)))
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select s, c, v from test_char_values_mixed_cols_different_order_tmp;
 ---- TYPES
 string,char,string
+---- HS2_TYPES
+string,char,varchar
 ---- RESULTS
 'str','1    ','vchar'
 'str_1','1    ','vchar'
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/create-table-like-table.test
 
b/testdata/workloads/functional-query/queries/QueryTest/create-table-like-table.test
index 5a11796da..c9f7c5f6a 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/create-table-like-table.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/create-table-like-table.test
@@ -126,8 +126,8 @@ create table if not exists jointbl_like like jointbl_like
 ---- QUERY
 insert overwrite table jointbl_like
 select * from functional.jointbl order by test_id limit 5
----- RESULTS
-: 5
+---- RUNTIME_PROFILE
+NumModifiedRows: 5
 ====
 ---- QUERY
 # Make sure we can read the data.
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/create-table.test 
b/testdata/workloads/functional-query/queries/QueryTest/create-table.test
index e293849e3..22add51ed 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/create-table.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/create-table.test
@@ -34,8 +34,8 @@ STRING, STRING, STRING
 ---- QUERY
 insert overwrite table $DATABASE.testtbl SELECT 1, 'Hi'
 from functional.alltypes limit 10
----- RESULTS
-: 10
+---- RUNTIME_PROFILE
+NumModifiedRows: 10
 ====
 ---- QUERY
 select * from $DATABASE.testtbl
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/date-fileformat-support.test
 
b/testdata/workloads/functional-query/queries/QueryTest/date-fileformat-support.test
index 493fd41cb..2d71f5b0b 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/date-fileformat-support.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/date-fileformat-support.test
@@ -3,8 +3,8 @@
 # Inserting into parquet table works.
 insert into $DATABASE.parquet_date_tbl
 values ('2011-05-06'), ('9999-12-31'), ('0001-01-01');
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 # Querying parquet table written by Parquet-MR and Impala works.
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/decimal-insert-overflow-exprs.test
 
b/testdata/workloads/functional-query/queries/QueryTest/decimal-insert-overflow-exprs.test
index 2851f377d..214a02021 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/decimal-insert-overflow-exprs.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/decimal-insert-overflow-exprs.test
@@ -6,8 +6,8 @@ set decimal_v2=true;
 insert into table overflowed_decimal_tbl_1
 select 1, cast(cast(65496456343.9565 as decimal (28,7))*44658*2.111
 as decimal (28,10));
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # Verify that decimal value, which is expressed with decimal expression with 
one
@@ -16,8 +16,8 @@ set decimal_v2=true;
 insert into table overflowed_decimal_tbl_1
 select 2, cast(a*44658*2.111 as decimal (28,10)) from
 (select cast(65496456.9565 as decimal (28,7)) as a) q;
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # Verify that decimal value, which is expressed with decimal expression with 
three
@@ -28,8 +28,8 @@ select 3, cast(a*b*c as decimal (28,10)) from
 (select cast(65496456.9565 as decimal (28,7)) as a,
  cast(44658 as decimal (28,7)) as b,
  cast(2.111 as decimal (28,7)) as c) q;
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # Verify that overflow behaviour of decimal_v1 is unchanged.
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/delimited-text.test 
b/testdata/workloads/functional-query/queries/QueryTest/delimited-text.test
index 9dde519fe..f07955052 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/delimited-text.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/delimited-text.test
@@ -42,8 +42,8 @@ insert into cbn values
 ('abc , abc', 'xyz \\ xyz', 1, 2),
 ('abc ,,, abc', 'xyz \\\\\\ xyz', 3, 4),
 ('abc \\,\\, abc', 'xyz ,\\,\\ xyz', 5, 6)
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select * from cbn
@@ -60,8 +60,8 @@ insert into dhp values
 ('abc $ abc', 'xyz # xyz', 1, 2),
 ('abc $$$ abc', 'xyz ### xyz', 3, 4),
 ('abc #$#$ abc', 'xyz $#$# xyz', 5, 6)
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select * from dhp
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test 
b/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
index 9620442dc..47903d071 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/grant_revoke.test
@@ -454,8 +454,8 @@ STRING, STRING, STRING, STRING, STRING, STRING, STRING, 
STRING, STRING, STRING,
 ====
 ---- QUERY
 insert overwrite grant_rev_db.test_tbl1 select 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from grant_rev_db.test_tbl1
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/hbase-inserts.test 
b/testdata/workloads/functional-query/queries/QueryTest/hbase-inserts.test
index 3e43e3098..cbf9f1d2a 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/hbase-inserts.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/hbase-inserts.test
@@ -3,8 +3,8 @@
 insert into table insertalltypesagg
 select id, bigint_col, cast(string_col as binary), bool_col, date_string_col, 
day, double_col, float_col,
 int_col, month, smallint_col, string_col, timestamp_col, tinyint_col, year 
from functional.alltypesagg
----- RESULTS
-: 11000
+---- RUNTIME_PROFILE
+NumModifiedRows: 11000
 ====
 ---- QUERY
 select id, bool_col from insertalltypesagg
@@ -21,8 +21,8 @@ INT, BOOLEAN
 insert into table insertalltypesagg
 select 9999999, bigint_col, cast(string_col as binary), false, 
date_string_col, day, double_col, float_col,
 int_col, month, smallint_col, string_col, timestamp_col, tinyint_col, year 
from functional.alltypesagg
----- RESULTS
-: 11000
+---- RUNTIME_PROFILE
+NumModifiedRows: 11000
 ====
 ---- QUERY
 select id, bool_col from insertalltypesagg
@@ -39,8 +39,8 @@ INT, BOOLEAN
 # using limit 1 to reduce execution time
 insert into table insertalltypesagg
 select * from insertalltypesagg limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # test inserting Hive's default text representation of NULL '\N'
@@ -48,8 +48,8 @@ select * from insertalltypesagg limit 1
 insert into table insertalltypesagg
 select 9999999, bigint_col, cast("\\N" as binary), false, "\\N", day, 
double_col, float_col,
 int_col, month, smallint_col, "\\N", timestamp_col, tinyint_col, year from 
functional.alltypesagg limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select id, date_string_col, string_col, binary_col from insertalltypesagg
@@ -63,8 +63,8 @@ INT, STRING, STRING, BINARY
 insert into table insertalltypesaggbinary
 select id, bigint_col, cast(string_col as binary), bool_col, date_string_col, 
day, double_col, float_col,
 int_col, month, smallint_col, string_col, timestamp_col, tinyint_col, year 
from functional.alltypesagg
----- RESULTS
-: 11000
+---- RUNTIME_PROFILE
+NumModifiedRows: 11000
 ====
 ---- QUERY
 select count(*) from (
@@ -111,8 +111,8 @@ INT, BOOLEAN
 insert into table insertalltypesaggbinary
 select 9999999, bigint_col,  cast(string_col as binary), false, 
date_string_col, day, double_col, float_col,
 int_col, month, smallint_col, string_col, timestamp_col, tinyint_col, year 
from functional.alltypesagg
----- RESULTS
-: 11000
+---- RUNTIME_PROFILE
+NumModifiedRows: 11000
 ====
 ---- QUERY
 select id, bool_col from insertalltypesaggbinary
@@ -129,8 +129,8 @@ INT, BOOLEAN
 # using limit 1 to reduce execution time
 insert into table insertalltypesaggbinary
 select * from insertalltypesaggbinary limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # test inserting Hive's default text representation of NULL '\N'
@@ -138,8 +138,8 @@ select * from insertalltypesaggbinary limit 1
 insert into table insertalltypesaggbinary
 select 9999999, bigint_col,  cast("\\N" as binary), false, "\\N", day, 
double_col, float_col,
 int_col, month, smallint_col, "\\N", timestamp_col, tinyint_col, year from 
functional.alltypesagg limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select id, date_string_col, string_col, binary_col from insertalltypesaggbinary
@@ -152,8 +152,8 @@ INT, STRING, STRING, BINARY
 ---- QUERY
 #IMPALA-715 handle large string value
 insert into table insertalltypesagg(id, string_col) values(9999999, rpad('a', 
50000, 'b'))
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select id, length(string_col) from insertalltypesagg
@@ -166,8 +166,8 @@ INT, INT
 ---- QUERY
 # IMPALA-2133
 insert into table insertalltypesagg (id, string_col) values (99999999, 
'William\'s'), (999999999, "Other\"s")
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select id, string_col from insertalltypesagg where id = 99999999
@@ -194,8 +194,8 @@ INT, STRING
 insert into table insert_date_tbl
 select id_col, date_col, date_part
 from functional.date_tbl
----- RESULTS
-: 22
+---- RUNTIME_PROFILE
+NumModifiedRows: 22
 ====
 ---- QUERY
 select id_col, date_col from insert_date_tbl
@@ -212,8 +212,8 @@ INT, DATE
 insert into table insert_date_tbl
 select 9999999, date_col, '1521-12-13'
 from functional.date_tbl
----- RESULTS
-: 22
+---- RUNTIME_PROFILE
+NumModifiedRows: 22
 ====
 ---- QUERY
 select id_col, date_part from insert_date_tbl
@@ -230,14 +230,14 @@ INT, DATE
 # using limit 1 to reduce execution time
 insert into table insert_date_tbl
 select * from insert_date_tbl limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 # Insert special characters to binary_col.
 insert into table insertalltypesagg (id, binary_col) values (99999999, 
cast(unhex('00112233445566778899AABBCCDDEEFF') as binary))
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select id, hex(cast(binary_col as string)) from insertalltypesagg where id = 
99999999
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/hdfs-caching.test 
b/testdata/workloads/functional-query/queries/QueryTest/hdfs-caching.test
index a82c09127..c00cad202 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/hdfs-caching.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/hdfs-caching.test
@@ -7,8 +7,8 @@ create table cached_tbl_nopart (i int) cached in 'testPool'
 ====
 ---- QUERY
 insert into cached_tbl_nopart select 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from cached_tbl_nopart
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-insert.test 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-insert.test
index 0773f0b63..1da839975 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-insert.test
@@ -25,8 +25,8 @@ CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, 
timestamp_col
 from functional.alltypes
 order by id
 limit 5;
----- RESULTS
-: 5
+---- RUNTIME_PROFILE
+NumModifiedRows: 5
 ====
 ---- QUERY
 select * from iceberg_alltypes;
@@ -104,8 +104,8 @@ stored as iceberg
 tblproperties('iceberg.catalog'='hadoop.catalog',
   
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/$DATABASE.db/hadoop_catalog_test');
 insert into iceberg_hadoop_cat values (1), (2), (3);
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select * from iceberg_hadoop_cat;
@@ -131,8 +131,8 @@ tblproperties('iceberg.catalog'='hadoop.catalog',
   
'iceberg.catalog_location'='$WAREHOUSE_LOCATION_PREFIX/test-warehouse/$DATABASE.db/hadoop_catalog_test',
   'iceberg.table_identifier'='test.custom_db.int_table');
 insert into iceberg_hadoop_cat_ti values (1), (2), (3);
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select * from iceberg_hadoop_cat_ti;
@@ -179,8 +179,8 @@ stored as iceberg
 location 
'$WAREHOUSE_LOCATION_PREFIX/test-warehouse/$DATABASE.db/custom_hive_cat'
 tblproperties('iceberg.catalog'='hive.catalog');
 insert into iceberg_hive_cat_custom_loc values (1), (2), (3);
----- RESULTS
-: 3
+---- RUNTIME_PROFILE
+NumModifiedRows: 3
 ====
 ---- QUERY
 select * from iceberg_hive_cat_custom_loc;
@@ -227,8 +227,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop set tblproperties (
@@ -242,8 +242,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop set tblproperties (
@@ -258,8 +258,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop set tblproperties (
@@ -274,8 +274,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop set tblproperties (
@@ -290,8 +290,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop set tblproperties (
@@ -306,8 +306,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 alter table iceberg_alltypes_parq_tblprop unset tblproperties (
@@ -322,8 +322,8 @@ insert into iceberg_alltypes_parq_tblprop
 select id, bool_col, int_col, bigint_col, float_col, double_col,
 CAST(date_string_col as date FORMAT 'MM/DD/YY'), string_col, timestamp_col
 from functional.alltypes;
----- RESULTS
-: 7300
+---- RUNTIME_PROFILE
+NumModifiedRows: 7300
 ====
 ---- QUERY
 select count(*) from iceberg_alltypes_parq_tblprop;
diff --git a/testdata/workloads/functional-query/queries/QueryTest/insert.test 
b/testdata/workloads/functional-query/queries/QueryTest/insert.test
index 190fa1f36..19d5ca99b 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/insert.test
@@ -12,8 +12,8 @@ select id, bool_col, tinyint_col, smallint_col, int_col, 
bigint_col,
 float_col, double_col, date_string_col, string_col, timestamp_col
 from functional.alltypessmall
 where year=2009 and month=04
----- RESULTS
-: 25
+---- RUNTIME_PROFILE
+NumModifiedRows: 25
 ====
 ---- QUERY
 # search the overwritten table to verify the results
@@ -57,8 +57,8 @@ select id, bool_col, tinyint_col, smallint_col, int_col, 
bigint_col,
 float_col, double_col, date_string_col, string_col, timestamp_col
 from functional.alltypessmall
 where year=2009 and month=04
----- RESULTS
-: 25
+---- RUNTIME_PROFILE
+NumModifiedRows: 25
 ====
 ---- QUERY
 # search the table to verify it contains 25 rows
@@ -630,8 +630,8 @@ year=2009/month=4: 4
 insert overwrite table alltypesnopart_insert
 select NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
 from functional.alltypessmall limit 10
----- RESULTS
-: 10
+---- RUNTIME_PROFILE
+NumModifiedRows: 10
 ====
 ---- QUERY
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
@@ -655,8 +655,8 @@ int, boolean, tinyint, smallint, int, bigint, float, 
double, string, string, tim
 insert overwrite alltypesnopart_insert(float_col, double_col)
 values(CAST(1/0 AS FLOAT), 1/0), (CAST(-1/0 AS FLOAT), -1/0),
       (CAST(0/0 AS FLOAT), 0/0), (CAST(-sqrt(-1) AS FLOAT), -sqrt(-1))
----- RESULTS
-: 4
+---- RUNTIME_PROFILE
+NumModifiedRows: 4
 ====
 ---- QUERY
 # Results have to be cast to strings, because nan == f is always false for all 
f
@@ -693,8 +693,8 @@ BIGINT
 insert overwrite table alltypesnopart_insert
 select 1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
 from functional.alltypessmall limit 0
----- RESULTS
-: 0
+---- RUNTIME_PROFILE
+NumModifiedRows: 0
 ====
 ---- QUERY
 select count(*) from alltypesnopart_insert
@@ -756,8 +756,8 @@ INT, BOOLEAN, TINYINT, SMALLINT, INT, BIGINT, FLOAT, 
DOUBLE, STRING, STRING, TIM
 # IMPALA-1740: Test inserting into table with the skip.header.line.count 
tblproperty
 truncate table_with_header_insert;
 insert into table_with_header_insert values (1), (2), (3), (4);
----- RESULTS
-: 4
+---- RUNTIME_PROFILE
+NumModifiedRows: 4
 ====
 ---- QUERY
 select * from table_with_header_insert;
@@ -917,8 +917,8 @@ insert into table alltypesnopart_insert
  /*+ noclustered,shuffle */
 select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col,
 double_col, date_string_col, string_col, timestamp_col from 
functional.alltypessmall;
----- RESULTS
-: 100
+---- RUNTIME_PROFILE
+NumModifiedRows: 100
 ====
 ---- QUERY
 # IMPALA-6280: clustered (default) with outer join, inline view, and 
TupleisNullPredicate
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/insert_null.test 
b/testdata/workloads/functional-query/queries/QueryTest/insert_null.test
index 92b0db967..b9e3ac1c7 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/insert_null.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/insert_null.test
@@ -21,8 +21,8 @@ create table nullformat_custom like 
$ORIGINAL_DB.nullformat_custom;
 # Test that we properly write null values to text tables.
 insert overwrite table nullinsert
 select NULL, "", "NULL", "\\N", NULL from functional.alltypes limit 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from nullinsert
@@ -162,8 +162,8 @@ select 2, true, "", 1, 1 union all
 select 3, false, "NULL", 2, 2 union all
 select 4, false, "xyz", 3, 3 union all
 select 5, false, "xyzbar", 4, 4
----- RESULTS
-: 5
+---- RUNTIME_PROFILE
+NumModifiedRows: 5
 ====
 ---- QUERY
 # Test correct interpretation of NULLs with custom
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/insert_overwrite.test 
b/testdata/workloads/functional-query/queries/QueryTest/insert_overwrite.test
index 133db70c2..fb8ffd066 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/insert_overwrite.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/insert_overwrite.test
@@ -8,8 +8,8 @@ create table insert_overwrite_partitioned like 
$ORIGINAL_DB.insert_overwrite_par
 insert overwrite table insert_overwrite_nopart
 select int_col
 from functional.tinyinttable
----- RESULTS
-: 10
+---- RUNTIME_PROFILE
+NumModifiedRows: 10
 ====
 ---- QUERY
 # Check results - note larger limit than expected in case there's more data 
written than there should be
@@ -36,8 +36,8 @@ int
 insert overwrite table insert_overwrite_nopart
 select 10
 from functional.tinyinttable
----- RESULTS
-: 10
+---- RUNTIME_PROFILE
+NumModifiedRows: 10
 ====
 ---- QUERY
 # check results from previous insert
@@ -64,8 +64,8 @@ insert overwrite table insert_overwrite_nopart
 select 3
 from functional.tinyinttable
 limit 0
----- RESULTS
-: 0
+---- RUNTIME_PROFILE
+NumModifiedRows: 0
 ====
 ---- QUERY
 select count(*) from insert_overwrite_nopart
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/insert_permutation.test 
b/testdata/workloads/functional-query/queries/QueryTest/insert_permutation.test
index 174550c3a..7fba6b9af 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/insert_permutation.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/insert_permutation.test
@@ -21,8 +21,8 @@ partitioned by (p1 int, p2 string) stored as parquet;
 truncate insert_permutation_test.perm_nopart;
 # Simple non-permutation
 insert into perm_nopart(int_col1, string_col, int_col2) values(1,'str',2)
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -35,8 +35,8 @@ INT,STRING,INT
 truncate insert_permutation_test.perm_nopart;
 # Permute the int columns
 insert into perm_nopart(int_col2, string_col, int_col1) values(1,'str',2)
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -49,8 +49,8 @@ INT,STRING,INT
 truncate insert_permutation_test.perm_nopart;
 # Leave out two columns, check they are assigned NULL
 insert into perm_nopart(int_col2) values(1)
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -162,8 +162,8 @@ INT,STRING,INT,STRING
 truncate insert_permutation_test.perm_nopart;
 # Check behaviour of empty permutation clause with unpartitioned table
 insert into perm_nopart()
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -191,8 +191,8 @@ truncate insert_permutation_test.perm_nopart;
 # Perform the same set of queries, but with SELECT clauses rather than VALUES
 # Simple non-permutation
 insert into perm_nopart(int_col1, string_col, int_col2) select 1,'str',2
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -205,8 +205,8 @@ INT,STRING,INT
 truncate insert_permutation_test.perm_nopart;
 # Permute the int columns
 insert into perm_nopart(int_col2, string_col, int_col1) select 1,'str',2
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -219,8 +219,8 @@ INT,STRING,INT
 truncate insert_permutation_test.perm_nopart;
 # Leave out two columns, check they are assigned NULL
 insert into perm_nopart(int_col2) select 1
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 select * from perm_nopart
@@ -292,8 +292,8 @@ truncate insert_permutation_test.perm_nopart;
 # Simple non-permutation
 insert into perm_nopart(int_col1, string_col, int_col2) select 1,'str',2 FROM
 functional.alltypes LIMIT 2
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select * from perm_nopart
@@ -306,8 +306,8 @@ INT,STRING,INT
 ---- QUERY
 truncate insert_permutation_test.perm_nopart;
 insert into perm_nopart(int_col1) select id FROM functional.alltypes ORDER BY 
ID LIMIT 2
----- RESULTS
-: 2
+---- RUNTIME_PROFILE
+NumModifiedRows: 2
 ====
 ---- QUERY
 select * from perm_nopart
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/parquet-error-propagation-race.test
 
b/testdata/workloads/functional-query/queries/QueryTest/parquet-error-propagation-race.test
index 8d7b97b75..ca2e9d25d 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/parquet-error-propagation-race.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/parquet-error-propagation-race.test
@@ -2,8 +2,8 @@
 ---- QUERY
 # Add a valid file with a single row to the table.
 INSERT INTO bad_magic_number SELECT 'good';
----- RESULTS
-: 1
+---- RUNTIME_PROFILE
+NumModifiedRows: 1
 ====
 ---- QUERY
 set debug_action="0:SCANNER_ERROR:DELAY";
diff --git a/testdata/workloads/tpcds-insert/queries/expr-insert.test 
b/testdata/workloads/tpcds-insert/queries/expr-insert.test
index d38416cca..b1bfce9a8 100644
--- a/testdata/workloads/tpcds-insert/queries/expr-insert.test
+++ b/testdata/workloads/tpcds-insert/queries/expr-insert.test
@@ -8,8 +8,8 @@ CREATE TABLE str_insert (s string) STORED AS $FILE_FORMAT
 INSERT INTO str_insert
 SELECT case when ss_promo_sk % 2 = 0 then 'even' else 'odd' end
 FROM tpcds.store_sales
----- RESULTS
-: 2880404
+---- RUNTIME_PROFILE
+NumModifiedRows: 2880404
 ====
 ---- QUERY: TPCDS-STR-INSERT-CASE
 SELECT COUNT(*) FROM str_insert
diff --git a/testdata/workloads/tpch/queries/insert_parquet.test 
b/testdata/workloads/tpch/queries/insert_parquet.test
index 1cc8a6284..4438ed420 100644
--- a/testdata/workloads/tpch/queries/insert_parquet.test
+++ b/testdata/workloads/tpch/queries/insert_parquet.test
@@ -4,8 +4,8 @@
 create table if not exists orders_insert_test like tpch_parquet.orders
 location '$FILESYSTEM_PREFIX/test-warehouse/$DATABASE.db/orders_insert_table';
 insert overwrite table orders_insert_test select * from tpch.orders
----- RESULTS
-: 1500000
+---- RUNTIME_PROFILE
+NumModifiedRows: 1500000
 ====
 ---- QUERY
 select count(distinct o_orderkey) from orders_insert_test
@@ -63,8 +63,8 @@ insert overwrite table test_insert_huge_vals
   select cast(l_orderkey as string) from tpch.lineitem
   union select group_concat(concat(s_name, s_address, s_phone)) from 
tpch.supplier
   union select group_concat(concat(s_name, s_address, s_phone, s_name)) from 
tpch.supplier
----- RESULTS
-: 1500002
+---- RUNTIME_PROFILE
+NumModifiedRows: 1500002
 ====
 ---- QUERY
 # Verify the values written to test_insert_huge_vals were as expected by 
counting
diff --git a/tests/query_test/test_chars.py b/tests/query_test/test_chars.py
index 7eba22f42..2405e4358 100644
--- a/tests/query_test/test_chars.py
+++ b/tests/query_test/test_chars.py
@@ -18,12 +18,10 @@
 from __future__ import absolute_import, division, print_function
 from copy import deepcopy
 
-import pytest
-
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.test_dimensions import (create_exec_option_dimension,
     create_client_protocol_dimension, hs2_parquet_constraint, 
hs2_text_constraint)
-from tests.util.filesystem_utils import get_fs_path
+
 
 class TestStringQueries(ImpalaTestSuite):
   @classmethod
@@ -43,15 +41,11 @@ class TestStringQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/chars', vector)
 
   def test_chars_tmp_tables(self, vector, unique_database):
-    if vector.get_value('protocol') in ['hs2', 'hs2-http']:
-      pytest.skip("HS2 does not return row counts for inserts")
     # Tests that create temporary tables and require a unique database.
     self.run_test_case('QueryTest/chars-tmp-tables', vector, unique_database)
 
   # Regression tests for IMPALA-10753.
   def test_chars_values_stmt(self, vector, unique_database):
-    if vector.get_value('protocol') in ['hs2', 'hs2-http']:
-      pytest.skip("HS2 does not return row counts for inserts")
     vector = deepcopy(vector)
     vector.get_value('exec_option')['values_stmt_avoid_lossy_char_padding'] = 
True
     self.run_test_case('QueryTest/chars-values-stmt-no-lossy-char-padding',
@@ -61,6 +55,7 @@ class TestStringQueries(ImpalaTestSuite):
     self.run_test_case('QueryTest/chars-values-stmt-lossy-char-padding',
         vector, unique_database)
 
+
 class TestCharFormats(ImpalaTestSuite):
   @classmethod
   def add_test_dimensions(cls):
@@ -68,12 +63,12 @@ class TestCharFormats(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_dimension(
       create_exec_option_dimension(disable_codegen_options=[False, True]))
     cls.ImpalaTestMatrix.add_constraint(lambda v:
-        (v.get_value('table_format').file_format in ['avro'] and
-        v.get_value('table_format').compression_codec in ['snap']) or
-        v.get_value('table_format').file_format in ['parquet'] or
-        v.get_value('table_format').file_format in ['orc'] or
-        (v.get_value('table_format').file_format in ['text', 'json']
-         and v.get_value('table_format').compression_codec in ['none']))
+        (v.get_value('table_format').file_format in ['avro']
+         and v.get_value('table_format').compression_codec in ['snap'])
+        or v.get_value('table_format').file_format in ['parquet']
+        or v.get_value('table_format').file_format in ['orc']
+        or (v.get_value('table_format').file_format in ['text', 'json']
+            and v.get_value('table_format').compression_codec in ['none']))
     # Run these queries through both beeswax and HS2 to get coverage of 
CHAR/VARCHAR
     # returned via both protocols.
     cls.ImpalaTestMatrix.add_dimension(create_client_protocol_dimension())
diff --git a/tests/query_test/test_date_queries.py 
b/tests/query_test/test_date_queries.py
index ccb20514f..310159203 100644
--- a/tests/query_test/test_date_queries.py
+++ b/tests/query_test/test_date_queries.py
@@ -18,7 +18,6 @@
 # Targeted tests for date type.
 
 from __future__ import absolute_import, division, print_function
-import pytest
 from tests.common.file_utils import create_table_and_copy_files
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfFS
@@ -27,10 +26,11 @@ from tests.common.test_dimensions import 
(create_exec_option_dimension_from_dict
 from tests.shell.util import create_impala_shell_executable_dimension
 
 
-class TestDateQueries(ImpalaTestSuite):
+class TestDateQueriesBase(ImpalaTestSuite):
+
   @classmethod
   def add_test_dimensions(cls):
-    super(TestDateQueries, cls).add_test_dimensions()
+    super(TestDateQueriesBase, cls).add_test_dimensions()
     cls.ImpalaTestMatrix.add_dimension(
       create_exec_option_dimension_from_dict({
         'batch_size': [0, 1],
@@ -49,6 +49,9 @@ class TestDateQueries(ImpalaTestSuite):
     cls.ImpalaTestMatrix.add_constraint(hs2_parquet_constraint)
     
cls.ImpalaTestMatrix.add_dimension(create_impala_shell_executable_dimension())
 
+
+class TestDateQueriesAllFormat(TestDateQueriesBase):
+
   def test_queries(self, vector):
     if vector.get_value('table_format').file_format == 'avro':
       # Avro date test queries are in a separate test file.
@@ -60,12 +63,20 @@ class TestDateQueries(ImpalaTestSuite):
     else:
       self.run_test_case('QueryTest/date', vector)
 
+
+class TestDateQueriesTextFormat(TestDateQueriesBase):
+
+  @classmethod
+  def add_test_dimensions(cls):
+    super(TestDateQueriesTextFormat, cls).add_test_dimensions()
+    # Only run this test class with 'text' table_format.
+    cls.ImpalaTestMatrix.add_constraint(lambda v:
+        v.get_value('table_format').file_format == 'text')
+
   def test_partitioning(self, vector, unique_database):
     """ Test partitioning by DATE. """
     # This test specifies databases explicitly. No need to execute it for 
anything other
     # than text fileformat.
-    if vector.get_value('table_format').file_format != 'text':
-      pytest.skip()
     self.run_test_case('QueryTest/date-partitioning', vector, 
use_db=unique_database)
 
   @SkipIfFS.qualified_path
@@ -75,8 +86,6 @@ class TestDateQueries(ImpalaTestSuite):
     """
     # This test specifies databases and locations explicitly. No need to 
execute it for
     # anything other than text fileformat on HDFS.
-    if vector.get_value('table_format').file_format != 'text':
-      pytest.skip()
 
     # Parquet table with date column.
     TABLE_NAME = "parquet_date_tbl"


Reply via email to