This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-4.0 by this push:
     new 4c3565f7b8a branch-4.0: [fix] (parquet-reader) Fix parquet all row groups are filtered. #57490 (#57589)
4c3565f7b8a is described below

commit 4c3565f7b8a51c5246b9723e5b3ab31ff785c31e
Author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Mon Nov 3 11:13:43 2025 +0800

    branch-4.0: [fix] (parquet-reader) Fix parquet all row groups are filtered. #57490 (#57589)
    
    Cherry-picked from #57490
    
    Co-authored-by: Qi Chen <[email protected]>
---
 be/src/vec/exec/scan/file_scanner.cpp | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/be/src/vec/exec/scan/file_scanner.cpp b/be/src/vec/exec/scan/file_scanner.cpp
index 25deb1a8a3e..116fcbd33e7 100644
--- a/be/src/vec/exec/scan/file_scanner.cpp
+++ b/be/src/vec/exec/scan/file_scanner.cpp
@@ -1170,7 +1170,13 @@ Status FileScanner::_get_next_reader() {
             // to filter the row group. But if this is count push down, the offset is undefined,
             // causing incorrect row group filter and may return empty result.
         } else {
-            RETURN_IF_ERROR(_set_fill_or_truncate_columns(need_to_get_parsed_schema));
+            Status status = _set_fill_or_truncate_columns(need_to_get_parsed_schema);
+            if (status.is<END_OF_FILE>()) { // all parquet row groups are filtered
+                continue;
+            } else if (!status.ok()) {
+                return Status::InternalError("failed to set_fill_or_truncate_columns, err: {}",
+                                             status.to_string());
+            }
         }
         _cur_reader_eof = false;
         break;


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to