This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 0715a612b95 [Fix](regression-test) fix test_export_max_file_size case (#50795)
0715a612b95 is described below

commit 0715a612b95030e26439d1fa3c96eda20666a6a3
Author: Tiewei Fang <[email protected]>
AuthorDate: Mon May 12 17:28:15 2025 +0800

    [Fix](regression-test) fix test_export_max_file_size case (#50795)
    
    Problem Summary:
    The Outfile statement may produce an empty file with zero rows, and
    importing such a file fails. Before loading each exported file back,
    the test now counts its rows and skips files that are empty.
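    
    A minimal sketch of the guard this patch adds, stripped of the
    HA/Kerberos properties the real test passes to hdfs() (only "uri"
    and "format" are kept here; `outfile_url` and `fileNumber` come from
    the surrounding suite, as in the diff below):
    
        for (int j = 0; j < fileNumber; ++j) {
            // Count the rows of the j-th exported file with the hdfs()
            // table-valued function before trying to load it.
            def res = sql """
                select count(*) from hdfs(
                    "uri" = "${outfile_url}${j}.csv",
                    "format" = "csv"
                );
            """
            // An empty (zero-row) file would make the subsequent load
            // fail, so skip it.
            if (res[0][0] == 0) {
                continue
            }
            // ... insert-based data correctness check follows ...
        }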
---
 .../export_p2/test_export_max_file_size.groovy     | 24 +++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/regression-test/suites/export_p2/test_export_max_file_size.groovy b/regression-test/suites/export_p2/test_export_max_file_size.groovy
index ebe89ed3ef5..159104c7c2d 100644
--- a/regression-test/suites/export_p2/test_export_max_file_size.groovy
+++ b/regression-test/suites/export_p2/test_export_max_file_size.groovy
@@ -149,7 +149,29 @@ suite("test_export_max_file_size", "p2,external") {
             assertEquals("3", json.fileNumber[0][0])
             def outfile_url = json.url[0][0]
 
-            for (int j = 0; j < json.fileNumber[0][0].toInteger(); ++j ) {
+            for (int j = 0; j < json.fileNumber[0][0].toInteger(); ++j) {
+                def res = sql """ 
+                    select count(*) from hdfs(
+                        "uri" = "${outfile_url}${j}.csv",
+                        "format" = "csv",
+                        "dfs.data.transfer.protection" = "integrity",
+                        'dfs.nameservices'="${dfsNameservices}",
+                        'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
+                        'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
+                        'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
+                        'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
+                        'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
+                        'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
+                        'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
+                        'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
+                        'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                    );
+                """
+                logger.info("res[0][0] = " + res[0][0]);
+                if(res[0][0] == 0) {
+                    continue;
+                }
+
                 // check data correctness
                 sql """ 
                     insert into ${table_load_name}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
