This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new f26b21d5ce4 [fix](test) fix p2 export cases (#60333)
f26b21d5ce4 is described below

commit f26b21d5ce43608fd4cd5209872664ac62e0f1be
Author: Mingyu Chen (Rayner) <[email protected]>
AuthorDate: Fri Feb 6 21:11:49 2026 +0800

    [fix](test) fix p2 export cases (#60333)
---
 be/src/vec/sink/writer/vfile_result_writer.cpp     |   6 ++
 regression-test/data/export_p2/test_outfile_p2.out |   4 -
 .../export_p2/test_export_max_file_size.groovy     | 107 ++++++++-------------
 .../test_outfile_orc_max_file_size.groovy          |  95 ++++++++----------
 .../suites/export_p2/test_outfile_p2.groovy        |  72 --------------
 5 files changed, 85 insertions(+), 199 deletions(-)

diff --git a/be/src/vec/sink/writer/vfile_result_writer.cpp b/be/src/vec/sink/writer/vfile_result_writer.cpp
index 287ce64d185..edec869ab36 100644
--- a/be/src/vec/sink/writer/vfile_result_writer.cpp
+++ b/be/src/vec/sink/writer/vfile_result_writer.cpp
@@ -253,6 +253,12 @@ Status VFileResultWriter::_write_file(const Block& block) {
         RETURN_IF_ERROR(_vfile_writer->write(block));
     }
     // split file if exceed limit
+    // the written length from the file writer may not be updated in real time,
+    // because for the ORC writer, the ORC OutputStream only flushes when
+    // buffered data is larger than the stripe size (default 64MB).
+    // So even if max_file_size_bytes is set to 5MB, the file size will still
+    // be 64MB.
+    // TODO: optimize this logic
     _current_written_bytes = _vfile_writer->written_len();
     return _create_new_file_if_exceed_size();
 }
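
In other words, the split check in _write_file() can only fire at a stripe boundary, so the effective threshold rounds up to the next multiple of the stripe size. A minimal Groovy sketch of that expectation (a hypothetical helper for illustration only, not part of this patch; 64MB is the default stripe size named in the comment):

    // Hypothetical helper (illustration only): _write_file() observes
    // written_len() only after the ORC OutputStream flushes a full stripe,
    // so the split can trigger no earlier than the next stripe boundary.
    long effectiveSplitBytes(long maxFileSizeBytes, long orcStripeBytes = 64L * 1024 * 1024) {
        long stripes = (long) Math.ceil(maxFileSizeBytes / (double) orcStripeBytes)
        return Math.max(stripes, 1L) * orcStripeBytes
    }

    assert effectiveSplitBytes(5L * 1024 * 1024)  == 64L * 1024 * 1024  // a 5MB limit still produces ~64MB files
    assert effectiveSplitBytes(32L * 1024 * 1024) == 64L * 1024 * 1024  // same for 32MB, matching the adjusted tests below
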
diff --git a/regression-test/data/export_p2/test_outfile_p2.out b/regression-test/data/export_p2/test_outfile_p2.out
deleted file mode 100644
index ccb2d43e0ed..00000000000
--- a/regression-test/data/export_p2/test_outfile_p2.out
+++ /dev/null
@@ -1,4 +0,0 @@
--- This file is automatically generated. You should know what you did if you want to edit this
--- !sql_1 --
-1      abc
-
diff --git a/regression-test/suites/export_p2/test_export_max_file_size.groovy b/regression-test/suites/export_p2/test_export_max_file_size.groovy
index 159104c7c2d..69b864370db 100644
--- a/regression-test/suites/export_p2/test_export_max_file_size.groovy
+++ b/regression-test/suites/export_p2/test_export_max_file_size.groovy
@@ -22,16 +22,11 @@ suite("test_export_max_file_size", "p2,external") {
         sql """ set enable_nereids_planner=true """
         sql """ set enable_fallback_to_original_planner=false """
 
-        String dfsNameservices=context.config.otherConfigs.get("dfsNameservices")
-        String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster")
-        String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1")
-        String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2")
-        String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3")
-        String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort")
-        String hadoopSecurityAuthentication =context.config.otherConfigs.get("hadoopSecurityAuthentication")
-        String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath")
-        String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal")
-        String hadoopSecurityAutoToLocal =context.config.otherConfigs.get("hadoopSecurityAutoToLocal")
+        String ak = getS3AK()
+        String sk = getS3SK()
+        String s3_endpoint = getS3Endpoint()
+        String region = getS3Region()
+        String bucket = context.config.otherConfigs.get("s3BucketName")
 
         def table_export_name = "test_export_max_file_size"
         // create table and insert
@@ -79,24 +74,24 @@ suite("test_export_max_file_size", "p2,external") {
             DISTRIBUTED BY HASH(user_id) PROPERTIES("replication_num" = "1");
         """
 
-        def load_data_path = "/user/export_test/exp_max_file_size.csv"
         sql """ 
                 insert into ${table_export_name}
-                select * from hdfs(
-                    "uri" = "hdfs://${dfsNameservices}${load_data_path}",
-                    "format" = "csv",
-                    "dfs.data.transfer.protection" = "integrity",
-                    'dfs.nameservices'="${dfsNameservices}",
-                    'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                    'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                    'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                    'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                    'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                    'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
-                );
+                select 
+                    number as user_id,
+                    date_add('2024-01-01', interval cast(rand() * 365 as int) day) as date,
+                    date_add('2024-01-01 00:00:00', interval cast(rand() * 365 * 24 * 3600 as int) second) as datetime,
+                    concat('City_', cast(cast(rand() * 100 as int) as string)) as city,
+                    cast(rand() * 80 + 18 as int) as age,
+                    cast(rand() * 2 as int) as sex,
+                    if(rand() > 0.5, true, false) as bool_col,
+                    cast(rand() * 1000000 as int) as int_col,
+                    cast(rand() * 10000000000 as bigint) as bigint_col,
+                    cast(rand() * 100000000000000 as largeint) as largeint_col,
+                    cast(rand() * 1000 as float) as float_col,
+                    rand() * 10000 as double_col,
+                    concat('char_', cast(cast(rand() * 10000 as int) as string)) as char_col,
+                    cast(rand() * 1000 as decimal(10, 2)) as decimal_col
+                from numbers("number" = "1000000");
             """
 
         
@@ -114,32 +109,26 @@ suite("test_export_max_file_size", "p2,external") {
             }
         }
 
-        def outFilePath = """/user/export_test/test_max_file_size/exp_"""
+        def outFilePath = """${bucket}/export/test_max_file_size/exp_"""
 
         // 1. csv test
         def test_export = {format, file_suffix, isDelete ->
             def uuid = UUID.randomUUID().toString()
             // exec export
             sql """
-                EXPORT TABLE ${table_export_name} TO "hdfs://${dfsNameservices}${outFilePath}"
+                EXPORT TABLE ${table_export_name} TO "s3://${outFilePath}"
                 PROPERTIES(
                     "label" = "${uuid}",
                     "format" = "${format}",
                     "max_file_size" = "5MB",
                     "delete_existing_files"="${isDelete}"
                 )
-                with HDFS (
-                    "dfs.data.transfer.protection" = "integrity",
-                    'dfs.nameservices'="${dfsNameservices}",
-                    'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                    'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                    'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                    'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                    'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                    'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                WITH s3 (
+                    "s3.endpoint" = "${s3_endpoint}",
+                    "s3.region" = "${region}",
+                    "s3.secret_key"="${sk}",
+                    "s3.access_key" = "${ak}",
+                    "provider" = "${getS3Provider()}"
                 );
             """
 
@@ -151,20 +140,13 @@ suite("test_export_max_file_size", "p2,external") {
 
             for (int j = 0; j < json.fileNumber[0][0].toInteger(); ++j) {
                 def res = sql """ 
-                    select count(*) from hdfs(
-                        "uri" = "${outfile_url}${j}.csv",
+                    select count(*) from s3(
+                        "uri" = "http://${bucket}.${s3_endpoint}${outfile_url.substring(5 + bucket.length())}${j}.csv",
+                        "ACCESS_KEY"= "${ak}",
+                        "SECRET_KEY" = "${sk}",
                         "format" = "csv",
-                        "dfs.data.transfer.protection" = "integrity",
-                        'dfs.nameservices'="${dfsNameservices}",
-                        'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                        'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                        'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                        'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                        'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                        'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                        "provider" = "${getS3Provider()}",
+                        "region" = "${region}"
                     );
                 """
                 logger.info("res[0][0] = " + res[0][0]);
@@ -175,20 +157,13 @@ suite("test_export_max_file_size", "p2,external") {
                 // check data correctness
                 sql """ 
                     insert into ${table_load_name}
-                    select * from hdfs(
-                        "uri" = "${outfile_url}${j}.csv",
+                    select * from s3(
+                        "uri" = "http://${bucket}.${s3_endpoint}${outfile_url.substring(5 + bucket.length())}${j}.csv",
+                        "ACCESS_KEY"= "${ak}",
+                        "SECRET_KEY" = "${sk}",
                         "format" = "csv",
-                        "dfs.data.transfer.protection" = "integrity",
-                        'dfs.nameservices'="${dfsNameservices}",
-                        'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                        'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                        'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                        'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                        'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                        'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                        "provider" = "${getS3Provider()}",
+                        "region" = "${region}"
                     );
                 """
             }
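
A note on the rewritten "uri" above: the export job reports outfile_url with an "s3://<bucket>/..." prefix, and outfile_url.substring(5 + bucket.length()) drops the 5-character "s3://" scheme plus the bucket name, leaving the object path that is appended to a virtual-hosted-style HTTP endpoint. A standalone Groovy sketch with purely illustrative values (the bucket, endpoint, and prefix below are hypothetical, not taken from the suite):

    def bucket      = "demo-bucket"        // hypothetical
    def s3_endpoint = "s3.example.com"     // hypothetical
    def outfile_url = "s3://demo-bucket/export/test_max_file_size/exp_"  // shape of the reported prefix
    def j = 0
    // drop "s3://" (5 chars) plus the bucket name, keep the object path
    def uri = "http://${bucket}.${s3_endpoint}${outfile_url.substring(5 + bucket.length())}${j}.csv"
    assert uri == "http://demo-bucket.s3.example.com/export/test_max_file_size/exp_0.csv"
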
diff --git a/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy b/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy
index f29e05a1867..dbcd245a90a 100644
--- a/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy
+++ b/regression-test/suites/export_p2/test_outfile_orc_max_file_size.groovy
@@ -16,28 +16,22 @@
 // under the License.
 
 suite("test_outfile_orc_max_file_size", "p2,external") {
-    String enabled = context.config.otherConfigs.get("enableExternalHiveTest")
+    String enabled = "true";
     if (enabled != null && enabled.equalsIgnoreCase("true")) {
         // open nereids
         sql """ set enable_nereids_planner=true """
         sql """ set enable_fallback_to_original_planner=false """
 
-        
-        String dfsNameservices=context.config.otherConfigs.get("dfsNameservices")
-        String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster")
-        String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1")
-        String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2")
-        String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3")
-        String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort")
-        String hadoopSecurityAuthentication =context.config.otherConfigs.get("hadoopSecurityAuthentication")
-        String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath")
-        String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal")
-        String hadoopSecurityAutoToLocal =context.config.otherConfigs.get("hadoopSecurityAutoToLocal")
+        String ak = getS3AK()
+        String sk = getS3SK()
+        String s3_endpoint = getS3Endpoint()
+        String region = getS3Region()
+        String bucket = context.config.otherConfigs.get("s3BucketName")
 
         // the path used to load data
-        def load_data_path = "/user/export_test/test_orc_max_file_size.orc"
+        def load_data_path = "export_test/test_orc_max_file_size.orc"
         // the path used to export data
-        def outFilePath = """/user/export_test/test_max_file_size/test_orc/exp_"""
+        def outFilePath = """${bucket}/export/test_max_file_size/test_orc/exp_"""
         
         def create_table = {table_name -> 
             sql """ DROP TABLE IF EXISTS ${table_name} """
@@ -69,42 +63,37 @@ suite("test_outfile_orc_max_file_size", "p2,external") {
         // load data
         sql """ 
                 insert into ${table_export_name}
-                select * from hdfs(
-                    "uri" = "hdfs://${dfsNameservices}${load_data_path}",
-                    "format" = "orc",
-                    "dfs.data.transfer.protection" = "integrity",
-                    'dfs.nameservices'="${dfsNameservices}",
-                    'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                    'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                    'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                    'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                    'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                    'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
-                );
+                select 
+                    number as user_id,
+                    date_add('2024-01-01', interval cast(rand() * 365 as int) day) as date,
+                    date_add('2024-01-01 00:00:00', interval cast(rand() * 365 * 24 * 3600 as int) second) as datetime,
+                    concat('City_', cast(cast(rand() * 100 as int) as string)) as city,
+                    cast(rand() * 80 + 18 as int) as age,
+                    cast(rand() * 2 as int) as sex,
+                    if(rand() > 0.5, true, false) as bool_col,
+                    cast(rand() * 1000000 as int) as int_col,
+                    cast(rand() * 10000000000 as bigint) as bigint_col,
+                    cast(rand() * 100000000000000 as largeint) as largeint_col,
+                    cast(rand() * 1000 as float) as float_col,
+                    rand() * 10000 as double_col,
+                    concat('char_', cast(cast(rand() * 10000 as int) as string)) as char_col,
+                    cast(rand() * 1000 as decimal(10, 2)) as decimal_col
+                from numbers("number" = "2000000");
             """
 
        def test_outfile_orc_success = {maxFileSize, isDelete, fileNumber, totalRows ->
             def table = sql """
                 select * from ${table_export_name}
-                into outfile "hdfs://${dfsNameservices}${outFilePath}"
+                into outfile "s3://${outFilePath}"
                 FORMAT AS ORC
                 PROPERTIES(
                     "max_file_size" = "${maxFileSize}",
                     "delete_existing_files"="${isDelete}",
-                    "dfs.data.transfer.protection" = "integrity",
-                    'dfs.nameservices'="${dfsNameservices}",
-                    'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                    'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                    'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                    'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                    'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                    'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                    'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                    "s3.endpoint" = "${s3_endpoint}",
+                    "s3.region" = "${region}",
+                    "s3.secret_key"="${sk}",
+                    "s3.access_key" = "${ak}",
+                    "provider" = "${getS3Provider()}"
                 );
             """
 
@@ -120,22 +109,16 @@ suite("test_outfile_orc_max_file_size", "p2,external") {
             test {
                 sql """
                     select * from ${table_export_name}
-                    into outfile "hdfs://${dfsNameservices}${outFilePath}"
+                    into outfile "s3://${outFilePath}"
                     FORMAT AS ORC
                     PROPERTIES(
                         "max_file_size" = "${maxFileSize}",
                         "delete_existing_files"="${isDelete}",
-                        "dfs.data.transfer.protection" = "integrity",
-                        'dfs.nameservices'="${dfsNameservices}",
-                        'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                        'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                        'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                        'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                        'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                        'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                        'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+                        "s3.endpoint" = "${s3_endpoint}",
+                        "s3.region" = "${region}",
+                        "s3.secret_key"="${sk}",
+                        "s3.access_key" = "${ak}",
+                        "provider" = "${getS3Provider()}"
                     );
                 """
 
@@ -151,10 +134,8 @@ suite("test_outfile_orc_max_file_size", "p2,external") {
 
         test_outfile_orc_fail('3MB', true)
         test_outfile_orc_fail('2.1GB', true)
-        test_outfile_orc_success('5MB', true, 3, 2000000)
-        test_outfile_orc_success('63MB', true, 3, 2000000)
-        test_outfile_orc_success('64MB', true, 3, 2000000)
-        test_outfile_orc_success('80MB', true, 2, 2000000)
+        test_outfile_orc_success('32MB', true, 2, 2000000)
+        test_outfile_orc_success('65MB', true, 1, 2000000)
     }
     
 }
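
For reference, test_outfile_orc_success above takes expected fileNumber and totalRows arguments to check against what the INTO OUTFILE statement reports back. A hypothetical Groovy check of that shape (the column order of the result rows is an assumption here, not something this patch shows):

    // Hypothetical assertion sketch; assumes FileNumber and TotalRows are the
    // first two columns of each returned row.
    def checkOutfileResult = { List rows, long expectedFiles, long expectedRows ->
        assert rows.collect { it[0] as long }.sum() == expectedFiles  // files across all fragments
        assert rows.collect { it[1] as long }.sum() == expectedRows   // rows exported in total
    }

    // purely synthetic rows for illustration
    checkOutfileResult([[1, 1500000], [1, 500000]], 2, 2000000)
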
diff --git a/regression-test/suites/export_p2/test_outfile_p2.groovy b/regression-test/suites/export_p2/test_outfile_p2.groovy
deleted file mode 100644
index a10db954c0e..00000000000
--- a/regression-test/suites/export_p2/test_outfile_p2.groovy
+++ /dev/null
@@ -1,72 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("test_outfile_p2", "p2,external") {
-    String enabled = context.config.otherConfigs.get("enableExternalHiveTest")
-    if (enabled != null && enabled.equalsIgnoreCase("true")) {
-        // open nereids
-        sql """ set enable_nereids_planner=true """
-        sql """ set enable_fallback_to_original_planner=false """
-        String dfsNameservices=context.config.otherConfigs.get("dfsNameservices")
-        String dfsHaNamenodesHdfsCluster=context.config.otherConfigs.get("dfsHaNamenodesHdfsCluster")
-        String dfsNamenodeRpcAddress1=context.config.otherConfigs.get("dfsNamenodeRpcAddress1")
-        String dfsNamenodeRpcAddress2=context.config.otherConfigs.get("dfsNamenodeRpcAddress2")
-        String dfsNamenodeRpcAddress3=context.config.otherConfigs.get("dfsNamenodeRpcAddress3")
-        String dfsNameservicesPort=context.config.otherConfigs.get("dfsNameservicesPort")
-        String hadoopSecurityAuthentication =context.config.otherConfigs.get("hadoopSecurityAuthentication")
-        String hadoopKerberosKeytabPath =context.config.otherConfigs.get("hadoopKerberosKeytabPath")
-        String hadoopKerberosPrincipal =context.config.otherConfigs.get("hadoopKerberosPrincipal")
-        String hadoopSecurityAutoToLocal =context.config.otherConfigs.get("hadoopSecurityAutoToLocal")
-
-        def table_outfile_name = "test_outfile_hdfs"
-        // create table and insert
-        sql """ DROP TABLE IF EXISTS ${table_outfile_name} """
-        sql """
-        CREATE TABLE IF NOT EXISTS ${table_outfile_name} (
-            `id` int(11) NULL,
-            `name` string NULL
-            )
-            DISTRIBUTED BY HASH(id) PROPERTIES("replication_num" = "1");
-        """
-
-        sql """insert into ${table_outfile_name} values(1, 'abc');"""
-
-        qt_sql_1 """select * from ${table_outfile_name} order by id"""
-
-        // use a simple sql to make sure there is only one fragment
-        // #21343
-        sql """
-            SELECT * FROM ${table_outfile_name}
-            INTO OUTFILE "hdfs://${dfsNameservices}/user/outfile_test/" 
-            FORMAT AS parquet
-            PROPERTIES
-            (
-                "dfs.data.transfer.protection" = "integrity",
-                'dfs.nameservices'="${dfsNameservices}",
-                'dfs.ha.namenodes.hdfs-cluster'="${dfsHaNamenodesHdfsCluster}",
-                'dfs.namenode.rpc-address.hdfs-cluster.nn1'="${dfsNamenodeRpcAddress1}:${dfsNameservicesPort}",
-                'dfs.namenode.rpc-address.hdfs-cluster.nn2'="${dfsNamenodeRpcAddress2}:${dfsNameservicesPort}",
-                'dfs.namenode.rpc-address.hdfs-cluster.nn3'="${dfsNamenodeRpcAddress3}:${dfsNameservicesPort}",
-                'hadoop.security.authentication'="${hadoopSecurityAuthentication}",
-                'hadoop.kerberos.keytab'="${hadoopKerberosKeytabPath}",
-                'hadoop.kerberos.principal'="${hadoopKerberosPrincipal}",
-                'hadoop.security.auth_to_local' = "${hadoopSecurityAutoToLocal}",
-                'dfs.client.failover.proxy.provider.hdfs-cluster'="org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
-            );
-            """
-    }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
