This is an automated email from the ASF dual-hosted git repository.

zuston pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git


The following commit(s) were added to refs/heads/master by this push:
     new 1dc833f20 [MINOR] improvement(test): Use standard way to set storage 
basePath in `RepartitionWithHadoopHybridStorageRssTest` (#1829)
1dc833f20 is described below

commit 1dc833f20f8232f016e3b122d98224df372747be
Author: Zhen Wang <[email protected]>
AuthorDate: Tue Jun 25 10:38:02 2024 +0800

    [MINOR] improvement(test): Use standard way to set storage basePath in 
`RepartitionWithHadoopHybridStorageRssTest` (#1829)
    
    ### What changes were proposed in this pull request?
    
    Use standard way to set storage basePath
    
    ### Why are the changes needed?
    
    rss.storage.basePath should be configured as a list of separate paths.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Fixed the existing test case.
---
 .../test/RepartitionWithHadoopHybridStorageRssTest.java     | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git 
a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopHybridStorageRssTest.java
 
b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopHybridStorageRssTest.java
index d778f2aea..64626237a 100644
--- 
a/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopHybridStorageRssTest.java
+++ 
b/integration-test/spark-common/src/test/java/org/apache/uniffle/test/RepartitionWithHadoopHybridStorageRssTest.java
@@ -19,6 +19,7 @@ package org.apache.uniffle.test;
 
 import java.io.File;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
@@ -59,14 +60,16 @@ public class RepartitionWithHadoopHybridStorageRssTest 
extends RepartitionTest {
     // local storage config
     File dataDir1 = new File(tmpDir, "data1");
     File dataDir2 = new File(tmpDir, "data2");
-    String grpcBasePath = dataDir1.getAbsolutePath() + "," + 
dataDir2.getAbsolutePath();
+    List<String> grpcBasePath =
+        Arrays.asList(dataDir1.getAbsolutePath(), dataDir2.getAbsolutePath());
     ShuffleServerConf grpcShuffleServerConf = 
buildShuffleServerConf(ServerType.GRPC, grpcBasePath);
     createShuffleServer(grpcShuffleServerConf);
 
     // local storage config
     File dataDir3 = new File(tmpDir, "data3");
     File dataDir4 = new File(tmpDir, "data4");
-    String nettyBasePath = dataDir3.getAbsolutePath() + "," + 
dataDir4.getAbsolutePath();
+    List<String> nettyBasePath =
+        Arrays.asList(dataDir3.getAbsolutePath(), dataDir4.getAbsolutePath());
     ShuffleServerConf nettyShuffleServerConf =
         buildShuffleServerConf(ServerType.GRPC_NETTY, nettyBasePath);
     createShuffleServer(nettyShuffleServerConf);
@@ -74,10 +77,10 @@ public class RepartitionWithHadoopHybridStorageRssTest 
extends RepartitionTest {
     startServers();
   }
 
-  private static ShuffleServerConf buildShuffleServerConf(ServerType 
serverType, String basePath)
-      throws Exception {
+  private static ShuffleServerConf buildShuffleServerConf(
+      ServerType serverType, List<String> basePath) throws Exception {
     ShuffleServerConf shuffleServerConf = getShuffleServerConf(serverType);
-    shuffleServerConf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, 
Arrays.asList(basePath));
+    shuffleServerConf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, basePath);
     shuffleServerConf.setString(
         ShuffleServerConf.RSS_STORAGE_TYPE.key(), 
StorageType.LOCALFILE_HDFS.name());
     
shuffleServerConf.setLong(ShuffleServerConf.FLUSH_COLD_STORAGE_THRESHOLD_SIZE, 
1024L * 1024L);

Reply via email to