This is an automated email from the ASF dual-hosted git repository.

zhifgli pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git


The following commit(s) were added to refs/heads/master by this push:
     new aa02ee6  Enable spotbugs and fix high priority bugs (#38)
aa02ee6 is described below

commit aa02ee66c2f76a84430b9f9c0ef3b94ab83e30ea
Author: Kaijie Chen <[email protected]>
AuthorDate: Wed Jul 13 17:18:16 2022 +0800

    Enable spotbugs and fix high priority bugs (#38)
    
    ### What changes were proposed in this pull request?
    
    1. Enable (previously experimental) spotbugs check in CI.
    2. Set spotbugs to only check high priority bugs.
        Because currently there are too many medium (default) priority bugs,
        and many of them are difficult to fix.
    3. Fix bugs reported by spotbugs.
    
    ### Why are the changes needed?
    
    To improve code quality.
    
    ### Does this PR introduce _any_ user-facing change?
    
    One notable change is that `ShufflePartitionedBlock#equals()` now compares the 
content of the array instead of its reference, see: 
a994cffed033158bfbd84d5b83e900892b739c18. We can revert it if it's not desired.
    
    Another is that `toString()` methods now call `Arrays.toString()` instead of 
printing the array reference as a string.
    
    ### How was this patch tested?
    
    1. `mvn test-compile spotbugs:check -Pspark3`
    2. `mvn test-compile spotbugs:check -Pspark2`
    3. `mvn test-compile spotbugs:check -Pmr`
    
    Also by CI: 
https://github.com/kaijchen/incubator-uniffle/actions/runs/2634089750
---
 .github/workflows/build.yml                        |  1 -
 .../org/apache/hadoop/mapreduce/RssMRConfig.java   | 18 +++++++-------
 .../org/apache/uniffle/common/BufferSegment.java   |  7 ++++++
 .../uniffle/common/ShufflePartitionedBlock.java    | 10 +++++++-
 .../uniffle/common/ShufflePartitionedData.java     |  4 +++-
 .../apache/uniffle/common/config/ConfigOption.java |  5 ++++
 .../apache/uniffle/common/config/ConfigUtils.java  | 10 ++++----
 .../apache/uniffle/common/config/RssBaseConf.java  |  2 +-
 .../org/apache/uniffle/common/config/RssConf.java  | 10 ++++----
 .../uniffle/coordinator/CoordinatorConf.java       |  8 +++----
 .../uniffle/coordinator/SimpleClusterManager.java  |  3 ++-
 .../client/factory/CoordinatorClientFactory.java   |  3 ++-
 pom.xml                                            |  1 +
 .../apache/uniffle/server/LocalStorageChecker.java |  5 ++--
 .../apache/uniffle/server/ShuffleFlushManager.java |  2 +-
 .../apache/uniffle/server/ShuffleServerConf.java   | 28 +++++++++++-----------
 .../storage/common/FileBasedShuffleSegment.java    |  2 +-
 .../uniffle/storage/util/ShuffleStorageUtils.java  |  3 ++-
 18 files changed, 73 insertions(+), 49 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 30803a0..b29b860 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -37,7 +37,6 @@ jobs:
     uses: ./.github/workflows/sequential.yml
     with:
       maven-args: test-compile spotbugs:check
-      experimental: true
 
   build:
     uses: ./.github/workflows/parallel.yml
diff --git 
a/client-mr/src/main/java/org/apache/hadoop/mapreduce/RssMRConfig.java 
b/client-mr/src/main/java/org/apache/hadoop/mapreduce/RssMRConfig.java
index e52b1ca..1dfb673 100644
--- a/client-mr/src/main/java/org/apache/hadoop/mapreduce/RssMRConfig.java
+++ b/client-mr/src/main/java/org/apache/hadoop/mapreduce/RssMRConfig.java
@@ -61,7 +61,7 @@ public class RssMRConfig {
       RssClientConfig.RSS_CLIENT_DEFAULT_SEND_NUM;
   public static final String RSS_CLIENT_SEND_THRESHOLD = MR_RSS_CONFIG_PREFIX 
+ "rss.client.send.threshold";
   public static final double RSS_CLIENT_DEFAULT_SEND_THRESHOLD = 0.2f;
-  public static boolean RSS_DATA_REPLICA_SKIP_ENABLED_DEFAULT_VALUE =
+  public static final boolean RSS_DATA_REPLICA_SKIP_ENABLED_DEFAULT_VALUE =
       RssClientConfig.RSS_DATA_REPLICA_SKIP_ENABLED_DEFAULT_VALUE;
   public static final String RSS_HEARTBEAT_INTERVAL =
       MR_RSS_CONFIG_PREFIX + RssClientConfig.RSS_HEARTBEAT_INTERVAL;
@@ -109,26 +109,26 @@ public class RssMRConfig {
       MR_RSS_CONFIG_PREFIX + RssClientConfig.RSS_INDEX_READ_LIMIT;
   public static final int RSS_INDEX_READ_LIMIT_DEFAULT_VALUE =
       RssClientConfig.RSS_INDEX_READ_LIMIT_DEFAULT_VALUE;
-  public static String RSS_CLIENT_READ_BUFFER_SIZE =
+  public static final String RSS_CLIENT_READ_BUFFER_SIZE =
       MR_RSS_CONFIG_PREFIX + RssClientConfig.RSS_CLIENT_READ_BUFFER_SIZE;
 
   // When the size of read buffer reaches the half of JVM region (i.e., 32m),
   // it will incur humongous allocation, so we set it to 14m.
-  public static String RSS_CLIENT_READ_BUFFER_SIZE_DEFAULT_VALUE =
+  public static final String RSS_CLIENT_READ_BUFFER_SIZE_DEFAULT_VALUE =
       RssClientConfig.RSS_CLIENT_READ_BUFFER_SIZE_DEFAULT_VALUE;
 
-  public static String RSS_DYNAMIC_CLIENT_CONF_ENABLED =
+  public static final String RSS_DYNAMIC_CLIENT_CONF_ENABLED =
       MR_RSS_CONFIG_PREFIX + RssClientConfig.RSS_DYNAMIC_CLIENT_CONF_ENABLED;
-  public static boolean RSS_DYNAMIC_CLIENT_CONF_ENABLED_DEFAULT_VALUE =
+  public static final boolean RSS_DYNAMIC_CLIENT_CONF_ENABLED_DEFAULT_VALUE =
       RssClientConfig.RSS_DYNAMIC_CLIENT_CONF_ENABLED_DEFAULT_VALUE;
-  public static String RSS_ACCESS_TIMEOUT_MS = MR_RSS_CONFIG_PREFIX + 
RssClientConfig.RSS_ACCESS_TIMEOUT_MS;
-  public static int RSS_ACCESS_TIMEOUT_MS_DEFAULT_VALUE = 
RssClientConfig.RSS_ACCESS_TIMEOUT_MS_DEFAULT_VALUE;
+  public static final String RSS_ACCESS_TIMEOUT_MS = MR_RSS_CONFIG_PREFIX + 
RssClientConfig.RSS_ACCESS_TIMEOUT_MS;
+  public static final int RSS_ACCESS_TIMEOUT_MS_DEFAULT_VALUE = 
RssClientConfig.RSS_ACCESS_TIMEOUT_MS_DEFAULT_VALUE;
 
   public static final String RSS_CLIENT_ASSIGNMENT_TAGS =
           MR_RSS_CONFIG_PREFIX + RssClientConfig.RSS_CLIENT_ASSIGNMENT_TAGS;
 
-  public static String RSS_CONF_FILE = "rss_conf.xml";
+  public static final String RSS_CONF_FILE = "rss_conf.xml";
 
-  public static Set<String> RSS_MANDATORY_CLUSTER_CONF = Sets.newHashSet(
+  public static final Set<String> RSS_MANDATORY_CLUSTER_CONF = Sets.newHashSet(
       RSS_STORAGE_TYPE, RSS_REMOTE_STORAGE_PATH);
 }
diff --git a/common/src/main/java/org/apache/uniffle/common/BufferSegment.java 
b/common/src/main/java/org/apache/uniffle/common/BufferSegment.java
index 47c1740..bf0e302 100644
--- a/common/src/main/java/org/apache/uniffle/common/BufferSegment.java
+++ b/common/src/main/java/org/apache/uniffle/common/BufferSegment.java
@@ -17,6 +17,8 @@
 
 package org.apache.uniffle.common;
 
+import java.util.Objects;
+
 public class BufferSegment {
 
   private long blockId;
@@ -48,6 +50,11 @@ public class BufferSegment {
     return false;
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(blockId, offset, length, uncompressLength, crc, 
taskAttemptId);
+  }
+
   @Override
   public String toString() {
     return "BufferSegment{blockId[" + blockId + "], taskAttemptId[" + 
taskAttemptId
diff --git 
a/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedBlock.java 
b/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedBlock.java
index d68ba59..9bee3fe 100644
--- 
a/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedBlock.java
+++ 
b/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedBlock.java
@@ -17,6 +17,9 @@
 
 package org.apache.uniffle.common;
 
+import java.util.Arrays;
+import java.util.Objects;
+
 public class ShufflePartitionedBlock {
 
   private int length;
@@ -59,7 +62,12 @@ public class ShufflePartitionedBlock {
     return length == that.length
         && crc == that.crc
         && blockId == that.blockId
-        && data.equals(that.data);
+        && Arrays.equals(data, that.data);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(length, crc, blockId, Arrays.hashCode(data));
   }
 
   public int getLength() {
diff --git 
a/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedData.java 
b/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedData.java
index 30deb73..64a1914 100644
--- a/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedData.java
+++ b/common/src/main/java/org/apache/uniffle/common/ShufflePartitionedData.java
@@ -17,6 +17,8 @@
 
 package org.apache.uniffle.common;
 
+import java.util.Arrays;
+
 public class ShufflePartitionedData {
 
   private int partitionId;
@@ -29,7 +31,7 @@ public class ShufflePartitionedData {
 
   @Override
   public String toString() {
-    return "ShufflePartitionedData{partitionId=" + partitionId + ", 
blockList=" + blockList + '}';
+    return "ShufflePartitionedData{partitionId=" + partitionId + ", 
blockList=" + Arrays.toString(blockList) + '}';
   }
 
   public int getPartitionId() {
diff --git 
a/common/src/main/java/org/apache/uniffle/common/config/ConfigOption.java 
b/common/src/main/java/org/apache/uniffle/common/config/ConfigOption.java
index ac9cca5..c3693da 100644
--- a/common/src/main/java/org/apache/uniffle/common/config/ConfigOption.java
+++ b/common/src/main/java/org/apache/uniffle/common/config/ConfigOption.java
@@ -162,6 +162,11 @@ public class ConfigOption<T> {
     }
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(key, defaultValue);
+  }
+
   @Override
   public String toString() {
     return String.format("Key: '%s' , default: %s", key, defaultValue);
diff --git 
a/common/src/main/java/org/apache/uniffle/common/config/ConfigUtils.java 
b/common/src/main/java/org/apache/uniffle/common/config/ConfigUtils.java
index 023f287..a9f989a 100644
--- a/common/src/main/java/org/apache/uniffle/common/config/ConfigUtils.java
+++ b/common/src/main/java/org/apache/uniffle/common/config/ConfigUtils.java
@@ -177,17 +177,17 @@ public class ConfigUtils {
     return configOptionList;
   }
 
-  public static Function<Long, Boolean> positiveLongValidator = value -> value 
> 0;
+  public static final Function<Long, Boolean> POSITIVE_LONG_VALIDATOR = value 
-> value > 0;
 
-  public static Function<Long, Boolean> non_negativeLongValidator = value -> 
value >= 0;
+  public static final Function<Long, Boolean> NON_NEGATIVE_LONG_VALIDATOR = 
value -> value >= 0;
 
-  public static Function<Long, Boolean> positiveIntegerValidator =
+  public static final Function<Long, Boolean> POSITIVE_INTEGER_VALIDATOR =
       value -> value > 0L && value <= Integer.MAX_VALUE;
 
-  public static Function<Integer, Boolean> positiveIntegerValidator2 =
+  public static final Function<Integer, Boolean> POSITIVE_INTEGER_VALIDATOR_2 =
       value -> value > 0;
 
-  public static Function<Double, Boolean> percentageDoubleValidator =
+  public static final Function<Double, Boolean> PERCENTAGE_DOUBLE_VALIDATOR =
       (Function<Double, Boolean>) value -> Double.compare(value, 100.0) <= 0 
&& Double.compare(value, 0.0) >= 0;
   
 }
diff --git 
a/common/src/main/java/org/apache/uniffle/common/config/RssBaseConf.java 
b/common/src/main/java/org/apache/uniffle/common/config/RssBaseConf.java
index d1db6d1..09df89d 100644
--- a/common/src/main/java/org/apache/uniffle/common/config/RssBaseConf.java
+++ b/common/src/main/java/org/apache/uniffle/common/config/RssBaseConf.java
@@ -116,7 +116,7 @@ public class RssBaseConf extends RssConf {
   public static final ConfigOption<Long> RPC_MESSAGE_MAX_SIZE = ConfigOptions
       .key("rss.rpc.message.max.size")
       .longType()
-      .checkValue(ConfigUtils.positiveIntegerValidator,
+      .checkValue(ConfigUtils.POSITIVE_INTEGER_VALIDATOR,
         "The value must be positive integer")
       .defaultValue(1024L * 1024L * 1024L)
       .withDescription("Max size of rpc message (byte)");
diff --git a/common/src/main/java/org/apache/uniffle/common/config/RssConf.java 
b/common/src/main/java/org/apache/uniffle/common/config/RssConf.java
index 17d896b..57ae03c 100644
--- a/common/src/main/java/org/apache/uniffle/common/config/RssConf.java
+++ b/common/src/main/java/org/apache/uniffle/common/config/RssConf.java
@@ -27,12 +27,12 @@ import com.google.common.collect.Sets;
 
 import org.apache.uniffle.common.util.UnitConverter;
 
-public class RssConf {
+public class RssConf implements Cloneable {
 
   /**
    * Stores the concrete key/value pairs of this configuration object.
    */
-  private final ConcurrentHashMap<String, Object> settings;
+  private ConcurrentHashMap<String, Object> settings;
 
   /**
    * Creates a new empty configuration.
@@ -532,9 +532,9 @@ public class RssConf {
   // 
--------------------------------------------------------------------------------------------
 
   @Override
-  public RssConf clone() {
-    RssConf config = new RssConf();
-    config.addAll(this);
+  public RssConf clone() throws CloneNotSupportedException {
+    RssConf config = (RssConf) super.clone();
+    config.settings = new ConcurrentHashMap<>(settings);
     return config;
   }
 
diff --git 
a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java 
b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
index 0fbd562..bdad2d6 100644
--- 
a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
+++ 
b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
@@ -71,7 +71,7 @@ public class CoordinatorConf extends RssBaseConf {
   public static final ConfigOption<Integer> 
COORDINATOR_ACCESS_CANDIDATES_UPDATE_INTERVAL_SEC = ConfigOptions
       .key("rss.coordinator.access.candidates.updateIntervalSec")
       .intType()
-      .checkValue(ConfigUtils.positiveIntegerValidator2, "access candidates 
update interval must be positive")
+      .checkValue(ConfigUtils.POSITIVE_INTEGER_VALIDATOR_2, "access candidates 
update interval must be positive")
       .defaultValue(120)
       .withDescription("Accessed candidates update interval in seconds");
   public static final ConfigOption<String> COORDINATOR_ACCESS_CANDIDATES_PATH 
= ConfigOptions
@@ -82,14 +82,14 @@ public class CoordinatorConf extends RssBaseConf {
   public static final ConfigOption<Double> 
COORDINATOR_ACCESS_LOADCHECKER_MEMORY_PERCENTAGE = ConfigOptions
       .key("rss.coordinator.access.loadChecker.memory.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The recovery usage percentage must be between 0.0 and 100.0")
       .defaultValue(15.0)
       .withDescription("The minimal percentage of available memory percentage 
of a server");
   public static final ConfigOption<Integer> 
COORDINATOR_ACCESS_LOADCHECKER_SERVER_NUM_THRESHOLD = ConfigOptions
       .key("rss.coordinator.access.loadChecker.serverNum.threshold")
       .intType()
-      .checkValue(ConfigUtils.positiveIntegerValidator2, "load checker 
serverNum threshold must be positive")
+      .checkValue(ConfigUtils.POSITIVE_INTEGER_VALIDATOR_2, "load checker 
serverNum threshold must be positive")
       .noDefaultValue()
       .withDescription("The minimal required number of healthy shuffle servers 
when being accessed by client");
   public static final ConfigOption<Boolean> 
COORDINATOR_DYNAMIC_CLIENT_CONF_ENABLED = ConfigOptions
@@ -110,7 +110,7 @@ public class CoordinatorConf extends RssBaseConf {
   public static final ConfigOption<Integer> 
COORDINATOR_DYNAMIC_CLIENT_CONF_UPDATE_INTERVAL_SEC = ConfigOptions
       .key("rss.coordinator.dynamicClientConf.updateIntervalSec")
       .intType()
-      .checkValue(ConfigUtils.positiveIntegerValidator2, "dynamic client conf 
update interval in seconds")
+      .checkValue(ConfigUtils.POSITIVE_INTEGER_VALIDATOR_2, "dynamic client 
conf update interval in seconds")
       .defaultValue(120)
       .withDescription("Accessed candidates update interval in seconds");
   public static final ConfigOption<String> 
COORDINATOR_REMOTE_STORAGE_CLUSTER_CONF = ConfigOptions
diff --git 
a/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
 
b/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
index 98c69a6..83b29c8 100644
--- 
a/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
+++ 
b/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
@@ -22,6 +22,7 @@ import java.io.DataInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -127,7 +128,7 @@ public class SimpleClusterManager implements ClusterManager 
{
 
   private void parseExcludeNodesFile(DataInputStream fsDataInputStream) throws 
IOException {
     Set<String> nodes = Sets.newConcurrentHashSet();
-    try (BufferedReader br = new BufferedReader(new 
InputStreamReader(fsDataInputStream))) {
+    try (BufferedReader br = new BufferedReader(new 
InputStreamReader(fsDataInputStream, StandardCharsets.UTF_8))) {
       String line;
       while ((line = br.readLine()) != null) {
         if (!StringUtils.isEmpty(line)) {
diff --git 
a/internal-client/src/main/java/org/apache/uniffle/client/factory/CoordinatorClientFactory.java
 
b/internal-client/src/main/java/org/apache/uniffle/client/factory/CoordinatorClientFactory.java
index 16f68b7..0c17cda 100644
--- 
a/internal-client/src/main/java/org/apache/uniffle/client/factory/CoordinatorClientFactory.java
+++ 
b/internal-client/src/main/java/org/apache/uniffle/client/factory/CoordinatorClientFactory.java
@@ -17,6 +17,7 @@
 
 package org.apache.uniffle.client.factory;
 
+import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
@@ -58,7 +59,7 @@ public class CoordinatorClientFactory {
     for (String coordinator: coordinatorList) {
       String[] ipPort = coordinator.trim().split(":");
       if (ipPort.length != 2) {
-        String msg = "Invalid coordinator format " + ipPort;
+        String msg = "Invalid coordinator format " + Arrays.toString(ipPort);
         LOG.error(msg);
         throw new RuntimeException(msg);
       }
diff --git a/pom.xml b/pom.xml
index 449318b..7880732 100644
--- a/pom.xml
+++ b/pom.xml
@@ -820,6 +820,7 @@
           </dependency>
         </dependencies>
         <configuration>
+          <threshold>high</threshold>
           <excludeFilterFile>spotbugs-exclude.xml</excludeFilterFile>
         </configuration>
       </plugin>
diff --git 
a/server/src/main/java/org/apache/uniffle/server/LocalStorageChecker.java 
b/server/src/main/java/org/apache/uniffle/server/LocalStorageChecker.java
index 79c2be4..e6a1811 100644
--- a/server/src/main/java/org/apache/uniffle/server/LocalStorageChecker.java
+++ b/server/src/main/java/org/apache/uniffle/server/LocalStorageChecker.java
@@ -22,11 +22,11 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -156,8 +156,7 @@ public class LocalStorageChecker extends Checker {
         if (!writeFile.createNewFile()) {
           return false;
         }
-        byte[] data = new byte[1024];
-        new Random().nextBytes(data);
+        byte[] data = RandomUtils.nextBytes(1024);
         try (FileOutputStream fos = new FileOutputStream(writeFile)) {
           fos.write(data);
           fos.flush();
diff --git 
a/server/src/main/java/org/apache/uniffle/server/ShuffleFlushManager.java 
b/server/src/main/java/org/apache/uniffle/server/ShuffleFlushManager.java
index edf2b5c..46f16d3 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleFlushManager.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleFlushManager.java
@@ -46,7 +46,7 @@ import 
org.apache.uniffle.storage.request.CreateShuffleWriteHandlerRequest;
 public class ShuffleFlushManager {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(ShuffleFlushManager.class);
-  public static AtomicLong ATOMIC_EVENT_ID = new AtomicLong(0);
+  public static final AtomicLong ATOMIC_EVENT_ID = new AtomicLong(0);
   private final ShuffleServer shuffleServer;
   private final BlockingQueue<ShuffleDataFlushEvent> flushQueue = 
Queues.newLinkedBlockingQueue();
   private final ThreadPoolExecutor threadPoolExecutor;
diff --git 
a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java 
b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
index b5b0249..2c54655 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
@@ -149,42 +149,42 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Double> CLEANUP_THRESHOLD = ConfigOptions
       .key("rss.server.cleanup.threshold")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator, "clean threshold must 
be between 0.0 and 100.0")
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR, "clean threshold 
must be between 0.0 and 100.0")
       .defaultValue(10.0)
       .withDescription("Threshold for disk cleanup");
 
   public static final ConfigOption<Double> HIGH_WATER_MARK_OF_WRITE = 
ConfigOptions
       .key("rss.server.high.watermark.write")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator, "high write watermark 
must be between 0.0 and 100.0")
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR, "high write 
watermark must be between 0.0 and 100.0")
       .defaultValue(95.0)
       .withDescription("If disk usage is bigger than this value, disk cannot 
been written");
 
   public static final ConfigOption<Double> LOW_WATER_MARK_OF_WRITE = 
ConfigOptions
       .key("rss.server.low.watermark.write")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator, "low write watermark 
must be between 0.0 and 100.0")
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR, "low write 
watermark must be between 0.0 and 100.0")
       .defaultValue(85.0)
       .withDescription("If disk usage is smaller than this value, disk can 
been written again");
 
   public static final ConfigOption<Long> PENDING_EVENT_TIMEOUT_SEC = 
ConfigOptions
       .key("rss.server.pending.event.timeout.sec")
       .longType()
-      .checkValue(ConfigUtils.positiveLongValidator, "pending event timeout 
must be positive")
+      .checkValue(ConfigUtils.POSITIVE_LONG_VALIDATOR, "pending event timeout 
must be positive")
       .defaultValue(600L)
       .withDescription("If disk cannot be written for timeout seconds, the 
flush data event will fail");
 
   public static final ConfigOption<Long> DISK_CAPACITY = ConfigOptions
       .key("rss.server.disk.capacity")
       .longType()
-      .checkValue(ConfigUtils.positiveLongValidator, "disk capacity must be 
positive")
+      .checkValue(ConfigUtils.POSITIVE_LONG_VALIDATOR, "disk capacity must be 
positive")
       .defaultValue(1024L * 1024L * 1024L * 1024L)
       .withDescription("Disk capacity that shuffle server can use");
 
   public static final ConfigOption<Long> SHUFFLE_EXPIRED_TIMEOUT_MS = 
ConfigOptions
       .key("rss.server.shuffle.expired.timeout.ms")
       .longType()
-      .checkValue(ConfigUtils.positiveLongValidator, "shuffle expired timeout 
must be positive")
+      .checkValue(ConfigUtils.POSITIVE_LONG_VALIDATOR, "shuffle expired 
timeout must be positive")
       .defaultValue(60L * 1000 * 2)
       .withDescription("If the shuffle is not read for the long time, and 
shuffle is uploaded totally,"
           + " , we can delete the shuffle");
@@ -198,7 +198,7 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Double> HEALTH_STORAGE_MAX_USAGE_PERCENTAGE 
= ConfigOptions
       .key("rss.server.health.max.storage.usage.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The max usage percentage must be between 0.0 and 100.0")
       .defaultValue(90.0)
       .withDescription("The usage percentage of a storage exceed the value, 
the disk become unavailable");
@@ -206,7 +206,7 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Double> 
HEALTH_STORAGE_RECOVERY_USAGE_PERCENTAGE = ConfigOptions
       .key("rss.server.health.storage.recovery.usage.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The recovery usage percentage must be between 0.0 and 100.0")
       .defaultValue(80.0)
       .withDescription("The usage percentage of an unavailable storage decline 
the value, the disk"
@@ -215,14 +215,14 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Long> HEALTH_CHECK_INTERVAL = ConfigOptions
       .key("rss.server.health.check.interval.ms")
       .longType()
-      .checkValue(ConfigUtils.positiveLongValidator,  "The interval for health 
check must be positive")
+      .checkValue(ConfigUtils.POSITIVE_LONG_VALIDATOR,  "The interval for 
health check must be positive")
       .defaultValue(5000L)
       .withDescription("The interval for health check");
 
   public static final ConfigOption<Double> HEALTH_MIN_STORAGE_PERCENTAGE = 
ConfigOptions
       .key("rss.server.health.min.storage.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The minimum for healthy storage percentage must be between 0.0 and 
100.0")
       .defaultValue(80.0)
       .withDescription("The minimum fraction of storage that must pass the 
check mark the node as healthy");
@@ -243,7 +243,7 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Double> 
SERVER_MEMORY_SHUFFLE_LOWWATERMARK_PERCENTAGE = ConfigOptions
       .key("rss.server.memory.shuffle.lowWaterMark.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The lowWaterMark for memory percentage must be between 0.0 and 
100.0")
       .defaultValue(25.0)
       .withDescription("LowWaterMark of memory in percentage style");
@@ -251,7 +251,7 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Double> 
SERVER_MEMORY_SHUFFLE_HIGHWATERMARK_PERCENTAGE = ConfigOptions
       .key("rss.server.memory.shuffle.highWaterMark.percentage")
       .doubleType()
-      .checkValue(ConfigUtils.percentageDoubleValidator,
+      .checkValue(ConfigUtils.PERCENTAGE_DOUBLE_VALIDATOR,
           "The highWaterMark for memory percentage must be between 0.0 and 
100.0")
       .defaultValue(75.0)
       .withDescription("HighWaterMark of memory in percentage style");
@@ -259,14 +259,14 @@ public class ShuffleServerConf extends RssBaseConf {
   public static final ConfigOption<Long> FLUSH_COLD_STORAGE_THRESHOLD_SIZE = 
ConfigOptions
       .key("rss.server.flush.cold.storage.threshold.size")
       .longType()
-      .checkValue(ConfigUtils.positiveLongValidator, "flush cold storage 
threshold must be positive")
+      .checkValue(ConfigUtils.POSITIVE_LONG_VALIDATOR, "flush cold storage 
threshold must be positive")
       .defaultValue(64L * 1024L * 1024L)
       .withDescription("For multistorage, the event size exceed this value, 
flush data  to cold storage");
 
   public static final ConfigOption<Long> FALLBACK_MAX_FAIL_TIMES = 
ConfigOptions
       .key("rss.server.multistorage.fallback.max.fail.times")
       .longType()
-      .checkValue(ConfigUtils.non_negativeLongValidator, " fallback times must 
be non-negative")
+      .checkValue(ConfigUtils.NON_NEGATIVE_LONG_VALIDATOR, " fallback times 
must be non-negative")
       .defaultValue(0L)
       .withDescription("For multistorage, fail times exceed the number, will 
switch storage");
 
diff --git 
a/storage/src/main/java/org/apache/uniffle/storage/common/FileBasedShuffleSegment.java
 
b/storage/src/main/java/org/apache/uniffle/storage/common/FileBasedShuffleSegment.java
index 3f0f09c..4df9bd2 100644
--- 
a/storage/src/main/java/org/apache/uniffle/storage/common/FileBasedShuffleSegment.java
+++ 
b/storage/src/main/java/org/apache/uniffle/storage/common/FileBasedShuffleSegment.java
@@ -21,7 +21,7 @@ import java.util.Objects;
 
 public class FileBasedShuffleSegment extends ShuffleSegment implements 
Comparable<FileBasedShuffleSegment> {
 
-  public static int SEGMENT_SIZE = 4 * Long.BYTES + 2 * Integer.BYTES;
+  public static final int SEGMENT_SIZE = 4 * Long.BYTES + 2 * Integer.BYTES;
   private long offset;
   private int length;
   private int uncompressLength;
diff --git 
a/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
 
b/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
index 096702a..16774b9 100644
--- 
a/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
+++ 
b/storage/src/main/java/org/apache/uniffle/storage/util/ShuffleStorageUtils.java
@@ -20,6 +20,7 @@ package org.apache.uniffle.storage.util;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.List;
 
@@ -199,7 +200,7 @@ public class ShuffleStorageUtils {
 
   public static int getStorageIndex(int max, String appId, int shuffleId, int 
startPartition) {
     String hash = appId + "_" + shuffleId + "_" + startPartition;
-    int index = MurmurHash.getInstance().hash(hash.getBytes()) % max;
+    int index = 
MurmurHash.getInstance().hash(hash.getBytes(StandardCharsets.UTF_8)) % max;
     if (index < 0) {
       index = -index;
     }

Reply via email to