This is an automated email from the ASF dual-hosted git repository.
enricomi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git
The following commit(s) were added to refs/heads/master by this push:
new b39ff24b0 [MINOR] Remove meaningless string concatenation, use slf4j (#1647)
b39ff24b0 is described below
commit b39ff24b00ed86223329941112cadaf31f65b72e
Author: RickyMa <[email protected]>
AuthorDate: Tue Apr 16 17:53:29 2024 +0800
[MINOR] Remove meaningless string concatenation, use slf4j (#1647)
---
.../mapreduce/task/reduce/RssBypassWriter.java | 3 ---
.../mapreduce/task/reduce/RssEventFetcher.java | 8 ++++----
.../hadoop/mapreduce/task/reduce/RssFetcher.java | 8 ++++----
.../task/reduce/RssInMemoryRemoteMerger.java | 6 +++---
.../task/reduce/RssRemoteMergeManagerImpl.java | 9 ++++-----
.../hadoop/mapreduce/task/reduce/RssShuffle.java | 6 +++---
.../hadoop/mapreduce/v2/app/RssMRAppMaster.java | 2 +-
.../apache/uniffle/hadoop/shim/HadoopShimImpl.java | 6 +++---
.../common/shuffle/impl/RssShuffleManager.java | 21 ++++++++++-----------
.../impl/RssSimpleFetchedInputAllocator.java | 2 +-
.../common/shuffle/orderedgrouped/RssShuffle.java | 11 +++++------
.../shuffle/orderedgrouped/RssShuffleScheduler.java | 18 +++++++++---------
.../orderedgrouped/RssTezShuffleDataFetcher.java | 2 +-
.../input/RssConcatenatedMergedKeyValueInput.java | 2 +-
.../input/RssConcatenatedMergedKeyValuesInput.java | 2 +-
.../apache/uniffle/common/ShuffleServerInfo.java | 2 +-
.../coordinator/web/request/ApplicationRequest.java | 2 +-
.../coordinator/conf/YamlClientConfParserTest.java | 2 +-
.../test/ShuffleServerWithMemLocalHadoopTest.java | 2 +-
.../uniffle/test/TezSimpleSessionExampleTest.java | 2 +-
.../apache/uniffle/server/ShuffleServerConf.java | 2 +-
.../uniffle/server/ShuffleServerGrpcService.java | 6 +++---
22 files changed, 59 insertions(+), 65 deletions(-)
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
index 3bcfddb83..d81ab331f 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.mapreduce.task.reduce;
import java.lang.reflect.Field;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
@@ -29,7 +27,6 @@ import org.apache.uniffle.common.exception.RssException;
// In MR shuffle, MapOutput encapsulates the logic to fetch map task's output
data via http.
// So, in RSS, we should bypass this logic, and directly write data to
MapOutput.
public class RssBypassWriter {
- private static final Log LOG = LogFactory.getLog(RssBypassWriter.class);
public static void write(MapOutput mapOutput, byte[] buffer) {
// Write and commit uncompressed data to MapOutput.
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
index 397d45fb2..d2fdcdbac 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
@@ -21,8 +21,6 @@ import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
import org.apache.hadoop.mapred.TaskCompletionEvent;
@@ -30,11 +28,13 @@ import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.RssMRUtils;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.roaringbitmap.longlong.Roaring64NavigableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.uniffle.common.exception.RssException;
public class RssEventFetcher<K, V> {
- private static final Log LOG = LogFactory.getLog(RssEventFetcher.class);
+ private static final Logger LOG =
LoggerFactory.getLogger(RssEventFetcher.class);
private final TaskAttemptID reduce;
private final TaskUmbilicalProtocol umbilical;
@@ -104,7 +104,7 @@ public class RssEventFetcher<K, V> {
if (taskIdBitmap.getLongCardinality() + tipFailedCount != totalMapsCount) {
for (int index = 0; index < totalMapsCount; index++) {
if (!mapIndexBitmap.contains(index)) {
- LOG.error("Fail to fetch " + " map task on index: " + index);
+ LOG.error("Fail to fetch map task on index: {}", index);
}
}
throw new IllegalStateException(errMsg);
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
index 27d382cce..b07581a2e 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
@@ -22,8 +22,6 @@ import java.nio.ByteBuffer;
import java.text.DecimalFormat;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
@@ -32,6 +30,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.util.Progress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.uniffle.client.api.ShuffleReadClient;
import org.apache.uniffle.client.response.CompressedShuffleBlock;
@@ -42,7 +42,7 @@ import org.apache.uniffle.common.util.ByteUnit;
public class RssFetcher<K, V> {
- private static final Log LOG = LogFactory.getLog(RssFetcher.class);
+ private static final Logger LOG = LoggerFactory.getLogger(RssFetcher.class);
private final Reporter reporter;
@@ -235,7 +235,7 @@ public class RssFetcher<K, V> {
}
// Check if we can shuffle *now* ...
if (mapOutput == null) {
- LOG.info("RssMRFetcher" + " - MergeManager returned status WAIT ...");
+ LOG.info("RssMRFetcher - MergeManager returned status WAIT ...");
// Not an error but wait to process data.
// Use a retry flag to avoid re-fetch and re-uncompress.
hasPendingData = true;
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
index e29601e4f..bb8309c77 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
@@ -21,8 +21,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -40,9 +38,11 @@ import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class RssInMemoryRemoteMerger<K, V> extends
MergeThread<InMemoryMapOutput<K, V>, K, V> {
- private static final Log LOG =
LogFactory.getLog(RssInMemoryRemoteMerger.class);
+ private static final Logger LOG =
LoggerFactory.getLogger(RssInMemoryRemoteMerger.class);
private static final String SPILL_OUTPUT_PREFIX = "spill";
private final RssRemoteMergeManagerImpl<K, V> manager;
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
index c9f0d45f0..16e3a3a6f 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
@@ -22,8 +22,6 @@ import java.util.Set;
import java.util.TreeSet;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
@@ -40,13 +38,15 @@ import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.uniffle.common.exception.RssException;
import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
public class RssRemoteMergeManagerImpl<K, V> extends MergeManagerImpl<K, V> {
- private static final Log LOG =
LogFactory.getLog(RssRemoteMergeManagerImpl.class);
+ private static final Logger LOG =
LoggerFactory.getLogger(RssRemoteMergeManagerImpl.class);
private final String appId;
private final TaskAttemptID reduceId;
@@ -173,8 +173,7 @@ public class RssRemoteMergeManagerImpl<K, V> extends
MergeManagerImpl<K, V> {
(this.memoryLimit
* jobConf.getFloat(
MRJobConfig.SHUFFLE_MERGE_PERCENT,
MRJobConfig.DEFAULT_SHUFFLE_MERGE_PERCENT));
- LOG.info(
- "MergerManager: memoryLimit=" + memoryLimit + ", " + "mergeThreshold="
+ mergeThreshold);
+ LOG.info("MergerManager: memoryLimit={}, mergeThreshold={}", memoryLimit,
mergeThreshold);
this.inMemoryMerger = createRssInMemoryMerger();
this.inMemoryMerger.start();
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
index df3f667be..704c26714 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
@@ -23,8 +23,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RawKeyValueIterator;
@@ -38,6 +36,8 @@ import org.apache.hadoop.mapreduce.RssMRConfig;
import org.apache.hadoop.mapreduce.RssMRUtils;
import org.apache.hadoop.util.Progress;
import org.roaringbitmap.longlong.Roaring64NavigableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.uniffle.client.api.ShuffleReadClient;
import org.apache.uniffle.client.api.ShuffleWriteClient;
@@ -48,7 +48,7 @@ import org.apache.uniffle.hadoop.shim.HadoopShimImpl;
public class RssShuffle<K, V> implements ShuffleConsumerPlugin<K, V>,
ExceptionReporter {
- private static final Log LOG = LogFactory.getLog(RssShuffle.class);
+ private static final Logger LOG = LoggerFactory.getLogger(RssShuffle.class);
private static final int MAX_EVENTS_TO_FETCH = 10000;
diff --git
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
index a631696c7..4fdf48b8f 100644
---
a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
+++
b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
@@ -446,7 +446,7 @@ public class RssMRAppMaster extends MRAppMaster {
MRJobConfig.CACHE_FILE_TIMESTAMPS,
ts == null ? String.valueOf(currentTs) : currentTs + "," + ts);
String vis = conf.get(MRJobConfig.CACHE_FILE_VISIBILITIES);
- conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, vis == null ? "false" :
"false" + "," + vis);
+ conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, vis == null ? "false" :
"false," + vis);
long size = status.getLen();
String sizes = conf.get(MRJobConfig.CACHE_FILES_SIZES);
conf.set(
diff --git
a/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
b/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
index 4b78c3c5c..690596102 100644
---
a/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
+++
b/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
@@ -20,8 +20,6 @@ package org.apache.uniffle.hadoop.shim;
import java.io.IOException;
import java.lang.reflect.Constructor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.reduce.ShuffleClientMetrics;
@@ -30,10 +28,12 @@ import
org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class HadoopShimImpl {
- private static final Log LOG = LogFactory.getLog(HadoopShimImpl.class);
+ private static final Logger LOG =
LoggerFactory.getLogger(HadoopShimImpl.class);
public static ShuffleClientMetrics createShuffleClientMetrics(
TaskAttemptID taskAttemptID, JobConf jobConf) {
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
index a734b102b..bcd089b4f 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
@@ -545,7 +545,7 @@ public class RssShuffleManager extends ShuffleManager {
}
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "NumCompletedInputs: " +
numCompletedInputs);
+ LOG.debug("{}: NumCompletedInputs: {}", srcNameTrimmed,
numCompletedInputs);
}
if (!isAllInputFetched() && !isShutdown.get()) {
@@ -576,7 +576,7 @@ public class RssShuffleManager extends ShuffleManager {
}
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Processing pending
partition: " + partition);
+ LOG.debug("{}: Processing pending partition: {}",
srcNameTrimmed, partition);
}
if (!isShutdown.get()
@@ -810,8 +810,7 @@ public class RssShuffleManager extends ShuffleManager {
}
}
if (LOG.isDebugEnabled()) {
- LOG.debug(
- srcNameTrimmed + ": " + "Adding input: " + srcAttemptIdentifier + ",
to host: " + host);
+ LOG.debug("{}: Adding input: {}, to host: {}", srcNameTrimmed,
srcAttemptIdentifier, host);
}
if (!validateInputAttemptForPipelinedShuffle(srcAttemptIdentifier)) {
@@ -1196,7 +1195,7 @@ public class RssShuffleManager extends ShuffleManager {
if (Thread.currentThread().isInterrupted()) {
// need to cleanup all FetchedInput (DiskFetchedInput,
LocalDisFetchedInput), lockFile
// As of now relying on job cleanup (when all directories would be
cleared)
- LOG.info(srcNameTrimmed + ": " + "Thread interrupted. Need to cleanup
the local dirs");
+ LOG.info("{}: Thread interrupted. Need to cleanup the local dirs",
srcNameTrimmed);
}
if (!isShutdown.getAndSet(true)) {
// Shut down any pending fetchers
@@ -1364,17 +1363,17 @@ public class RssShuffleManager extends ShuffleManager {
private class SchedulerFutureCallback implements FutureCallback<Void> {
@Override
public void onSuccess(Void result) {
- LOG.info(srcNameTrimmed + ": " + "Scheduler thread completed");
+ LOG.info("{}: Scheduler thread completed", srcNameTrimmed);
}
@Override
public void onFailure(Throwable t) {
if (isShutdown.get()) {
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring error:
" + t);
+ LOG.debug("{}: Already shutdown. Ignoring error: ", srcNameTrimmed,
t);
}
} else {
- LOG.error(srcNameTrimmed + ": " + "Scheduler failed with error: ", t);
+ LOG.error("{}: Scheduler failed with error: ", srcNameTrimmed, t);
inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "Shuffle
Scheduler Failed");
}
}
@@ -1405,7 +1404,7 @@ public class RssShuffleManager extends ShuffleManager {
fetcher.shutdown();
if (isShutdown.get()) {
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring event
from fetcher");
+ LOG.debug("{}: Already shutdown. Ignoring event from fetcher",
srcNameTrimmed);
}
} else {
lock.lock();
@@ -1430,10 +1429,10 @@ public class RssShuffleManager extends ShuffleManager {
fetcher.shutdown();
if (isShutdown.get()) {
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring error
from fetcher: " + t);
+ LOG.debug("{}: Already shutdown. Ignoring error from fetcher: ",
srcNameTrimmed, t);
}
} else {
- LOG.error(srcNameTrimmed + ": " + "Fetcher failed with error: ", t);
+ LOG.error("{}: Fetcher failed with error: ", srcNameTrimmed, t);
shuffleError = t;
inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "Fetch
failed");
doBookKeepingForFetcherComplete();
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
index 584c5718b..7abfce6b8 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
@@ -316,7 +316,7 @@ public class RssSimpleFetchedInputAllocator extends
SimpleFetchedInputAllocator
private synchronized void unreserve(long size) {
this.usedMemory -= size;
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Used memory after freeing " + size +
" : " + usedMemory);
+ LOG.debug("{}: Used memory after freeing {}: {}", srcNameTrimmed, size,
usedMemory);
}
}
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
index 56e00b324..f8e873dc5 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
@@ -444,8 +444,7 @@ public class RssShuffle implements ExceptionReporter {
}
} catch (Throwable e) {
if (ignoreErrors) {
- LOG.info(
- srcNameTrimmed + ": " + "Exception while trying to shutdown
merger, Ignoring", e);
+ LOG.info("{}: Exception while trying to shutdown merger, Ignoring",
srcNameTrimmed, e);
} else {
throw e;
}
@@ -467,7 +466,7 @@ public class RssShuffle implements ExceptionReporter {
}
cleanupMerger(true);
} catch (Throwable t) {
- LOG.info(srcNameTrimmed + ": " + "Error in cleaning up.., ", t);
+ LOG.info("{}: Error in cleaning up.., ", srcNameTrimmed, t);
}
}
@@ -516,15 +515,15 @@ public class RssShuffle implements ExceptionReporter {
private class RssShuffleRunnerFutureCallback implements
FutureCallback<TezRawKeyValueIterator> {
@Override
public void onSuccess(TezRawKeyValueIterator result) {
- LOG.info(srcNameTrimmed + ": " + "RSSShuffle Runner thread complete");
+ LOG.info(srcNameTrimmed + ": RSSShuffle Runner thread complete");
}
@Override
public void onFailure(Throwable t) {
if (isShutDown.get()) {
- LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring error");
+ LOG.info(srcNameTrimmed + ": Already shutdown. Ignoring error");
} else {
- LOG.error(srcNameTrimmed + ": " + "RSSShuffleRunner failed with
error", t);
+ LOG.error(srcNameTrimmed + ": RSSShuffleRunner failed with error", t);
// In case of an abort / Interrupt - the runtime makes sure that this
is ignored.
inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "RSSShuffle
Runner Failed");
cleanupIgnoreErrors();
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
index 0528037d3..ea2fe457c 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
@@ -1056,7 +1056,7 @@ class RssShuffleScheduler extends ShuffleScheduler {
@Override
public void reportLocalError(IOException ioe) {
- LOG.error(srcNameTrimmed + ": " + "Shuffle failed : caused by local
error", ioe);
+ LOG.error("{}: Shuffle failed: caused by local error", srcNameTrimmed,
ioe);
// Shuffle knows how to deal with failures post shutdown via the onFailure
hook
exceptionReporter.reportException(ioe);
}
@@ -1320,7 +1320,7 @@ class RssShuffleScheduler extends ShuffleScheduler {
@Override
public void obsoleteInput(InputAttemptIdentifier srcAttempt) {
// The incoming srcAttempt does not contain a path component.
- LOG.info(srcNameTrimmed + ": " + "Adding obsolete input: " + srcAttempt);
+ LOG.info("{}: Adding obsolete input: {}", srcNameTrimmed, srcAttempt);
ShuffleEventInfo eventInfo =
pipelinedShuffleInfoEventsMap.get(srcAttempt.getInputIdentifier());
// Pipelined shuffle case (where pipelinedShuffleInfoEventsMap gets
populated).
@@ -1684,8 +1684,7 @@ class RssShuffleScheduler extends ShuffleScheduler {
}
if (LOG.isDebugEnabled()) {
- LOG.debug(
- srcNameTrimmed + ": " + "NumCompletedInputs: {}" + (numInputs -
remainingMaps.get()));
+ LOG.debug("{}: NumCompletedInputs: {}", srcNameTrimmed, numInputs -
remainingMaps.get());
}
// Ensure there's memory available before scheduling the next Fetcher.
try {
@@ -1731,13 +1730,14 @@ class RssShuffleScheduler extends ShuffleScheduler {
break; // Check for the exit condition.
}
if (LOG.isDebugEnabled()) {
- LOG.debug(srcNameTrimmed + ": " + "Processing pending host: "
+ mapHost.toString());
+ LOG.debug("{}: Processing pending host: {}", srcNameTrimmed,
mapHost.toString());
}
if (!isShutdown.get()) {
count++;
if (LOG.isDebugEnabled()) {
LOG.debug(
- srcNameTrimmed + ": " + "Scheduling fetch for inputHost:
{}",
+ "{}: Scheduling fetch for inputHost: {}",
+ srcNameTrimmed,
mapHost.getHostIdentifier() + ":" +
mapHost.getPartitionId());
}
@@ -1937,7 +1937,7 @@ class RssShuffleScheduler extends ShuffleScheduler {
rssFetcherOrderedGrouped.shutDown();
if (isShutdown.get()) {
- LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring fetch
complete");
+ LOG.info("{}: Already shutdown. Ignoring fetch complete",
srcNameTrimmed);
} else {
successRssPartitionSet.add(partitionId);
MapHost mapHost = runningRssPartitionMap.remove(partitionId);
@@ -1962,9 +1962,9 @@ class RssShuffleScheduler extends ShuffleScheduler {
LOG.error("Failed to fetch.", t);
rssFetcherOrderedGrouped.shutDown();
if (isShutdown.get()) {
- LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring fetch
complete");
+ LOG.info("{}: Already shutdown. Ignoring fetch complete",
srcNameTrimmed);
} else {
- LOG.error(srcNameTrimmed + ": " + "Fetcher failed with error", t);
+ LOG.error("{}: Fetcher failed with error", srcNameTrimmed, t);
exceptionReporter.reportException(t);
doBookKeepingForFetcherComplete();
}
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
index 66859c28c..992f509d7 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
@@ -241,7 +241,7 @@ public class RssTezShuffleDataFetcher extends
CallableWithNdc<Void> {
}
// Check if we can shuffle *now* ...
if (mapOutput == null || mapOutput.getType() == MapOutput.Type.WAIT) {
- LOG.info("RssMRFetcher" + " - MergeManager returned status WAIT ...");
+ LOG.info("RssMRFetcher - MergeManager returned status WAIT ...");
// Not an error but wait to process data.
// Use a retry flag to avoid re-fetch and re-uncompress.
hasPendingData = true;
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
index 8215ecc23..176b434a4 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
@@ -60,7 +60,7 @@ public class RssConcatenatedMergedKeyValueInput extends
MergedLogicalInput {
Reader reader = getInputs().get(currentReaderIndex).getReader();
if (!(reader instanceof KeyValueReader)) {
throw new TezUncheckedException(
- "Expected KeyValueReader. " + "Got: " +
reader.getClass().getName());
+ "Expected KeyValueReader. Got: " +
reader.getClass().getName());
}
currentReader = (KeyValueReader) reader;
currentReaderIndex++;
diff --git
a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
index aafa046a3..f4abebc8c 100644
---
a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
+++
b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
@@ -61,7 +61,7 @@ public class RssConcatenatedMergedKeyValuesInput extends
MergedLogicalInput {
Reader reader = getInputs().get(currentReaderIndex).getReader();
if (!(reader instanceof KeyValuesReader)) {
throw new TezUncheckedException(
- "Expected KeyValuesReader. " + "Got: " +
reader.getClass().getName());
+ "Expected KeyValuesReader. Got: " +
reader.getClass().getName());
}
currentReader = (KeyValuesReader) reader;
currentReaderIndex++;
diff --git
a/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
b/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
index 181e4a583..259c81ea4 100644
--- a/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
+++ b/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
@@ -110,7 +110,7 @@ public class ShuffleServerInfo implements Serializable {
+ nettyPort
+ "]}";
} else {
- return "ShuffleServerInfo{host[" + host + "]," + " grpc port[" +
grpcPort + "]}";
+ return "ShuffleServerInfo{host[" + host + "], grpc port[" + grpcPort +
"]}";
}
}
diff --git
a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
index d3d75addc..583019476 100644
---
a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
+++
b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
@@ -80,6 +80,6 @@ public class ApplicationRequest {
@Override
public String toString() {
- return "ApplicationRequest{" + "applications=" +
StringUtils.join(applications, ",") + '}';
+ return "ApplicationRequest{applications=" + StringUtils.join(applications,
",") + '}';
}
}
diff --git
a/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
b/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
index 489465c06..00133e0cb 100644
---
a/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
+++
b/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
@@ -43,7 +43,7 @@ public class YamlClientConfParserTest {
// rssClientConf with format of 'k : v'
- String yaml = "rssClientConf:\n" + " k1: v1\n" + " k2: v2";
+ String yaml = "rssClientConf:\n k1: v1\n k2: v2";
ClientConf conf = parser.tryParse(IOUtils.toInputStream(yaml));
assertEquals(2, conf.getRssClientConf().size());
diff --git
a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
index bae77cfec..549ae21e9 100644
---
a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
+++
b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
@@ -162,7 +162,7 @@ public class ShuffleServerWithMemLocalHadoopTest extends
ShuffleReadWriteBase {
LOG.info("checkSkippedMetrics={}, isNettyMode={}", checkSkippedMetrics,
isNettyMode);
ShuffleServerGrpcClient shuffleServerClient =
isNettyMode ? nettyShuffleServerClient : grpcShuffleServerClient;
- String testAppId = "memoryLocalFileHDFSReadWithFilterTest_" + "ship_" +
checkSkippedMetrics;
+ String testAppId = "memoryLocalFileHDFSReadWithFilterTest_ship_" +
checkSkippedMetrics;
int shuffleId = 0;
int partitionId = 0;
RssRegisterShuffleRequest rrsr =
diff --git
a/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
b/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
index 750fffe16..4add6e887 100644
---
a/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
+++
b/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
@@ -103,7 +103,7 @@ public class TezSimpleSessionExampleTest extends
TezIntegrationTestBase {
@Override
public String[] getTestArgs(String uniqueOutputName) {
return new String[] {
- inputPath + ".0" + "," + inputPath + ".1" + "," + inputPath + ".2",
+ inputPath + ".0," + inputPath + ".1," + inputPath + ".2",
outputPath
+ "/"
+ uniqueOutputName
diff --git
a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
index a7b140003..a7d8254f4 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
@@ -468,7 +468,7 @@ public class ShuffleServerConf extends RssBaseConf {
.intType()
.checkValue(
ConfigUtils.SERVER_PORT_VALIDATOR,
- "check server port value is 0 " + "or value >= 1024 && value <=
65535")
+ "check server port value is 0 or value >= 1024 && value <=
65535")
.defaultValue(-1)
.withDescription("Shuffle netty server port");
diff --git
a/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
b/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
index 47a0e807e..c088242c8 100644
---
a/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
+++
b/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
@@ -699,7 +699,7 @@ public class ShuffleServerGrpcService extends
ShuffleServerImplBase {
.getGrpcMetrics()
.recordProcessTime(ShuffleServerGrpcMetrics.GET_SHUFFLE_DATA_METHOD, readTime);
LOG.info(
- "Successfully getShuffleData cost {} ms for shuffle" + " data with
{}",
+ "Successfully getShuffleData cost {} ms for shuffle data with {}",
readTime,
requestInfo);
reply =
@@ -791,7 +791,7 @@ public class ShuffleServerGrpcService extends
ShuffleServerImplBase {
.getGrpcMetrics()
.recordProcessTime(ShuffleServerGrpcMetrics.GET_SHUFFLE_INDEX_METHOD, readTime);
LOG.info(
- "Successfully getShuffleIndex cost {} ms for {}" + " bytes with
{}",
+ "Successfully getShuffleIndex cost {} ms for {} bytes with {}",
readTime,
data.remaining(),
requestInfo);
@@ -894,7 +894,7 @@ public class ShuffleServerGrpcService extends
ShuffleServerImplBase {
.getGrpcMetrics()
.recordProcessTime(ShuffleServerGrpcMetrics.GET_MEMORY_SHUFFLE_DATA_METHOD,
costTime);
LOG.info(
- "Successfully getInMemoryShuffleData cost {} ms with {} bytes
shuffle" + " data for {}",
+ "Successfully getInMemoryShuffleData cost {} ms with {} bytes
shuffle data for {}",
costTime,
data.length,
requestInfo);