This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 59a9ee77657
[SPARK-45682][CORE][SQL][ML][MLLIB][GRAPHX][YARN][DSTREAM][UI][EXAMPLES] Fix
"method + in class Byte/Short/Char/Long/Double/Int is deprecated"
59a9ee77657 is described below
commit 59a9ee776570de987c36e3e8e995d067017064b5
Author: yangjie01 <[email protected]>
AuthorDate: Sun Oct 29 22:37:49 2023 -0700
[SPARK-45682][CORE][SQL][ML][MLLIB][GRAPHX][YARN][DSTREAM][UI][EXAMPLES]
Fix "method + in class Byte/Short/Char/Long/Double/Int is deprecated"
### What changes were proposed in this pull request?
There are some compilation warnings similar to the following:
```
[error]
/Users/yangjie01/SourceCode/git/spark-mine-sbt/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala:65:58:
method + in class Double is deprecated (since 2.13.0): Adding a number and a
String is deprecated. Use the string interpolation `s"$num$str"`
[error] Applicable -Wconf / nowarn filters for this fatal warning:
msg=<part of the message>, cat=deprecation,
site=org.apache.spark.mllib.regression.LassoSuite, origin=scala.Double.+,
version=2.13.0
[error] assert(weight1 >= -1.60 && weight1 <= -1.40, weight1 + " not in
[-1.6, -1.4]")
[error] ^
[error]
/Users/yangjie01/SourceCode/git/spark-mine-sbt/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala:1314:50:
method + in class Int is deprecated (since 2.13.0): Adding a number and a
String is deprecated. Use the string interpolation `s"$num$str"`
[error] Applicable -Wconf / nowarn filters for this fatal warning:
msg=<part of the message>, cat=deprecation,
site=org.apache.spark.sql.hive.execution.SQLQuerySuiteBase, origin=scala.Int.+,
version=2.13.0
[error] checkAnswer(df, (0 until 5).map(i => Row(i + "#", i + "#")))
[error] ^
```
This PR fixes them by following the deprecation guidance: "Adding a number and a String is deprecated. Use the string interpolation `s"$num$str"`".
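For illustration only (not from the patch itself), here is a minimal, self-contained Scala sketch of the before/after pattern applied throughout this change; the object and value names are hypothetical, chosen to mirror identifiers touched in the diff:
```scala
// Hypothetical example of the rewrite pattern used in this PR:
// replace deprecated numeric `+ String` concatenation with string interpolation.
object StringConcatMigration {
  def main(args: Array[String]): Unit = {
    val defaultValue: Long = 1024L   // e.g. a size in bytes
    val executorMemory: Int = 512    // e.g. a memory setting in MiB

    // Deprecated since Scala 2.13 (fails the build when deprecation warnings are fatal):
    //   val size = defaultValue + "B"      // "method + in class Long is deprecated"
    //   val mem  = executorMemory + "m"    // "method + in class Int is deprecated"

    // Preferred form: string interpolation. Braces are required when the
    // identifier is immediately followed by a letter, otherwise the compiler
    // would try to resolve a longer name such as `defaultValueB`.
    val size = s"${defaultValue}B"   // "1024B"
    val mem  = s"${executorMemory}m" // "512m"

    // No braces needed when the next character cannot extend the identifier.
    val stageId = 3
    val stageAttemptId = 0
    val taskSetId = s"$stageId.$stageAttemptId" // "3.0"

    println(Seq(size, mem, taskSetId).mkString(", "))
  }
}
```
With deprecation warnings treated as fatal (as in the compiler output above), the commented-out `+` forms would fail compilation, while the interpolated forms produce identical strings without any warning.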
### Why are the changes needed?
Clean up deprecated Scala API usage.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Pass GitHub Actions.
### Was this patch authored or co-authored using generative AI tooling?
No
Closes #43573 from LuciferYang/SPARK-45682.
Authored-by: yangjie01 <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
.../main/scala/org/apache/spark/SparkConf.scala | 2 +-
.../spark/internal/config/ConfigBuilder.scala | 4 ++--
.../scala/org/apache/spark/scheduler/TaskSet.scala | 2 +-
.../org/apache/spark/ui/ConsoleProgressBar.scala | 4 ++--
.../main/scala/org/apache/spark/ui/UIUtils.scala | 2 +-
.../scala/org/apache/spark/ui/jobs/StagePage.scala | 6 +++---
.../scala/org/apache/spark/util/Distribution.scala | 4 ++--
.../scala/org/apache/spark/rdd/PipedRDDSuite.scala | 2 +-
.../spark/util/collection/AppendOnlyMapSuite.scala | 6 +++---
.../spark/util/collection/OpenHashMapSuite.scala | 6 +++---
.../collection/PrimitiveKeyOpenHashMapSuite.scala | 6 +++---
.../apache/spark/examples/graphx/Analytics.scala | 2 +-
.../apache/spark/graphx/util/GraphGenerators.scala | 2 +-
.../apache/spark/mllib/util/MFDataGenerator.scala | 4 ++--
.../spark/ml/classification/LinearSVCSuite.scala | 2 +-
.../classification/LogisticRegressionSuite.scala | 10 +++++-----
.../GeneralizedLinearRegressionSuite.scala | 22 +++++++++++-----------
.../ml/regression/LinearRegressionSuite.scala | 8 ++++----
.../apache/spark/mllib/regression/LassoSuite.scala | 12 ++++++------
.../spark/deploy/yarn/ExecutorRunnable.scala | 2 +-
.../yarn/YarnShuffleServiceMetricsSuite.scala | 2 +-
.../spark/sql/catalyst/expressions/literals.scala | 10 +++++-----
.../expressions/IntervalExpressionsSuite.scala | 4 ++--
.../sql/catalyst/util/IntervalUtilsSuite.scala | 2 +-
.../test/scala/org/apache/spark/sql/UDFSuite.scala | 2 +-
.../sql/execution/datasources/FileIndexSuite.scala | 2 +-
.../parquet/ParquetColumnIndexSuite.scala | 6 +++---
.../datasources/parquet/ParquetFilterSuite.scala | 2 +-
.../datasources/v2/V2PredicateSuite.scala | 2 +-
.../spark/sql/hive/execution/SQLQuerySuite.scala | 4 ++--
.../apache/spark/streaming/CheckpointSuite.scala | 2 +-
.../apache/spark/streaming/InputStreamsSuite.scala | 4 ++--
.../spark/streaming/StreamingContextSuite.scala | 2 +-
33 files changed, 76 insertions(+), 76 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala
b/core/src/main/scala/org/apache/spark/SparkConf.scala
index b688604beea..b8fd2700771 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -323,7 +323,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable
with Logging with Seria
* @throws NumberFormatException If the value cannot be interpreted as bytes
*/
def getSizeAsBytes(key: String, defaultValue: Long): Long =
catchIllegalValue(key) {
- Utils.byteStringAsBytes(get(key, defaultValue + "B"))
+ Utils.byteStringAsBytes(get(key, s"${defaultValue}B"))
}
/**
diff --git
a/core/src/main/scala/org/apache/spark/internal/config/ConfigBuilder.scala
b/core/src/main/scala/org/apache/spark/internal/config/ConfigBuilder.scala
index e3190269a53..954980dcb94 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/ConfigBuilder.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/ConfigBuilder.scala
@@ -55,7 +55,7 @@ private object ConfigHelpers {
def timeFromString(str: String, unit: TimeUnit): Long =
JavaUtils.timeStringAs(str, unit)
- def timeToString(v: Long, unit: TimeUnit): String = TimeUnit.MILLISECONDS.convert(v, unit) + "ms"
+ def timeToString(v: Long, unit: TimeUnit): String = s"${TimeUnit.MILLISECONDS.convert(v, unit)}ms"
def byteFromString(str: String, unit: ByteUnit): Long = {
val (input, multiplier) =
@@ -67,7 +67,7 @@ private object ConfigHelpers {
multiplier * JavaUtils.byteStringAs(input, unit)
}
- def byteToString(v: Long, unit: ByteUnit): String = unit.convertTo(v, ByteUnit.BYTE) + "b"
+ def byteToString(v: Long, unit: ByteUnit): String = s"${unit.convertTo(v, ByteUnit.BYTE)}b"
def regexFromString(str: String, key: String): Regex = {
try str.r catch {
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSet.scala
b/core/src/main/scala/org/apache/spark/scheduler/TaskSet.scala
index 6411757313e..e03c4101709 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSet.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSet.scala
@@ -31,7 +31,7 @@ private[spark] class TaskSet(
val properties: Properties,
val resourceProfileId: Int,
val shuffleId: Option[Int]) {
- val id: String = stageId + "." + stageAttemptId
+ val id: String = s"$stageId.$stageAttemptId"
override def toString: String = "TaskSet " + id
}
diff --git a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
index 64a786e5825..0693c9af144 100644
--- a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
@@ -94,7 +94,7 @@ private[spark] class ConsoleProgressBar(sc: SparkContext)
extends Logging {
// only refresh if it's changed OR after 1 minute (or the ssh connection
will be closed
// after idle some time)
if (bar != lastProgressBar || now - lastUpdateTime > 60 * 1000L) {
- System.err.print(CR + bar + CR)
+ System.err.print(s"${CR}bar$CR")
lastUpdateTime = now
}
lastProgressBar = bar
@@ -105,7 +105,7 @@ private[spark] class ConsoleProgressBar(sc: SparkContext)
extends Logging {
*/
private def clear(): Unit = {
if (!lastProgressBar.isEmpty) {
- System.err.printf(CR + " " * TerminalWidth + CR)
+ System.err.printf(s"$CR${" " * TerminalWidth}$CR")
lastProgressBar = ""
}
}
diff --git a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
index ccddbd1c608..4d3baabc495 100644
--- a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
@@ -401,7 +401,7 @@ private[spark] object UIUtils extends Logging {
}
}
val colWidth = 100.toDouble / headers.size
- val colWidthAttr = if (fixedWidth) colWidth + "%" else ""
+ val colWidthAttr = if (fixedWidth) s"$colWidth%" else ""
def getClass(index: Int): String = {
if (index < headerClasses.size) {
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
index 717501ac0c1..ca9f4e72326 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
@@ -50,10 +50,10 @@ private[ui] class StagePage(parent: StagesTab, store:
AppStatusStore) extends We
legendPairs.zipWithIndex.map {
case ((classAttr, name), index) =>
- <rect x={5 + (index / 3) * 210 + "px"} y={10 + (index % 3) * 15 + "px"}
+ <rect x={s"${5 + (index / 3) * 210}px"} y={s"${10 + (index % 3) * 15}px"}
width="10px" height="10px" class={classAttr}></rect>
- <text x={25 + (index / 3) * 210 + "px"}
- y={20 + (index % 3) * 15 + "px"}>{name}</text>
+ <text x={s"${25 + (index / 3) * 210}px"}
+ y={s"${20 + (index % 3) * 15}px"}>{name}</text>
}
}
</svg>
diff --git a/core/src/main/scala/org/apache/spark/util/Distribution.scala
b/core/src/main/scala/org/apache/spark/util/Distribution.scala
index 550884c8732..49aab5575f8 100644
--- a/core/src/main/scala/org/apache/spark/util/Distribution.scala
+++ b/core/src/main/scala/org/apache/spark/util/Distribution.scala
@@ -54,7 +54,7 @@ private[spark] class Distribution(val data: Array[Double],
val startIdx: Int, va
def showQuantiles(out: PrintStream = System.out): Unit = {
// scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
- getQuantiles(defaultProbabilities).foreach{q => out.print(q + "\t")}
+ getQuantiles(defaultProbabilities).foreach{q => out.print(s"$q\t")}
out.println
// scalastyle:on println
}
@@ -86,7 +86,7 @@ private[spark] object Distribution {
def showQuantiles(out: PrintStream = System.out, quantiles:
Iterable[Double]): Unit = {
// scalastyle:off println
out.println("min\t25%\t50%\t75%\tmax")
- quantiles.foreach{q => out.print(q + "\t")}
+ quantiles.foreach{q => out.print(s"$q\t")}
out.println
// scalastyle:on println
}
diff --git a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
index 2b9dd3250c7..2acd648b98d 100644
--- a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
@@ -124,7 +124,7 @@ class PipedRDDSuite extends SparkFunSuite with
SharedSparkContext with Eventuall
(f: String => Unit) => {
bl.value.foreach(f); f("\u0001")
},
- (i: Int, f: String => Unit) => f(i + "_"))
+ (i: Int, f: String => Unit) => f(s"${i}_"))
val c = piped.collect()
diff --git
a/core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
b/core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
index f2057703d32..f6e679a08a4 100644
---
a/core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
+++
b/core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
@@ -115,16 +115,16 @@ class AppendOnlyMapSuite extends SparkFunSuite {
assert(oldValue === "" + i)
oldValue + "!"
})
- assert(res === i + "!")
+ assert(res === s"$i!")
}
// Iterate from 101 to 400 to make sure the map grows a couple of times,
because we had a
// bug where changeValue would return the wrong result when the map grew
on that insert
for (i <- 101 to 400) {
val res = map.changeValue("" + i, (hadValue, oldValue) => {
assert(hadValue === false)
- i + "!"
+ s"$i!"
})
- assert(res === i + "!")
+ assert(res === s"$i!")
}
assert(map.size === 400)
assert(map(null) === null)
diff --git
a/core/src/test/scala/org/apache/spark/util/collection/OpenHashMapSuite.scala
b/core/src/test/scala/org/apache/spark/util/collection/OpenHashMapSuite.scala
index 1af99e9017c..155c855c872 100644
---
a/core/src/test/scala/org/apache/spark/util/collection/OpenHashMapSuite.scala
+++
b/core/src/test/scala/org/apache/spark/util/collection/OpenHashMapSuite.scala
@@ -146,13 +146,13 @@ class OpenHashMapSuite extends SparkFunSuite with
Matchers {
assert(v === i.toString)
v + "!"
})
- assert(res === i + "!")
+ assert(res === s"$i!")
}
// Iterate from 101 to 400 to make sure the map grows a couple of times,
because we had a
// bug where changeValue would return the wrong result when the map grew
on that insert
for (i <- 101 to 400) {
- val res = map.changeValue(i.toString, { i + "!" }, v => { assert(false); v })
- assert(res === i + "!")
+ val res = map.changeValue(i.toString, { s"$i!" }, v => { assert(false); v })
+ assert(res === s"$i!")
}
assert(map.size === 400)
assert(map(null) === null)
diff --git
a/core/src/test/scala/org/apache/spark/util/collection/PrimitiveKeyOpenHashMapSuite.scala
b/core/src/test/scala/org/apache/spark/util/collection/PrimitiveKeyOpenHashMapSuite.scala
index 3b42300ccdd..636ea7b0ee7 100644
---
a/core/src/test/scala/org/apache/spark/util/collection/PrimitiveKeyOpenHashMapSuite.scala
+++
b/core/src/test/scala/org/apache/spark/util/collection/PrimitiveKeyOpenHashMapSuite.scala
@@ -95,13 +95,13 @@ class PrimitiveKeyOpenHashMapSuite extends SparkFunSuite
with Matchers {
assert(v === i.toString)
v + "!"
})
- assert(res === i + "!")
+ assert(res === s"$i!")
}
// Iterate from 101 to 400 to make sure the map grows a couple of times,
because we had a
// bug where changeValue would return the wrong result when the map grew
on that insert
for (i <- 101 to 400) {
- val res = map.changeValue(i.toLong, { i + "!" }, v => { assert(false); v })
- assert(res === i + "!")
+ val res = map.changeValue(i.toLong, { s"$i!" }, v => { assert(false); v })
+ assert(res === s"$i!")
}
assert(map.size === 400)
}
diff --git
a/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
b/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
index a1e6b03ba10..a8f9b32b0f3 100644
--- a/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala
@@ -101,7 +101,7 @@ object Analytics {
if (!outFname.isEmpty) {
println(s"Saving pageranks of pages to $outFname")
- pr.map { case (id, r) => id + "\t" + r }.saveAsTextFile(outFname)
+ pr.map { case (id, r) => s"$id\t$r" }.saveAsTextFile(outFname)
}
sc.stop()
diff --git
a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
index 7b09c76fa11..7d58c19f175 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/util/GraphGenerators.scala
@@ -132,7 +132,7 @@ object GraphGenerators extends Logging {
val edges = mutable.Set.empty[Edge[Int]]
while (edges.size < numEdges) {
if (edges.size % 100 == 0) {
- logDebug(edges.size + " edges")
+ logDebug(s"${edges.size} edges")
}
edges += addEdge(numVertices)
}
diff --git
a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
index f6f5edb5dc0..31417ddeb2d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
@@ -98,7 +98,7 @@ object MFDataGenerator {
trainData.map(x => (x._1, x._2, x._3 + rand.nextGaussian() * sigma))
}
- trainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
+ trainData.map(x => s"${x._1},${x._2},${x._3}").saveAsTextFile(outputPath)
// optionally generate testing data
if (test) {
@@ -107,7 +107,7 @@ object MFDataGenerator {
val testOrdered = testOmega.sortWith(_ < _).toArray
val testData: RDD[(Int, Int, Double)] = sc.parallelize(testOrdered)
.map(x => (x % m, x / m, fullData.values(x)))
- testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
+ testData.map(x => s"${x._1},${x._2},${x._3}").saveAsTextFile(outputPath)
}
sc.stop()
diff --git
a/mllib/src/test/scala/org/apache/spark/ml/classification/LinearSVCSuite.scala
b/mllib/src/test/scala/org/apache/spark/ml/classification/LinearSVCSuite.scala
index cf47ebd173d..7376dd686b2 100644
---
a/mllib/src/test/scala/org/apache/spark/ml/classification/LinearSVCSuite.scala
+++
b/mllib/src/test/scala/org/apache/spark/ml/classification/LinearSVCSuite.scala
@@ -69,7 +69,7 @@ class LinearSVCSuite extends MLTest with DefaultReadWriteTest
{
*/
ignore("export test data into CSV format") {
binaryDataset.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LinearSVC/binaryDataset")
}
diff --git
a/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
b/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
index 50d8bcb1865..8e54262e2f6 100644
---
a/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
+++
b/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
@@ -160,20 +160,20 @@ class LogisticRegressionSuite extends MLTest with
DefaultReadWriteTest {
*/
ignore("export test data into CSV format") {
binaryDataset.rdd.map { case Row(l: Double, f: Vector, w: Double) =>
- l + "," + w + "," + f.toArray.mkString(",")
+ s"$l,$w,${f.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LogisticRegressionSuite/binaryDataset")
binaryDatasetWithSmallVar.rdd.map { case Row(l: Double, f: Vector, w:
Double) =>
- l + "," + w + "," + f.toArray.mkString(",")
+ s"$l,$w,${f.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LogisticRegressionSuite/binaryDatasetWithSmallVar")
multinomialDataset.rdd.map { case Row(l: Double, f: Vector, w: Double) =>
- l + "," + w + "," + f.toArray.mkString(",")
+ s"$l,$w,${f.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LogisticRegressionSuite/multinomialDataset")
multinomialDatasetWithSmallVar.rdd.map { case Row(l: Double, f: Vector, w:
Double) =>
- l + "," + w + "," + f.toArray.mkString(",")
+ s"$l,$w,${f.toArray.mkString(",")}"
}.repartition(1)
.saveAsTextFile("target/tmp/LogisticRegressionSuite/multinomialDatasetWithSmallVar")
multinomialDatasetWithZeroVar.rdd.map { case Row(l: Double, f: Vector, w:
Double) =>
- l + "," + w + "," + f.toArray.mkString(",")
+ s"$l,$w,${f.toArray.mkString(",")}"
}.repartition(1)
.saveAsTextFile("target/tmp/LogisticRegressionSuite/multinomialDatasetWithZeroVar")
}
diff --git
a/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala
b/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala
index df038487204..8c56b8e1f69 100644
---
a/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala
+++
b/mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala
@@ -129,47 +129,47 @@ class GeneralizedLinearRegressionSuite extends MLTest
with DefaultReadWriteTest
*/
ignore("export test data into CSV format") {
datasetGaussianIdentity.rdd.map { case Row(label: Double, features:
Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianIdentity")
datasetGaussianLog.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianLog")
datasetGaussianInverse.rdd.map { case Row(label: Double, features: Vector)
=>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianInverse")
datasetBinomial.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetBinomial")
datasetPoissonLog.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLog")
datasetPoissonLogWithZero.rdd.map { case Row(label: Double, features:
Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLogWithZero")
datasetPoissonIdentity.rdd.map { case Row(label: Double, features: Vector)
=>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonIdentity")
datasetPoissonSqrt.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonSqrt")
datasetGammaInverse.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaInverse")
datasetGammaIdentity.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaIdentity")
datasetGammaLog.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaLog")
}
diff --git
a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
index 9e4be2a964a..62f216b03ca 100644
---
a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
+++
b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
@@ -131,21 +131,21 @@ class LinearRegressionSuite extends MLTest with
DefaultReadWriteTest with PMMLRe
*/
ignore("export test data into CSV format") {
datasetWithDenseFeature.rdd.map { case Row(label: Double, features:
Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithDenseFeature")
datasetWithDenseFeatureWithoutIntercept.rdd.map {
case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile(
"target/tmp/LinearRegressionSuite/datasetWithDenseFeatureWithoutIntercept")
datasetWithSparseFeature.rdd.map { case Row(label: Double, features:
Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithSparseFeature")
datasetWithOutlier.rdd.map { case Row(label: Double, features: Vector) =>
- label + "," + features.toArray.mkString(",")
+ s"$label,${features.toArray.mkString(",")}"
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithOutlier")
}
diff --git
a/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala
b/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala
index f336dac0ccb..8eb142541c9 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/LassoSuite.scala
@@ -61,9 +61,9 @@ class LassoSuite extends SparkFunSuite with
MLlibTestSparkContext {
val weight0 = model.weights(0)
val weight1 = model.weights(1)
val weight2 = model.weights(2)
- assert(weight0 >= 1.9 && weight0 <= 2.1, weight0 + " not in [1.9, 2.1]")
- assert(weight1 >= -1.60 && weight1 <= -1.40, weight1 + " not in [-1.6, -1.4]")
- assert(weight2 >= -1.0e-3 && weight2 <= 1.0e-3, weight2 + " not in [-0.001, 0.001]")
+ assert(weight0 >= 1.9 && weight0 <= 2.1, s"$weight0 not in [1.9, 2.1]")
+ assert(weight1 >= -1.60 && weight1 <= -1.40, s"$weight1 not in [-1.6, -1.4]")
+ assert(weight2 >= -1.0e-3 && weight2 <= 1.0e-3, s"$weight2 not in [-0.001, 0.001]")
val validationData = LinearDataGenerator
.generateLinearInput(A, Array[Double](B, C), nPoints, 17)
@@ -105,9 +105,9 @@ class LassoSuite extends SparkFunSuite with
MLlibTestSparkContext {
val weight0 = model.weights(0)
val weight1 = model.weights(1)
val weight2 = model.weights(2)
- assert(weight0 >= 1.9 && weight0 <= 2.1, weight0 + " not in [1.9, 2.1]")
- assert(weight1 >= -1.60 && weight1 <= -1.40, weight1 + " not in [-1.6, -1.4]")
- assert(weight2 >= -1.0e-3 && weight2 <= 1.0e-3, weight2 + " not in [-0.001, 0.001]")
+ assert(weight0 >= 1.9 && weight0 <= 2.1, s"$weight0 not in [1.9, 2.1]")
+ assert(weight1 >= -1.60 && weight1 <= -1.40, s"$weight1 not in [-1.6, -1.4]")
+ assert(weight2 >= -1.0e-3 && weight2 <= 1.0e-3, s"$weight2 not in [-0.001, 0.001]")
val validationData = LinearDataGenerator
.generateLinearInput(A, Array[Double](B, C), nPoints, 17)
diff --git
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
index cdaf04173c2..6a3fa50916b 100644
---
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
+++
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
@@ -153,7 +153,7 @@ private[yarn] class ExecutorRunnable(
val javaOpts = ListBuffer[String]()
// Set the JVM memory
- val executorMemoryString = executorMemory + "m"
+ val executorMemoryString = s"${executorMemory}m"
javaOpts += "-Xmx" + executorMemoryString
// Set extra Java options for the executor, if defined
diff --git
a/resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceMetricsSuite.scala
b/resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceMetricsSuite.scala
index b04aa0aa533..30b6d770ccc 100644
---
a/resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceMetricsSuite.scala
+++
b/resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceMetricsSuite.scala
@@ -77,7 +77,7 @@ class YarnShuffleServiceMetricsSuite extends SparkFunSuite
with Matchers {
assert(counterNames === Seq(s"${testname}_count"))
val rates = Seq("rate1", "rate5", "rate15", "rateMean")
val percentiles =
- "1stPercentile" +: Seq(5, 25, 50, 75, 95, 98, 99, 999).map(_ +
"thPercentile")
+ "1stPercentile" +: Seq(5, 25, 50, 75, 95, 98, 99, 999).map(i =>
s"${i}thPercentile")
val (expectLong, expectDouble) =
if (testname.matches("blockTransfer(Message)?Rate(Bytes)?$")) {
// blockTransfer(Message)?Rate(Bytes)? metrics are Meter so just
have rate information
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
index ddf5f84311a..ec76c70002d 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
@@ -480,9 +480,9 @@ case class Literal (value: Any, dataType: DataType) extends
LeafExpression {
case (v: UTF8String, StringType) =>
// Escapes all backslashes and single quotes.
"'" + v.toString.replace("\\", "\\\\").replace("'", "\\'") + "'"
- case (v: Byte, ByteType) => v + "Y"
- case (v: Short, ShortType) => v + "S"
- case (v: Long, LongType) => v + "L"
+ case (v: Byte, ByteType) => s"${v}Y"
+ case (v: Short, ShortType) => s"${v}S"
+ case (v: Long, LongType) => s"${v}L"
// Float type doesn't have a suffix
case (v: Float, FloatType) =>
val castedValue = v match {
@@ -497,9 +497,9 @@ case class Literal (value: Any, dataType: DataType) extends
LeafExpression {
case _ if v.isNaN => s"CAST('NaN' AS ${DoubleType.sql})"
case Double.PositiveInfinity => s"CAST('Infinity' AS
${DoubleType.sql})"
case Double.NegativeInfinity => s"CAST('-Infinity' AS
${DoubleType.sql})"
- case _ => v + "D"
+ case _ => s"${v}D"
}
- case (v: Decimal, t: DecimalType) => v + "BD"
+ case (v: Decimal, t: DecimalType) => s"${v}BD"
case (v: Int, DateType) =>
s"DATE '$toString'"
case (v: Long, TimestampType) =>
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
index b9c7629f692..a60ab778623 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/IntervalExpressionsSuite.scala
@@ -146,7 +146,7 @@ class IntervalExpressionsSuite extends SparkFunSuite with
ExpressionEvalHelper {
check("2 months 4 seconds", -0.5, "-1 months -2 seconds")
check("1 month 2 microseconds", 1.5, "1 months 3 microseconds")
check("2 months", Int.MaxValue, "integer overflow", Some(true))
- check("2 months", Int.MaxValue, Int.MaxValue + " months", Some(false))
+ check("2 months", Int.MaxValue, s"${Int.MaxValue} months", Some(false))
}
test("divide") {
@@ -179,7 +179,7 @@ class IntervalExpressionsSuite extends SparkFunSuite with
ExpressionEvalHelper {
check("1 second", 0, "Division by zero", Some(true))
check("1 second", 0, null, Some(false))
check(s"${Int.MaxValue} months", 0.9, "integer overflow", Some(true))
- check(s"${Int.MaxValue} months", 0.9, Int.MaxValue + " months",
Some(false))
+ check(s"${Int.MaxValue} months", 0.9, s"${Int.MaxValue} months",
Some(false))
}
test("make interval") {
diff --git
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
index 3ba6787045e..457a53fde0b 100644
---
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
+++
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
@@ -216,7 +216,7 @@ class IntervalUtilsSuite extends SparkFunSuite with
SQLHelper {
assert(duration("1 month -30 days", TimeUnit.DAYS, 31) === 1)
val e = intercept[ArithmeticException] {
- duration(Integer.MAX_VALUE + " month", TimeUnit.SECONDS, 31)
+ duration(s"${Integer.MAX_VALUE} month", TimeUnit.SECONDS, 31)
}
assert(e.getMessage.contains("overflow"))
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
index 60d82fd1ac3..c5e65d2e65f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala
@@ -442,7 +442,7 @@ class UDFSuite extends QueryTest with SharedSparkSession {
Seq((1, "2"), (2, "4")).toDF("a",
"b").write.format("json").saveAsTable("x")
sql("insert into table x values(3, null)")
sql("insert into table x values(null, '4')")
- spark.udf.register("f", (a: Int, b: String) => a + b)
+ spark.udf.register("f", (a: Int, b: String) => s"$a$b")
val df = spark.sql("SELECT f(a, b) FROM x")
val plan = spark.sessionState.executePlan(df.logicalPlan).analyzed
comparePlans(df.logicalPlan, plan)
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala
index 418bff602b0..110c330f169 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala
@@ -503,7 +503,7 @@ class FileIndexSuite extends SharedSparkSession {
val partitionDirectory = new File(dir, "a=foo")
partitionDirectory.mkdir()
for (i <- 1 to 8) {
- val file = new File(partitionDirectory, i + ".txt")
+ val file = new File(partitionDirectory, s"$i.txt")
stringToFile(file, "text")
}
val path = new Path(dir.getCanonicalPath)
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
index 64bfcdadcf4..e1e44697be7 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnIndexSuite.scala
@@ -59,7 +59,7 @@ class ParquetColumnIndexSuite extends QueryTest with
ParquetTest with SharedSpar
}
test("reading from unaligned pages - test filters") {
- val df = spark.range(0, 2000).map(i => (i, i + ":" + "o" * (i / 100).toInt)).toDF()
+ val df = spark.range(0, 2000).map(i => (i, s"$i:${"o" * (i / 100).toInt}")).toDF()
checkUnalignedPages(df)(actions: _*)
}
@@ -97,14 +97,14 @@ class ParquetColumnIndexSuite extends QueryTest with
ParquetTest with SharedSpar
// insert 50 null values in [400, 450) to verify that they are skipped
during processing row
// range [500, 1000) against the second page of col_2 [400, 800)
val df = spark.range(0, 2000).map { i =>
- val strVal = if (i >= 400 && i < 450) null else i + ":" + "o" * (i / 100).toInt
+ val strVal = if (i >= 400 && i < 450) null else s"$i:${"o" * (i / 100).toInt}"
(i, strVal)
}.toDF()
checkUnalignedPages(df)(actions: _*)
}
test("reading unaligned pages - struct type") {
- val df = (0 until 2000).map(i => Tuple1((i.toLong, i + ":" + "o" * (i / 100)))).toDF("s")
+ val df = (0 until 2000).map(i => Tuple1((i.toLong, s"$i:${"o" * (i / 100)}"))).toDF("s")
checkUnalignedPages(df)(
df => df.filter("s._1 = 500"),
df => df.filter("s._1 = 500 or s._1 = 1500"),
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
index 0d64d8fa4e6..4532ce48938 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
@@ -1556,7 +1556,7 @@ abstract class ParquetFilterSuite extends QueryTest with
ParquetTest with Shared
private def checkStringFilterPushdown(
stringPredicate: String => Expression,
sourceFilter: (String, String) => sources.Filter): Unit = {
- withParquetDataFrame((1 to 4).map(i => Tuple1(i + "str" + i))) { implicit df =>
+ withParquetDataFrame((1 to 4).map(i => Tuple1(s"${i}str$i"))) { implicit df =>
checkFilterPredicate(
stringPredicate("").asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2PredicateSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2PredicateSuite.scala
index a5fee51dc91..d2e04eab05c 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2PredicateSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2PredicateSuite.scala
@@ -186,7 +186,7 @@ class V2PredicateSuite extends SparkFunSuite {
var expected = "a IN ("
for (i <- 0 until 1000) {
values(i) = LiteralValue(i, IntegerType)
- expected += i + ", "
+ expected += s"$i, "
}
val predicate3 = new Predicate("IN", (ref("a") +:
values).toArray[Expression])
expected = expected.dropRight(2) // remove the last ", "
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index d8c28e14ceb..90b1073fa4e 100644
---
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1288,7 +1288,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with
SQLTestUtils with TestHi
|) t
|SELECT c
""".stripMargin),
- (0 until 5).map(i => Row(i + "#")))
+ (0 until 5).map(i => Row(s"$i#")))
}
}
@@ -1311,7 +1311,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with
SQLTestUtils with TestHi
|WITH SERDEPROPERTIES('field.delim' = '|')
""".stripMargin)
- checkAnswer(df, (0 until 5).map(i => Row(i + "#", i + "#")))
+ checkAnswer(df, (0 until 5).map(i => Row(s"$i#", s"$i#")))
}
}
diff --git
a/streaming/src/test/scala/org/apache/spark/streaming/CheckpointSuite.scala
b/streaming/src/test/scala/org/apache/spark/streaming/CheckpointSuite.scala
index 97606823aac..43b0835df7c 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/CheckpointSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/CheckpointSuite.scala
@@ -649,7 +649,7 @@ class CheckpointSuite extends TestSuiteBase with
LocalStreamingContext with DStr
*/
def writeFile(i: Int, clock: Clock): Unit = {
val file = new File(testDir, i.toString)
- Files.write(i + "\n", file, StandardCharsets.UTF_8)
+ Files.write(s"$i\n", file, StandardCharsets.UTF_8)
assert(file.setLastModified(clock.getTimeMillis()))
// Check that the file's modification date is actually the value we
wrote, since rounding or
// truncation will break the test:
diff --git
a/streaming/src/test/scala/org/apache/spark/streaming/InputStreamsSuite.scala
b/streaming/src/test/scala/org/apache/spark/streaming/InputStreamsSuite.scala
index ca8307b3628..66fd1ac7bb2 100644
---
a/streaming/src/test/scala/org/apache/spark/streaming/InputStreamsSuite.scala
+++
b/streaming/src/test/scala/org/apache/spark/streaming/InputStreamsSuite.scala
@@ -215,7 +215,7 @@ class InputStreamsSuite extends TestSuiteBase with
BeforeAndAfter {
def createFileAndAdvanceTime(data: Int, dir: File): Unit = {
val file = new File(testSubDir1, data.toString)
- Files.write(data + "\n", file, StandardCharsets.UTF_8)
+ Files.write(s"$data\n", file, StandardCharsets.UTF_8)
assert(file.setLastModified(clock.getTimeMillis()))
assert(file.lastModified === clock.getTimeMillis())
logInfo(s"Created file $file")
@@ -502,7 +502,7 @@ class InputStreamsSuite extends TestSuiteBase with
BeforeAndAfter {
val input = Seq(1, 2, 3, 4, 5)
input.foreach { i =>
val file = new File(testDir, i.toString)
- Files.write(i + "\n", file, StandardCharsets.UTF_8)
+ Files.write(s"$i\n", file, StandardCharsets.UTF_8)
assert(file.setLastModified(clock.getTimeMillis()))
assert(file.lastModified === clock.getTimeMillis())
logInfo("Created file " + file)
diff --git
a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
index c704a41c8a3..4e0fa05d79b 100644
---
a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
+++
b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
@@ -153,7 +153,7 @@ class StreamingContextSuite
addInputStream(ssc).foreachRDD { rdd =>
// Refer to this.appName from inside closure so that this closure refers
to
// the instance of StreamingContextSuite, and is therefore not
serializable
- rdd.count() + appName
+ s"${rdd.count()}$appName"
}
// Test whether start() fails early when checkpointing is enabled