This is an automated email from the ASF dual-hosted git repository.
yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 7e689c7ed548 [SPARK-45689][SPARK-45690][SPARK-45691][CORE][SQL] Clean up the deprecated API usage related to `StringContext/StringOps/RightProjection/LeftProjection/Either` and type use of `BufferedIterator/CanBuildFrom/Traversable`
7e689c7ed548 is described below
commit 7e689c7ed548e109f4d7f26338934789810719eb
Author: yangjie01 <[email protected]>
AuthorDate: Mon Oct 30 23:26:40 2023 +0800
[SPARK-45689][SPARK-45690][SPARK-45691][CORE][SQL] Clean up the deprecated API usage related to `StringContext/StringOps/RightProjection/LeftProjection/Either` and type use of `BufferedIterator/CanBuildFrom/Traversable`
### What changes were proposed in this pull request?
This PR cleans up usage of the deprecated APIs related to `StringContext/StringOps/RightProjection/LeftProjection/Either`:
- StringContext
```scala
deprecated("use same-named method on StringContext companion object",
"2.13.0")
def checkLengths(args: scala.collection.Seq[Any]): Unit =
scCheckLengths(args, parts)
deprecated("use processEscapes", "2.13.0")
def treatEscapes(str: String): String = processEscapes(str)
```
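For reference, the replacements live on the `StringContext` companion object. A minimal migration sketch (the values below are illustrative, not from the patch):
```scala
val sc = StringContext("a=", ", b=", "")
val args = Seq(1, 2)
// Before: sc.checkLengths(args); StringContext.treatEscapes("a\\tb")
StringContext.checkLengths(args, sc.parts)            // throws IllegalArgumentException on arity mismatch
val unescaped = StringContext.processEscapes("a\\tb") // "a\tb"
```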
- StringOps
```scala
deprecated("Use `s.replace` as an exact replacement", "2.13.2")
def replaceAllLiterally(literal: String, replacement: String): String =
s.replace(literal, replacement)
```
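`replaceAllLiterally` already delegated to `java.lang.String#replace`, so the rewrite is mechanical. A minimal sketch (illustrative values):
```scala
val s = "a.b.c"
// Before: s.replaceAllLiterally(".", "/")
val migrated = s.replace(".", "/") // "a/b/c"; replace is a literal (non-regex) replacement
```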
- Either
```scala
deprecated("Either is now right-biased, use methods directly on Either",
"2.13.0")
def right = Either.RightProjection(this)
```
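Since Scala 2.13 `Either` is right-biased, so `map`/`flatMap`/`getOrElse` work on the `Right` value without a projection. A minimal sketch (illustrative values):
```scala
val e: Either[String, Int] = Right(42)
// Before: e.right.map(_ + 1); e.right.getOrElse(0)
val mapped = e.map(_ + 1)   // Right(43)
val value  = e.getOrElse(0) // 42
```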
- LeftProjection
```scala
deprecated("use `Either.swap.getOrElse` instead", "2.13.0")
def get: A = e match {
case Left(a) => a
case _ => throw new NoSuchElementException("Either.left.get on
Right")
}
```
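As the deprecation message suggests, and as the `MemoryStoreSuite` changes below do, the `Left` value is now reached by swapping the `Either` so it becomes right-biased. A minimal sketch (illustrative values):
```scala
val failed: Either[Throwable, Int] = Left(new IllegalStateException("boom"))
// Before: failed.left.get (throws NoSuchElementException if this is a Right)
val error: Throwable =
  failed.swap.getOrElse(throw new NoSuchElementException("expected a Left"))
```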
- RightProjection
```scala
deprecated("Either is now right-biased, calls to `right` should be
removed", "2.13.0")
final case class RightProjection[+A, +B](e: Either[A, B]) {
...
deprecated("Use `Either.toOption.get` instead", "2.13.0")
def get: B = e match {
case Right(b) => b
case _ => throw new NoSuchElementException("Either.right.get
on Left")
}
...
}
```
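With the projection gone, `right.get` becomes either `toOption.get` or an explicit `getOrElse`, which this patch prefers because it can raise a descriptive error. A minimal sketch (illustrative values):
```scala
val ok: Either[String, Int] = Right(1)
// Before: ok.right.get
val viaOption   = ok.toOption.get // per the deprecation message
val viaFallback = ok.getOrElse(throw new NoSuchElementException("expected a Right"))
```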
This PR also cleans up use of the deprecated type aliases `BufferedIterator/CanBuildFrom/Traversable`:
```scala
deprecated("Use scala.collection.BufferedIterator instead of
scala.BufferedIterator", "2.13.0")
type BufferedIterator[+A] = scala.collection.BufferedIterator[A]
```
```scala
deprecated("Use Iterable instead of Traversable", "2.13.0")
type Traversable[+A] = scala.collection.Iterable[A]
```
```scala
deprecated("Use scala.collection.BuildFrom instead", "2.13.0")
type CanBuildFrom[-From, -A, +C] = scala.collection.BuildFrom[From, A, C]
```
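The first two aliases only need an import (`scala.collection.BufferedIterator`) or a rename to `Iterable`; `CanBuildFrom` maps onto `scala.collection.BuildFrom`, which the `BlockManagerMaster` change below resolves via `implicitly` and feeds to `Future.sequence`. A minimal sketch of that pattern under simplified types (not the actual Spark code):
```scala
import scala.collection.BuildFrom
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

val futures: Iterable[Future[Int]] = Seq(Future(1), Future(2))
// Before: implicitly[CanBuildFrom[Iterable[Future[Int]], Int, Iterable[Int]]]
val bf = implicitly[BuildFrom[Iterable[Future[Int]], Int, Iterable[Int]]]
val all: Future[Iterable[Int]] = Future.sequence(futures)(bf, global)
Await.result(all, 1.second) // Iterable(1, 2)
```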
### Why are the changes needed?
Clean up the deprecated Scala API usage
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Pass GitHub Actions
### Was this patch authored or co-authored using generative AI tooling?
No
Closes #43576 from LuciferYang/SPARK-45689.
Lead-authored-by: yangjie01 <[email protected]>
Co-authored-by: YangJie <[email protected]>
Signed-off-by: yangjie01 <[email protected]>
---
.../scala/org/apache/spark/storage/BlockManagerMaster.scala | 4 ++--
.../org/apache/spark/util/collection/ExternalSorter.scala | 1 +
.../scala/org/apache/spark/storage/MemoryStoreSuite.scala | 11 ++++++-----
.../spark/storage/ShuffleBlockFetcherIteratorSuite.scala | 2 +-
.../spark/sql/catalyst/expressions/codegen/javaCode.scala | 6 +++---
.../org/apache/spark/sql/execution/GroupedIterator.scala | 2 ++
.../v2/GroupBasedRowLevelOperationScanPlanning.scala | 8 ++++++--
.../sql/execution/datasources/v2/V2ScanRelationPushDown.scala | 9 ++++++---
.../org/apache/spark/sql/sources/HadoopFsRelationTest.scala | 4 ++--
.../main/scala/org/apache/spark/streaming/ui/BatchPage.scala | 2 +-
10 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
index aee7f1f76264..2b961317e01d 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
@@ -17,7 +17,7 @@
package org.apache.spark.storage
-import scala.collection.generic.CanBuildFrom
+import scala.collection.BuildFrom
import scala.collection.immutable.Iterable
import scala.concurrent.Future
@@ -261,7 +261,7 @@ class BlockManagerMaster(
val (blockManagerIds, futures) = response.unzip
val cbf =
implicitly[
- CanBuildFrom[Iterable[Future[Option[BlockStatus]]],
+ BuildFrom[Iterable[Future[Option[BlockStatus]]],
Option[BlockStatus],
Iterable[Option[BlockStatus]]]]
val blockStatus = timeout.awaitResult(
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
index 5051b88c08ef..9db7fd18b07a 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalSorter.scala
@@ -20,6 +20,7 @@ package org.apache.spark.util.collection
import java.io._
import java.util.Comparator
+import scala.collection.BufferedIterator
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
diff --git a/core/src/test/scala/org/apache/spark/storage/MemoryStoreSuite.scala b/core/src/test/scala/org/apache/spark/storage/MemoryStoreSuite.scala
index d4f04e8d2917..0cf3348235c8 100644
--- a/core/src/test/scala/org/apache/spark/storage/MemoryStoreSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/MemoryStoreSuite.scala
@@ -196,7 +196,7 @@ class MemoryStoreSuite
assert(memoryStore.currentUnrollMemoryForThisTask > 0) // we returned an iterator
assert(!memoryStore.contains("someBlock2"))
assert(putResult.isLeft)
- assertSameContents(bigList, putResult.left.get.toSeq, "putIterator")
+ assertSameContents(bigList, putResult.swap.getOrElse(fail()).toSeq, "putIterator")
// The unroll memory was freed once the iterator returned by putIterator() was fully traversed.
assert(memoryStore.currentUnrollMemoryForThisTask === 0)
}
@@ -264,7 +264,7 @@ class MemoryStoreSuite
assert(memoryStore.contains("b3"))
assert(!memoryStore.contains("b4"))
assert(memoryStore.currentUnrollMemoryForThisTask > 0) // we returned an iterator
- result4.left.get.close()
+ result4.swap.getOrElse(fail()).close()
assert(memoryStore.currentUnrollMemoryForThisTask === 0) // close released the unroll memory
}
@@ -340,7 +340,7 @@ class MemoryStoreSuite
assert(memoryStore.contains("b3"))
assert(!memoryStore.contains("b4"))
assert(memoryStore.currentUnrollMemoryForThisTask > 0) // we returned an iterator
- result4.left.get.discard()
+ result4.swap.getOrElse(fail()).discard()
assert(memoryStore.currentUnrollMemoryForThisTask === 0) // discard released the unroll memory
}
@@ -357,7 +357,8 @@ class MemoryStoreSuite
blockInfoManager.unlock("b1")
assert(res.isLeft)
assert(memoryStore.currentUnrollMemoryForThisTask > 0)
- val valuesReturnedFromFailedPut = res.left.get.valuesIterator.toSeq // force materialization
+ val valuesReturnedFromFailedPut = res.swap.getOrElse(fail())
+ .valuesIterator.toSeq // force materialization
assertSameContents(
bigList, valuesReturnedFromFailedPut, "PartiallySerializedBlock.valuesIterator()")
// The unroll memory was freed once the iterator was fully traversed.
@@ -378,7 +379,7 @@ class MemoryStoreSuite
assert(res.isLeft)
assert(memoryStore.currentUnrollMemoryForThisTask > 0)
val bos = new ByteBufferOutputStream()
- res.left.get.finishWritingToStream(bos)
+ res.swap.getOrElse(fail()).finishWritingToStream(bos)
// The unroll memory was freed once the block was fully written.
assert(memoryStore.currentUnrollMemoryForThisTask === 0)
val deserializedValues = serializerManager.dataDeserializeStream[Any](
diff --git a/core/src/test/scala/org/apache/spark/storage/ShuffleBlockFetcherIteratorSuite.scala b/core/src/test/scala/org/apache/spark/storage/ShuffleBlockFetcherIteratorSuite.scala
index 769939a0a223..75a450f43589 100644
--- a/core/src/test/scala/org/apache/spark/storage/ShuffleBlockFetcherIteratorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/ShuffleBlockFetcherIteratorSuite.scala
@@ -229,7 +229,7 @@ class ShuffleBlockFetcherIteratorSuite extends SparkFunSuite with PrivateMethodT
* same size and index.
*/
private def toBlockList(
- blockIds: Traversable[BlockId],
+ blockIds: Iterable[BlockId],
blockSize: Long,
blockMapIndex: Int): Seq[(BlockId, Long, Int)] = {
blockIds.map(blockId => (blockId, blockSize, blockMapIndex)).toSeq
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/javaCode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/javaCode.scala
index 552c17990ca8..d5226fef3c73 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/javaCode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/javaCode.scala
@@ -230,7 +230,7 @@ object Block {
* the code parts, and will not treat escapes in the input arguments.
*/
def code(args: Any*): Block = {
- sc.checkLengths(args)
+ StringContext.checkLengths(args, sc.parts)
if (sc.parts.length == 0) {
EmptyBlock
} else {
@@ -255,7 +255,7 @@ object Block {
val inputs = args.iterator
val buf = new StringBuilder(Block.CODE_BLOCK_BUFFER_LENGTH)
- buf.append(StringContext.treatEscapes(strings.next()))
+ buf.append(StringContext.processEscapes(strings.next()))
while (strings.hasNext) {
val input = inputs.next()
input match {
@@ -267,7 +267,7 @@ object Block {
case _ =>
buf.append(input)
}
- buf.append(StringContext.treatEscapes(strings.next()))
+ buf.append(StringContext.processEscapes(strings.next()))
}
codeParts += buf.toString
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/GroupedIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/GroupedIterator.scala
index ccf325828953..a2cd0c5d3821 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/GroupedIterator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/GroupedIterator.scala
@@ -17,6 +17,8 @@
package org.apache.spark.sql.execution
+import scala.collection.BufferedIterator
+
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Expression, SortOrder}
import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateOrdering, GenerateUnsafeProjection}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
index 11dddb508312..87f70eb696b6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
@@ -51,9 +51,13 @@ object GroupBasedRowLevelOperationScanPlanning extends Rule[LogicalPlan] with Pr
pushFilters(cond, relation.output, scanBuilder)
val pushedFiltersStr = if (pushedFilters.isLeft) {
- pushedFilters.left.get.mkString(", ")
+ pushedFilters.swap
+ .getOrElse(throw new NoSuchElementException("The left node doesn't
have pushedFilters"))
+ .mkString(", ")
} else {
- pushedFilters.right.get.mkString(", ")
+ pushedFilters
+ .getOrElse(throw new NoSuchElementException("The right node doesn't
have pushedFilters"))
+ .mkString(", ")
}
val (scan, output) = PushDownUtils.pruneColumns(scanBuilder, relation, relation.output, Nil)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
index ef3982ff9087..d4e40530058f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
@@ -73,10 +73,13 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper {
val (pushedFilters, postScanFiltersWithoutSubquery) = PushDownUtils.pushFilters(
sHolder.builder, normalizedFiltersWithoutSubquery)
val pushedFiltersStr = if (pushedFilters.isLeft) {
- pushedFilters.left.get.mkString(", ")
+ pushedFilters.swap
+ .getOrElse(throw new NoSuchElementException("The left node doesn't
have pushedFilters"))
+ .mkString(", ")
} else {
- sHolder.pushedPredicates = pushedFilters.right.get
- pushedFilters.right.get.mkString(", ")
+ sHolder.pushedPredicates = pushedFilters
+ .getOrElse(throw new NoSuchElementException("The right node doesn't
have pushedFilters"))
+ sHolder.pushedPredicates.mkString(", ")
}
val postScanFilters = postScanFiltersWithoutSubquery ++ normalizedFiltersWithSubquery
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
index b4140f17775f..f183c732250e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
@@ -713,7 +713,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
.format(dataSourceName)
.load(path)
assert(expectedResult.isLeft, s"Error was expected with $path but
result found")
- checkAnswer(testDf, expectedResult.left.get)
+ checkAnswer(testDf, expectedResult.swap.getOrElse(fail()))
} catch {
case e: java.util.NoSuchElementException if e.getMessage.contains("dataSchema") =>
// Ignore error, the source format requires schema to be provided by user
@@ -722,7 +722,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
case e: Throwable =>
assert(expectedResult.isRight, s"Was not expecting error with
$path: " + e)
assert(
- e.getMessage.contains(expectedResult.right.get),
+ e.getMessage.contains(expectedResult.getOrElse(fail())),
s"Did not find expected error message with $path")
}
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
index a27e2b46225a..949f47783ed2 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
@@ -405,7 +405,7 @@ private[ui] class BatchPage(parent: StreamingTab) extends WebUIPage("batch") {
private def metadataDescriptionToHTML(metadataDescription: String): Seq[Node] = {
// tab to 4 spaces and "\n" to "<br/>"
Unparsed(StringEscapeUtils.escapeHtml4(metadataDescription).
- replaceAllLiterally("\t",
" ").replaceAllLiterally("\n", "<br/>"))
+ replace("\t", " ").replace("\n", "<br/>"))
}
private def outputOpStatusCell(outputOp: OutputOperationUIData, rowspan: Int): Seq[Node] = {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]