This is an automated email from the ASF dual-hosted git repository.
maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new ea7d00a8424b [SPARK-46454][SQL][DSTREAM] Remove redundant `.headOption`
ea7d00a8424b is described below
commit ea7d00a8424b9369a5e8807dce29718a3450b28a
Author: yangjie01 <[email protected]>
AuthorDate: Tue Dec 19 17:07:42 2023 +0300
[SPARK-46454][SQL][DSTREAM] Remove redundant `.headOption`
### What changes were proposed in this pull request?
This PR just removes the redundant `.headOption`, because
`Option(xxx).headOption` is a redundant call.
### Why are the changes needed?
Remove redundant `.headOption`
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Pass GitHub Actions
### Was this patch authored or co-authored using generative AI tooling?
No
Closes #44411 from LuciferYang/redundant-headOption.
Authored-by: yangjie01 <[email protected]>
Signed-off-by: Max Gekk <[email protected]>
---
.../src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala | 2 +-
.../main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
index 6e7d8a058ae1..4ac62b987b15 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/StructFilters.scala
@@ -95,7 +95,7 @@ object StructFilters {
}
private def zip[A, B](a: Option[A], b: Option[B]): Option[(A, B)] = {
- a.zip(b).headOption
+ a.zip(b)
}
private def toLiteral(value: Any): Option[Literal] = {
diff --git
a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala
b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala
index 5b2b959f8138..57009570b257 100644
---
a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala
+++
b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/BatchInfo.scala
@@ -53,14 +53,14 @@ case class BatchInfo(
* processing. Essentially, it is `processingEndTime` -
`processingStartTime`.
*/
def processingDelay: Option[Long] =
processingEndTime.zip(processingStartTime)
- .map(x => x._1 - x._2).headOption
+ .map(x => x._1 - x._2)
/**
* Time taken for all the jobs of this batch to finish processing from the
time they
* were submitted. Essentially, it is `processingDelay` + `schedulingDelay`.
*/
def totalDelay: Option[Long] = schedulingDelay.zip(processingDelay)
- .map(x => x._1 + x._2).headOption
+ .map(x => x._1 + x._2)
/**
* The number of records received by the receivers in this batch.
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]