szehon-ho commented on code in PR #52669:
URL: https://github.com/apache/spark/pull/52669#discussion_r2453556421
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala:
##########
@@ -481,9 +482,45 @@ trait V2TableWriteExec extends V2CommandExec with UnaryExecNode with AdaptiveSpa
   }
   private def getOperationMetrics(query: SparkPlan): util.Map[String, lang.Long] = {
-    collectFirst(query) { case m: MergeRowsExec => m }.map{ n =>
-      n.metrics.map { case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value) }
-    }.getOrElse(Map.empty[String, lang.Long]).asJava
+    collectFirst(query) { case m: MergeRowsExec => m } match {
+      case Some(mergeRowsExec) =>
+        val mergeMetrics = mergeRowsExec.metrics.map {
+          case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value)
+        }
+        val numSourceRows = getNumSourceRows(mergeRowsExec)
+        (mergeMetrics + ("merge.numSourceRows" -> lang.Long.valueOf(numSourceRows))).asJava
+      case None =>
+        Map.empty[String, lang.Long].asJava
+    }
+  }
+
+  private def getNumSourceRows(mergeRowsExec: MergeRowsExec): Long = {
+    def isTargetTableScan(plan: SparkPlan): Boolean = {
+      collectFirst(plan) {
+        case scan: BatchScanExec if scan.table.isInstanceOf[RowLevelOperationTable] => true
+      }.getOrElse(false)
+    }
+
+    val joinOpt = collectFirst(mergeRowsExec.child) { case j: BaseJoinExec => j }
+
+    joinOpt.flatMap { join =>
Review Comment:
style: wdyt about a for comprehension here to avoid the nested flatMap:
```
(for {
  join <- collectFirst(mergeRowsExec.child) { case j: BaseJoinExec => j }
  sourceChild <- findSourceChild(join)
  plan <- collectFirst(sourceChild) {
    case plan if plan.metrics.contains("numOutputRows") => plan
  }
  metric <- plan.metrics.get("numOutputRows")
} yield metric.value).getOrElse(-1L)

private def findSourceChild(join: BaseJoinExec): Option[SparkPlan] = {
  val leftIsTarget = isTargetTableScan(join.left)
  val rightIsTarget = isTargetTableScan(join.right)
  if (leftIsTarget) {
    Some(join.right)
  } else if (rightIsTarget) {
    Some(join.left)
  } else {
    None
  }
}
```
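for reference, the for comprehension is just sugar for the nested `flatMap` chain it replaces; a minimal self-contained sketch over plain `Option`s (the helper names below are illustrative stand-ins, not the PR's actual code):
```
object ForComprehensionDesugar extends App {
  // Hypothetical Option-returning lookups standing in for the plan traversals.
  def findJoin(): Option[String] = Some("join")
  def findSourceChild(join: String): Option[String] = Some(s"$join/source")
  def findMetric(plan: String): Option[Long] = Some(42L)

  // Nested flatMap shape, as in the current patch.
  val nested: Long =
    findJoin()
      .flatMap(j => findSourceChild(j).flatMap(p => findMetric(p)))
      .getOrElse(-1L)

  // Equivalent for comprehension; the compiler desugars it to the chain above.
  val comprehended: Long = (for {
    j <- findJoin()
    p <- findSourceChild(j)
    m <- findMetric(p)
  } yield m).getOrElse(-1L)

  assert(nested == comprehended) // both are 42
}
```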
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala:
##########
@@ -481,9 +482,45 @@ trait V2TableWriteExec extends V2CommandExec with UnaryExecNode with AdaptiveSpa
   }
   private def getOperationMetrics(query: SparkPlan): util.Map[String, lang.Long] = {
-    collectFirst(query) { case m: MergeRowsExec => m }.map{ n =>
-      n.metrics.map { case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value) }
-    }.getOrElse(Map.empty[String, lang.Long]).asJava
+    collectFirst(query) { case m: MergeRowsExec => m } match {
+      case Some(mergeRowsExec) =>
+        val mergeMetrics = mergeRowsExec.metrics.map {
+          case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value)
+        }
+        val numSourceRows = getNumSourceRows(mergeRowsExec)
+        (mergeMetrics + ("merge.numSourceRows" -> lang.Long.valueOf(numSourceRows))).asJava
+      case None =>
+        Map.empty[String, lang.Long].asJava
+    }
+  }
+
+  private def getNumSourceRows(mergeRowsExec: MergeRowsExec): Long = {
+    def isTargetTableScan(plan: SparkPlan): Boolean = {
+      collectFirst(plan) {
+        case scan: BatchScanExec if scan.table.isInstanceOf[RowLevelOperationTable] => true
+      }.getOrElse(false)
Review Comment:
isDefined seems clearer, but it's personal preference
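a runnable sketch of the two shapes, using `Seq#collectFirst` over strings as a stand-in for the planner's tree traversal (names are illustrative only):
```
object IsDefinedSketch extends App {
  val nodes = Seq("ProjectExec", "BatchScanExec", "SortExec")

  // Current shape: collect a Boolean, then default the miss to false.
  val viaGetOrElse: Boolean =
    nodes.collectFirst { case n if n == "BatchScanExec" => true }.getOrElse(false)

  // Suggested shape: an existence check reads directly as "did we find one?".
  val viaIsDefined: Boolean =
    nodes.collectFirst { case n if n == "BatchScanExec" => n }.isDefined

  assert(viaGetOrElse && viaIsDefined)
}
```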
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala:
##########
@@ -481,9 +482,45 @@ trait V2TableWriteExec extends V2CommandExec with UnaryExecNode with AdaptiveSpa
   }
   private def getOperationMetrics(query: SparkPlan): util.Map[String, lang.Long] = {
-    collectFirst(query) { case m: MergeRowsExec => m }.map{ n =>
-      n.metrics.map { case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value) }
-    }.getOrElse(Map.empty[String, lang.Long]).asJava
+    collectFirst(query) { case m: MergeRowsExec => m } match {
+      case Some(mergeRowsExec) =>
+        val mergeMetrics = mergeRowsExec.metrics.map {
+          case (name, metric) => s"merge.$name" -> lang.Long.valueOf(metric.value)
+        }
+        val numSourceRows = getNumSourceRows(mergeRowsExec)
+        (mergeMetrics + ("merge.numSourceRows" -> lang.Long.valueOf(numSourceRows))).asJava
+      case None =>
+        Map.empty[String, lang.Long].asJava
+    }
+  }
+
+  private def getNumSourceRows(mergeRowsExec: MergeRowsExec): Long = {
+    def isTargetTableScan(plan: SparkPlan): Boolean = {
+      collectFirst(plan) {
+        case scan: BatchScanExec if scan.table.isInstanceOf[RowLevelOperationTable] => true
Review Comment:
can we pattern match using `case BatchScanExec(t: RowLevelOperationTable)` instead of the `isInstanceOf` guard?
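a self-contained sketch of the suggested constructor-pattern style, with deliberately simplified stand-ins for `BatchScanExec` and `RowLevelOperationTable` (the real `BatchScanExec` constructor takes more fields, so the actual pattern would need the remaining positions filled in):
```
object TypedPatternSketch extends App {
  // Simplified stand-ins; the real Spark classes carry more fields.
  sealed trait Table
  case class RowLevelOperationTable(name: String) extends Table
  case class OrdinaryTable(name: String) extends Table
  case class BatchScanExec(table: Table)

  def isTargetTableScan(plan: Any): Boolean = plan match {
    // The typed binding only matches when the field has that runtime type,
    // replacing the explicit isInstanceOf guard.
    case BatchScanExec(_: RowLevelOperationTable) => true
    case _ => false
  }

  assert(isTargetTableScan(BatchScanExec(RowLevelOperationTable("target"))))
  assert(!isTargetTableScan(BatchScanExec(OrdinaryTable("source"))))
}
```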
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]