amaliujia commented on code in PR #43010:
URL: https://github.com/apache/spark/pull/43010#discussion_r1332578697
##########
python/pyspark/sql/connect/plan.py:
##########
@@ -1197,6 +1197,7 @@ def plan(self, session: "SparkConnectClient") -> proto.Relation:
plan.collect_metrics.input.CopyFrom(self._child.plan(session))
plan.collect_metrics.name = self._name
plan.collect_metrics.metrics.extend([self.col_to_expr(x, session) for x in self._exprs])
+ plan.collect_metrics.dataframe_id = self._child._plan_id
Review Comment:
Yes, the plan_id is enough functionally.
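To illustrate why (a hypothetical sketch in the Scala Dataset API; the data and metric name are made up): reusing one observed DataFrame, e.g. in a self-join, duplicates its CollectMetrics node, and both copies carry the same plan id, so comparing plan ids distinguishes this legitimate reuse from two different DataFrames that happen to share a metric name.

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.{count, lit}

    val spark = SparkSession.builder().master("local[*]").getOrCreate()
    import spark.implicits._

    // Hypothetical toy data; any columns work.
    val df = Seq((1, "a"), (2, "b")).toDF("id", "value")

    // observe() attaches a CollectMetrics node named "rows_seen" to the plan.
    val observed = df.observe("rows_seen", count(lit(1)).as("cnt"))

    // Self-join: both sides reuse the same child plan, so the two duplicate
    // CollectMetrics("rows_seen", ...) nodes share one plan id and can be
    // accepted, while the same name on two distinct DataFrames would not be.
    val joined = observed.as("l").join(observed.as("r"), $"l.id" === $"r.id")
    joined.collect()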
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala:
##########
@@ -1097,17 +1097,15 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog with QueryErrorsB
* are allowed (e.g. self-joins).
*/
private def checkCollectedMetrics(plan: LogicalPlan): Unit = {
- val metricsMap = mutable.Map.empty[String, LogicalPlan]
+ val metricsMap = mutable.Map.empty[String, CollectMetrics]
def check(plan: LogicalPlan): Unit = plan.foreach { node =>
node match {
- case metrics @ CollectMetrics(name, _, _) =>
- val simplifiedMetrics = simplifyPlanForCollectedMetrics(metrics.canonicalized)
Review Comment:
Done.
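For reference, a self-contained toy sketch of the comparison this refactor enables (stand-in types only, not Spark's actual internals; the dataframeId field mirrors the proto field added above): a duplicate metric name is accepted only when both CollectMetrics nodes point at the same dataframe id.

    import scala.collection.mutable

    // Toy stand-in for CollectMetrics; the field names are assumptions.
    final case class CollectMetricsNode(name: String, dataframeId: Long)

    def checkCollectedMetrics(nodes: Seq[CollectMetricsNode]): Unit = {
      val metricsMap = mutable.Map.empty[String, CollectMetricsNode]
      nodes.foreach { metrics =>
        metricsMap.get(metrics.name) match {
          case Some(other) if other.dataframeId != metrics.dataframeId =>
            // Same metric name attached to two different DataFrames: reject.
            throw new IllegalArgumentException(
              s"Multiple definitions of observed metrics named '${metrics.name}'")
          case Some(_) =>
            // Same DataFrame observed more than once (e.g. a self-join): allowed.
          case None =>
            metricsMap.put(metrics.name, metrics)
        }
      }
    }

    // Allowed: duplicate name, same dataframe id.
    checkCollectedMetrics(Seq(CollectMetricsNode("m", 1L), CollectMetricsNode("m", 1L)))
    // Would throw: duplicate name across two different dataframe ids.
    // checkCollectedMetrics(Seq(CollectMetricsNode("m", 1L), CollectMetricsNode("m", 2L)))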