Github user andrewor14 commented on a diff in the pull request:
https://github.com/apache/spark/pull/10815#discussion_r50056228
--- Diff: core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala ---
@@ -141,32 +183,93 @@ class TaskMetrics extends Serializable {
   }
 
   /**
-   * ShuffleReadMetrics per dependency for collecting independently while task is in progress.
+   * Temporary list of [[ShuffleReadMetrics]], one per shuffle dependency.
+   *
+   * A task may have multiple shuffle readers for multiple dependencies. To avoid synchronization
+   * issues from readers in different threads, in-progress tasks use a [[ShuffleReadMetrics]] for
+   * each dependency and merge these metrics before reporting them to the driver.
+   */
+  @transient private lazy val tempShuffleReadMetrics = new ArrayBuffer[ShuffleReadMetrics]
+
+  /**
+   * Create a temporary [[ShuffleReadMetrics]] for a particular shuffle dependency.
+   *
+   * All usages are expected to be followed by a call to [[mergeShuffleReadMetrics]], which
+   * merges the temporary values synchronously. Otherwise, all temporary data collected will
+   * be lost.
    */
-  @transient private lazy val depsShuffleReadMetrics: ArrayBuffer[ShuffleReadMetrics] =
-    new ArrayBuffer[ShuffleReadMetrics]()
+  private[spark] def registerTempShuffleReadMetrics(): ShuffleReadMetrics = synchronized {
+    val readMetrics = new ShuffleReadMetrics
+    tempShuffleReadMetrics += readMetrics
+    readMetrics
+  }
+
+  /**
+   * Merge values across all temporary [[ShuffleReadMetrics]] into `_shuffleReadMetrics`.
+   * This is expected to be called on executor heartbeat and at the end of a task.
+   */
+  private[spark] def mergeShuffleReadMetrics(): Unit = synchronized {
+    if (tempShuffleReadMetrics.nonEmpty) {
+      val merged = new ShuffleReadMetrics
+      for (depMetrics <- tempShuffleReadMetrics) {
+        merged.incFetchWaitTime(depMetrics.fetchWaitTime)
+        merged.incLocalBlocksFetched(depMetrics.localBlocksFetched)
+        merged.incRemoteBlocksFetched(depMetrics.remoteBlocksFetched)
+        merged.incRemoteBytesRead(depMetrics.remoteBytesRead)
+        merged.incLocalBytesRead(depMetrics.localBytesRead)
+        merged.incRecordsRead(depMetrics.recordsRead)
+      }
+      _shuffleReadMetrics = Some(merged)
+    }
+  }
+
+  private var _shuffleWriteMetrics: Option[ShuffleWriteMetrics] = None
 
   /**
-   * If this task writes to shuffle output, metrics on the written shuffle data will be collected
-   * here
+   * Metrics related to shuffle write, defined only in shuffle map stages.
    */
-  var shuffleWriteMetrics: Option[ShuffleWriteMetrics] = None
+  def shuffleWriteMetrics: Option[ShuffleWriteMetrics] = _shuffleWriteMetrics
+
+  @deprecated("setting ShuffleWriteMetrics is for internal use only", "2.0.0")
+  def shuffleWriteMetrics_=(swm: Option[ShuffleWriteMetrics]): Unit = {
+    _shuffleWriteMetrics = swm
+  }
 
   /**
-   * Storage statuses of any blocks that have been updated as a result of this task.
+   * Get or create a new [[ShuffleWriteMetrics]] associated with this task.
    */
-  var updatedBlocks: Option[Seq[(BlockId, BlockStatus)]] = None
+  private[spark] def registerShuffleWriteMetrics(): ShuffleWriteMetrics = synchronized {
+    _shuffleWriteMetrics.getOrElse {
+      val metrics = new ShuffleWriteMetrics
+      _shuffleWriteMetrics = Some(metrics)
+      metrics
+    }
+  }
+
+  private var _updatedBlockStatuses: Seq[(BlockId, BlockStatus)] =
+    Seq.empty[(BlockId, BlockStatus)]
 
   /**
-   * A task may have multiple shuffle readers for multiple dependencies. To avoid synchronization
-   * issues from readers in different threads, in-progress tasks use a ShuffleReadMetrics for each
-   * dependency, and merge these metrics before reporting them to the driver. This method returns
-   * a ShuffleReadMetrics for a dependency and registers it for merging later.
+   * Storage statuses of any blocks that have been updated as a result of this task.
    */
-  private [spark] def createShuffleReadMetricsForDependency(): ShuffleReadMetrics = synchronized {
-    val readMetrics = new ShuffleReadMetrics()
-    depsShuffleReadMetrics += readMetrics
-    readMetrics
+  def updatedBlockStatuses: Seq[(BlockId, BlockStatus)] = _updatedBlockStatuses
+
+  private[spark] def incUpdatedBlockStatuses(v: Seq[(BlockId, BlockStatus)]): Unit = {
+    _updatedBlockStatuses ++= v
+  }
+
+  private[spark] def setUpdatedBlockStatuses(v: Seq[(BlockId, BlockStatus)]): Unit = {
+    _updatedBlockStatuses = v
+  }
+
+  @deprecated("use updatedBlockStatuses instead", "2.0.0")
+  def updatedBlocks: Option[Seq[(BlockId, BlockStatus)]] = {
+    if (_updatedBlockStatuses.nonEmpty) Some(_updatedBlockStatuses) else None
+  }
+
+  @deprecated("setting updated blocks is for internal use only", "2.0.0")
--- End diff ---
ok, I'd love to.
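
For readers following along, here is a minimal, self-contained sketch of the register-then-merge pattern the diff introduces. It is illustrative only: the `ShuffleReadMetrics` stand-in tracks just `recordsRead`, and the class and object names (`TaskMetricsSketch`, `TaskMetricsSketchDemo`) are assumptions, not Spark's actual API; only `registerTempShuffleReadMetrics` and `mergeShuffleReadMetrics` mirror the methods in the diff above.

import scala.collection.mutable.ArrayBuffer

// Stand-in for Spark's ShuffleReadMetrics, tracking a single counter.
class ShuffleReadMetrics {
  private var _recordsRead: Long = 0L
  def recordsRead: Long = _recordsRead
  def incRecordsRead(v: Long): Unit = _recordsRead += v
}

// Sketch of the TaskMetrics pattern from the diff (names simplified).
class TaskMetricsSketch {
  private val tempShuffleReadMetrics = new ArrayBuffer[ShuffleReadMetrics]
  private var _shuffleReadMetrics: Option[ShuffleReadMetrics] = None

  def shuffleReadMetrics: Option[ShuffleReadMetrics] = _shuffleReadMetrics

  // Each shuffle reader gets its own accumulator; registration is
  // synchronized so readers on different threads never share one.
  def registerTempShuffleReadMetrics(): ShuffleReadMetrics = synchronized {
    val readMetrics = new ShuffleReadMetrics
    tempShuffleReadMetrics += readMetrics
    readMetrics
  }

  // Fold all per-dependency values into the single reported metrics
  // object; called on executor heartbeat and at the end of a task.
  def mergeShuffleReadMetrics(): Unit = synchronized {
    if (tempShuffleReadMetrics.nonEmpty) {
      val merged = new ShuffleReadMetrics
      tempShuffleReadMetrics.foreach(m => merged.incRecordsRead(m.recordsRead))
      _shuffleReadMetrics = Some(merged)
    }
  }
}

object TaskMetricsSketchDemo {
  def main(args: Array[String]): Unit = {
    val tm = new TaskMetricsSketch
    // Two readers for two dependencies, each on its own thread.
    val threads = Seq(10L, 20L).map { n =>
      new Thread(new Runnable {
        def run(): Unit = tm.registerTempShuffleReadMetrics().incRecordsRead(n)
      })
    }
    threads.foreach(_.start())
    threads.foreach(_.join())
    tm.mergeShuffleReadMetrics()
    println(tm.shuffleReadMetrics.map(_.recordsRead)) // Some(30)
  }
}

Because each reader thread mutates only the accumulator it registered, and both registration and merging are synchronized, the per-dependency counters never race; the merge on heartbeat or at task end is the single point where they are combined for reporting.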