ivoson commented on code in PR #39459:
URL: https://github.com/apache/spark/pull/39459#discussion_r1107226569


##########
core/src/main/scala/org/apache/spark/storage/BlockManager.scala:
##########
@@ -1325,31 +1328,71 @@ private[spark] class BlockManager(
     blockInfoManager.releaseAllLocksForTask(taskAttemptId)
   }
 
+  /**
+   * Retrieve the given rdd block if it exists and is visible, otherwise call 
the provided
+   * `makeIterator` method to compute the block, persist it, and return its 
values.
+   *
+   * @return either a BlockResult if the block was successfully cached, or an 
iterator if the block
+   *         could not be cached.
+   */
+  def getOrElseUpdateRDDBlock[T](
+      taskId: Long,
+      blockId: RDDBlockId,
+      level: StorageLevel,
+      classTag: ClassTag[T],
+      makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
+    val isCacheVisible = isRDDBlockVisible(blockId)
+    val res = getOrElseUpdate(blockId, level, classTag, makeIterator, 
isCacheVisible)
+    if (res.isLeft && !isCacheVisible) {
+      // Block exists but not visible, report taskId -> blockId info to master.
+      master.updateRDDBlockTaskInfo(blockId, taskId)
+    }
+
+    res
+  }
+
   /**
    * Retrieve the given block if it exists, otherwise call the provided 
`makeIterator` method
    * to compute the block, persist it, and return its values.
    *
    * @return either a BlockResult if the block was successfully cached, or an 
iterator if the block
    *         could not be cached.
    */
-  def getOrElseUpdate[T](
+  private def getOrElseUpdate[T](
       blockId: BlockId,
       level: StorageLevel,
       classTag: ClassTag[T],
-      makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
-    // Attempt to read the block from local or remote storage. If it's 
present, then we don't need
-    // to go through the local-get-or-put path.
-    get[T](blockId)(classTag) match {
-      case Some(block) =>
-        return Left(block)
-      case _ =>
-        // Need to compute the block.
+      makeIterator: () => Iterator[T],
+      isCacheVisible: Boolean = true): Either[BlockResult, Iterator[T]] = {
+    // Track whether the data is computed or not, force to do the computation 
later if need to.
+    // The reason we push the force computing later is that once the executor 
is decommissioned we
+    // will have a better chance to replicate the cache block because of the 
`checkShouldStore`
+    // validation when putting a new block.
+    var computed: Boolean = false
+    val iterator = () => {
+      computed = true
+      makeIterator()
+    }
+    if (isCacheVisible) {
+      // Attempt to read the block from local or remote storage. If it's 
present, then we don't need
+      // to go through the local-get-or-put path.
+      get[T](blockId)(classTag) match {
+        case Some(block) =>
+          return Left(block)
+        case _ =>
+          // Need to compute the block.
+      }
     }
+
     // Initially we hold no locks on this block.
-    doPutIterator(blockId, makeIterator, level, classTag, keepReadLock = true) 
match {
+    doPutIterator(blockId, iterator, level, classTag, keepReadLock = true) 
match {
       case None =>
         // doPut() didn't hand work back to us, so the block already existed 
or was successfully
         // stored. Therefore, we now hold a read lock on the block.
+        if (!isCacheVisible && !computed) {
+          // Force compute to report accumulator updates.

Review Comment:
   Yes, if the block already exists on the node, here we just compute it again 
but do not put the result.
   
   > Could we force put the result iterator even if the block exists in this 
case?
   
   This will need to modify the locking mechanism a little bit. This is also an 
issue about indeterminate operations; for determinate operations there is no 
need to replace the cache here. I am wondering whether the indeterminate 
framework can cover this case with some other solution?
   



##########
core/src/main/scala/org/apache/spark/storage/BlockManager.scala:
##########
@@ -1325,31 +1328,71 @@ private[spark] class BlockManager(
     blockInfoManager.releaseAllLocksForTask(taskAttemptId)
   }
 
+  /**
+   * Retrieve the given rdd block if it exists and is visible, otherwise call 
the provided
+   * `makeIterator` method to compute the block, persist it, and return its 
values.
+   *
+   * @return either a BlockResult if the block was successfully cached, or an 
iterator if the block
+   *         could not be cached.
+   */
+  def getOrElseUpdateRDDBlock[T](
+      taskId: Long,
+      blockId: RDDBlockId,
+      level: StorageLevel,
+      classTag: ClassTag[T],
+      makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
+    val isCacheVisible = isRDDBlockVisible(blockId)
+    val res = getOrElseUpdate(blockId, level, classTag, makeIterator, 
isCacheVisible)
+    if (res.isLeft && !isCacheVisible) {
+      // Block exists but not visible, report taskId -> blockId info to master.
+      master.updateRDDBlockTaskInfo(blockId, taskId)
+    }
+
+    res
+  }
+
   /**
    * Retrieve the given block if it exists, otherwise call the provided 
`makeIterator` method
    * to compute the block, persist it, and return its values.
    *
    * @return either a BlockResult if the block was successfully cached, or an 
iterator if the block
    *         could not be cached.
    */
-  def getOrElseUpdate[T](
+  private def getOrElseUpdate[T](
       blockId: BlockId,
       level: StorageLevel,
       classTag: ClassTag[T],
-      makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
-    // Attempt to read the block from local or remote storage. If it's 
present, then we don't need
-    // to go through the local-get-or-put path.
-    get[T](blockId)(classTag) match {
-      case Some(block) =>
-        return Left(block)
-      case _ =>
-        // Need to compute the block.
+      makeIterator: () => Iterator[T],
+      isCacheVisible: Boolean = true): Either[BlockResult, Iterator[T]] = {
+    // Track whether the data is computed or not, force to do the computation 
later if need to.
+    // The reason we push the force computing later is that once the executor 
is decommissioned we
+    // will have a better chance to replicate the cache block because of the 
`checkShouldStore`
+    // validation when putting a new block.
+    var computed: Boolean = false
+    val iterator = () => {
+      computed = true
+      makeIterator()
+    }
+    if (isCacheVisible) {
+      // Attempt to read the block from local or remote storage. If it's 
present, then we don't need
+      // to go through the local-get-or-put path.
+      get[T](blockId)(classTag) match {
+        case Some(block) =>
+          return Left(block)
+        case _ =>
+          // Need to compute the block.
+      }
     }
+
     // Initially we hold no locks on this block.
-    doPutIterator(blockId, makeIterator, level, classTag, keepReadLock = true) 
match {
+    doPutIterator(blockId, iterator, level, classTag, keepReadLock = true) 
match {
       case None =>
         // doPut() didn't hand work back to us, so the block already existed 
or was successfully
         // stored. Therefore, we now hold a read lock on the block.
+        if (!isCacheVisible && !computed) {
+          // Force compute to report accumulator updates.

Review Comment:
   Yes, if the block already exists on the node, here we just compute it again 
but do not put the result.
   
   > Could we force put the result iterator even if the block exists in this 
case?
   
   This will need to modify the locking mechanism a little bit. This is also an 
issue about indeterminate operations; for determinate operations there is no 
need to replace the cache here. I am wondering whether the indeterminate 
framework can cover this case with some other solution?
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to