Ngone51 commented on a change in pull request #31496:
URL: https://github.com/apache/spark/pull/31496#discussion_r578147035
##########
File path:
core/src/main/scala/org/apache/spark/resource/ResourceProfileBuilder.scala
##########
@@ -17,71 +17,67 @@
package org.apache.spark.resource
-import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
+import org.apache.spark.SparkException
import org.apache.spark.annotation.{Evolving, Since}
+import org.apache.spark.util.Utils
/**
- * Resource profile builder to build a Resource profile to associate with an RDD.
- * A ResourceProfile allows the user to specify executor and task requirements for an RDD
- * that will get applied during a stage. This allows the user to change the resource
+ * Resource profile builder to build a [[ResourceProfile]] to associate with an RDD.
+ * A [[ResourceProfile]] allows the user to specify executor and task resource requirements
+ * for an RDD that will get applied during a stage. This allows the user to change the resource
* requirements between stages.
*
*/
@Evolving
@Since("3.1.0")
class ResourceProfileBuilder() {
+ // Task resource requests specified by users, mapped from resource name to the request.
private val _taskResources = new ConcurrentHashMap[String, TaskResourceRequest]()
+ // Executor resource requests specified by users, mapped from resource name to the request.
private val _executorResources = new ConcurrentHashMap[String, ExecutorResourceRequest]()
- def taskResources: Map[String, TaskResourceRequest] = _taskResources.asScala.toMap
- def executorResources: Map[String, ExecutorResourceRequest] = _executorResources.asScala.toMap
-
- /**
- * (Java-specific) gets a Java Map of resources to TaskResourceRequest
- */
- def taskResourcesJMap: JMap[String, TaskResourceRequest] = _taskResources.asScala.asJava
-
/**
- * (Java-specific) gets a Java Map of resources to ExecutorResourceRequest
+ * Add executor resource requests
+ * @param requests The detailed executor resource requests, see [[ExecutorResourceRequests]]
+ * @return this.type
*/
- def executorResourcesJMap: JMap[String, ExecutorResourceRequest] = {
- _executorResources.asScala.asJava
- }
-
- def require(requests: ExecutorResourceRequests): this.type = {
+ def executorRequire(requests: ExecutorResourceRequests): this.type = {
_executorResources.putAll(requests.requests.asJava)
this
}
- def require(requests: TaskResourceRequests): this.type = {
+ /**
+ * Add task resource requests
+ * @param requests The detailed task resource requests, see [[TaskResourceRequest]]
+ * @return this.type
+ */
+ def taskRequire(requests: TaskResourceRequests): this.type = {
Review comment:
That's a good idea, but I have reverted this change since @tgravescs objected to it. We could probably make it in a separate PR later.
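For context, here is a minimal sketch of how the builder is typically used with the existing overloaded `require` calls, which is the API this thread ends up keeping after the revert. The resource names and amounts ("gpu", core/memory values, discovery script path) are illustrative only, not taken from this PR:

```scala
import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfileBuilder, TaskResourceRequests}

// Executor-side requests: cores/memory plus a custom "gpu" resource (illustrative values).
val execReqs = new ExecutorResourceRequests()
  .cores(4)
  .memory("6g")
  .resource("gpu", 2, "/opt/spark/getGpus.sh")

// Task-side requests: CPUs per task plus one GPU per task (illustrative values).
val taskReqs = new TaskResourceRequests()
  .cpus(1)
  .resource("gpu", 1)

// The overloaded require picks the right internal map based on the request type.
val profile = new ResourceProfileBuilder()
  .require(execReqs)
  .require(taskReqs)
  .build()

// The profile can then be attached to an RDD for stage-level scheduling, e.g.:
// rdd.withResources(profile)
```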
##########
File path:
core/src/main/scala/org/apache/spark/resource/ResourceProfileBuilder.scala
##########
@@ -17,71 +17,67 @@
package org.apache.spark.resource
-import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
+import org.apache.spark.SparkException
import org.apache.spark.annotation.{Evolving, Since}
+import org.apache.spark.util.Utils
/**
- * Resource profile builder to build a Resource profile to associate with an RDD.
- * A ResourceProfile allows the user to specify executor and task requirements for an RDD
- * that will get applied during a stage. This allows the user to change the resource
+ * Resource profile builder to build a [[ResourceProfile]] to associate with an RDD.
+ * A [[ResourceProfile]] allows the user to specify executor and task resource requirements
+ * for an RDD that will get applied during a stage. This allows the user to change the resource
* requirements between stages.
*
*/
@Evolving
@Since("3.1.0")
class ResourceProfileBuilder() {
+ // Task resource requests specified by users, mapped from resource name to the request.
private val _taskResources = new ConcurrentHashMap[String, TaskResourceRequest]()
+ // Executor resource requests specified by users, mapped from resource name to the request.
private val _executorResources = new ConcurrentHashMap[String, ExecutorResourceRequest]()
- def taskResources: Map[String, TaskResourceRequest] = _taskResources.asScala.toMap
- def executorResources: Map[String, ExecutorResourceRequest] = _executorResources.asScala.toMap
-
- /**
- * (Java-specific) gets a Java Map of resources to TaskResourceRequest
- */
- def taskResourcesJMap: JMap[String, TaskResourceRequest] = _taskResources.asScala.asJava
-
/**
- * (Java-specific) gets a Java Map of resources to ExecutorResourceRequest
+ * Add executor resource requests
+ * @param requests The detailed executor resource requests, see [[ExecutorResourceRequests]]
+ * @return this.type
*/
- def executorResourcesJMap: JMap[String, ExecutorResourceRequest] = {
- _executorResources.asScala.asJava
- }
-
- def require(requests: ExecutorResourceRequests): this.type = {
+ def executorRequire(requests: ExecutorResourceRequests): this.type = {
_executorResources.putAll(requests.requests.asJava)
this
}
- def require(requests: TaskResourceRequests): this.type = {
+ /**
+ * Add task resource requests
+ * @param requests The detailed task resource requests, see [[TaskResourceRequest]]
+ * @return this.type
+ */
+ def taskRequire(requests: TaskResourceRequests): this.type = {
_taskResources.putAll(requests.requests.asJava)
this
}
- def clearExecutorResourceRequests(): this.type = {
- _executorResources.clear()
- this
- }
-
- def clearTaskResourceRequests(): this.type = {
- _taskResources.clear()
- this
- }
Review comment:
reverted
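Since the removal of the clear helpers shown above is reverted, callers can still reset a builder in place and reuse it. A small sketch, assuming the `clearExecutorResourceRequests`/`clearTaskResourceRequests` methods from the restored code; the request values are illustrative:

```scala
import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfileBuilder, TaskResourceRequests}

val builder = new ResourceProfileBuilder()
builder.require(new ExecutorResourceRequests().cores(2).memory("4g"))
builder.require(new TaskResourceRequests().cpus(1))

// The reverted-back helpers wipe the accumulated requests so the builder can be reused.
builder.clearExecutorResourceRequests()
builder.clearTaskResourceRequests()

// Re-specify requirements from scratch on the same builder instance.
val profile = builder
  .require(new ExecutorResourceRequests().cores(8).memory("16g"))
  .require(new TaskResourceRequests().cpus(2))
  .build()
```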
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]