dongjoon-hyun commented on a change in pull request #23743: [SPARK-26843][MESOS] Use ConfigEntry for hardcoded configs for "mesos" resource manager
URL: https://github.com/apache/spark/pull/23743#discussion_r255362947
 
 

 ##########
 File path: resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
 ##########
 @@ -118,6 +150,221 @@ package object config {
       .stringConf
       .createWithDefault("")
 
+  private[spark] val DRIVER_FRAMEWORK_ID =
+    ConfigBuilder("spark.mesos.driver.frameworkId")
+      .stringConf
+      .createOptional
+
   private[spark] val EXECUTOR_URI =
     ConfigBuilder("spark.executor.uri").stringConf.createOptional
+
+  private[spark] val PROXY_BASE_URL =
+    ConfigBuilder("spark.mesos.proxy.baseURL").stringConf.createOptional
+
+  private[spark] val COARSE_MODE =
+    ConfigBuilder("spark.mesos.coarse")
+      .doc("If set to true, runs over Mesos clusters in \"coarse-grained\" 
sharing mode, where " +
+        "Spark acquires one long-lived Mesos task on each machine. If set to 
false, runs over " +
+        "Mesos cluster in \"fine-grained\" sharing mode, where one Mesos task 
is created per " +
+        "Spark task.")
+      .booleanConf.createWithDefault(true)
+
+  private[spark] val COARSE_SHUTDOWN_TIMEOUT =
+    ConfigBuilder("spark.mesos.coarse.shutdownTimeout")
+      .timeConf(TimeUnit.MILLISECONDS)
+      .checkValue(_ >= 0, s"spark.mesos.coarse.shutdownTimeout must be >= 0")
+      .createWithDefaultString("10s")
+
+  private[spark] val MAX_DRIVERS =
+    ConfigBuilder("spark.mesos.maxDrivers").intConf.createWithDefault(200)
+
+  private[spark] val RETAINED_DRIVERS =
+    ConfigBuilder("spark.mesos.retainedDrivers").intConf.createWithDefault(200)
+
+  private[spark] val CLUSTER_RETRY_WAIT_MAX_SECONDS =
+    ConfigBuilder("spark.mesos.cluster.retry.wait.max")
+      .intConf
+      .createWithDefault(60) // 1 minute
+
+  private[spark] val ENABLE_FETCHER_CACHE =
+    ConfigBuilder("spark.mesos.fetcherCache.enable")
+      .doc("If set to true, all URIs (example: `spark.executor.uri`, 
`spark.mesos.uris`) will be " +
+        "cached by the Mesos Fetcher Cache.")
+      .booleanConf
+      .createWithDefault(false)
+
+  private[spark] val APP_JAR_LOCAL_RESOLUTION_MODE =
+    ConfigBuilder("spark.mesos.appJar.local.resolution.mode")
+      .doc("Provides support for the `local:///` scheme to reference the app 
jar resource in " +
+        "cluster mode. If user uses a local resource (`local:///path/to/jar`) 
and the config " +
+        "option is not used it defaults to `host` eg. the mesos fetcher tries 
to get the " +
+        "resource from the host's file system. If the value is unknown it 
prints a warning msg " +
+        "in the dispatcher logs and defaults to `host`. If the value is 
`container` then spark " +
+        "submit in the container will use the jar in the container's path: 
`/path/to/jar`.")
+      .stringConf
+      .checkValues(Set("host", "container"))
+      .createWithDefault("host")
+
+  private[spark] val REJECT_OFFER_DURATION =
+    ConfigBuilder("spark.mesos.rejectOfferDuration")
+      .doc("Time to consider unused resources refused, serves as a fallback of 
" +
+        "`spark.mesos.rejectOfferDurationForUnmetConstraints`, " +
+        "`spark.mesos.rejectOfferDurationForReachedMaxCores`.")
+      .timeConf(TimeUnit.SECONDS)
+      .createWithDefaultString("120s")
+
+  private[spark] val REJECT_OFFER_DURATION_FOR_UNMET_CONSTRAINTS =
+    ConfigBuilder("spark.mesos.rejectOfferDurationForUnmetConstraints")
+      .doc("Time to consider unused resources refused with unmet constraints.")
+      .timeConf(TimeUnit.SECONDS)
+      .createOptional
+
+  private[spark] val REJECT_OFFER_DURATION_FOR_REACHED_MAX_CORES =
+    ConfigBuilder("spark.mesos.rejectOfferDurationForReachedMaxCores")
+      .doc("Time to consider unused resources refused when maximum number of 
cores " +
+        "`spark.cores.max` is reached.")
+      .timeConf(TimeUnit.SECONDS)
+      .createOptional
+
+  private[spark] val URIS_TO_DOWNLOAD =
+    ConfigBuilder("spark.mesos.uris")
+      .doc("A comma-separated list of URIs to be downloaded to the sandbox 
when driver or " +
+        "executor is launched by Mesos. This applies to both coarse-grained 
and fine-grained " +
+        "mode.")
+      .stringConf
+      .toSequence
+      .createWithDefault(Nil)
+
+  private[spark] val EXECUTOR_HOME =
+    ConfigBuilder("spark.mesos.executor.home")
+      .doc("Set the directory in which Spark is installed on the executors in 
Mesos. " +
+        "By default, the executors will simply use the driver's Spark home 
directory, which may " +
+        "not be visible to them. Note that this is only relevant if a Spark 
binary package is " +
+        "not specified through `spark.executor.uri`.")
+      .stringConf
+      .createOptional
+
+  private[spark] val EXECUTOR_CORES =
+    ConfigBuilder("spark.mesos.mesosExecutor.cores")
+      .doc("(Fine-grained mode only) Number of cores to give each Mesos 
executor. This does not " +
+        "include the cores used to run the Spark tasks. In other words, even 
if no Spark task " +
+        "is being run, each Mesos executor will occupy the number of cores 
configured here. " +
+        "The value can be a floating point number.")
+      .doubleConf
+      .createWithDefault(1.0)
+
+  private[spark] val EXTRA_CORES_PER_EXECUTOR =
+    ConfigBuilder("spark.mesos.extra.cores")
+      .doc("Set the extra number of cores for an executor to advertise. This 
does not result in " +
+        "more cores allocated. It instead means that an executor will 
\"pretend\" it has more " +
+        "cores, so that the driver will send it more tasks. Use this to 
increase parallelism. " +
+        "This setting is only used for Mesos coarse-grained mode.")
+      .intConf
+      .createWithDefault(0)
+
+  private[spark] val EXECUTOR_MEMORY_OVERHEAD =
+    ConfigBuilder("spark.mesos.executor.memoryOverhead")
+      .doc("The amount of additional memory, specified in MiB, to be allocated 
per executor. " +
+        "By default, the overhead will be larger of either 384 or 10% of " +
+        "`spark.executor.memory`. If set, the final overhead will be this 
value.")
+      .intConf
+      .createOptional
+
+  private[spark] val EXECUTOR_DOCKER_IMAGE =
+    ConfigBuilder("spark.mesos.executor.docker.image")
+      .doc("Set the name of the docker image that the Spark executors will run 
in. The selected " +
+        "image must have Spark installed, as well as a compatible version of 
the Mesos library. " +
+        "The installed path of Spark in the image can be specified with " +
+        "`spark.mesos.executor.home`; the installed path of the Mesos library 
can be specified " +
+        "with `spark.executorEnv.MESOS_NATIVE_JAVA_LIBRARY`.")
+      .stringConf
+      .createOptional
+
+  private[spark] val EXECUTOR_DOCKER_FORCE_PULL_IMAGE =
+    ConfigBuilder("spark.mesos.executor.docker.forcePullImage")
+      .doc("Force Mesos agents to pull the image specified in " +
+        "`spark.mesos.executor.docker.image`. By default Mesos agents will not 
pull images they " +
+        "already have cached.")
+      .booleanConf
+      .createOptional
+
+  private[spark] val EXECUTOR_DOCKER_PORT_MAPS =
+    ConfigBuilder("spark.mesos.executor.docker.portmaps")
+      .stringConf
+      .toSequence
+      .createOptional
+
+  private[spark] val EXECUTOR_DOCKER_PARAMETERS =
+    ConfigBuilder("spark.mesos.executor.docker.parameters")
+      .doc("Set the list of custom parameters which will be passed into the 
`docker run` " +
+        "command when launching the Spark executor on Mesos using the docker 
containerizer. " +
+        "The format of this property is a list of key/value pairs which pair 
looks key1=value1.")
+      .stringConf
+      .toSequence
+      .createOptional
+
+  private[spark] val EXECUTOR_DOCKER_VOLUMES =
+    ConfigBuilder("spark.mesos.executor.docker.volumes")
+      .doc("Set the list of volumes which will be mounted into the Docker 
image, which was set " +
+        "using `spark.mesos.executor.docker.image`. The format of this 
property is a list of " +
+        "mappings following the form passed to `docker run -v`. That is they 
take the form:  " +
+        "`[host_path:]container_path[:ro|:rw]`")
+      .stringConf
+      .toSequence
+      .createOptional
+
+  private[spark] val MAX_GPUS =
+    ConfigBuilder("spark.mesos.gpus.max")
+      .doc("Set the maximum number GPU resources to acquire for this job. Note 
that executors " +
+        "will still launch when no GPU resources are found since this 
configuration is just an " +
+        "upper limit and not a guaranteed amount.")
+      .intConf
+      .createWithDefault(0)
+
+  private[spark] val TASK_LABELS =
+    ConfigBuilder("spark.mesos.task.labels")
+      .doc("Set the Mesos labels to add to each task. Labels are free-form 
key-value pairs. " +
+        "Key-value pairs should be separated by a colon, and commas used to 
list more than one. " +
+        "If your label includes a colon or comma, you can escape it with a 
backslash. " +
+        "Ex. key:value,key2:a\\:b.")
+      .stringConf
+      .createWithDefault("")
+
+  private[spark] val CONSTRAINTS =
+    ConfigBuilder("spark.mesos.constraints")
+      .doc("Attribute-based constraints on mesos resource offers. By default, 
all resource " +
+        "offers will be accepted. This setting applies only to executors. 
Refer to Mesos " +
+        "Attributes & Resources doc for more information on attributes. " +
 
 Review comment:
   Hi, @HeartSaVioR.
   Could you remove the following lines 338 ~ 347? Since the doc already says `Refer to Mesos ...`, it looks okay to remove the rest.

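As a side note for readers of this thread: the point of SPARK-26843 is to replace hard-coded `"spark.mesos.*"` string lookups with typed `ConfigEntry` constants. Below is a minimal, hypothetical sketch of how a caller inside the mesos module would read the new entries, assuming Spark's internal `SparkConf.get(ConfigEntry)` accessor; the variable names and the caller itself are illustrative and not part of this PR.

```scala
// Illustrative only, not part of the PR. Assumes the internal
// SparkConf.get(ConfigEntry) accessor available to org.apache.spark code.
import org.apache.spark.SparkConf
import org.apache.spark.deploy.mesos.config._

val conf = new SparkConf()
  .set("spark.mesos.task.labels", "key:value,key2:a\\:b") // escaped colon, per the doc above

// Before: the key and its default are repeated at every call site.
val labelsBefore = conf.get("spark.mesos.task.labels", "")

// After: key, type, and default live in one place, in config.scala.
val labels: String = conf.get(TASK_LABELS)
val coarse: Boolean = conf.get(COARSE_MODE)
val dockerImage: Option[String] = conf.get(EXECUTOR_DOCKER_IMAGE) // optional entries read as Option
```

Typed entries also centralize validation: for example, the `checkValues(Set("host", "container"))` constraint on `spark.mesos.appJar.local.resolution.mode` above rejects unknown values when the config is read, rather than at each call site.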