Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3916#discussion_r25399597
  
    --- Diff: launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java ---
    @@ -0,0 +1,335 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.launcher;
    +
    +import java.io.IOException;
    +import java.util.ArrayList;
    +import java.util.Arrays;
    +import java.util.Collections;
    +import java.util.HashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Properties;
    +
    +import static org.apache.spark.launcher.CommandBuilderUtils.*;
    +
    +/**
    + * Special launcher for handling a CLI invocation of SparkSubmit.
    + * <p/>
    + * This launcher extends SparkLauncher to add command line parsing compatible with
    + * SparkSubmit. It handles setting driver-side options and special parsing needed
    + * for the different specialClasses.
    + * <p/>
    + * This class also has some special features to aid PySparkLauncher.
    + */
    +class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
    +
    +  /**
    +   * Name of the app resource used to identify the PySpark shell. The command line parser expects
    +   * the resource name to be the very first argument to spark-submit in this case.
    +   *
    +   * NOTE: this cannot be "pyspark-shell" since that identifies the PySpark shell to SparkSubmit
    +   * (see java_gateway.py), and can cause this code to enter into an infinite loop.
    +   */
    +  static final String PYSPARK_SHELL = "pyspark-shell-main";
    +
    +  /**
    +   * This is the actual resource name that identifies the PySpark shell to SparkSubmit.
    +   */
    +  static final String PYSPARK_SHELL_RESOURCE = "pyspark-shell";
    +
    +  /**
    +   * This map must match the class names for available special classes, since this modifies the way
    +   * command line parsing works. This maps the class name to the resource to use when calling
    +   * spark-submit.
    +   */
    +  private static final Map<String, String> specialClasses = new HashMap<String, String>();
    +  static {
    +    specialClasses.put("org.apache.spark.repl.Main", "spark-shell");
    +    specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver",
    +      "spark-internal");
    +  }
    +
    +  private final List<String> sparkArgs;
    +  private boolean hasMixedArguments;
    +
    +  SparkSubmitCommandBuilder() {
    +    this.sparkArgs = new ArrayList<String>();
    +  }
    +
    +  SparkSubmitCommandBuilder(List<String> args) {
    +    this(false, args);
    +  }
    +
    +  SparkSubmitCommandBuilder(boolean hasMixedArguments, List<String> args) {
    +    this();
    +    List<String> submitArgs = args;
    +    if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) {
    +      this.hasMixedArguments = true;
    +      appResource = PYSPARK_SHELL_RESOURCE;
    +      submitArgs = args.subList(1, args.size());
    +    } else {
    +      this.hasMixedArguments = hasMixedArguments;
    +    }
    +
    +    new OptionParser().parse(submitArgs);
    +  }
    +
    +  @Override
    +  public List<String> buildCommand(Map<String, String> env) throws IOException {
    +    if (PYSPARK_SHELL_RESOURCE.equals(appResource)) {
    +      return buildPySparkShellCommand(env);
    +    } else {
    +      return buildSparkSubmitCommand(env);
    +    }
    +  }
    +
    +  List<String> buildSparkSubmitArgs() {
    +    List<String> args = new ArrayList<String>();
    +    SparkSubmitOptionParser parser = new SparkSubmitOptionParser();
    +
    +    if (verbose) {
    +      args.add(parser.VERBOSE);
    +    }
    +
    +    if (master != null) {
    +      args.add(parser.MASTER);
    +      args.add(master);
    +    }
    +
    +    if (deployMode != null) {
    +      args.add(parser.DEPLOY_MODE);
    +      args.add(deployMode);
    +    }
    +
    +    if (appName != null) {
    +      args.add(parser.NAME);
    +      args.add(appName);
    +    }
    +
    +    for (Map.Entry<String, String> e : conf.entrySet()) {
    +      args.add(parser.CONF);
    +      args.add(String.format("%s=%s", e.getKey(), e.getValue()));
    +    }
    +
    +    if (propertiesFile != null) {
    +      args.add(parser.PROPERTIES_FILE);
    +      args.add(propertiesFile);
    +    }
    +
    +    if (!jars.isEmpty()) {
    +      args.add(parser.JARS);
    +      args.add(join(",", jars));
    +    }
    +
    +    if (!files.isEmpty()) {
    +      args.add(parser.FILES);
    +      args.add(join(",", files));
    +    }
    +
    +    if (!pyFiles.isEmpty()) {
    +      args.add(parser.PY_FILES);
    +      args.add(join(",", pyFiles));
    +    }
    +
    +    if (mainClass != null) {
    +      args.add(parser.CLASS);
    +      args.add(mainClass);
    +    }
    +
    +    args.addAll(sparkArgs);
    +    if (appResource != null) {
    +      args.add(appResource);
    +    }
    +    args.addAll(appArgs);
    +
    +    return args;
    +  }
    +
    +  private List<String> buildSparkSubmitCommand(Map<String, String> env) throws IOException {
    +    // Load the properties file and check whether spark-submit will be running the app's driver
    +    // or just launching a cluster app. When running the driver, the JVM's arguments will be
    +    // modified to cover the driver's configuration.
    +    Properties props = loadPropertiesFile();
    +    boolean isClientMode = isClientMode(props);
    +    String extraClassPath = isClientMode ?
    +      find(SparkLauncher.DRIVER_EXTRA_CLASSPATH, conf, props) : null;
    +
    +    List<String> cmd = buildJavaCommand(extraClassPath);
    +    addOptionString(cmd, System.getenv("SPARK_SUBMIT_OPTS"));
    +    addOptionString(cmd, System.getenv("SPARK_JAVA_OPTS"));
    +
    +    if (isClientMode) {
    +      // Figuring out where the memory value comes from is a little tricky due to precedence.
    +      // Precedence is observed in the following order:
    +      // - explicit configuration (setConf()), which also covers the --driver-memory cli argument.
    +      // - properties file.
    +      // - SPARK_DRIVER_MEMORY env variable
    +      // - SPARK_MEM env variable
    +      // - default value (512m)
    +      String memory = firstNonEmpty(find(SparkLauncher.DRIVER_MEMORY, conf, props),
    +        System.getenv("SPARK_DRIVER_MEMORY"), System.getenv("SPARK_MEM"), DEFAULT_MEM);
    +      cmd.add("-Xms" + memory);
    +      cmd.add("-Xmx" + memory);
    +      addOptionString(cmd, find(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, conf, props));
    +      mergeEnvPathList(env, getLibPathEnvName(),
    +        find(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, conf, props));
    +    }
    +
    +    addPermGenSizeOpt(cmd);
    +    cmd.add("org.apache.spark.deploy.SparkSubmit");
    +    cmd.addAll(buildSparkSubmitArgs());
    +    return cmd;
    +  }
    +
    +  private List<String> buildPySparkShellCommand(Map<String, String> env) throws IOException {
    --- End diff --
    
    I'm not saying that we should document the particular option there. What I meant was to add a more general comment that conveys at a high level what the method does, without referring to specific environment variables or configs at all. Someone who is not already familiar with how the pyspark shell works won't know whether the command launches another JVM or calls python directly, or how application arguments are passed, etc.
    
    By the way, if you feel strongly against adding it then we can leave it out. I just don't see a reason not to make the code easier to follow when we can do so by adding 1 or 2 lines.
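    
    For example, a short comment along these lines would be enough (this is just a sketch; I'm guessing at the exact mechanics of how the shell is launched and how arguments flow, so adjust it to match what the method actually does):
    
        /**
         * Builds a command that runs the PySpark shell. Note that this does not launch a JVM
         * directly: it returns a command that starts the python interpreter, hands the
         * spark-submit arguments built by this class to it (e.g. through an environment
         * variable), and relies on the python side (see java_gateway.py) to launch
         * spark-submit as a subprocess. Remaining command line arguments are forwarded to
         * the python process as application arguments.
         */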

