GitHub user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/13637#discussion_r66884061
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala ---
    @@ -736,6 +736,290 @@ class SQLContext private[sql](val sparkSession: SparkSession)
       private[sql] def parseDataType(dataTypeString: String): DataType = {
         DataType.fromJson(dataTypeString)
       }
    +
    +  ////////////////////////////////////////////////////////////////////////////
    +  ////////////////////////////////////////////////////////////////////////////
    +  // Deprecated methods
    +  ////////////////////////////////////////////////////////////////////////////
    +  ////////////////////////////////////////////////////////////////////////////
    +
    +  /**
    +   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use createDataFrame instead.", "1.3.0")
    +  def applySchema(rowRDD: RDD[Row], schema: StructType): DataFrame = {
    +    createDataFrame(rowRDD, schema)
    +  }
    +
    +  /**
    +   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use createDataFrame instead.", "1.3.0")
    +  def applySchema(rowRDD: JavaRDD[Row], schema: StructType): DataFrame = {
    +    createDataFrame(rowRDD, schema)
    +  }
    +
    +  /**
    +   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use createDataFrame instead.", "1.3.0")
    +  def applySchema(rdd: RDD[_], beanClass: Class[_]): DataFrame = {
    +    createDataFrame(rdd, beanClass)
    +  }
    +
    +  /**
    +   * @deprecated As of 1.3.0, replaced by `createDataFrame()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use createDataFrame instead.", "1.3.0")
    +  def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): DataFrame = {
    +    createDataFrame(rdd, beanClass)
    +  }
    +
    +  /**
    +   * Loads a Parquet file, returning the result as a [[DataFrame]]. This function returns an empty
    +   * [[DataFrame]] if no paths are passed in.
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().parquet()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.parquet() instead.", "1.4.0")
    +  @scala.annotation.varargs
    +  def parquetFile(paths: String*): DataFrame = {
    +    if (paths.isEmpty) {
    +      emptyDataFrame
    +    } else {
    +      read.parquet(paths : _*)
    +    }
    +  }
    +
    +  /**
    +   * Loads a JSON file (one object per line), returning the result as a [[DataFrame]].
    +   * It goes through the entire dataset once to determine the schema.
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonFile(path: String): DataFrame = {
    +    read.json(path)
    +  }
    +
    +  /**
    +   * Loads a JSON file (one object per line) and applies the given schema,
    +   * returning the result as a [[DataFrame]].
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonFile(path: String, schema: StructType): DataFrame = {
    +    read.schema(schema).json(path)
    +  }
    +
    +  /**
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonFile(path: String, samplingRatio: Double): DataFrame = {
    +    read.option("samplingRatio", samplingRatio.toString).json(path)
    +  }
    +
    +  /**
    +   * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a
    +   * [[DataFrame]].
    +   * It goes through the entire dataset once to determine the schema.
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: RDD[String]): DataFrame = read.json(json)
    +
    +  /**
    +   * Loads an RDD[String] storing JSON objects (one object per record), returning the result as a
    +   * [[DataFrame]].
    +   * It goes through the entire dataset once to determine the schema.
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: JavaRDD[String]): DataFrame = read.json(json)
    +
    +  /**
    +   * Loads an RDD[String] storing JSON objects (one object per record) and applies the given schema,
    +   * returning the result as a [[DataFrame]].
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: RDD[String], schema: StructType): DataFrame = {
    +    read.schema(schema).json(json)
    +  }
    +
    +  /**
    +   * Loads a JavaRDD<String> storing JSON objects (one object per record) and applies the given
    +   * schema, returning the result as a [[DataFrame]].
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: JavaRDD[String], schema: StructType): DataFrame = {
    +    read.schema(schema).json(json)
    +  }
    +
    +  /**
    +   * Loads an RDD[String] storing JSON objects (one object per record) inferring the
    +   * schema, returning the result as a [[DataFrame]].
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: RDD[String], samplingRatio: Double): DataFrame = {
    +    read.option("samplingRatio", samplingRatio.toString).json(json)
    +  }
    +
    +  /**
    +   * Loads a JavaRDD[String] storing JSON objects (one object per record) inferring the
    +   * schema, returning the result as a [[DataFrame]].
    +   *
    +   * @group specificdata
    +   * @deprecated As of 1.4.0, replaced by `read().json()`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.json() instead.", "1.4.0")
    +  def jsonRDD(json: JavaRDD[String], samplingRatio: Double): DataFrame = {
    +    read.option("samplingRatio", samplingRatio.toString).json(json)
    +  }
    +
    +  /**
    +   * Returns the dataset stored at path as a DataFrame,
    +   * using the default data source configured by spark.sql.sources.default.
    +   *
    +   * @group genericdata
    +   * @deprecated As of 1.4.0, replaced by `read().load(path)`. This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.load(path) instead.", "1.4.0")
    +  def load(path: String): DataFrame = {
    +    read.load(path)
    +  }
    +
    +  /**
    +   * Returns the dataset stored at path as a DataFrame, using the given data source.
    +   *
    +   * @group genericdata
    +   * @deprecated As of 1.4.0, replaced by `read().format(source).load(path)`.
    +   *             This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.format(source).load(path) instead.", "1.4.0")
    +  def load(path: String, source: String): DataFrame = {
    +    read.format(source).load(path)
    +  }
    +
    +  /**
    +   * (Java-specific) Returns the dataset specified by the given data source and
    +   * a set of options as a DataFrame.
    +   *
    +   * @group genericdata
    +   * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`.
    +   *             This will be removed in Spark 2.0.
    +   */
    +  @deprecated("Use read.format(source).options(options).load() instead.", 
"1.4.0")
    +  def load(source: String, options: java.util.Map[String, String]): 
DataFrame = {
    +    read.options(options).format(source).load()
    +  }
    +
    +  /**
    +   * (Scala-specific) Returns the dataset specified by the given data source and
    +   * a set of options as a DataFrame.
    +   *
    +   * @group genericdata
    +   * @deprecated As of 1.4.0, replaced by `read().format(source).options(options).load()`.
    +   */
    +  @deprecated("Use read.format(source).options(options).load() instead.", 
"1.4.0")
    +  def load(source: String, options: Map[String, String]): DataFrame = {
    +    read.options(options).format(source).load()
    +  }
    +
    +  /**
    +   * (Java-specific) Returns the dataset specified by the given data source and
    +   * a set of options as a DataFrame, using the given schema as the schema of the DataFrame.
    +   *
    +   * @group genericdata
    +   * @deprecated As of 1.4.0, replaced by
    +   *             `read().format(source).schema(schema).options(options).load()`.
    +   */
    +  @deprecated("Use 
read.format(source).schema(schema).options(options).load() instead.", "1.4.0")
    +  def load(source: String, schema: StructType, options: 
java.util.Map[String, String]): DataFrame =
    --- End diff --
    
    nit: the right style is:
    ```
    def load(
        source: String,
        schema: StructType,
        options: ...): DataFrame = {
      read.format...
    }
    ```
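    Concretely, filling in the elided pieces, that overload would read something like the sketch below. (The diff above cuts off before this overload's body, so the body here is an assumption: it chains `.schema(schema)` the same way the neighboring `load` overloads chain `.options(options)`.)
    ```
    def load(
        source: String,
        schema: StructType,
        options: java.util.Map[String, String]): DataFrame = {
      read.format(source).schema(schema).options(options).load()
    }
    ```
    This follows the Spark Scala style convention: when a declaration's parameters don't fit on one line, each parameter goes on its own line with a 4-space indent.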

