dongjoon-hyun commented on a change in pull request #30044:
URL: https://github.com/apache/spark/pull/30044#discussion_r505142429



##########
File path: core/src/main/scala/org/apache/spark/TestUtils.scala
##########
@@ -255,6 +255,19 @@ private[spark] object TestUtils {
     attempt.isSuccess && attempt.get == 0
   }
 
+  def isPythonVersionAtLeast38(): Boolean = {

Review comment:
       ```scala
     private def prepare(): Unit = {
       val tempPyFile = File.createTempFile("test", ".py")
       // scalastyle:off line.size.limit
       Files.write(tempPyFile.toPath,
         s"""
           |from pyspark.sql import SparkSession
           |import os
           |
           |spark = SparkSession.builder.enableHiveSupport().getOrCreate()
           |version_index = spark.conf.get("spark.sql.test.version.index", None)
           |
          |spark.sql("create table data_source_tbl_{} using json as select 1 i".format(version_index))
          |
          |spark.sql("create table hive_compatible_data_source_tbl_{} using parquet as select 1 i".format(version_index))
          |
          |json_file = "${genDataDir("json_")}" + str(version_index)
          |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file)
          |spark.sql("create table external_data_source_tbl_{}(i int) using json options (path '{}')".format(version_index, json_file))
          |
          |parquet_file = "${genDataDir("parquet_")}" + str(version_index)
          |spark.range(1, 2).selectExpr("cast(id as int) as i").write.parquet(parquet_file)
          |spark.sql("create table hive_compatible_external_data_source_tbl_{}(i int) using parquet options (path '{}')".format(version_index, parquet_file))
          |
          |json_file2 = "${genDataDir("json2_")}" + str(version_index)
          |spark.range(1, 2).selectExpr("cast(id as int) as i").write.json(json_file2)
          |spark.sql("create table external_table_without_schema_{} using json options (path '{}')".format(version_index, json_file2))
          |
          |parquet_file2 = "${genDataDir("parquet2_")}" + str(version_index)
          |spark.range(1, 3).selectExpr("1 as i", "cast(id as int) as p", "1 as j").write.parquet(os.path.join(parquet_file2, "p=1"))
          |spark.sql("create table tbl_with_col_overlap_{} using parquet options(path '{}')".format(version_index, parquet_file2))
           |
           |spark.sql("create view v_{} as select 1 i".format(version_index))
         """.stripMargin.getBytes("utf8"))
       // scalastyle:on line.size.limit
   
   ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to