bvaradar commented on a change in pull request #4910:
URL: https://github.com/apache/hudi/pull/4910#discussion_r840599932
##########
File path: hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieSparkSqlWriter.scala
##########
@@ -231,8 +237,14 @@ object HoodieSparkSqlWriter {
         Array(classOf[org.apache.avro.generic.GenericData],
           classOf[org.apache.avro.Schema]))
     var schema = AvroConversionUtils.convertStructTypeToAvroSchema(df.schema, structName, nameSpace)
+    val lastestSchema = getLatestTableSchema(fs, basePath, sparkContext, schema)
     if (reconcileSchema) {
-      schema = getLatestTableSchema(fs, basePath, sparkContext, schema)
+      schema = lastestSchema
+    }
+    schema = {
Review comment:
       @xiarixiaoyao : Do we need to do this for all cases? Is it safe to do this only in cases where internalSchema is not empty?
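       For illustration, a rough sketch of that guard; `resolveWriteSchema`, the `InternalSchema` stand-in, and `evolve` are hypothetical names for this comment, not this PR's actual identifiers:

       ```scala
       import org.apache.avro.Schema

       // Stand-in for Hudi's InternalSchema type, just to keep the sketch self-contained.
       case class InternalSchema(serialized: String)

       // Rewrite the converted Avro schema against the table's internal schema only
       // when one is actually present; otherwise pass the converted schema through.
       def resolveWriteSchema(converted: Schema,
                              internalSchemaOpt: Option[InternalSchema],
                              evolve: (Schema, InternalSchema) => Schema): Schema =
         internalSchemaOpt.fold(converted)(evolve(converted, _))
       ```

       That way tables without an internal schema keep the current behavior and skip the extra rewrite entirely.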
##########
File path: hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/BaseFileOnlyRelation.scala
##########
@@ -74,7 +75,7 @@ class BaseFileOnlyRelation(sqlContext: SQLContext,
options = optParams,
       // NOTE: We have to fork the Hadoop Config here as Spark will be modifying it
       //       to configure Parquet reader appropriately
-      hadoopConf = new Configuration(conf)
+      hadoopConf = HoodieDataSourceHelper.getConfigurationForInternalSchema(new Configuration(conf), requiredSchema.internalSchema, metaClient.getBasePath, validCommits)
Review comment:
       nit: rename getConfigurationForInternalSchema to getConfigurationWithInternalSchema
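       To make the nit concrete, a minimal sketch of the renamed helper (the parameter types and property keys below are assumptions for illustration; the real method takes an InternalSchema and uses Hudi's own config keys):

       ```scala
       import org.apache.hadoop.conf.Configuration

       // Sketch of the suggested name only. The property keys are illustrative,
       // not Hudi's actual configuration keys.
       def getConfigurationWithInternalSchema(conf: Configuration,
                                              serializedInternalSchema: String,
                                              tablePath: String,
                                              validCommits: String): Configuration = {
         // Fork the conf so the caller's Configuration is never mutated.
         val forked = new Configuration(conf)
         forked.set("example.internal.schema", serializedInternalSchema) // illustrative key
         forked.set("example.table.base.path", tablePath)                // illustrative key
         forked.set("example.valid.commits", validCommits)               // illustrative key
         forked
       }
       ```

       "With" reads better than "For" since the method returns a Configuration that carries the internal-schema properties, rather than one scoped to an internal schema.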
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]