xiarixiaoyao commented on a change in pull request #4910:
URL: https://github.com/apache/hudi/pull/4910#discussion_r840636504
##########
File path: hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieSparkSqlWriter.scala
##########
@@ -231,8 +237,14 @@ object HoodieSparkSqlWriter {
         Array(classOf[org.apache.avro.generic.GenericData],
           classOf[org.apache.avro.Schema]))
       var schema = AvroConversionUtils.convertStructTypeToAvroSchema(df.schema, structName, nameSpace)
+      val lastestSchema = getLatestTableSchema(fs, basePath, sparkContext, schema)
       if (reconcileSchema) {
-        schema = getLatestTableSchema(fs, basePath, sparkContext, schema)
+        schema = lastestSchema
+      }
+      schema = {
Review comment:
It worked for all cases in Hudi 0.9.
To be safe, let me add some logic to fall back to the original logic.
Fixed, as in the following code.
```
if (internalSchemaOpt.isDefined) {
  schema = {
    val newSparkSchema = AvroConversionUtils.convertAvroSchemaToStructType(
      AvroSchemaEvolutionUtils.canonicalizeColumnNullability(schema, lastestSchema))
    AvroConversionUtils.convertStructTypeToAvroSchema(newSparkSchema, structName, nameSpace)
  }
}
```
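For context, here is a minimal sketch of what aligning column nullability against the latest table schema could look like on the Spark side. The helper name `alignNullability` and its logic are illustrative assumptions for this discussion, not Hudi's actual `AvroSchemaEvolutionUtils` implementation:
```
import org.apache.spark.sql.types.StructType

// Illustrative sketch only: align the nullability of incoming fields with the
// table's latest schema so an otherwise-identical write does not register a
// spurious schema change. New columns are kept exactly as written.
def alignNullability(incoming: StructType, latest: StructType): StructType = {
  val latestByName = latest.fields.map(f => f.name -> f).toMap
  StructType(incoming.fields.map { f =>
    latestByName.get(f.name) match {
      case Some(existing) => f.copy(nullable = existing.nullable)
      case None           => f
    }
  })
}
```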
##########
File path: hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/BaseFileOnlyRelation.scala
##########
@@ -74,7 +75,7 @@ class BaseFileOnlyRelation(sqlContext: SQLContext,
       options = optParams,
       // NOTE: We have to fork the Hadoop Config here as Spark will be modifying it
       //       to configure Parquet reader appropriately
-      hadoopConf = new Configuration(conf)
+      hadoopConf = HoodieDataSourceHelper.getConfigurationForInternalSchema(new Configuration(conf), requiredSchema.internalSchema, metaClient.getBasePath, validCommits)
Review comment:
fixed
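For readers following the thread, the idea behind this change is to fork the Hadoop Configuration and attach the serialized internal schema plus the commits it is valid for, so the Parquet reader can resolve evolved columns. Below is a hedged sketch of that pattern; the helper name and property keys are made-up placeholders, not Hudi's real `getConfigurationForInternalSchema` or its configuration names:
```
import org.apache.hadoop.conf.Configuration

// Hypothetical helper: the property keys below are placeholders, not Hudi's
// actual configuration names.
def withInternalSchema(conf: Configuration,
                       serializedInternalSchema: String,
                       tablePath: String,
                       validCommits: String): Configuration = {
  // Fork the conf so the shared Hadoop Configuration is never mutated.
  val forked = new Configuration(conf)
  forked.set("example.internalschema.json", serializedInternalSchema)
  forked.set("example.table.basepath", tablePath)
  forked.set("example.valid.commits", validCommits)
  forked
}
```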