dongkelun commented on a change in pull request #3745:
URL: https://github.com/apache/hudi/pull/3745#discussion_r787275863
##########
File path: hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieSparkSqlWriter.scala
##########
@@ -364,52 +366,50 @@ object HoodieSparkSqlWriter {
       schema = HoodieAvroUtils.getNullSchema.toString
     }
-
-    // Handle various save modes
     if (mode == SaveMode.Ignore && tableExists) {
       log.warn(s"hoodie table at $basePath already exists. Ignoring & not performing actual writes.")
       false
     } else {
       handleSaveModes(sqlContext.sparkSession, mode, basePath, tableConfig, tableName, WriteOperationType.BOOTSTRAP, fs)
-    }
-    if (!tableExists) {
-      val archiveLogFolder = hoodieConfig.getStringOrDefault(HoodieTableConfig.ARCHIVELOG_FOLDER)
-      val partitionColumns = HoodieWriterUtils.getPartitionColumns(parameters)
-      val recordKeyFields = hoodieConfig.getString(DataSourceWriteOptions.RECORDKEY_FIELD)
-      val keyGenProp = hoodieConfig.getString(HoodieTableConfig.KEY_GENERATOR_CLASS_NAME)
-      val populateMetaFields = parameters.getOrElse(HoodieTableConfig.POPULATE_META_FIELDS.key(), HoodieTableConfig.POPULATE_META_FIELDS.defaultValue()).toBoolean
-      val baseFileFormat = hoodieConfig.getStringOrDefault(HoodieTableConfig.BASE_FILE_FORMAT)
-
-      HoodieTableMetaClient.withPropertyBuilder()
-        .setTableType(HoodieTableType.valueOf(tableType))
-        .setTableName(tableName)
-        .setRecordKeyFields(recordKeyFields)
-        .setArchiveLogFolder(archiveLogFolder)
-        .setPayloadClassName(hoodieConfig.getStringOrDefault(PAYLOAD_CLASS_NAME))
-        .setPreCombineField(hoodieConfig.getStringOrDefault(PRECOMBINE_FIELD, null))
-        .setBootstrapIndexClass(bootstrapIndexClass)
-        .setBaseFileFormat(baseFileFormat)
-        .setBootstrapBasePath(bootstrapBasePath)
-        .setPartitionFields(partitionColumns)
-        .setPopulateMetaFields(populateMetaFields)
-        .setKeyGeneratorClassProp(keyGenProp)
-        .setHiveStylePartitioningEnable(hoodieConfig.getBoolean(HIVE_STYLE_PARTITIONING))
-        .setUrlEncodePartitioning(hoodieConfig.getBoolean(URL_ENCODE_PARTITIONING))
-        .setCommitTimezone(HoodieTimelineTimeZone.valueOf(hoodieConfig.getStringOrDefault(HoodieTableConfig.TIMELINE_TIMEZONE)))
-        .initTable(sparkContext.hadoopConfiguration, path)
-    }
+      if (!tableExists) {
+        val archiveLogFolder = hoodieConfig.getStringOrDefault(HoodieTableConfig.ARCHIVELOG_FOLDER)
+        val partitionColumns = HoodieWriterUtils.getPartitionColumns(parameters)
+        val recordKeyFields = hoodieConfig.getString(DataSourceWriteOptions.RECORDKEY_FIELD)
+        val keyGenProp = hoodieConfig.getString(HoodieTableConfig.KEY_GENERATOR_CLASS_NAME)
+        val populateMetaFields = parameters.getOrElse(HoodieTableConfig.POPULATE_META_FIELDS.key(), HoodieTableConfig.POPULATE_META_FIELDS.defaultValue()).toBoolean
+        val baseFileFormat = hoodieConfig.getStringOrDefault(HoodieTableConfig.BASE_FILE_FORMAT)
-    val jsc = new JavaSparkContext(sqlContext.sparkContext)
-    val writeClient = hoodieWriteClient.getOrElse(DataSourceUtils.createHoodieClient(jsc,
-      schema, path, tableName, mapAsJavaMap(parameters)))
-    try {
-      writeClient.bootstrap(org.apache.hudi.common.util.Option.empty())
-    } finally {
-      writeClient.close()
+        HoodieTableMetaClient.withPropertyBuilder()
+          .setTableType(HoodieTableType.valueOf(tableType))
+          .setTableName(tableName)
+          .setRecordKeyFields(recordKeyFields)
+          .setArchiveLogFolder(archiveLogFolder)
+          .setPayloadClassName(hoodieConfig.getStringOrDefault(PAYLOAD_CLASS_NAME))
+          .setPreCombineField(hoodieConfig.getStringOrDefault(PRECOMBINE_FIELD, null))
+          .setBootstrapIndexClass(bootstrapIndexClass)
+          .setBaseFileFormat(baseFileFormat)
+          .setBootstrapBasePath(bootstrapBasePath)
+          .setPartitionFields(partitionColumns)
+          .setPopulateMetaFields(populateMetaFields)
+          .setKeyGeneratorClassProp(keyGenProp)
+          .setHiveStylePartitioningEnable(hoodieConfig.getBoolean(HIVE_STYLE_PARTITIONING))
+          .setUrlEncodePartitioning(hoodieConfig.getBoolean(URL_ENCODE_PARTITIONING))
+          .setCommitTimezone(HoodieTimelineTimeZone.valueOf(hoodieConfig.getStringOrDefault(HoodieTableConfig.TIMELINE_TIMEZONE)))
+          .initTable(sparkContext.hadoopConfiguration, path)
+      }
+
+      val jsc = new JavaSparkContext(sqlContext.sparkContext)
+      val writeClient = hoodieWriteClient.getOrElse(DataSourceUtils.createHoodieClient(jsc,
+        schema, path, tableName, mapAsJavaMap(parameters)))
+      try {
+        writeClient.bootstrap(org.apache.hudi.common.util.Option.empty())
+      } finally {
+        writeClient.close()
+      }
+      val metaSyncSuccess = metaSync(sqlContext.sparkSession, hoodieConfig, basePath, df.schema)
+      metaSyncSuccess
Review comment:
I don't think we need to sync Hive here, since bootstrap doesn't actually write any data. Also, the same sync logic already runs in `HoodieSparkSqlWriter.write`. See the sketch below.
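
For illustration, a minimal sketch of the simplification I have in mind, assuming we simply drop the sync at the end of `bootstrap`. `finishBootstrap` is a hypothetical helper name for the tail of the method, not code from this PR:

```scala
import org.apache.hudi.client.SparkRDDWriteClient

// Hypothetical tail of HoodieSparkSqlWriter.bootstrap() with the metaSync
// call removed -- a sketch of the suggestion, not the code in this diff.
def finishBootstrap(writeClient: SparkRDDWriteClient[_]): Boolean = {
  try {
    // Bootstrap only registers existing files into the Hudi timeline;
    // it writes no new data files, so there is nothing new for Hive to see.
    writeClient.bootstrap(org.apache.hudi.common.util.Option.empty())
  } finally {
    writeClient.close()
  }
  // Report success directly instead of returning the metaSync(...) result;
  // HoodieSparkSqlWriter.write already performs the sync on actual writes.
  true
}
```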