alexeykudinkin commented on code in PR #7370:
URL: https://github.com/apache/hudi/pull/7370#discussion_r1042778369
##########
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/CreateHoodieTableAsSelectCommand.scala:
##########
@@ -72,47 +71,44 @@ case class CreateHoodieTableAsSelectCommand(
}
}
- // ReOrder the query which move the partition columns to the last of the
project list
- val reOrderedQuery = reOrderPartitionColumn(query,
table.partitionColumnNames)
// Remove some properties should not be used
- val newStorage = new CatalogStorageFormat(
- table.storage.locationUri,
- table.storage.inputFormat,
- table.storage.outputFormat,
- table.storage.serde,
- table.storage.compressed,
- table.storage.properties.--(needFilterProps))
- val newTable = table.copy(
- identifier = tableIdentWithDB,
- storage = newStorage,
- schema = reOrderedQuery.schema,
- properties = table.properties.--(needFilterProps)
+ val updatedStorageFormat = table.storage.copy(
Review Comment:
Simplifying existing code
##########
hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/HoodieDatasetBulkInsertHelper.scala:
##########
@@ -92,11 +69,44 @@ object HoodieDatasetBulkInsertHelper extends Logging {
val updatedSchema = StructType(metaFields ++ schema.fields)
- val updatedDF = if (populateMetaFields &&
config.shouldCombineBeforeInsert) {
- val dedupedRdd = dedupeRows(prependedRdd, updatedSchema,
config.getPreCombineField, SparkHoodieIndexFactory.isGlobalIndex(config))
+ val updatedDF = if (populateMetaFields) {
Review Comment:
This code doesn't change -- it is simply moved around to avoid dereferencing
the Dataset into an RDD when meta-fields are disabled (in that case we can add
them as a simple Projection instead)
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]