danny0405 commented on code in PR #8107:
URL: https://github.com/apache/hudi/pull/8107#discussion_r1185818843


##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieSparkSqlWriter.scala:
##########
@@ -1120,47 +1126,69 @@ object HoodieSparkSqlWriter {
           Some(writerSchema))
 
         avroRecords.mapPartitions(it => {
+          val sparkPartitionId = TaskContext.getPartitionId()
+          val keyGenProps = new TypedProperties(config.getProps)
+          if (autoGenerateRecordKeys) {
+            keyGenProps.setProperty(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG, String.valueOf(sparkPartitionId))
+            keyGenProps.setProperty(KeyGenUtils.RECORD_KEY_GEN_INSTANT_TIME_CONFIG, instantTime)
+          }
+          val keyGenerator = HoodieSparkKeyGeneratorFactory.createKeyGenerator(keyGenProps)
+            .asInstanceOf[BaseKeyGenerator]
+
           val dataFileSchema = new Schema.Parser().parse(dataFileSchemaStr)
           val consistentLogicalTimestampEnabled = parameters.getOrElse(
             DataSourceWriteOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(),
             DataSourceWriteOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue()).toBoolean
 
-          it.map { avroRecord =>
+          // handle dropping partition columns
+          it.map { avroRec =>
             val processedRecord = if (shouldDropPartitionColumns) {
-              HoodieAvroUtils.rewriteRecord(avroRecord, dataFileSchema)
+              HoodieAvroUtils.rewriteRecord(avroRec, dataFileSchema)
             } else {
-              avroRecord
+              avroRec
             }
+
+            val hoodieKey = new HoodieKey(keyGenerator.getRecordKey(avroRec), keyGenerator.getPartitionPath(avroRec))
             val hoodieRecord = if (shouldCombine) {
-              val orderingVal = HoodieAvroUtils.getNestedFieldVal(avroRecord, config.getString(PRECOMBINE_FIELD),
+              val orderingVal = HoodieAvroUtils.getNestedFieldVal(avroRec, config.getString(PRECOMBINE_FIELD),
                false, consistentLogicalTimestampEnabled).asInstanceOf[Comparable[_]]
-              DataSourceUtils.createHoodieRecord(processedRecord, orderingVal, keyGenerator.getKey(avroRecord),
+              DataSourceUtils.createHoodieRecord(processedRecord, orderingVal, hoodieKey,
                config.getString(PAYLOAD_CLASS_NAME))
             } else {
-              DataSourceUtils.createHoodieRecord(processedRecord, keyGenerator.getKey(avroRecord),
+              DataSourceUtils.createHoodieRecord(processedRecord, hoodieKey,
                config.getString(PAYLOAD_CLASS_NAME))
             }
             }
             hoodieRecord
           }
         }).toJavaRDD()
 
       case HoodieRecord.HoodieRecordType.SPARK =>
-        val sparkKeyGenerator = keyGenerator.asInstanceOf[SparkKeyGeneratorInterface]
         val dataFileSchema = new Schema.Parser().parse(dataFileSchemaStr)
         val dataFileStructType = HoodieInternalRowUtils.getCachedSchema(dataFileSchema)
         val writerStructType = HoodieInternalRowUtils.getCachedSchema(writerSchema)
         val sourceStructType = df.schema
 
         df.queryExecution.toRdd.mapPartitions { it =>
+          val sparkPartitionId = TaskContext.getPartitionId()
+          val keyGenProps = new TypedProperties(config.getProps)
+          if (autoGenerateRecordKeys) {
+            keyGenProps.setProperty(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG, String.valueOf(sparkPartitionId))
+            keyGenProps.setProperty(KeyGenUtils.RECORD_KEY_GEN_INSTANT_TIME_CONFIG, instantTime)
+          }
+          val sparkKeyGenerator = HoodieSparkKeyGeneratorFactory.createKeyGenerator(keyGenProps).asInstanceOf[SparkKeyGeneratorInterface]
           val targetStructType = if (shouldDropPartitionColumns) dataFileStructType else writerStructType
           // NOTE: To make sure we properly transform records
           val targetStructTypeRowWriter = getCachedUnsafeRowWriter(sourceStructType, targetStructType)
 
-          it.map { sourceRow =>
-            val recordKey = sparkKeyGenerator.getRecordKey(sourceRow, sourceStructType)
+          val recordRecordKeyPairItr = it.map(avroRecord => {
+            (avroRecord, sparkKeyGenerator.getRecordKey(avroRecord, sourceStructType))
+          })
+

Review Comment:
   I guess this change is not necessary?
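   
   Roughly the shape I'd keep, as a hedged sketch with placeholder generic names (none of these are the PR's actual types or helpers): mapping each element straight to its result avoids materializing the intermediate (record, key) tuples.
   
   ```scala
   // One pass, no intermediate (element, key) tuples allocated.
   def writeDirect[A, K, R](it: Iterator[A], keyOf: A => K, toRecord: (A, K) => R): Iterator[R] =
     it.map(a => toRecord(a, keyOf(a)))
   
   // The two-step shape this diff introduces: same result, one extra tuple per element.
   def writeViaPairs[A, K, R](it: Iterator[A], keyOf: A => K, toRecord: (A, K) => R): Iterator[R] =
     it.map(a => (a, keyOf(a))).map { case (a, k) => toRecord(a, k) }
   ```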



##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/keygen/AutoRecordGenWrapperKeyGenerator.java:
##########
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.keygen;
+
+import org.apache.hudi.common.config.TypedProperties;
+import org.apache.hudi.common.model.HoodieRecord;
+
+import org.apache.avro.generic.GenericRecord;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.types.StructType;
+import org.apache.spark.unsafe.types.UTF8String;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * A wrapper key generator to intercept getRecordKey calls for auto record key generator.
+ * <ol>
+ *   <li>Generated keys will be unique not only w/in provided [[org.apache.spark.sql.DataFrame]], but
+ *   globally unique w/in the target table</li>
+ *   <li>Generated keys have minimal overhead (to compute, persist and read)</li>
+ * </ol>
+ *
+ * Keys adhere to the following format:
+ *
+ * [instantTime]_[PartitionId]_[RowId]
+ *
+ * where
+ * instantTime refers to the commit time of the batch being ingested.
+ * PartitionId refers to spark's partition Id.
+ * RowId refers to the row index within the spark partition.
+ */
+public class AutoRecordGenWrapperKeyGenerator extends BuiltinKeyGenerator {
+
+  private final BuiltinKeyGenerator builtinKeyGenerator;
+  private final AtomicBoolean initializeAutoKeyGenProps = new AtomicBoolean(false);
+  private int partitionId;
+  private String instantTime;
+  private int rowId;
+
+  public AutoRecordGenWrapperKeyGenerator(TypedProperties config, BuiltinKeyGenerator builtinKeyGenerator) {
+    super(config);
+    this.builtinKeyGenerator = builtinKeyGenerator;
+  }
+
+  @Override
+  public String getRecordKey(GenericRecord record) {
+    initializeAutoKeyGenProps();
+    return HoodieRecord.generateSequenceId(instantTime, partitionId, rowId++);
+  }
+
+  private void initializeAutoKeyGenProps() {
+    if (!initializeAutoKeyGenProps.getAndSet(true)) {
+      this.rowId = 0;
+      this.partitionId = config.getInteger(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG);

Review Comment:
   Can we move it to the constructor?
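   
   As a hedged, standalone sketch of what I mean (the class name, `generateKey`, and the literal property keys below are placeholders, not Hudi's actual API): reading both configs eagerly in the constructor would let us drop the `AtomicBoolean` guard and the per-record initialization check.
   
   ```java
   import java.util.Properties;
   
   // Simplified stand-in for the wrapper generator; configs are read once, up front.
   public class AutoKeyGenSketch {
     private final String instantTime;
     private final int partitionId;
     private int rowId = 0;
   
     public AutoKeyGenSketch(Properties props) {
       // Assumes both values are set before the generator is created on the executor.
       this.instantTime = props.getProperty("placeholder.instant.time");
       this.partitionId = Integer.parseInt(props.getProperty("placeholder.partition.id"));
     }
   
     // Mirrors the [instantTime]_[PartitionId]_[RowId] format from the class javadoc.
     public String generateKey() {
       return instantTime + "_" + partitionId + "_" + rowId++;
     }
   }
   ```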



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java:
##########
@@ -609,28 +616,49 @@ private Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> fetchFromSourc
     SerializableSchema avroSchema = new SerializableSchema(schemaProvider.getTargetSchema());
     SerializableSchema processedAvroSchema = new SerializableSchema(isDropPartitionColumns() ? HoodieAvroUtils.removeMetadataFields(avroSchema.get()) : avroSchema.get());
     if (recordType == HoodieRecordType.AVRO) {
-      records = avroRDD.map(record -> {
-        GenericRecord gr = isDropPartitionColumns() ? HoodieAvroUtils.removeFields(record, partitionColumns) : record;
+      JavaRDD<Tuple2<GenericRecord, HoodieKey>> recordHoodieKeyPairRdd = avroRDD.mapPartitions(
+          (FlatMapFunction<Iterator<GenericRecord>, Tuple2<GenericRecord, HoodieKey>>) genericRecordIterator -> {
+            if (autoGenerateRecordKeys) {
+              props.setProperty(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG, String.valueOf(TaskContext.getPartitionId()));
+              props.setProperty(KeyGenUtils.RECORD_KEY_GEN_INSTANT_TIME_CONFIG, instantTime);
+            }
+            BuiltinKeyGenerator builtinKeyGenerator = (BuiltinKeyGenerator) HoodieSparkKeyGeneratorFactory.createKeyGenerator(props);
+            List<Tuple2<GenericRecord, HoodieKey>> recordWithRecordKeyOverride = new ArrayList<>();
+            while (genericRecordIterator.hasNext()) {
+              GenericRecord genRec = genericRecordIterator.next();
+              recordWithRecordKeyOverride.add(new Tuple2(genRec, new HoodieKey(builtinKeyGenerator.getRecordKey(genRec),
+                  builtinKeyGenerator.getPartitionPath(genRec))));
+            }
+            return recordWithRecordKeyOverride.iterator();
+          });
+
+      records = recordHoodieKeyPairRdd.map(recordHoodieKeyPair -> {

Review Comment:
   I still don't quite get why we must build a (record, key) pair iterator first?
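   
   Related worry: the `ArrayList` buffers the whole partition before returning. A lazy wrapper along these lines (plain JDK, hypothetical class name, untested against this PR) would keep the partition streaming while still pairing each record with its key.
   
   ```java
   import java.util.Iterator;
   import java.util.function.Function;
   
   // Hypothetical lazy mapping iterator: transforms records one at a time
   // instead of draining the partition into an ArrayList first.
   final class MappingIterator<A, B> implements Iterator<B> {
     private final Iterator<A> source;
     private final Function<A, B> fn;
   
     MappingIterator(Iterator<A> source, Function<A, B> fn) {
       this.source = source;
       this.fn = fn;
     }
   
     @Override
     public boolean hasNext() {
       return source.hasNext();
     }
   
     @Override
     public B next() {
       return fn.apply(source.next());
     }
   }
   ```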


