codope commented on code in PR #10272:
URL: https://github.com/apache/hudi/pull/10272#discussion_r1419165609


##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java:
##########
@@ -1390,6 +1390,10 @@ public boolean shouldAllowMultiWriteOnSameInstant() {
     return getBoolean(ALLOW_MULTI_WRITE_ON_SAME_INSTANT_ENABLE);
   }
 
+  public boolean shouldDropPartitionColumns() {

Review Comment:
   why is this needed? Isn't there already a getter for this config?



##########
hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/HoodieDatasetBulkInsertHelper.scala:
##########
@@ -243,21 +238,17 @@ object HoodieDatasetBulkInsertHelper
     }
   }
 
-  private def dropPartitionColumns(df: DataFrame, config: HoodieWriteConfig): DataFrame = {
-    val partitionPathFields = getPartitionPathFields(config).toSet
-    val nestedPartitionPathFields = partitionPathFields.filter(f => f.contains('.'))
-    if (nestedPartitionPathFields.nonEmpty) {
-      logWarning(s"Can not drop nested partition path fields: $nestedPartitionPathFields")
-    }
-
-    val partitionPathCols = (partitionPathFields -- nestedPartitionPathFields).toSeq
-
-    df.drop(partitionPathCols: _*)
-  }
-
   private def getPartitionPathFields(config: HoodieWriteConfig): Seq[String] = {
     val keyGeneratorClassName = config.getString(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME)
     val keyGenerator = ReflectionUtils.loadClass(keyGeneratorClassName, new TypedProperties(config.getProps)).asInstanceOf[BuiltinKeyGenerator]
     keyGenerator.getPartitionPathFields.asScala
   }
+
+   def getPartitionPathCols(config: HoodieWriteConfig): Seq[String] = {
+    val partitionPathFields = getPartitionPathFields(config).toSet
+    val nestedPartitionPathFields = partitionPathFields.filter(f => f.contains('.'))
+
+    return (partitionPathFields -- nestedPartitionPathFields).toSeq
Review Comment:
   should we not include `nestedPartitionPathFields`?
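
   For context, a minimal standalone sketch of what the current logic returns (the field names below are hypothetical, not from the PR): nested fields are filtered out entirely, and unlike the old `dropPartitionColumns`, nothing is logged, so callers of `getPartitionPathCols` never see them.

   ```scala
   // Hypothetical partition path fields; "meta.country" is nested.
   val partitionPathFields = Set("region", "meta.country")
   val nestedPartitionPathFields = partitionPathFields.filter(f => f.contains('.'))

   // Mirrors the method above: only top-level fields survive.
   val partitionPathCols = (partitionPathFields -- nestedPartitionPathFields).toSeq
   // partitionPathCols contains only "region"; "meta.country" is silently dropped
   ```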



##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/BulkInsertDataInternalWriterHelper.java:
##########
@@ -124,7 +129,31 @@ public void write(InternalRow row) throws IOException {
         lastKnownPartitionPath = partitionPath.clone();
       }
 
-      handle.write(row);
+      boolean shouldDropPartitionColumns = writeConfig.shouldDropPartitionColumns();
+      if (shouldDropPartitionColumns) {
+        // Drop the partition columns from the row
+        List<String> partitionCols = JavaConverters.<String>seqAsJavaList(HoodieDatasetBulkInsertHelper.getPartitionPathCols(this.writeConfig));
+        Set<Integer> partitionIdx = new HashSet<Integer>();
+        for (String col : partitionCols) {
+          partitionIdx.add(this.structType.fieldIndex(col));
+        }
+
+        // TODO: Assumes that InternalRow::toSeq(...) preserves the column ordering based on
+        // the supplied schema

Review Comment:
   let's ensure that this is indeed the case
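
   One way to verify (a standalone sketch, not from the PR): `InternalRow.toSeq(schema)` is purely positional — it reads ordinal `i` using `schema(i).dataType` — so the output order matches the supplied schema only if the row was built against that same schema.

   ```scala
   import org.apache.spark.sql.catalyst.InternalRow
   import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
   import org.apache.spark.unsafe.types.UTF8String

   // Hypothetical two-column schema; toSeq(schema) resolves ordinal i using
   // schema(i).dataType, so values come back in the schema's declared order.
   val schema = StructType(Seq(
     StructField("id", IntegerType),
     StructField("name", StringType)))
   val row = InternalRow(1, UTF8String.fromString("a"))
   println(row.toSeq(schema)) // 1, then "a": positional, matching the schema
   ```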



##########
hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/HoodieDatasetBulkInsertHelper.scala:
##########
@@ -186,7 +179,9 @@ object HoodieDatasetBulkInsertHelper
       }
 
       try {
-        iter.foreach(writer.write)
+        iter.foreach(row => {
+          writer.write(row)
+        })

Review Comment:
   can keep as before if we're not doing any manipulation on the row
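
   For reference, the two forms are equivalent by eta-expansion when the row passes through unchanged (the toy `Writer` below is hypothetical):

   ```scala
   // Toy stand-ins to make the equivalence concrete.
   class Writer { def write(row: String): Unit = println(row) }
   val writer = new Writer

   Iterator("r1", "r2").foreach(writer.write)             // method reference
   Iterator("r1", "r2").foreach(row => writer.write(row)) // same behavior, more noise
   ```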



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]