vinothchandar commented on a change in pull request #1149:
URL: https://github.com/apache/hudi/pull/1149#discussion_r457859023



##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionRangePartitioner.java
##########
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution.bulkinsert;
+
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+
+import org.apache.spark.RangePartitioner;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import scala.Tuple2;
+import scala.math.Ordering;
+import scala.math.Ordering$;
+import scala.reflect.ClassTag;
+import scala.reflect.ClassTag$;
+
+public class RDDPartitionRangePartitioner<T extends HoodieRecordPayload>
+    extends BulkInsertInternalPartitioner<T> implements Serializable {
+  @Override
+  public JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> 
records,
+      int outputSparkPartitions) {
+    JavaPairRDD<String, HoodieRecord<T>> pairRDD = records.mapToPair(record ->
+        new Tuple2(
+            new StringBuilder()
+                .append(record.getPartitionPath())
+                .append("+")
+                .append(record.getRecordKey())
+                .toString(), record));
+    Ordering<String> ordering = Ordering$.MODULE$

Review comment:
       can we write this without importing Scala classes here? It will be 
problematic in terms of bundling.

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/CopyOnWriteInsertHandler.java
##########
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution;
+
+import org.apache.hudi.client.SparkTaskContextSupplier;
+import org.apache.hudi.client.WriteStatus;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+import org.apache.hudi.common.util.queue.BoundedInMemoryQueueConsumer;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.execution.LazyInsertIterable.HoodieInsertValueGenResult;
+import org.apache.hudi.io.HoodieWriteHandle;
+import org.apache.hudi.io.WriteHandleFactory;
+import org.apache.hudi.table.HoodieTable;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Consumes stream of hoodie records from in-memory queue and writes to one or 
more create-handles.
+ */
+public class CopyOnWriteInsertHandler<T extends HoodieRecordPayload>
+    extends
+    BoundedInMemoryQueueConsumer<HoodieInsertValueGenResult<HoodieRecord>, 
List<WriteStatus>> {
+
+  protected HoodieWriteConfig config;
+  protected String instantTime;
+  protected HoodieTable<T> hoodieTable;
+  protected String idPrefix;
+  protected int numFilesWritten;
+  protected SparkTaskContextSupplier sparkTaskContextSupplier;
+  protected WriteHandleFactory<T> writeHandleFactory;
+
+  protected final List<WriteStatus> statuses = new ArrayList<>();
+  protected Map<String, HoodieWriteHandle> handles = new HashMap<>();
+
+  public CopyOnWriteInsertHandler(
+      HoodieWriteConfig config, String instantTime, HoodieTable<T> 
hoodieTable, String idPrefix,
+      SparkTaskContextSupplier sparkTaskContextSupplier, WriteHandleFactory<T> 
writeHandleFactory) {
+    this.config = config;
+    this.instantTime = instantTime;
+    this.hoodieTable = hoodieTable;
+    this.idPrefix = idPrefix;
+    this.numFilesWritten = 0;
+    this.sparkTaskContextSupplier = sparkTaskContextSupplier;
+    this.writeHandleFactory = writeHandleFactory;
+  }
+
+  @Override
+  public void consumeOneRecord(HoodieInsertValueGenResult<HoodieRecord> 
payload) {
+    final HoodieRecord insertPayload = payload.record;
+    String partitionPath = insertPayload.getPartitionPath();
+    HoodieWriteHandle handle = handles.get(partitionPath);
+    // lazily initialize the handle, for the first time
+    if (handle == null) {
+      handle = writeHandleFactory.create(
+          config, instantTime, hoodieTable, insertPayload.getPartitionPath(),
+          idPrefix, sparkTaskContextSupplier);
+      handles.put(partitionPath, handle);
+    }
+
+    if (handle.canWrite(payload.record)) {
+      // write the payload, if the handle has capacity
+      handle.write(insertPayload, payload.insertValue, payload.exception);
+    } else {
+      // handle is full.
+      statuses.add(handle.close());
+      // Need to handle the rejected payload & open new handle
+      handle = writeHandleFactory.create(
+          config, instantTime, hoodieTable, insertPayload.getPartitionPath(),
+          idPrefix, sparkTaskContextSupplier);
+      handles.put(partitionPath, handle);
+      handle.write(insertPayload, payload.insertValue,
+          payload.exception); // we should be able to write 1 payload.
+    }
+  }
+
+  @Override
+  public void finish() {
+    for (HoodieWriteHandle handle : handles.values()) {

Review comment:
       so this implies that `handle.close()` needs to be idempotent? If we are 
closing the handle in L85 already, why close everything from the map again? 

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/bulkinsert/BulkInsertMapFunctionForNonSortedRecords.java
##########
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution.bulkinsert;
+
+import org.apache.hudi.client.WriteStatus;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.execution.CopyOnWriteInsertHandler;
+import org.apache.hudi.execution.LazyInsertIterable.HoodieInsertValueGenResult;
+import org.apache.hudi.io.CreateHandleFactory;
+import org.apache.hudi.table.HoodieTable;
+
+import org.apache.avro.Schema;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class BulkInsertMapFunctionForNonSortedRecords<T extends 
HoodieRecordPayload>
+    extends BulkInsertMapFunction<T> {
+
+  Map<String, CopyOnWriteInsertHandler> parallelWritersMap;

Review comment:
       make this private? 

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/table/action/commit/BulkInsertHelper.java
##########
@@ -56,31 +59,50 @@
 
     final JavaRDD<HoodieRecord<T>> repartitionedRecords;
     final int parallelism = config.getBulkInsertShuffleParallelism();
+    boolean arePartitionRecordsSorted = true;
     if (bulkInsertPartitioner.isPresent()) {
-      repartitionedRecords = 
bulkInsertPartitioner.get().repartitionRecords(dedupedRecords, parallelism);
+      repartitionedRecords = bulkInsertPartitioner.get()
+          .repartitionRecords(dedupedRecords, parallelism);
+      arePartitionRecordsSorted = 
bulkInsertPartitioner.get().arePartitionRecordsSorted();
     } else {
-      // Now, sort the records and line them up nicely for loading.
-      repartitionedRecords = dedupedRecords.sortBy(record -> {
-        // Let's use "partitionPath + key" as the sort key. Spark, will ensure
-        // the records split evenly across RDD partitions, such that small 
partitions fit
-        // into 1 RDD partition, while big ones spread evenly across multiple 
RDD partitions
-        return String.format("%s+%s", record.getPartitionPath(), 
record.getRecordKey());
-      }, true, parallelism);
+      BulkInsertInternalPartitioner partitioner =
+          BulkInsertInternalPartitioner.get(config.getBulkInsertSortMode());
+      repartitionedRecords = partitioner.repartitionRecords(dedupedRecords, 
parallelism);
+      arePartitionRecordsSorted = partitioner.arePartitionRecordsSorted();

Review comment:
       this assignment can be done just once, outside the if/else? (IntelliJ 
inspection tips also indicated that, IIRC)

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/table/action/commit/BulkInsertHelper.java
##########
@@ -56,31 +59,50 @@
 
     final JavaRDD<HoodieRecord<T>> repartitionedRecords;
     final int parallelism = config.getBulkInsertShuffleParallelism();
+    boolean arePartitionRecordsSorted = true;
     if (bulkInsertPartitioner.isPresent()) {
-      repartitionedRecords = 
bulkInsertPartitioner.get().repartitionRecords(dedupedRecords, parallelism);
+      repartitionedRecords = bulkInsertPartitioner.get()

Review comment:
       just like here.. let's bring lines 64-65 onto a single line? 

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/table/UserDefinedBulkInsertPartitioner.java
##########
@@ -31,4 +31,6 @@
 public interface UserDefinedBulkInsertPartitioner<T extends 
HoodieRecordPayload> {
 
   JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> 
records, int outputSparkPartitions);
+
+  boolean arePartitionRecordsSorted();

Review comment:
       Javadoc, please 

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionRangePartitioner.java
##########
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution.bulkinsert;
+
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+
+import org.apache.spark.RangePartitioner;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import scala.Tuple2;
+import scala.math.Ordering;
+import scala.math.Ordering$;
+import scala.reflect.ClassTag;
+import scala.reflect.ClassTag$;
+
+public class RDDPartitionRangePartitioner<T extends HoodieRecordPayload>
+    extends BulkInsertInternalPartitioner<T> implements Serializable {
+  @Override
+  public JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> 
records,
+      int outputSparkPartitions) {
+    JavaPairRDD<String, HoodieRecord<T>> pairRDD = records.mapToPair(record ->
+        new Tuple2(
+            new StringBuilder()
+                .append(record.getPartitionPath())
+                .append("+")
+                .append(record.getRecordKey())
+                .toString(), record));
+    Ordering<String> ordering = Ordering$.MODULE$
+        .comparatorToOrdering(Comparator.<String>naturalOrder());
+    ClassTag<String> classTag = ClassTag$.MODULE$.apply(String.class);
+    return pairRDD.partitionBy(new RangePartitioner<String, HoodieRecord<T>>(

Review comment:
       isn't this exactly what `sortBy` will do? Are we somehow picking a 
better range? I'd like to skip this implementation otherwise.

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionRangePartitioner.java
##########
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution.bulkinsert;
+
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+
+import org.apache.spark.RangePartitioner;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import scala.Tuple2;
+import scala.math.Ordering;
+import scala.math.Ordering$;
+import scala.reflect.ClassTag;
+import scala.reflect.ClassTag$;
+
+public class RDDPartitionRangePartitioner<T extends HoodieRecordPayload>
+    extends BulkInsertInternalPartitioner<T> implements Serializable {
+  @Override
+  public JavaRDD<HoodieRecord<T>> repartitionRecords(JavaRDD<HoodieRecord<T>> 
records,
+      int outputSparkPartitions) {
+    JavaPairRDD<String, HoodieRecord<T>> pairRDD = records.mapToPair(record ->
+        new Tuple2(
+            new StringBuilder()
+                .append(record.getPartitionPath())
+                .append("+")
+                .append(record.getRecordKey())
+                .toString(), record));
+    Ordering<String> ordering = Ordering$.MODULE$
+        .comparatorToOrdering(Comparator.<String>naturalOrder());
+    ClassTag<String> classTag = ClassTag$.MODULE$.apply(String.class);

Review comment:
       let's implement these using just Java class objects?

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionRangePartitioner.java
##########
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.execution.bulkinsert;
+
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+
+import org.apache.spark.RangePartitioner;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import scala.Tuple2;
+import scala.math.Ordering;
+import scala.math.Ordering$;
+import scala.reflect.ClassTag;
+import scala.reflect.ClassTag$;
+
+public class RDDPartitionRangePartitioner<T extends HoodieRecordPayload>

Review comment:
       also, please add Javadocs for all these classes.. even if it's a single 
line, it makes the code so much more readable in the long run

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/table/action/commit/BulkInsertHelper.java
##########
@@ -56,31 +59,50 @@
 
     final JavaRDD<HoodieRecord<T>> repartitionedRecords;
     final int parallelism = config.getBulkInsertShuffleParallelism();
+    boolean arePartitionRecordsSorted = true;
     if (bulkInsertPartitioner.isPresent()) {
-      repartitionedRecords = 
bulkInsertPartitioner.get().repartitionRecords(dedupedRecords, parallelism);
+      repartitionedRecords = bulkInsertPartitioner.get()
+          .repartitionRecords(dedupedRecords, parallelism);
+      arePartitionRecordsSorted = 
bulkInsertPartitioner.get().arePartitionRecordsSorted();
     } else {
-      // Now, sort the records and line them up nicely for loading.
-      repartitionedRecords = dedupedRecords.sortBy(record -> {
-        // Let's use "partitionPath + key" as the sort key. Spark, will ensure
-        // the records split evenly across RDD partitions, such that small 
partitions fit
-        // into 1 RDD partition, while big ones spread evenly across multiple 
RDD partitions
-        return String.format("%s+%s", record.getPartitionPath(), 
record.getRecordKey());
-      }, true, parallelism);
+      BulkInsertInternalPartitioner partitioner =
+          BulkInsertInternalPartitioner.get(config.getBulkInsertSortMode());
+      repartitionedRecords = partitioner.repartitionRecords(dedupedRecords, 
parallelism);
+      arePartitionRecordsSorted = partitioner.arePartitionRecordsSorted();
     }
 
     // generate new file ID prefixes for each output partition
     final List<String> fileIDPrefixes =
-        IntStream.range(0, parallelism).mapToObj(i -> 
FSUtils.createNewFileIdPfx()).collect(Collectors.toList());
+        IntStream.range(0, parallelism).mapToObj(i -> 
FSUtils.createNewFileIdPfx())

Review comment:
       same thing here. Our large monitors to the rescue :) let's make lines 76-77 
into a single line if possible

##########
File path: 
hudi-client/src/main/java/org/apache/hudi/table/action/commit/BulkInsertHelper.java
##########
@@ -56,31 +59,50 @@
 
     final JavaRDD<HoodieRecord<T>> repartitionedRecords;
     final int parallelism = config.getBulkInsertShuffleParallelism();
+    boolean arePartitionRecordsSorted = true;
     if (bulkInsertPartitioner.isPresent()) {
-      repartitionedRecords = 
bulkInsertPartitioner.get().repartitionRecords(dedupedRecords, parallelism);
+      repartitionedRecords = bulkInsertPartitioner.get()
+          .repartitionRecords(dedupedRecords, parallelism);
+      arePartitionRecordsSorted = 
bulkInsertPartitioner.get().arePartitionRecordsSorted();
     } else {
-      // Now, sort the records and line them up nicely for loading.
-      repartitionedRecords = dedupedRecords.sortBy(record -> {
-        // Let's use "partitionPath + key" as the sort key. Spark, will ensure
-        // the records split evenly across RDD partitions, such that small 
partitions fit
-        // into 1 RDD partition, while big ones spread evenly across multiple 
RDD partitions
-        return String.format("%s+%s", record.getPartitionPath(), 
record.getRecordKey());
-      }, true, parallelism);
+      BulkInsertInternalPartitioner partitioner =
+          BulkInsertInternalPartitioner.get(config.getBulkInsertSortMode());
+      repartitionedRecords = partitioner.repartitionRecords(dedupedRecords, 
parallelism);
+      arePartitionRecordsSorted = partitioner.arePartitionRecordsSorted();
     }
 
     // generate new file ID prefixes for each output partition
     final List<String> fileIDPrefixes =
-        IntStream.range(0, parallelism).mapToObj(i -> 
FSUtils.createNewFileIdPfx()).collect(Collectors.toList());
+        IntStream.range(0, parallelism).mapToObj(i -> 
FSUtils.createNewFileIdPfx())
+            .collect(Collectors.toList());
 
     table.getActiveTimeline().transitionRequestedToInflight(new 
HoodieInstant(State.REQUESTED,
         table.getMetaClient().getCommitActionType(), instantTime), 
Option.empty(),
         config.shouldAllowMultiWriteOnSameInstant());
 
+    /*

Review comment:
       remove? 




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to