satishkotha commented on a change in pull request #2275:
URL: https://github.com/apache/hudi/pull/2275#discussion_r548903860



##########
File path: hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieClusteringConfig.java
##########
@@ -73,9 +73,17 @@
   public static final String CLUSTERING_TARGET_FILE_MAX_BYTES = CLUSTERING_STRATEGY_PARAM_PREFIX + "target.file.max.bytes";
   public static final String DEFAULT_CLUSTERING_TARGET_FILE_MAX_BYTES = String.valueOf(1 * 1024 * 1024 * 1024L); // 1GB
   
-  // constants related to clustering that may be used by more than 1 strategy.
+  // Constants related to clustering that may be used by more than 1 strategy.
   public static final String CLUSTERING_SORT_COLUMNS_PROPERTY = HoodieClusteringConfig.CLUSTERING_STRATEGY_PARAM_PREFIX + "sort.columns";
 
+  // When file groups are under clustering, updates to those file groups need special handling; the default strategy simply rejects the update.
+  public static final String CLUSTERING_UPDATES_STRATEGY_PROP = "hoodie.clustering.updates.strategy";
+  public static final String DEFAULT_CLUSTERING_UPDATES_STRATEGY = "org.apache.hudi.client.clustering.update.strategy.SparkRejectUpdateStrategy";
+
+  // Async clustering
+  public static final String ASYNC_CLUSTERING_ENABLE_OPT_KEY = "hoodie.clustering.async";

Review comment:
       Change property to hoodie.clustering.async.enabled?
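
       A minimal sketch of the suggested rename (the default constant below is hypothetical, added only to illustrate the convention):

           // Reviewer-suggested property name with the ".enabled" suffix.
           public static final String ASYNC_CLUSTERING_ENABLE_OPT_KEY = "hoodie.clustering.async.enabled";
           // Hypothetical default, assuming async clustering stays off unless opted in.
           public static final String DEFAULT_ASYNC_CLUSTERING_ENABLE = "false";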

##########
File path: hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientOnCopyOnWriteStorage.java
##########
@@ -110,6 +120,13 @@
 public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 
   private static final Logger LOG = LogManager.getLogger(TestHoodieClientOnCopyOnWriteStorage.class);
+  private static final String CLUSTERING_STRATEGY_CLASS = "org.apache.hudi.DefaultClusteringStrategy";

Review comment:
       The class 'org.apache.hudi.DefaultClusteringStrategy' doesn't seem to exist. Could you explain what this is doing? Can we use the default from the clustering config?
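
       A sketch of the suggested alternative, assuming HoodieClusteringConfig exposes a default strategy constant the test can reuse (the constant name below is an assumption, not confirmed against the PR):

           // Hypothetical: point the test at the clustering config's default strategy
           // instead of the non-existent "org.apache.hudi.DefaultClusteringStrategy".
           private static final String CLUSTERING_STRATEGY_CLASS =
               HoodieClusteringConfig.DEFAULT_CLUSTERING_PLAN_STRATEGY_CLASS;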

##########
File path: hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/update/strategy/SparkRejectUpdateStrategy.java
##########
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.client.clustering.update.strategy;
+
+import org.apache.hudi.client.common.HoodieSparkEngineContext;
+import org.apache.hudi.common.model.HoodieFileGroupId;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieRecordPayload;
+import org.apache.hudi.exception.HoodieClusteringUpdateException;
+import org.apache.hudi.table.action.cluster.strategy.UpdateStrategy;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.spark.api.java.JavaRDD;
+import scala.Tuple2;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Update strategy that rejects updates: if any file group under clustering
+ * receives an update record, an exception is thrown.
+ */
+public class SparkRejectUpdateStrategy<T extends HoodieRecordPayload<T>> extends UpdateStrategy<T, JavaRDD<HoodieRecord<T>>> {
+  private static final Logger LOG = LogManager.getLogger(SparkRejectUpdateStrategy.class);
+
+  public SparkRejectUpdateStrategy(HoodieSparkEngineContext engineContext, HashSet<HoodieFileGroupId> fileGroupsInPendingClustering) {
+    super(engineContext, fileGroupsInPendingClustering);
+  }
+
+  private List<Tuple2<String, String>> getPartitionPathToFileGroupsIdWithUpdates(JavaRDD<HoodieRecord<T>> inputRecords) {
+    return inputRecords
+        .filter(record -> record.getCurrentLocation() != null)
+        .map(record -> new Tuple2<>(record.getPartitionPath(), record.getCurrentLocation().getFileId()))
+        .distinct().collect();
+  }
+
+  @Override
+  public JavaRDD<HoodieRecord<T>> handleRecordUpdate(JavaRDD<HoodieRecord<T>> taggedRecordsRDD) {
+    Set<String> pendingClusteringFileGroupIds =

Review comment:
       What is the reason for collecting only the fileId? HoodieFileGroupId would be more accurate, because two partitions may contain the same fileId (unlikely, but it can happen). HoodieFileGroupId supports equals/hashCode, so it should not be a big change to do this on HoodieFileGroupId.
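
       A minimal sketch of that change, adapted from the getPartitionPathToFileGroupsIdWithUpdates helper above (the method name is illustrative, not the PR author's final code):

           // Collect the full HoodieFileGroupId (partition path + fileId) so that
           // identical fileIds in different partitions cannot collide.
           private Set<HoodieFileGroupId> getFileGroupIdsWithUpdates(JavaRDD<HoodieRecord<T>> inputRecords) {
             return new HashSet<>(inputRecords
                 .filter(record -> record.getCurrentLocation() != null)
                 .map(record -> new HoodieFileGroupId(record.getPartitionPath(),
                     record.getCurrentLocation().getFileId()))
                 .distinct().collect());
           }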

##########
File path: hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
##########
@@ -394,6 +395,15 @@ public boolean isInlineClustering() {
     return Boolean.parseBoolean(props.getProperty(HoodieClusteringConfig.INLINE_CLUSTERING_PROP));
   }
 
+  public boolean isAsyncClustering() {
+    return Boolean.parseBoolean(props.getProperty(HoodieClusteringConfig.ASYNC_CLUSTERING_ENABLE_OPT_KEY));
+  }
+
+  public boolean isClusteringEnable() {

Review comment:
       nit: isClusteringEnabled?
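
       The rename, sketched (the method body is not shown in the truncated hunk, so the OR of the inline and async flags is an assumption):

           public boolean isClusteringEnabled() {
             return isInlineClustering() || isAsyncClustering();
           }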

##########
File path: hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
##########
@@ -394,6 +395,15 @@ public boolean isInlineClustering() {
     return Boolean.parseBoolean(props.getProperty(HoodieClusteringConfig.INLINE_CLUSTERING_PROP));
   }
 
+  public boolean isAsyncClustering() {

Review comment:
       nit: isAsyncClusteringEnabled?
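
       The rename, sketched with the body taken unchanged from the diff above:

           public boolean isAsyncClusteringEnabled() {
             return Boolean.parseBoolean(props.getProperty(HoodieClusteringConfig.ASYNC_CLUSTERING_ENABLE_OPT_KEY));
           }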

##########
File path: hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientOnCopyOnWriteStorage.java
##########
@@ -681,6 +698,75 @@ private void assertActualAndExpectedPartitionPathRecordKeyMatches(Set<Pair<Strin
     }
   }
 
+  private Pair<List<WriteStatus>, List<HoodieRecord>> insertBatchRecords(SparkRDDWriteClient client, String commitTime,
+                                                                         Integer recordNum, int expectedStatusSize) {
+    client.startCommitWithTime(commitTime);
+    List<HoodieRecord> inserts1 = dataGen.generateInserts(commitTime, recordNum);
+    JavaRDD<HoodieRecord> insertRecordsRDD1 = jsc.parallelize(inserts1, 1);
+    List<WriteStatus> statuses = client.upsert(insertRecordsRDD1, commitTime).collect();
+    assertNoWriteErrors(statuses);
+    assertEquals(expectedStatusSize, statuses.size(), "check expected status size.");
+    return Pair.of(statuses, inserts1);
+  }
+
+  @Test
+  public void testUpdateRejectForClustering() throws IOException {
+    final String testPartitionPath = "2016/09/26";
+    dataGen = new HoodieTestDataGenerator(new String[] {testPartitionPath});
+    Properties props = new Properties();
+    props.setProperty("hoodie.clustering.async", "true");
+    HoodieWriteConfig config = getSmallInsertWriteConfig(100,
+        TRIP_EXAMPLE_SCHEMA, dataGen.getEstimatedFileSizeInBytes(150), props);
+    SparkRDDWriteClient client = getHoodieWriteClient(config, false);
+    HoodieSparkCopyOnWriteTable table = (HoodieSparkCopyOnWriteTable) HoodieSparkTable.create(config, context, metaClient);
+
+    // 1. insert to generate 2 file groups
+    String commitTime1 = "001";
+    Pair<List<WriteStatus>, List<HoodieRecord>> upsertResult = insertBatchRecords(client, commitTime1, 600, 2);
+    List<WriteStatus> statuses = upsertResult.getKey();
+    List<HoodieRecord> inserts1 = upsertResult.getValue();
+    List<String> fileGroupIds1 = table.getFileSystemView().getAllFileGroups(testPartitionPath)
+        .map(fileGroup -> fileGroup.getFileGroupId().getFileId()).collect(Collectors.toList());
+    assertEquals(2, fileGroupIds1.size());
+
+    // 2. generate clustering plan for fileGroupIds1 file groups
+    String commitTime2 = "002";
+    List<List<FileSlice>> firstInsertFileSlicesList = table.getFileSystemView().getAllFileGroups(testPartitionPath)
+        .map(fileGroup -> fileGroup.getAllFileSlices().collect(Collectors.toList())).collect(Collectors.toList());
+    List<FileSlice>[] fileSlices = (List<FileSlice>[]) firstInsertFileSlicesList.toArray(new List[firstInsertFileSlicesList.size()]);
+    createRequestedReplaceInstant(this.metaClient, commitTime2, fileSlices);
+
+    // 3. insert one record: no reject exception (it is an insert, not an update); the pending-clustering small files are not merged, so a new file group is created
+    String commitTime3 = "003";
+    statuses = insertBatchRecords(client, commitTime3, 1, 1).getKey();
+    List<String> fileGroupIds2 = table.getFileSystemView().getAllFileGroups(testPartitionPath)
+        .map(fileGroup -> fileGroup.getFileGroupId().getFileId()).collect(Collectors.toList());
+    assertEquals(3, fileGroupIds2.size());
+
+    // 4. update records in the two file groups under clustering; a reject-update exception should be thrown
+    String commitTime4 = "004";
+    client.startCommitWithTime(commitTime4);
+    List<HoodieRecord> insertsAndUpdates3 = new ArrayList<>();
+    insertsAndUpdates3.addAll(dataGen.generateUpdates(commitTime4, inserts1));
+    assertNoWriteErrors(statuses);

Review comment:
       This looks out of place. Move this to line 742?
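
       A sketch of how step 4 could read with the stray assertion moved out, assuming the test asserts the rejection directly (the exception may surface wrapped by the write client, so the exact assertThrows target is an assumption):

           // 4. update records in the two pending-clustering file groups; expect rejection.
           String commitTime4 = "004";
           client.startCommitWithTime(commitTime4);
           List<HoodieRecord> updates = dataGen.generateUpdates(commitTime4, inserts1);
           JavaRDD<HoodieRecord> updatesRDD = jsc.parallelize(updates, 1);
           assertThrows(HoodieClusteringUpdateException.class,
               () -> client.upsert(updatesRDD, commitTime4).collect());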




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

