YuweiXiao commented on a change in pull request #4480:
URL: https://github.com/apache/hudi/pull/4480#discussion_r816736220



##########
File path: 
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/bucket/HoodieSparkConsistentBucketIndex.java
##########
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.index.bucket;
+
+import org.apache.hudi.client.WriteStatus;
+import org.apache.hudi.common.data.HoodieData;
+import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.fs.FSUtils;
+import org.apache.hudi.common.fs.HoodieWrapperFileSystem;
+import org.apache.hudi.common.model.ConsistentHashingNode;
+import org.apache.hudi.common.model.HoodieConsistentHashingMetadata;
+import org.apache.hudi.common.model.HoodieKey;
+import org.apache.hudi.common.model.HoodieRecordLocation;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.util.FileIOUtils;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.exception.HoodieIOException;
+import org.apache.hudi.exception.HoodieIndexException;
+import org.apache.hudi.table.HoodieTable;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Predicate;
+
+/**
+ * Consistent hashing bucket index implementation, with automatically adjusted bucket numbers.
+ * NOTE: bucket resizing is triggered by clustering.
+ */
+public class HoodieSparkConsistentBucketIndex extends HoodieBucketIndex {
+
+  private static final Logger LOG = LogManager.getLogger(HoodieSparkConsistentBucketIndex.class);
+
+  private Map<String, ConsistentBucketIdentifier> partitionToIdentifier;
+
+  public HoodieSparkConsistentBucketIndex(HoodieWriteConfig config) {
+    super(config);
+  }
+
+  @Override
+  public HoodieData<WriteStatus> updateLocation(HoodieData<WriteStatus> writeStatuses, HoodieEngineContext context, HoodieTable hoodieTable) throws HoodieIndexException {
+    return writeStatuses;
+  }
+
+  /**
+   * Do nothing.
+   * A failed write may create hashing metadata for a partition. Even in that case, we still do nothing when rolling
+   * back the failed write, because the hashing metadata created by a write always carries the 00000000000000 timestamp
+   * and can be viewed as the initialization of the partition rather than as a part of the failed write.
+   *
+   * @param instantTime instant of the commit being rolled back
+   * @return always true, as there is nothing to roll back
+   */
+  @Override
+  public boolean rollbackCommit(String instantTime) {
+    return true;
+  }
+
+  /**
+   * Initialize bucket metadata for each partition.
+   *
+   * @param table      hoodie table
+   * @param partitions partitions that need to be initialized
+   */
+  @Override
+  protected void initialize(HoodieTable table, List<String> partitions) {
+    // Size the map for the default 0.75 load factor so it never needs to rehash
+    partitionToIdentifier = new HashMap<>(partitions.size() + partitions.size() / 3);
+
+    // TODO maybe parallel
+    partitions.forEach(p -> {
+      HoodieConsistentHashingMetadata metadata = loadOrCreateMetadata(table, p);
+      ConsistentBucketIdentifier identifier = new ConsistentBucketIdentifier(metadata);
+      partitionToIdentifier.put(p, identifier);
+    });
+  }
+
+  /**
+   * Get the bucket location for the given key and partition.
+   *
+   * @param key           record key
+   * @param partitionPath partition path of the record
+   * @return record location pointing at the bucket's file group
+   */
+  @Override
+  protected HoodieRecordLocation getBucket(HoodieKey key, String partitionPath) {
+    ConsistentHashingNode node = partitionToIdentifier.get(partitionPath).getBucket(key, indexKeyFields);
+    if (node.getFileIdPfx() != null && !node.getFileIdPfx().isEmpty()) {
+      // The dynamic bucket index does not need the instant time of the latest file group.
+      // We add suffix 0 to the file uuid here, following the file naming convention.
+      return new HoodieRecordLocation(null, FSUtils.createNewFileId(node.getFileIdPfx(), 0));
+    }
+
+    LOG.error("Consistent hashing node has no file group, partition: " + partitionPath + ", meta: "
+        + partitionToIdentifier.get(partitionPath).getMetadata().getFilename() + ", record_key: " + key);
+    throw new HoodieIndexException("Failed to getBucket as hashing node has no file group");
+  }
+
+  /**
+   * Load the hashing metadata of the given partition; if it does not exist, create a new one and persist it to storage.
+   *
+   * @param table     hoodie table
+   * @param partition table partition
+   * @return Consistent hashing metadata
+   */
+  public HoodieConsistentHashingMetadata loadOrCreateMetadata(HoodieTable table, String partition) {
+    int retry = 3;
+    // TODO maybe use ConsistencyGuard to do the retry thing
+    // retry to allow concurrent creation of metadata (only one attempt can succeed)
+    while (retry-- > 0) {

Review comment:
       Dug into it and found that `HoodieWrapperFileSystem` already includes a `ConsistencyGuard` to handle the consistency problem. Will remove the retry logic here.
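
       For illustration, here is a minimal sketch of the retry-free shape this could take, assuming the underlying store provides atomic create-without-overwrite semantics (the eventual-consistency side is exactly what `ConsistencyGuard` covers). The class and method names below are hypothetical, not the PR's code:

       ```java
       import org.apache.hadoop.fs.FSDataInputStream;
       import org.apache.hadoop.fs.FSDataOutputStream;
       import org.apache.hadoop.fs.FileSystem;
       import org.apache.hadoop.fs.Path;

       import java.io.IOException;

       public class CreateIfAbsentSketch {

         /**
          * Returns the metadata bytes at the given path, creating the file with
          * initialBytes if it is absent. create(path, overwrite=false) fails when a
          * concurrent writer got there first, in which case we simply read the
          * winner's file instead of looping on retries.
          */
         public static byte[] loadOrCreate(FileSystem fs, Path path, byte[] initialBytes) throws IOException {
           if (!fs.exists(path)) {
             try (FSDataOutputStream out = fs.create(path, false /* overwrite */)) {
               out.write(initialBytes);
               return initialBytes;
             } catch (IOException e) {
               // Another writer created the file concurrently; fall through and read it.
             }
           }
           try (FSDataInputStream in = fs.open(path)) {
             byte[] bytes = new byte[(int) fs.getFileStatus(path).getLen()];
             in.readFully(bytes);
             return bytes;
           }
         }
       }
       ```

       Under concurrent initialization, exactly one writer's `create` succeeds; every other caller falls through to reading the winner's file, which makes the bounded retry loop unnecessary.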


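       On the `getBucket` path in the diff above: the lookup is a standard consistent-hash ring walk. Below is a toy, self-contained sketch of that idea (hypothetical names; Hudi's real `ConsistentBucketIdentifier` uses its own hash function and node metadata). Hash the record key, then take the first node at or after that hash on the ring, wrapping around to the first node when needed:

       ```java
       import java.util.Map;
       import java.util.TreeMap;

       public class RingLookupSketch {

         // Ring position -> file id prefix of the bucket that owns the range ending there.
         private final TreeMap<Integer, String> ring = new TreeMap<>();

         public void addNode(int ringValue, String fileIdPfx) {
           ring.put(ringValue, fileIdPfx);
         }

         /** Maps a record key to its bucket: the first node clockwise from the key's hash. */
         public String getBucket(String recordKey) {
           int hash = recordKey.hashCode() & Integer.MAX_VALUE; // force a non-negative hash
           Map.Entry<Integer, String> node = ring.ceilingEntry(hash);
           // Past the last node: wrap around to the first node on the ring.
           return (node != null ? node : ring.firstEntry()).getValue();
         }

         public static void main(String[] args) {
           RingLookupSketch sketch = new RingLookupSketch();
           sketch.addNode(1_000_000_000, "fileIdPfx-a");
           sketch.addNode(2_000_000_000, "fileIdPfx-b");
           System.out.println(sketch.getBucket("some-record-key")); // resolves to one of the two buckets
         }
       }
       ```

       Because each bucket maps to a stable `fileIdPfx`, splitting or merging buckets during clustering only remaps the affected ring ranges rather than reshuffling every record.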

