pvary commented on code in PR #13277:
URL: https://github.com/apache/iceberg/pull/13277#discussion_r2137629920
##########
flink/v2.0/flink/src/main/java/org/apache/iceberg/flink/sink/dynamic/HashKeyGenerator.java:
##########
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.dynamic;
+
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.util.Collections;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
+import org.apache.flink.table.data.RowData;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.sink.EqualityFieldKeySelector;
+import org.apache.iceberg.flink.sink.PartitionKeySelector;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The HashKeyGenerator is responsible for creating the appropriate hash key for Flink's keyBy
+ * operation. The hash key is generated depending on the user-provided DynamicRecord and the
+ * table metadata. Under the hood, we maintain a set of Flink {@link KeySelector}s which
+ * implement the appropriate Iceberg {@link DistributionMode}. For every table, we randomly
+ * select a consistent subset of writer subtasks which receive data via their associated keys,
+ * depending on the chosen DistributionMode.
+ *
+ * <p>Caching ensures that a new key selector is also created when the table metadata (e.g.
+ * schema, spec) or the user-provided metadata changes (e.g. distribution mode, write
+ * parallelism).
+ *
+ * <p>Note: The hashing must be deterministic given the same parameters of the KeySelector and
+ * the same provided values.
+ */
+class HashKeyGenerator {
+  private static final Logger LOG = LoggerFactory.getLogger(HashKeyGenerator.class);
+
+  private final int maxWriteParallelism;
+  private final Cache<SelectorKey, KeySelector<RowData, Integer>> keySelectorCache;
+
+  HashKeyGenerator(int maxCacheSize, int maxWriteParallelism) {
+    this.maxWriteParallelism = maxWriteParallelism;
+    this.keySelectorCache = Caffeine.newBuilder().maximumSize(maxCacheSize).build();
+  }
+
+  int generateKey(DynamicRecord dynamicRecord) throws Exception {
+    return generateKey(dynamicRecord, null, null, null);
+  }
+
+  int generateKey(
+      DynamicRecord dynamicRecord,
+      @Nullable Schema tableSchema,
+      @Nullable PartitionSpec tableSpec,
+      @Nullable RowData overrideRowData)
+      throws Exception {
+    String tableIdent = dynamicRecord.tableIdentifier().toString();
+    SelectorKey cacheKey =
+        new SelectorKey(
+            tableIdent,
+            dynamicRecord.branch(),
+            tableSchema != null ? tableSchema.schemaId() : null,
+            tableSpec != null ? tableSpec.specId() : null,
+            dynamicRecord.schema(),
+            dynamicRecord.spec(),
+            dynamicRecord.equalityFields());
+    return keySelectorCache
+        .get(
+            cacheKey,
+            k ->
+                getKeySelector(
+                    tableIdent,
+                    MoreObjects.firstNonNull(tableSchema, dynamicRecord.schema()),
+                    MoreObjects.firstNonNull(tableSpec, dynamicRecord.spec()),
+                    MoreObjects.firstNonNull(
+                        dynamicRecord.distributionMode(), DistributionMode.NONE),
+                    MoreObjects.firstNonNull(
+                        dynamicRecord.equalityFields(), Collections.emptySet()),
+                    dynamicRecord.writeParallelism()))
+        .getKey(overrideRowData != null ? overrideRowData : dynamicRecord.rowData());
+  }
+
+  private KeySelector<RowData, Integer> getKeySelector(
+      String tableName,
+      Schema schema,
+      PartitionSpec spec,
+      DistributionMode mode,
+      Set<String> equalityFields,
+      int writeParallelism) {
+    LOG.debug(
+        "Creating new KeySelector for table '{}' with distribution mode '{}'", tableName, mode);
+    switch (mode) {
+      case NONE:
+        if (equalityFields.isEmpty()) {
+          return tableKeySelector(tableName, writeParallelism, maxWriteParallelism);
+        } else {
+          LOG.info(
+              "{}: Distribute rows by equality fields, because there are equality fields set",
+              tableName);
+          return equalityFieldKeySelector(
+              tableName, schema, equalityFields, writeParallelism, maxWriteParallelism);
+        }
+
+      case HASH:
+        if (equalityFields.isEmpty()) {
+          if (spec.isUnpartitioned()) {
+            LOG.warn(
+                "{}: Fallback to use 'none' distribution mode, because there are no equality fields set "
+                    + "and table is unpartitioned",
+                tableName);
+            return tableKeySelector(tableName, writeParallelism, maxWriteParallelism);
+          } else {
+            return partitionKeySelector(
+                tableName, schema, spec, writeParallelism, maxWriteParallelism);
+          }
+        } else {
+          if (spec.isUnpartitioned()) {
+            LOG.info(
+                "{}: Distribute rows by equality fields, because there are equality fields set "
+                    + "and table is unpartitioned",
+                tableName);
+            return equalityFieldKeySelector(
+                tableName, schema, equalityFields, writeParallelism, maxWriteParallelism);
+          } else {
+            for (PartitionField partitionField : spec.fields()) {
+              Preconditions.checkState(
+                  equalityFields.contains(partitionField.name()),
+                  "%s: In 'hash' distribution mode with equality fields set, partition field '%s' "
                      + "should be included in equality fields: '%s'",
+                  tableName,
+                  partitionField,
+                  schema.columns().stream()
+                      .filter(c -> equalityFields.contains(c.name()))
+                      .collect(Collectors.toList()));
+            }
+            return partitionKeySelector(
+                tableName, schema, spec, writeParallelism, maxWriteParallelism);
+          }
+        }
+
+      case RANGE:
+        if (schema.identifierFieldIds().isEmpty()) {
+          LOG.warn(
+              "{}: Fallback to use 'none' distribution mode, because there are no equality fields set "
+                  + "and {}=range is not supported yet in Flink",
+              tableName,
+              WRITE_DISTRIBUTION_MODE);
+          return tableKeySelector(tableName, writeParallelism, maxWriteParallelism);
+        } else {
+          LOG.info(
+              "{}: Distribute rows by equality fields, because there are equality fields set "
+                  + "and {}=range is not supported yet in Flink",
+              tableName,
+              WRITE_DISTRIBUTION_MODE);
+          return equalityFieldKeySelector(
+              tableName, schema, equalityFields, writeParallelism, maxWriteParallelism);
+        }
+
+      default:
+        throw new IllegalArgumentException(
+            tableName + ": Unrecognized " + WRITE_DISTRIBUTION_MODE + ": " + mode);
+    }
+  }
+
+  private static KeySelector<RowData, Integer> equalityFieldKeySelector(
+      String tableName,
+      Schema schema,
+      Set<String> equalityFields,
+      int writeParallelism,
+      int maxWriteParallelism) {
+    return new TargetLimitedKeySelector(
+        new EqualityFieldKeySelector(
+            schema,
+            FlinkSchemaUtil.convert(schema),
+            DynamicSinkUtil.getEqualityFieldIds(equalityFields, schema)),
+        tableName.hashCode(),
+        writeParallelism,
+        maxWriteParallelism);
+  }
+
+  private static KeySelector<RowData, Integer> partitionKeySelector(
+      String tableName,
+      Schema schema,
+      PartitionSpec spec,
+      int writeParallelism,
+      int maxWriteParallelism) {
+    KeySelector<RowData, String> inner =
+        new PartitionKeySelector(spec, schema, FlinkSchemaUtil.convert(schema));
+    return new TargetLimitedKeySelector(
+        in -> inner.getKey(in).hashCode(),
+        tableName.hashCode(),
+        writeParallelism,
+        maxWriteParallelism);
+  }
+
+  private static KeySelector<RowData, Integer> tableKeySelector(
+      String tableName, int writeParallelism, int maxWriteParallelism) {
+    return new TargetLimitedKeySelector(
+        new RoundRobinKeySelector<>(writeParallelism),
+        tableName.hashCode(),
+        writeParallelism,
+        maxWriteParallelism);
+  }
+
+  /**
+   * Generates a new key using the salt as a base, and reduces the target key range of the
+   * {@link #wrapped} {@link KeySelector} to {@link #writeParallelism}.
+   */
+  private static class TargetLimitedKeySelector implements KeySelector<RowData, Integer> {
+    private final KeySelector<RowData, Integer> wrapped;
+    private final int writeParallelism;
+    private final int[] distinctKeys;
+
+    @SuppressWarnings("checkstyle:ParameterAssignment")
+    TargetLimitedKeySelector(
+        KeySelector<RowData, Integer> wrapped,
+        int salt,
+        int writeParallelism,
+        int maxWriteParallelism) {
+      if (writeParallelism > maxWriteParallelism) {
+        LOG.warn(

Review Comment:
   I know it is strange, but we might want to push more info to the `TargetLimitedKeySelector` so that the warning message is more meaningful.
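   For illustration only, a minimal sketch of what that could look like, assuming we thread the table name through as an extra constructor argument. The `tableName` parameter and the key-spreading loop below are assumptions for the sketch, not code from this PR:

   ```java
   // Hypothetical variant: TargetLimitedKeySelector carries the table name so the
   // warning can say which table requested more parallelism than allowed.
   private static class TargetLimitedKeySelector implements KeySelector<RowData, Integer> {
     private final KeySelector<RowData, Integer> wrapped;
     private final int writeParallelism;
     private final int[] distinctKeys;

     @SuppressWarnings("checkstyle:ParameterAssignment")
     TargetLimitedKeySelector(
         KeySelector<RowData, Integer> wrapped,
         String tableName, // assumed extra argument, for diagnostics only
         int salt,
         int writeParallelism,
         int maxWriteParallelism) {
       if (writeParallelism > maxWriteParallelism) {
         // The table name makes the warning actionable instead of anonymous.
         LOG.warn(
             "{}: Write parallelism {} is greater than the max write parallelism {}; "
                 + "capping the number of writer subtasks for this table at {}",
             tableName,
             writeParallelism,
             maxWriteParallelism,
             maxWriteParallelism);
         writeParallelism = maxWriteParallelism;
       }

       this.wrapped = wrapped;
       this.writeParallelism = writeParallelism;
       // Illustrative key spreading only; the actual salting logic in the PR may differ.
       // Consecutive salted values modulo maxWriteParallelism yield writeParallelism
       // distinct target keys, since writeParallelism <= maxWriteParallelism here.
       this.distinctKeys = new int[writeParallelism];
       for (int i = 0; i < writeParallelism; i++) {
         this.distinctKeys[i] = Math.floorMod(salt + i, maxWriteParallelism);
       }
     }

     @Override
     public Integer getKey(RowData value) throws Exception {
       return distinctKeys[Math.floorMod(wrapped.getKey(value), writeParallelism)];
     }
   }
   ```

   The three factory methods (`equalityFieldKeySelector`, `partitionKeySelector`, `tableKeySelector`) would then pass `tableName` through in addition to `tableName.hashCode()` as the salt.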