tsreaper commented on a change in pull request #23:
URL: https://github.com/apache/flink-table-store/pull/23#discussion_r811715223
##########
File path: flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/FileStoreITCase.java
##########
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.connector;
+
+import org.apache.flink.api.common.RuntimeExecutionMode;
+import org.apache.flink.api.common.eventtime.WatermarkStrategy;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.datastream.DataStreamSource;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.util.FiniteTestSource;
+import org.apache.flink.table.data.GenericRowData;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.StringData;
+import org.apache.flink.table.data.conversion.DataStructureConverter;
+import org.apache.flink.table.data.conversion.DataStructureConverters;
+import org.apache.flink.table.runtime.typeutils.InternalSerializers;
+import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
+import org.apache.flink.table.store.connector.sink.StoreSink;
+import org.apache.flink.table.store.connector.sink.global.GlobalCommittingSinkTranslator;
+import org.apache.flink.table.store.connector.source.FileStoreSource;
+import org.apache.flink.table.store.file.FileStore;
+import org.apache.flink.table.store.file.FileStoreImpl;
+import org.apache.flink.table.store.file.mergetree.compact.DeduplicateAccumulator;
+import org.apache.flink.table.types.logical.IntType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.table.types.logical.VarCharType;
+import org.apache.flink.table.types.utils.TypeConversions;
+import org.apache.flink.test.util.AbstractTestBase;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.CloseableIterator;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.flink.table.store.file.FileStoreOptions.BUCKET;
+import static org.apache.flink.table.store.file.FileStoreOptions.FILE_FORMAT;
+import static org.apache.flink.table.store.file.FileStoreOptions.FILE_PATH;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/** ITCase for {@link FileStoreSource} and {@link StoreSink}. */
+@RunWith(Parameterized.class)
+public class FileStoreITCase extends AbstractTestBase {
+
+    private static final RowType PARTITION_TYPE =
+            new RowType(Collections.singletonList(new RowType.RowField("p", new VarCharType())));
+
+    private static final RowType KEY_TYPE =
+            new RowType(Collections.singletonList(new RowType.RowField("k", new IntType())));
+
+    private static final RowType VALUE_TYPE =
+            new RowType(
+                    Arrays.asList(
+                            new RowType.RowField("v", new IntType()),
+                            new RowType.RowField("p", new VarCharType()),
+                            // rename key
+                            new RowType.RowField("_k", new IntType())));
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private static final DataStructureConverter<RowData, Row> CONVERTER =
+            (DataStructureConverter)
+                    DataStructureConverters.getConverter(
+                            TypeConversions.fromLogicalToDataType(VALUE_TYPE));
+
+    private static final int NUM_BUCKET = 3;
+
+    private static final List<RowData> SOURCE_DATA =
+            Arrays.asList(
+                    wrap(GenericRowData.of(0, StringData.fromString("p1"), 1)),
+                    wrap(GenericRowData.of(0, StringData.fromString("p1"), 2)),
+                    wrap(GenericRowData.of(5, StringData.fromString("p1"), 1)),
+                    wrap(GenericRowData.of(6, StringData.fromString("p2"), 1)),
+                    wrap(GenericRowData.of(3, StringData.fromString("p2"), 5)),
+                    wrap(GenericRowData.of(5, StringData.fromString("p2"), 1)));
+
+    private Configuration options;
+
+    private final boolean isBounded;
+
+    public FileStoreITCase(boolean isBounded) {
+        this.isBounded = isBounded;
+    }
+
+    @Parameterized.Parameters(name = "isBounded-{0}")
+    public static List<Boolean> getVarSeg() {
+        return Arrays.asList(true, false);
+    }
+
+    private static SerializableRowData wrap(RowData row) {
+        return new SerializableRowData(row, InternalSerializers.create(VALUE_TYPE));
+    }
+
+    @Before
+    public void before() throws IOException {
+        options = new Configuration();
+        options.set(BUCKET, NUM_BUCKET);
+        options.set(FILE_PATH, TEMPORARY_FOLDER.newFolder().toURI().toString());
+        options.set(FILE_FORMAT, "avro");
+    }
+
+    @Test
+    public void testPartitioned() throws Exception {
+        innerTest(true);
+    }
+
+    @Test
+    public void testNonPartitioned() throws Exception {
+        innerTest(false);
+    }
+
+    private void innerTest(boolean partitioned) throws Exception {
+        int[] partitions = partitioned ? new int[] {1} : new int[0];
+        int[] keys = new int[] {2};
+        InternalTypeInfo<RowData> typeInfo = InternalTypeInfo.of(VALUE_TYPE);
+
+        StreamExecutionEnvironment env = isBounded ? buildBatchEnv() : buildStreamEnv();
+        env.setParallelism(1); // no key by, must single parallelism

Review comment:
   If the parallelism is only 1, what's the point of setting the number of buckets to 3? This test also fails to cover writing into multiple buckets and multiple partitions at the same time.
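   A rough sketch of the kind of shuffle that would make parallelism > 1 safe here (the bucket computation below is a hypothetical stand-in for however `StoreSink` actually assigns buckets; `source` is the `DataStreamSource<RowData>` built in `innerTest`):

   ```java
   import org.apache.flink.api.common.functions.Partitioner;
   import org.apache.flink.api.java.functions.KeySelector;
   import org.apache.flink.streaming.api.datastream.DataStream;
   import org.apache.flink.table.data.RowData;

   // Route records to sink subtasks by bucket, so that with env.setParallelism(2)
   // several writers cover the 3 buckets instead of one subtask doing all the work.
   DataStream<RowData> byBucket =
           source.partitionCustom(
                   (Partitioner<Integer>) (bucket, numPartitions) -> bucket % numPartitions,
                   (KeySelector<RowData, Integer>) row -> Math.abs(row.getInt(2)) % NUM_BUCKET);
   // ... then attach the sink to byBucket as before.
   ```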
##########
File path: flink-table-store-core/src/main/java/org/apache/flink/table/store/file/FileStoreOptions.java
##########
@@ -20,33 +20,111 @@
 
 import org.apache.flink.configuration.ConfigOption;
 import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
 import org.apache.flink.configuration.MemorySize;
-import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.table.store.file.mergetree.MergeTreeOptions;
+
+import java.io.Serializable;
+import java.time.Duration;
+
+import static org.apache.flink.configuration.ConfigOptions.key;
 
 /** Options for {@link FileStore}. */
-public class FileStoreOptions {
+public class FileStoreOptions implements Serializable {
 
     public static final ConfigOption<Integer> BUCKET =
             ConfigOptions.key("bucket")
                     .intType()
                     .defaultValue(1)
                     .withDescription("Bucket number for file store.");
 
+    public static final ConfigOption<String> FILE_PATH =
+            ConfigOptions.key("file.path")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("The file path of the table store in the filesystem.");
+
+    public static final ConfigOption<String> FILE_FORMAT =
+            ConfigOptions.key("file.format")
+                    .stringType()
+                    .defaultValue("orc")
+                    .withDescription("Specify the message format of data files.");
+
+    public static final ConfigOption<String> MANIFEST_FORMAT =
+            ConfigOptions.key("manifest.format")
+                    .stringType()
+                    .defaultValue("avro")
+                    .withDescription("Specify the message format of manifest files.");
+
     public static final ConfigOption<MemorySize> MANIFEST_TARGET_FILE_SIZE =
             ConfigOptions.key("manifest.target-file-size")
                     .memoryType()
                     .defaultValue(MemorySize.ofMebiBytes(8))
                     .withDescription("Suggested file size of a manifest file.");
 
-    public final int bucket;
-    public final MemorySize manifestSuggestedSize;
+    public static final ConfigOption<String> PARTITION_DEFAULT_NAME =
+            key("partition.default-name")
+                    .stringType()
+                    .defaultValue("__DEFAULT_PARTITION__")
+                    .withDescription(
+                            "The default partition name in case the dynamic partition"
+                                    + " column value is null/empty string.");
+
+    public static final ConfigOption<Integer> SNAPSHOT_NUM_RETAINED =
+            ConfigOptions.key("snapshot.num-retained")
+                    .intType()
+                    .defaultValue(Integer.MAX_VALUE)
+                    .withDescription("The maximum number of completed snapshots to retain.");
+
+    public static final ConfigOption<Duration> SNAPSHOT_TIME_RETAINED =
+            ConfigOptions.key("snapshot.time-retained")
+                    .durationType()
+                    .defaultValue(Duration.ofDays(1))
+                    .withDescription("The maximum time of completed snapshots to retain.");
+
+    private final Configuration options;
+
+    public FileStoreOptions(Configuration options) {
+        this.options = options;
+        // TODO validate all keys
+    }
+
+    public int bucket() {
+        return options.get(BUCKET);
+    }
+
+    public Path path() {
+        return new Path(options.get(FILE_PATH));
+    }
+
+    public FileFormat fileFormat() {
+        return FileFormat.fromTableOptions(
+                Thread.currentThread().getContextClassLoader(), options, FILE_FORMAT);
+    }
+
+    public FileFormat manifestFormat() {
+        return FileFormat.fromTableOptions(
+                Thread.currentThread().getContextClassLoader(), options, MANIFEST_FORMAT);
+    }

Review comment:
   You're creating a new `FileFormat` instance on each call. Why not create them once in advance?
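   A minimal sketch of what caching could look like (assuming `FileFormat` itself is not serializable, hence the `transient` field with lazy re-creation rather than eager construction in the constructor):

   ```java
   // Cache the FileFormat instead of rebuilding it on every call. Transient so
   // that FileStoreOptions stays serializable; re-created lazily after deserialization.
   private transient FileFormat fileFormat;

   public FileFormat fileFormat() {
       if (fileFormat == null) {
           fileFormat =
                   FileFormat.fromTableOptions(
                           Thread.currentThread().getContextClassLoader(), options, FILE_FORMAT);
       }
       return fileFormat;
   }
   ```

   The same applies to `manifestFormat()`.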
##########
File path: flink-table-store-connector/src/test/resources/log4j2-test.properties
##########
@@ -0,0 +1,28 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Set root logger level to OFF to not flood build logs
+# set manually to INFO for debugging purposes
+rootLogger.level = OFF

Review comment:
   Also change the `log4j2-test.properties` in `flink-table-store-core` to this one.
##########
File path: flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/FileStoreITCase.java
##########
@@ -0,0 +1,197 @@
+    @Test
+    public void testNonPartitioned() throws Exception {
+        innerTest(false);
+    }
+
+    private void innerTest(boolean partitioned) throws Exception {

Review comment:
   What about failure tests? We need checkpoints and induced failures to make sure the two-phase commit actually works and guarantees exactly-once semantics. Also, what about overwrite tests?
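   A hedged sketch of such a failure test (`FailingMapper` is a hypothetical helper, not existing code; it assumes the job runs on a MiniCluster, so the static flag survives the restart within the same JVM):

   ```java
   import org.apache.flink.api.common.functions.MapFunction;
   import org.apache.flink.api.common.restartstrategy.RestartStrategies;
   import org.apache.flink.table.data.RowData;

   // Throws exactly once; after the triggered restart, records flow through normally.
   // With checkpointing enabled, the committed result must still equal the expected
   // rows exactly once, or the 2pc sink is broken.
   public class FailingMapper implements MapFunction<RowData, RowData> {

       // Static + volatile so the flag survives the task restart in the same MiniCluster JVM.
       private static volatile boolean failed = false;

       @Override
       public RowData map(RowData value) throws Exception {
           if (!failed) {
               failed = true;
               throw new RuntimeException("Artificial failure for exactly-once test");
           }
           return value;
       }
   }

   // In the test body (sketch):
   //   env.enableCheckpointing(100);
   //   env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
   //   source.map(new FailingMapper()) -> sink, then assert the committed snapshot
   //   contains exactly the expected de-duplicated rows.
   ```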
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]