masteryhx commented on a change in pull request #18224: URL: https://github.com/apache/flink/pull/18224#discussion_r807107658
########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationITCase.java ########## @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.util.function.FutureTaskWithException; + +import org.junit.Before; +import org.junit.Test; + +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This verifies that checkpointing works correctly for Changelog state backend with materialized + * state / non-materialized state. + */ +public class ChangelogPeriodicMaterializationITCase + extends ChangelogPeriodicMaterializationTestBase { + + private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); + + public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { + super(delegatedStateBackend); + } + + @Before + public void setup() throws Exception { + super.setup(); + triggerDelegatedSnapshot.set(false); + } + + /** Recovery from checkpoint only containing non-materialized state. */ + @Test + public void testNonMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMinutes(3))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile(() -> completedCheckpointNum.get() <= 0); + throwArtificialFailure(); + } + } + })); + } + + /** Recovery from checkpoint containing non-materialized state and materialized state. 
*/ + @Test + public void testMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + PENDING_MATERIALIZATION.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 4) { + waitWhile( + () -> + completedCheckpointNum.get() <= 0 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + PENDING_MATERIALIZATION.clear(); + throwArtificialFailure(); Review comment: Yes, you are right. I plan to add a condition to check the checkpoint status (like `ResumeCheckpointManuallyITCase#findExternalizedCheckpoint`), and the wrappered state backend will still be used because I cannot find other way to make materialization fail(for `testFailedMaterialization`). WDYT? ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationITCase.java ########## @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.util.function.FutureTaskWithException; + +import org.junit.Before; +import org.junit.Test; + +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This verifies that checkpointing works correctly for Changelog state backend with materialized + * state / non-materialized state. 
+ */ +public class ChangelogPeriodicMaterializationITCase + extends ChangelogPeriodicMaterializationTestBase { + + private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); + + public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { + super(delegatedStateBackend); + } + + @Before + public void setup() throws Exception { + super.setup(); + triggerDelegatedSnapshot.set(false); + } + + /** Recovery from checkpoint only containing non-materialized state. */ + @Test + public void testNonMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMinutes(3))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile(() -> completedCheckpointNum.get() <= 0); + throwArtificialFailure(); + } + } + })); + } + + /** Recovery from checkpoint containing non-materialized state and materialized state. */ + @Test + public void testMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + PENDING_MATERIALIZATION.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 4) { + waitWhile( + () -> + completedCheckpointNum.get() <= 0 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + PENDING_MATERIALIZATION.clear(); + throwArtificialFailure(); + } else if (getRuntimeContext().getAttemptNumber() == 1 + && currentIndex == TOTAL_ELEMENTS / 2) { Review comment: I just want to fail it twice: consume some elements -> fail -> recovery -> continue to consume some elements -> fail -> recovery ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationITCase.java ########## @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.util.function.FutureTaskWithException; + +import org.junit.Before; +import org.junit.Test; + +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This verifies that checkpointing works correctly for Changelog state backend with materialized + * state / non-materialized state. + */ +public class ChangelogPeriodicMaterializationITCase + extends ChangelogPeriodicMaterializationTestBase { + + private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); + + public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { + super(delegatedStateBackend); + } + + @Before + public void setup() throws Exception { + super.setup(); + triggerDelegatedSnapshot.set(false); + } + + /** Recovery from checkpoint only containing non-materialized state. */ + @Test + public void testNonMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMinutes(3))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile(() -> completedCheckpointNum.get() <= 0); + throwArtificialFailure(); + } + } + })); + } + + /** Recovery from checkpoint containing non-materialized state and materialized state. 
*/ + @Test + public void testMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + PENDING_MATERIALIZATION.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 4) { + waitWhile( + () -> + completedCheckpointNum.get() <= 0 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + PENDING_MATERIALIZATION.clear(); + throwArtificialFailure(); + } else if (getRuntimeContext().getAttemptNumber() == 1 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile( + () -> + completedCheckpointNum.get() <= 1 Review comment: Yes, you are right. I will modify the logic to make sure there is more checkpoints after the 1st recovery. ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationITCase.java ########## @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.util.function.FutureTaskWithException; + +import org.junit.Before; +import org.junit.Test; + +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This verifies that checkpointing works correctly for Changelog state backend with materialized + * state / non-materialized state. 
+ */ +public class ChangelogPeriodicMaterializationITCase + extends ChangelogPeriodicMaterializationTestBase { + + private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); + + public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { + super(delegatedStateBackend); + } + + @Before + public void setup() throws Exception { + super.setup(); + triggerDelegatedSnapshot.set(false); + } + + /** Recovery from checkpoint only containing non-materialized state. */ + @Test + public void testNonMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMinutes(3))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile(() -> completedCheckpointNum.get() <= 0); + throwArtificialFailure(); + } + } + })); + } + + /** Recovery from checkpoint containing non-materialized state and materialized state. */ + @Test + public void testMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + PENDING_MATERIALIZATION.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 4) { + waitWhile( + () -> + completedCheckpointNum.get() <= 0 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + PENDING_MATERIALIZATION.clear(); + throwArtificialFailure(); + } else if (getRuntimeContext().getAttemptNumber() == 1 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile( + () -> + completedCheckpointNum.get() <= 1 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + throwArtificialFailure(); + } + } + })); + } + + @Test + public void testFailedMaterialization() throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, + snapshotResultFuture -> { + 
RunnableFuture<SnapshotResult<KeyedStateHandle>> + snapshotResultFutureWrapper = + new FutureTaskWithException<>( + () -> { + SnapshotResult<KeyedStateHandle> + snapshotResult = + snapshotResultFuture + .get(); + if (PENDING_MATERIALIZATION.size() + == 0) { + throw new RuntimeException(); + } else { + return snapshotResult; + } + }); + PENDING_MATERIALIZATION.add(snapshotResultFutureWrapper); + return snapshotResultFutureWrapper; + })) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(20)) + .set(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED, 1)); + waitAndAssert( + env, + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + waitWhile( + () -> + currentIndex >= TOTAL_ELEMENTS / 2 + && PENDING_MATERIALIZATION.size() == 0); Review comment: Sure. After half of elements have been consumed, if the materialization is still not done (it will throw a exception and cause the materation process fail), just wait. ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationRescaleITCase.java ########## @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.runtime.jobgraph.JobGraph; +import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.streaming.api.environment.CheckpointConfig; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.streaming.api.graph.StreamGraph; +import org.apache.flink.util.ExceptionUtils; + +import org.junit.Test; + +import java.io.File; +import java.nio.file.Path; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; + +import static org.apache.flink.test.util.TestUtils.findExternalizedCheckpoint; +import static org.junit.Assert.assertTrue; + +/** + * This verifies that rescale works correctly for Changelog state backend with materialized state / + * non-materialized state. 
+ */ +public class ChangelogPeriodicMaterializationRescaleITCase + extends ChangelogPeriodicMaterializationTestBase { + + public ChangelogPeriodicMaterializationRescaleITCase( + AbstractStateBackend delegatedStateBackend) { + super(delegatedStateBackend); + } + + @Test + public void testRescaleOut() throws Exception { + testRescale(NUM_SLOTS / 2, NUM_SLOTS); + } + + @Test + public void testRescaleIn() throws Exception { + testRescale(NUM_SLOTS, NUM_SLOTS / 2); + } + + private void testRescale(int firstParallelism, int secondParallelism) throws Exception { + File checkpointFile = TEMPORARY_FOLDER.newFolder(); + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(checkpointFile.toURI()); + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + PENDING_MATERIALIZATION.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.noRestart()); + env.getCheckpointConfig() + .setExternalizedCheckpointCleanup( + CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + env.setParallelism(firstParallelism); + + StreamGraph firstStreamGraph = + buildStreamGraph( + env, + new ControlledSource() { + @Override + protected void beforeElement() throws Exception { + if (currentIndex == TOTAL_ELEMENTS / 2) { + waitWhile( + () -> + completedCheckpointNum.get() <= 0 + || PENDING_MATERIALIZATION.stream() + .noneMatch(Future::isDone)); + } else if (currentIndex > TOTAL_ELEMENTS / 2) { + throwArtificialFailure(); + } + } + }); + + JobGraph firstJobGraph = firstStreamGraph.getJobGraph(); + try { + cluster.getMiniCluster().submitJob(firstJobGraph).get(); + cluster.getMiniCluster().requestJobResult(firstJobGraph.getJobID()).get(); Review comment: The intention is just to wait till it throws a `ArtificialFailure`. You mean we should check the `JobResult`? ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationTestBase.java ########## @@ -0,0 +1,485 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.JobExecutionResult; +import org.apache.flink.api.common.JobID; +import org.apache.flink.api.common.JobStatus; +import org.apache.flink.api.common.functions.RichMapFunction; +import org.apache.flink.api.common.state.CheckpointListener; +import org.apache.flink.api.common.state.ListState; +import org.apache.flink.api.common.state.ListStateDescriptor; +import org.apache.flink.api.common.state.State; +import org.apache.flink.api.common.state.StateDescriptor; +import org.apache.flink.api.common.state.ValueState; +import org.apache.flink.api.common.state.ValueStateDescriptor; +import org.apache.flink.api.common.typeutils.TypeSerializer; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.changelog.fs.FsStateChangelogStorageFactory; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend; +import org.apache.flink.core.fs.CloseableRegistry; +import org.apache.flink.metrics.MetricGroup; +import org.apache.flink.runtime.checkpoint.CheckpointOptions; +import org.apache.flink.runtime.execution.Environment; +import org.apache.flink.runtime.query.TaskKvStateRegistry; +import org.apache.flink.runtime.state.AbstractKeyedStateBackend; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.CheckpointStreamFactory; +import org.apache.flink.runtime.state.FunctionInitializationContext; +import org.apache.flink.runtime.state.FunctionSnapshotContext; +import org.apache.flink.runtime.state.KeyGroupRange; +import org.apache.flink.runtime.state.KeyGroupedInternalPriorityQueue; +import org.apache.flink.runtime.state.Keyed; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.OperatorStateBackend; +import org.apache.flink.runtime.state.OperatorStateHandle; +import org.apache.flink.runtime.state.PriorityComparable; +import org.apache.flink.runtime.state.SavepointResources; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.runtime.state.StateSnapshotTransformer; +import org.apache.flink.runtime.state.hashmap.HashMapStateBackend; +import org.apache.flink.runtime.state.heap.HeapPriorityQueueElement; +import org.apache.flink.runtime.state.ttl.TtlTimeProvider; +import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration; +import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.streaming.api.functions.sink.SinkFunction; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.apache.flink.streaming.api.graph.StreamGraph; +import org.apache.flink.test.util.MiniClusterWithClientResource; +import org.apache.flink.util.TestLogger; +import org.apache.flink.util.function.FunctionWithException; + +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import javax.annotation.Nonnull; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import 
java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BooleanSupplier; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.flink.shaded.guava30.com.google.common.collect.Iterables.get; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +/** Base class for tests related to period materialization of ChangelogStateBackend. */ +@RunWith(Parameterized.class) +public abstract class ChangelogPeriodicMaterializationTestBase extends TestLogger { + + private static final int NUM_TASK_MANAGERS = 1; + private static final int NUM_TASK_SLOTS = 4; + protected static final int NUM_SLOTS = NUM_TASK_MANAGERS * NUM_TASK_SLOTS; + protected static final int TOTAL_ELEMENTS = 10_000; + + protected final AbstractStateBackend delegatedStateBackend; + + protected MiniClusterWithClientResource cluster; + + // Maintain the RunnableFuture of SnapshotResult to make sure the materialization phase has + // been done + protected static final Queue<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + PENDING_MATERIALIZATION = new ConcurrentLinkedQueue<>(); + + @Rule public Timeout globalTimeout = Timeout.seconds(60); + + @ClassRule public static final TemporaryFolder TEMPORARY_FOLDER = new TemporaryFolder(); + + @Parameterized.Parameters(name = "delegated state backend type ={0}") + public static Collection<AbstractStateBackend> parameter() { + return Arrays.asList( + new HashMapStateBackend(), + new EmbeddedRocksDBStateBackend(true), + new EmbeddedRocksDBStateBackend()); + } + + public ChangelogPeriodicMaterializationTestBase(AbstractStateBackend delegatedStateBackend) { + this.delegatedStateBackend = delegatedStateBackend; + } + + @Before + public void setup() throws Exception { + cluster = + new MiniClusterWithClientResource( + new MiniClusterResourceConfiguration.Builder() + .setConfiguration(configureDSTL()) + .setNumberTaskManagers(NUM_TASK_MANAGERS) + .setNumberSlotsPerTaskManager(NUM_TASK_SLOTS) + .build()); + cluster.before(); + cluster.getMiniCluster().overrideRestoreModeForChangelogStateBackend(); + + // init source data randomly + ControlledSource.initSourceData(TOTAL_ELEMENTS); + } + + @After + public void tearDown() throws IOException { + cluster.after(); + // clear result in sink + CollectionSink.clearExpectedResult(); + PENDING_MATERIALIZATION.clear(); + } + + protected StreamGraph buildStreamGraph( + StreamExecutionEnvironment env, ControlledSource controlledSource) { + env.addSource(controlledSource) + .keyBy(element -> element) + .map(new CountMapper()) Review comment: You mean the state size will be lager and lager ? Maybe we could modify the logic in `CountMapper`, for example, just to count `element % 100` ? ########## File path: flink-tests/src/test/java/org/apache/flink/test/checkpointing/ChangelogPeriodicMaterializationITCase.java ########## @@ -0,0 +1,734 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.test.checkpointing; + +import org.apache.flink.api.common.JobExecutionResult; +import org.apache.flink.api.common.JobID; +import org.apache.flink.api.common.JobStatus; +import org.apache.flink.api.common.functions.RichFlatMapFunction; +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.api.common.state.CheckpointListener; +import org.apache.flink.api.common.state.ListState; +import org.apache.flink.api.common.state.ListStateDescriptor; +import org.apache.flink.api.common.state.State; +import org.apache.flink.api.common.state.StateDescriptor; +import org.apache.flink.api.common.state.ValueState; +import org.apache.flink.api.common.state.ValueStateDescriptor; +import org.apache.flink.api.common.typeutils.TypeSerializer; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.changelog.fs.FsStateChangelogStorageFactory; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.StateChangelogOptions; +import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend; +import org.apache.flink.core.fs.CloseableRegistry; +import org.apache.flink.metrics.MetricGroup; +import org.apache.flink.runtime.checkpoint.CheckpointOptions; +import org.apache.flink.runtime.execution.Environment; +import org.apache.flink.runtime.jobgraph.JobGraph; +import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings; +import org.apache.flink.runtime.query.TaskKvStateRegistry; +import org.apache.flink.runtime.state.AbstractKeyedStateBackend; +import org.apache.flink.runtime.state.AbstractStateBackend; +import org.apache.flink.runtime.state.CheckpointStreamFactory; +import org.apache.flink.runtime.state.FunctionInitializationContext; +import org.apache.flink.runtime.state.FunctionSnapshotContext; +import org.apache.flink.runtime.state.KeyGroupRange; +import org.apache.flink.runtime.state.KeyGroupedInternalPriorityQueue; +import org.apache.flink.runtime.state.Keyed; +import org.apache.flink.runtime.state.KeyedStateHandle; +import org.apache.flink.runtime.state.OperatorStateBackend; +import org.apache.flink.runtime.state.OperatorStateHandle; +import org.apache.flink.runtime.state.PriorityComparable; +import org.apache.flink.runtime.state.SavepointResources; +import org.apache.flink.runtime.state.SnapshotResult; +import org.apache.flink.runtime.state.StateSnapshotTransformer; +import org.apache.flink.runtime.state.hashmap.HashMapStateBackend; +import org.apache.flink.runtime.state.heap.HeapPriorityQueueElement; +import org.apache.flink.runtime.state.ttl.TtlTimeProvider; +import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration; +import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; +import org.apache.flink.streaming.api.environment.CheckpointConfig; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.streaming.api.functions.sink.SinkFunction; +import org.apache.flink.streaming.api.functions.source.RichSourceFunction; +import org.apache.flink.streaming.api.graph.StreamGraph; +import 
org.apache.flink.test.util.MiniClusterWithClientResource; +import org.apache.flink.util.Collector; +import org.apache.flink.util.TestLogger; +import org.apache.flink.util.function.FunctionWithException; + +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import javax.annotation.Nonnull; + +import java.io.File; +import java.io.IOException; +import java.io.Serializable; +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BooleanSupplier; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.flink.shaded.guava30.com.google.common.collect.Iterables.get; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * This verifies that checkpointing works correctly for Changelog state backend with materialized + * state / non-materialized state. 
+ */ +@RunWith(Parameterized.class) +public class ChangelogPeriodicMaterializationITCase extends TestLogger { + + private static final int TOTAL_ELEMENTS = 20; + private static final int NUM_TASK_MANAGERS = 1; + private static final int NUM_TASK_SLOTS = 4; + private static final int NUM_SLOTS = NUM_TASK_MANAGERS * NUM_TASK_SLOTS; + + private final AbstractStateBackend delegatedStateBackend; + + private StreamExecutionEnvironment env; + + private URI checkpointURI; + + private MiniClusterWithClientResource cluster; + + @ClassRule public static final TemporaryFolder TEMPORARY_FOLDER = new TemporaryFolder(); + + @Parameterized.Parameters(name = "delegated state backend type ={0}") + public static Collection<AbstractStateBackend> parameter() { + return Arrays.asList( + new HashMapStateBackend(), + new EmbeddedRocksDBStateBackend(), + new EmbeddedRocksDBStateBackend(true)); + } + + public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { + this.delegatedStateBackend = delegatedStateBackend; + } + + @Before + public void setup() throws Exception { + cluster = + new MiniClusterWithClientResource( + new MiniClusterResourceConfiguration.Builder() + .setConfiguration(configureDSTL()) + .setNumberTaskManagers(NUM_TASK_MANAGERS) + .setNumberSlotsPerTaskManager(NUM_TASK_SLOTS) + .build()); + cluster.before(); + env = StreamExecutionEnvironment.getExecutionEnvironment(); + checkpointURI = TEMPORARY_FOLDER.newFolder().toURI(); + env.enableCheckpointing(10) + .enableChangelogStateBackend(true) + .getCheckpointConfig() + .setCheckpointStorage(checkpointURI); + // init source data randomly + PreHandlingSource.initSourceData(TOTAL_ELEMENTS); + triggerDelegatedSnapshot = new AtomicBoolean(false); + } + + @After + public void teardown() throws IOException { + cluster.after(); + // clear result in sink + CollectionSink.clearExpectedResult(); + } + + /** Recovery from checkpoint only containing non-materialized state. */ + @Test + public void testNonMaterialization() { + env.setStateBackend(new DelegatedStateBackendWrapper(delegatedStateBackend, t -> t)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMinutes(3))); + waitAndAssert( + buildStreamGraph( + new PreHandlingSource() { + @Override + protected void preHandle() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitUntilFalse(() -> completedCheckpointNum.get() <= 0); + throwArtificialFailure(); + } + } + })); + } + + /** Recovery from checkpoint containing non-materialized state and materialized state. 
*/ + @Test + public void testMaterialization() { + // Maintain the RunnableFuture of SnapshotResult to make sure the materialization phase has + // been done + Queue<RunnableFuture<SnapshotResult<KeyedStateHandle>>> pendingMaterialization = + new ConcurrentLinkedQueue<>(); + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + pendingMaterialization.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + waitAndAssert( + buildStreamGraph( + new PreHandlingSource() { + @Override + protected void preHandle() throws Exception { + if (getRuntimeContext().getAttemptNumber() == 0 + && currentIndex == TOTAL_ELEMENTS / 4) { + waitUntilFalse( + () -> + completedCheckpointNum.get() <= 0 + && pendingMaterialization.stream() + .noneMatch(Future::isDone)); + Thread.sleep(10); + pendingMaterialization.clear(); + throwArtificialFailure(); + } else if (getRuntimeContext().getAttemptNumber() == 1 + && currentIndex == TOTAL_ELEMENTS / 2) { + waitUntilFalse( + () -> + completedCheckpointNum.get() <= 1 + && pendingMaterialization.stream() + .noneMatch(Future::isDone)); + Thread.sleep(10); + throwArtificialFailure(); + } + } + })); + } + + private static AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); + + @Test + public void testFailedMaterialization() { + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, + snapshotResultFuture -> + triggerDelegatedSnapshot.compareAndSet(false, true) + ? 
new RunnableFutureWithException<>( + snapshotResultFuture) + : snapshotResultFuture)) + .setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0)); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(20)) + .set(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED, 1)); + waitAndAssert( + buildStreamGraph( + new PreHandlingSource() { + @Override + protected void preHandle() throws Exception { + waitUntilFalse( + () -> + currentIndex >= 15 + && !triggerDelegatedSnapshot.get()); + } + })); + } + + @Test + public void testRescaleOut() throws IOException { + testRescale(NUM_SLOTS / 2, NUM_SLOTS); + } + + @Test + public void testRescaleIn() throws IOException { + testRescale(NUM_SLOTS, NUM_SLOTS / 2); + } + + private void testRescale(int firstParallelism, int secondParallelism) throws IOException { + Queue<RunnableFuture<SnapshotResult<KeyedStateHandle>>> pendingMaterialization = + new ConcurrentLinkedQueue<>(); + SerializableFunctionWithException<RunnableFuture<SnapshotResult<KeyedStateHandle>>> + snapshotResultConsumer = + snapshotResultFuture -> { + pendingMaterialization.add(snapshotResultFuture); + return snapshotResultFuture; + }; + env.setStateBackend( + new DelegatedStateBackendWrapper( + delegatedStateBackend, snapshotResultConsumer)) + .setRestartStrategy(RestartStrategies.noRestart()); + env.getCheckpointConfig() + .setExternalizedCheckpointCleanup( + CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); + env.configure( + new Configuration() + .set( + StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL, + Duration.ofMillis(10))); + env.setParallelism(firstParallelism); + + StreamGraph firstStreamGraph = + buildStreamGraph( + new PreHandlingSource() { + @Override + protected void preHandle() throws Exception { + if (currentIndex == TOTAL_ELEMENTS / 2) { + waitUntilFalse( + () -> + completedCheckpointNum.get() <= 0 + && pendingMaterialization.stream() + .noneMatch(Future::isDone)); + } else if (currentIndex > TOTAL_ELEMENTS / 2) { + throwArtificialFailure(); + } + } + }); + + JobGraph firstJobGraph = firstStreamGraph.getJobGraph(); + try { + cluster.getMiniCluster().submitJob(firstJobGraph).get(); + cluster.getMiniCluster().requestJobResult(firstJobGraph.getJobID()).get(); + } catch (Exception ex) { + assertThat(ex.getCause().getCause().getMessage(), containsString("Artificial Failure")); + } + + env.setParallelism(secondParallelism); + StreamGraph streamGraph = buildStreamGraph(new PreHandlingSource()); + Optional<Path> checkpoint = + findExternalizedCheckpoint(checkpointURI.getPath(), firstJobGraph.getJobID()); + assertTrue(checkpoint.isPresent()); + streamGraph.setSavepointRestoreSettings( + SavepointRestoreSettings.forPath(checkpoint.get().toString())); + waitAndAssert(streamGraph); + } + + private static Optional<Path> findExternalizedCheckpoint(String checkpointDir, JobID jobId) + throws IOException { + try (Stream<Path> checkpoints = + Files.list(new File(checkpointDir).toPath().resolve(jobId.toString()))) { + return checkpoints + .filter(path -> path.getFileName().toString().startsWith("chk-")) + .filter( + path -> { + try (Stream<Path> checkpointFiles = Files.list(path)) { + return checkpointFiles.anyMatch( + child -> + child.getFileName() + .toString() + .contains("meta")); + } catch (IOException ignored) { + return false; + } + }) + .findAny(); + } + } + + private Configuration configureDSTL() throws IOException { + Configuration configuration = new Configuration(); + 
FsStateChangelogStorageFactory.configure(configuration, TEMPORARY_FOLDER.newFolder()); + return configuration; + } + + private StreamGraph buildStreamGraph(PreHandlingSource preHandlingSource) { + env.addSource(preHandlingSource) + .keyBy(element -> element) + .flatMap(new CountMapper()) + .addSink(new CollectionSink()) + .setParallelism(1); + return env.getStreamGraph(); + } + + private void waitUntilJobFinished(StreamGraph streamGraph) { + try { + JobExecutionResult jobExecutionResult = env.execute(streamGraph); + JobStatus jobStatus = + cluster.getClusterClient().getJobStatus(jobExecutionResult.getJobID()).get(); + assertSame(jobStatus, JobStatus.FINISHED); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + private void waitAndAssert(StreamGraph streamGraph) { + waitUntilJobFinished(streamGraph); + assertEquals(CollectionSink.getExpectedResult(), PreHandlingSource.getActualResult()); + } + + /** A source consumes data which is generated randomly and supports pre-handling record. */ + private static class PreHandlingSource extends RichSourceFunction<Integer> + implements CheckpointedFunction, CheckpointListener { + + protected volatile int currentIndex; + + protected final AtomicInteger completedCheckpointNum; + + protected volatile boolean isCanceled; + + private static List<Integer> sourceList; + + private transient ListState<Integer> lastSentStored; + + public PreHandlingSource() { + this.completedCheckpointNum = new AtomicInteger(); + } + + public static void initSourceData(int totalNum) { + sourceList = new ArrayList<>(totalNum); + for (int i = 0; i < totalNum; i++) { + sourceList.add(ThreadLocalRandom.current().nextInt(totalNum)); + } + } + + public static Map<Integer, Integer> getActualResult() { + return sourceList.stream() + .collect( + Collectors.toConcurrentMap( + Function.identity(), element -> 1, Integer::sum)); + } + + @Override + public void snapshotState(FunctionSnapshotContext context) throws Exception { + lastSentStored.update(Arrays.asList(currentIndex, completedCheckpointNum.get())); + } + + @Override + public void initializeState(FunctionInitializationContext context) throws Exception { + lastSentStored = + context.getOperatorStateStore() + .getListState(new ListStateDescriptor<>("counter", Integer.class)); + if (context.isRestored()) { + Iterable<Integer> indexAndCheckpointNum = lastSentStored.get(); + currentIndex = get(indexAndCheckpointNum, 0); + completedCheckpointNum.compareAndSet(0, get(indexAndCheckpointNum, 1)); + } + } + + @Override + public void run(SourceContext<Integer> ctx) throws Exception { + while (!isCanceled && currentIndex < sourceList.size()) { + preHandle(); + synchronized (ctx.getCheckpointLock()) { + if (!isCanceled && currentIndex < sourceList.size()) { + int currentElement = sourceList.get(currentIndex++); + ctx.collect(currentElement); + } + } + } + } + + protected void preHandle() throws Exception { + // do nothing by default + } + + protected void waitUntilFalse(SerializableBooleanSupplier supplier) + throws InterruptedException { + while (supplier.getAsBoolean()) { + Thread.sleep(10); + } + } + + protected void throwArtificialFailure() throws Exception { + throw new Exception("Artificial Failure"); + } + + @Override + public void cancel() { + isCanceled = true; + } + + @Override + public void notifyCheckpointComplete(long checkpointId) throws Exception { + completedCheckpointNum.getAndIncrement(); + } + } + + /** A sink puts the result of WordCount into a Map using for validating. 
*/ + private static class CollectionSink implements SinkFunction<Tuple2<Integer, Integer>> { + + private static final Map<Integer, Integer> expectedResult = new ConcurrentHashMap<>(); + + @Override + public synchronized void invoke(Tuple2<Integer, Integer> value) throws Exception { + expectedResult.put( + value.f0, Math.max(value.f1, expectedResult.getOrDefault(value.f0, 0))); Review comment: Why, in step 4, would the mapper restore to `(7 -> 0)` rather than `(7 -> 1)`, which is already in the newest checkpoint? I think the result will be eventually consistent, not strictly consistent, right? Or maybe I missed something?
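To make the eventual-consistency argument in the reply above concrete, here is a minimal stand-alone sketch; the element sequence, checkpoint position and class name are invented for illustration and are not part of the code under review:

import java.util.HashMap;
import java.util.Map;

/**
 * Stand-alone illustration (not part of the PR): five elements are processed, the job
 * "fails" after a checkpoint, keyed state is rolled back to that checkpoint and some
 * elements are replayed. Because the sink keeps the maximum count seen per key (as
 * CollectionSink does with Math.max), the final expected map still equals the true
 * per-key counts once the whole input has been consumed.
 */
public class MaxBasedSinkReplaySketch {

    public static void main(String[] args) {
        int[] elements = {7, 7, 3, 7, 3};                  // invented input: true counts are 7 -> 3, 3 -> 2
        Map<Integer, Integer> counts = new HashMap<>();    // keyed state of the counting mapper
        Map<Integer, Integer> expected = new HashMap<>();  // sink-side map, like CollectionSink.expectedResult

        // Attempt 0: process indices 0..2, then fail. A checkpoint was taken after index 1,
        // so the checkpointed state is {7=2} and the source will restart from index 2.
        process(elements, 0, 3, counts, expected);

        // Recovery: keyed state is restored from the checkpoint; index 2 is replayed.
        counts.clear();
        counts.put(7, 2);
        process(elements, 2, elements.length, counts, expected);

        System.out.println(expected); // {3=2, 7=3} despite the replayed element
    }

    private static void process(
            int[] elements,
            int from,
            int to,
            Map<Integer, Integer> counts,
            Map<Integer, Integer> expected) {
        for (int i = from; i < to; i++) {
            int key = elements[i];
            int count = counts.merge(key, 1, Integer::sum); // per-key count, as the mapper keeps in state
            expected.merge(key, count, Math::max);          // same max-keeping update as CollectionSink.invoke
        }
    }
}

So the sink map may temporarily lag behind or run ahead of the restored mapper state, but the max-based update means it converges to the exact counts by the time the job finishes, which is what the assertion checks.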

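Likewise, for the earlier reply suggesting that `CountMapper` could count `element % 100` to keep the keyed state bounded, a possible shape of such a mapper is sketched below; the class name, state name and modulus are assumptions, and the actual change may look different:

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;

/**
 * Hypothetical variant of CountMapper: the stream is keyed by element % 100, so at most
 * 100 distinct keys (and thus 100 ValueState entries) exist no matter how many distinct
 * elements the source emits. Intended usage:
 * source.keyBy(e -> e % 100).map(new BoundedKeyCountMapper()).
 */
public class BoundedKeyCountMapper extends RichMapFunction<Integer, Tuple2<Integer, Integer>> {

    private transient ValueState<Integer> countState;

    @Override
    public void open(Configuration parameters) {
        countState =
                getRuntimeContext()
                        .getState(new ValueStateDescriptor<>("count", Integer.class));
    }

    @Override
    public Tuple2<Integer, Integer> map(Integer element) throws Exception {
        int key = element % 100;                                          // bounded key space
        int updated = (countState.value() == null ? 0 : countState.value()) + 1;
        countState.update(updated);                                       // per-key count survives restores
        return Tuple2.of(key, updated);
    }
}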