StefanRRichter commented on a change in pull request #6875: [FLINK-9808] [state backends] Migrate state when necessary in state backends
URL: https://github.com/apache/flink/pull/6875#discussion_r226688632
##########
File path: flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBKeyedStateBackend.java
##########
@@ -1389,6 +1388,149 @@ private void copyStateDataHandleData(
 		return Tuple2.of(stateInfo.f0, newMetaInfo);
 	}
 
+	private <N, S extends State, SV> RegisteredKeyValueStateBackendMetaInfo<N, SV> migrateStateIfNecessary(
+		StateDescriptor<S, SV> stateDesc,
+		TypeSerializer<N> namespaceSerializer,
+		Tuple2<ColumnFamilyHandle, RegisteredStateMetaInfoBase> stateInfo,
+		@Nullable StateSnapshotTransformer<SV> snapshotTransformer) throws Exception {
+
+		StateMetaInfoSnapshot restoredMetaInfoSnapshot = restoredKvStateMetaInfos.get(stateDesc.getName());
+
+		Preconditions.checkState(
+			restoredMetaInfoSnapshot != null,
+			"Requested to check compatibility of a restored RegisteredKeyedBackendStateMetaInfo," +
+				" but its corresponding restored snapshot cannot be found.");
+
+		Preconditions.checkState(restoredMetaInfoSnapshot.getBackendStateType()
+				== StateMetaInfoSnapshot.BackendStateType.KEY_VALUE,
+			"Incompatible state types. " +
+				"Was [" + restoredMetaInfoSnapshot.getBackendStateType() + "], " +
+				"registered as [" + StateMetaInfoSnapshot.BackendStateType.KEY_VALUE + "].");
+
+		Preconditions.checkState(
+			Objects.equals(stateDesc.getName(), restoredMetaInfoSnapshot.getName()),
+			"Incompatible state names. " +
+				"Was [" + restoredMetaInfoSnapshot.getName() + "], " +
+				"registered with [" + stateDesc.getName() + "].");
+
+		final StateDescriptor.Type restoredType =
+			StateDescriptor.Type.valueOf(
+				restoredMetaInfoSnapshot.getOption(
+					StateMetaInfoSnapshot.CommonOptionsKeys.KEYED_STATE_TYPE));
+
+		if (!Objects.equals(stateDesc.getType(), StateDescriptor.Type.UNKNOWN)
+			&& !Objects.equals(restoredType, StateDescriptor.Type.UNKNOWN)) {
+
+			Preconditions.checkState(
+				stateDesc.getType() == restoredType,
+				"Incompatible key/value state types. " +
+					"Was [" + restoredType + "], " +
+					"registered with [" + stateDesc.getType() + "].");
+		}
+
+		TypeSerializer<SV> stateSerializer = stateDesc.getSerializer();
+
+		RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
+			stateDesc.getType(),
+			stateDesc.getName(),
+			namespaceSerializer,
+			stateSerializer,
+			snapshotTransformer);
+
+		CompatibilityResult<N> namespaceCompatibility = CompatibilityUtil.resolveCompatibilityResult(
+			restoredMetaInfoSnapshot.getTypeSerializer(StateMetaInfoSnapshot.CommonSerializerKeys.NAMESPACE_SERIALIZER.toString()),
+			null,
+			restoredMetaInfoSnapshot.getTypeSerializerConfigSnapshot(StateMetaInfoSnapshot.CommonSerializerKeys.NAMESPACE_SERIALIZER.toString()),
+			namespaceSerializer);
+
+		CompatibilityResult<SV> stateCompatibility = CompatibilityUtil.resolveCompatibilityResult(
+			restoredMetaInfoSnapshot.getTypeSerializer(StateMetaInfoSnapshot.CommonSerializerKeys.VALUE_SERIALIZER.toString()),
+			null,
+			restoredMetaInfoSnapshot.getTypeSerializerConfigSnapshot(StateMetaInfoSnapshot.CommonSerializerKeys.VALUE_SERIALIZER.toString()),
+			stateSerializer);
+
+		if (namespaceCompatibility.isRequiresMigration()) {
+			throw new UnsupportedOperationException("The new namespace serializer requires state migration in order for the job to proceed."
+				+ " However, migration for state namespace currently isn't supported.");
+		}
+
+		if (stateCompatibility.isRequiresMigration()) {
+			migrateStateValue(stateDesc, stateInfo, restoredMetaInfoSnapshot, newMetaInfo);
+		} else {
+			newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>(
+				newMetaInfo.getStateType(),
+				newMetaInfo.getName(),
+				newMetaInfo.getNamespaceSerializer(),
+				stateSerializer,
+				snapshotTransformer);
+		}
+
+		stateInfo.f1 = newMetaInfo;
+		return newMetaInfo;
+	}
+
+	/**
+	 * Migrate only the state value, that is the "value" that is stored in RocksDB. We don't migrate
+	 * the key here, which is made up of key group, key, namespace and map key
+	 * (in case of MapState).
+	 */
+	private <N, S extends State, SV> void migrateStateValue(
+		StateDescriptor<S, SV> stateDesc,
+		Tuple2<ColumnFamilyHandle, RegisteredStateMetaInfoBase> stateInfo,
+		StateMetaInfoSnapshot restoredMetaInfoSnapshot,
+		RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo) throws Exception {
+
+		if (stateDesc.getType().equals(StateDescriptor.Type.MAP)) {
+			throw new StateMigrationException("The new serializer for a MapState requires state migration in order for the job to proceed." +
+				" However, migration for MapState currently isn't supported.");
+		}
+
+		LOG.info(
+			"Performing state migration for state {} because the state serializer changed in an incompatible way.",
+			stateDesc);
+
+		// we need to get an actual state instance because migration is different
+		// for different state types. For example, ListState needs to deal with
+		// individual elements
+		StateFactory stateFactory = STATE_FACTORIES.get(stateDesc.getClass());
+		if (stateFactory == null) {
+			String message = String.format("State %s is not supported by %s",
+				stateDesc.getClass(), this.getClass());
+			throw new FlinkRuntimeException(message);
+		}
+		State state = stateFactory.createState(
+			stateDesc,
+			Tuple2.of(stateInfo.f0, newMetaInfo),
+			RocksDBKeyedStateBackend.this);
+		if (!(state instanceof AbstractRocksDBState)) {
+			throw new FlinkRuntimeException(
+				"State should be an AbstractRocksDBState but is " + state);
+		}
+
+		@SuppressWarnings("unchecked")
+		AbstractRocksDBState<?, ?, SV, S> rocksDBState = (AbstractRocksDBState<?, ?, SV, S>) state;
+
+		Snapshot rocksDBSnapshot = db.getSnapshot();
+		try (RocksIteratorWrapper iterator = getRocksIterator(db, stateInfo.f0)) {
+
+			iterator.seekToFirst();
+			while (iterator.isValid()) {
+				byte[] serializedValue = iterator.value();
+				byte[] migratedSerializedValue = rocksDBState.migrateSerializedValue(
+					serializedValue,
+					(TypeSerializer<SV>) restoredMetaInfoSnapshot.getTypeSerializerConfigSnapshot(StateMetaInfoSnapshot.CommonSerializerKeys.VALUE_SERIALIZER).restoreSerializer(),
+					stateDesc.getSerializer());
+				db.put(stateInfo.f0, iterator.key(), migratedSerializedValue);
Review comment:
You should insert the values using a `RocksDBWriteBatchWrapper`. This will
result in bulk writes and better performance.
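
For illustration, a minimal sketch of that suggestion for the loop above, not part of this diff. It assumes the backend's existing `writeOptions` field and hoists the restored serializer into a local variable `restoredSerializer`; both names are illustrative:

```java
// illustrative: the serializer restored from the meta-info snapshot, as used above
TypeSerializer<SV> restoredSerializer = (TypeSerializer<SV>)
	restoredMetaInfoSnapshot.getTypeSerializerConfigSnapshot(
		StateMetaInfoSnapshot.CommonSerializerKeys.VALUE_SERIALIZER).restoreSerializer();

try (RocksIteratorWrapper iterator = getRocksIterator(db, stateInfo.f0);
		RocksDBWriteBatchWrapper batchWriter = new RocksDBWriteBatchWrapper(db, writeOptions)) {

	iterator.seekToFirst();
	while (iterator.isValid()) {
		byte[] migratedSerializedValue = rocksDBState.migrateSerializedValue(
			iterator.value(),
			restoredSerializer,
			stateDesc.getSerializer());
		// puts are buffered by the wrapper and applied to RocksDB in batches
		batchWriter.put(stateInfo.f0, iterator.key(), migratedSerializedValue);
		iterator.next();
	}
	// write out whatever is still buffered before the wrapper is closed
	batchWriter.flush();
}
```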