zentol commented on code in PR #23553:
URL: https://github.com/apache/flink/pull/23553#discussion_r1380547823


##########
flink-connectors/flink-connector-datagen/src/main/java/org/apache/flink/connector/datagen/functions/FromElementsGeneratorFunction.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.datagen.functions;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.ExecutionConfig;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.connector.source.SourceReaderContext;
+import org.apache.flink.api.java.typeutils.OutputTypeConfigurable;
+import org.apache.flink.connector.datagen.source.GeneratorFunction;
+import org.apache.flink.core.memory.DataInputView;
+import org.apache.flink.core.memory.DataInputViewStreamWrapper;
+import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Objects;
+
+/**
+ * A stream generator function that returns a sequence of elements.
+ *
+ * <p>This generator function serializes the elements using Flink's type information. That way, any
+ * object transport using Java serialization will not be affected by the serializability of the
+ * elements.
+ *
+ * <p><b>NOTE:</b> This source has a parallelism of 1.
+ *
+ * @param <OUT> The type of elements returned by this function.
+ */
+@Internal
+public class FromElementsGeneratorFunction<OUT>
+        implements GeneratorFunction<Long, OUT>, OutputTypeConfigurable<OUT> {
+
+    private static final long serialVersionUID = 1L;
+
+    private static final Logger LOG = LoggerFactory.getLogger(FromElementsGeneratorFunction.class);
+
+    /** The (de)serializer to be used for the data elements. */
+    private @Nullable TypeSerializer<OUT> serializer;
+
+    /** The actual data elements, in serialized form. */
+    private byte[] elementsSerialized;
+
+    /** The number of elements emitted already. */
+    private int numElementsEmitted;
+
+    private final transient Iterable<OUT> elements;
+    private transient DataInputView input;
+
+    public FromElementsGeneratorFunction(TypeSerializer<OUT> serializer, OUT... elements)
+            throws IOException {
+        this(serializer, Arrays.asList(elements));
+    }
+
+    public FromElementsGeneratorFunction(TypeSerializer<OUT> serializer, Iterable<OUT> elements)
+            throws IOException {
+        this.serializer = Preconditions.checkNotNull(serializer);
+        this.elements = elements;
+        serializeElements();
+    }
+
+    @SafeVarargs
+    public FromElementsGeneratorFunction(OUT... elements) {
+        this(Arrays.asList(elements));
+    }
+
+    public FromElementsGeneratorFunction(Iterable<OUT> elements) {
+        this.serializer = null;
+        this.elements = elements;
+        checkIterable(elements, Object.class);
+    }
+
+    @VisibleForTesting
+    @Nullable
+    public TypeSerializer<OUT> getSerializer() {
+        return serializer;
+    }
+
+    private void serializeElements() throws IOException {
+        Preconditions.checkState(serializer != null, "serializer not set");
+        LOG.info("Serializing elements using {}", serializer);
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        DataOutputViewStreamWrapper wrapper = new DataOutputViewStreamWrapper(baos);
+
+        try {
+            for (OUT element : elements) {
+                serializer.serialize(element, wrapper);
+            }
+        } catch (Exception e) {
+            throw new IOException("Serializing the source elements failed: " + 
e.getMessage(), e);
+        }
+        this.elementsSerialized = baos.toByteArray();
+    }
+
+    @Override
+    public void open(SourceReaderContext readerContext) throws Exception {
+        ByteArrayInputStream bais = new ByteArrayInputStream(elementsSerialized);
+        this.input = new DataInputViewStreamWrapper(bais);
+    }
+
+    @Override
+    public OUT map(Long nextIndex) throws Exception {
+        // Move iterator to the required position in case of failure recovery
+        while (numElementsEmitted < nextIndex) {
+            numElementsEmitted++;
+            tryDeserialize(serializer, input);
+        }
+        numElementsEmitted++;
+        return tryDeserialize(serializer, input);
+    }
+
+    private OUT tryDeserialize(TypeSerializer<OUT> serializer, DataInputView input)
+            throws IOException {
+        try {
+            return serializer.deserialize(input);
+        } catch (Exception e) {
+            throw new IOException(
+                    "Failed to deserialize an element from the source. "
+                            + "If you are using user-defined serialization 
(Value and Writable types), check the "
+                            + "serialization functions.\nSerializer is "
+                            + serializer,
+                    e);
+        }
+    }
+
+    @Override
+    public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
+        Preconditions.checkState(
+                elements != null,
+                "The output type should've been specified before shipping the 
graph to the cluster");
+        checkIterable(elements, outTypeInfo.getTypeClass());
+        TypeSerializer<OUT> newSerializer = outTypeInfo.createSerializer(executionConfig);
+        if (Objects.equals(serializer, newSerializer)) {
+            return;
+        }
+        serializer = newSerializer;
+        try {
+            serializeElements();
+        } catch (IOException ex) {
+            throw new UncheckedIOException(ex);
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    //  Utilities
+    // ------------------------------------------------------------------------
+
+    /**
+     * Verifies that all elements in the collection are non-null, and are of the given class, or a
+     * subclass thereof.
+     *
+     * @param elements The collection to check.
+     * @param viewedAs The class to which the elements must be assignable.
+     * @param <OUT> The generic type of the collection to be checked.
+     */
+    public static <OUT> void checkCollection(Collection<OUT> elements, Class<OUT> viewedAs) {

Review Comment:
   Maybe it's actually even better to keep it because one should simply expect `returns` to work properly, even if the underlying implementation is pretty bad due to how the DataStream class works as a whole.
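
For context, a minimal usage sketch of the new class (the `DataGeneratorSource` wiring is an assumption based on the PR's direction; the class is `@Internal`, so end users would normally go through `env.fromElements`). The serializer-less constructors defer serialization until `setOutputType(...)` runs, which is what makes the `returns()` expectation above meaningful:

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.connector.datagen.functions.FromElementsGeneratorFunction;
import org.apache.flink.connector.datagen.source.DataGeneratorSource;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class FromElementsSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Explicit-serializer constructor from the diff above; the serializer-less
        // variants instead rely on setOutputType(...) (e.g. triggered by a
        // downstream returns(...) hint) to serialize the elements lazily.
        FromElementsGeneratorFunction<Integer> generator =
                new FromElementsGeneratorFunction<>(
                        Types.INT.createSerializer(env.getConfig()), 1, 2, 3);

        // Assumed wiring: the generator function is handed to a DataGeneratorSource
        // emitting exactly as many records as there are elements.
        DataGeneratorSource<Integer> source = new DataGeneratorSource<>(generator, 3, Types.INT);

        DataStream<Integer> stream =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), "from-elements");
        stream.print();
        env.execute("from-elements-sketch");
    }
}
```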



##########
flink-streaming-java/src/test/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGeneratorTest.java:
##########
@@ -1188,14 +1191,14 @@ void testChainingOfOperatorsWithDifferentMaxParallelism(
         configuration.set(
                 PipelineOptions.OPERATOR_CHAINING_CHAIN_OPERATORS_WITH_DIFFERENT_MAX_PARALLELISM,
                 chainingOfOperatorsWithDifferentMaxParallelismEnabled);
-        configuration.set(PipelineOptions.MAX_PARALLELISM, 10);
+        configuration.set(PipelineOptions.MAX_PARALLELISM, 1);
         try (StreamExecutionEnvironment chainEnv =
                StreamExecutionEnvironment.createLocalEnvironment(1, configuration)) {
             chainEnv.fromElements(1)
                     .map(x -> x)
                     // should automatically break chain here
                     .map(x -> x)
-                    .setMaxParallelism(1)
+                    .setMaxParallelism(10)

Review Comment:
   Can we revert this now?
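
For reference, a self-contained sketch of the reverted shape the comment asks for, reconstructed from the removed diff lines (the `print()`/`execute()` tail is illustrative; the actual test presumably asserts on the generated job graph instead):

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainBreakSketch {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // The test parameterizes this flag; false means operators with different
        // max parallelism must not be chained together.
        configuration.set(
                PipelineOptions.OPERATOR_CHAINING_CHAIN_OPERATORS_WITH_DIFFERENT_MAX_PARALLELISM,
                false);
        // Pipeline-wide default max parallelism, as in the pre-change test.
        configuration.set(PipelineOptions.MAX_PARALLELISM, 10);

        try (StreamExecutionEnvironment chainEnv =
                StreamExecutionEnvironment.createLocalEnvironment(1, configuration)) {
            chainEnv.fromElements(1)
                    .map(x -> x)
                    // The second map overrides its max parallelism, so with the
                    // flag above set to false the chain should break here.
                    .map(x -> x)
                    .setMaxParallelism(1)
                    .print();
            chainEnv.execute("chain-break-sketch");
        }
    }
}
```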



##########
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/processor/MultipleInputNodeCreationProcessorTest.java:
##########
@@ -123,7 +124,8 @@ private void createChainableStream(TableTestUtil util) {
     }
 
     private void createNonChainableStream(TableTestUtil util) {
-        DataStreamSource<Integer> dataStream = util.getStreamEnv().fromElements(1, 2, 3);
+        DataStreamSource<Integer> dataStream =
+                util.getStreamEnv().fromCollection(Arrays.asList(1, 2, 3));

Review Comment:
   Yeah by default I think we still allow chains between operators with different maxParallelism. Or did we change this in master already? 🤔
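
A quick way to settle the question about the default (`ConfigOption#defaultValue()` is the standard accessor; the option is the one toggled in the `StreamingJobGraphGeneratorTest` hunk above):

```java
import org.apache.flink.configuration.PipelineOptions;

public class ChainingDefaultCheck {
    public static void main(String[] args) {
        // Prints the default of the option governing chaining across operators
        // with different max parallelism.
        System.out.println(
                PipelineOptions.OPERATOR_CHAINING_CHAIN_OPERATORS_WITH_DIFFERENT_MAX_PARALLELISM
                        .defaultValue());
    }
}
```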



##########
flink-architecture-tests/flink-architecture-tests-production/src/test/resources/archunit.properties:
##########
@@ -26,6 +26,6 @@ freeze.store.default.allowStoreUpdate=true
 # NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new
 #       violation, please try to avoid creating the violation. If the violation was created due to a
 #       shortcoming of the rule, file a JIRA issue so the rule can be improved.
-#freeze.refreeze=true
+freeze.refreeze=false

Review Comment:
   will need to revert this


