ibzib commented on a change in pull request #15267:
URL: https://github.com/apache/beam/pull/15267#discussion_r683039093
##########
File path:
runners/spark/src/main/java/org/apache/beam/runners/spark/structuredstreaming/translation/batch/GroupByKeyTranslatorBatch.java
##########
@@ -17,74 +17,143 @@
*/
package org.apache.beam.runners.spark.structuredstreaming.translation.batch;
-import java.io.Serializable;
-import org.apache.beam.runners.core.InMemoryStateInternals;
-import org.apache.beam.runners.core.StateInternals;
-import org.apache.beam.runners.core.StateInternalsFactory;
-import org.apache.beam.runners.core.SystemReduceFn;
-import org.apache.beam.runners.spark.structuredstreaming.translation.AbstractTranslationContext;
+import java.util.ArrayList;
+import java.util.List;
import org.apache.beam.runners.spark.structuredstreaming.translation.TransformTranslator;
-import org.apache.beam.runners.spark.structuredstreaming.translation.batch.functions.GroupAlsoByWindowViaOutputBufferFn;
+import org.apache.beam.runners.spark.structuredstreaming.translation.AbstractTranslationContext;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.EncoderHelpers;
import org.apache.beam.runners.spark.structuredstreaming.translation.helpers.KVHelpers;
+import org.apache.beam.sdk.coders.CannotProvideCoderException;
import org.apache.beam.sdk.coders.Coder;
-import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.ListCoder;
+import org.apache.beam.sdk.transforms.Combine;
+import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.WindowingStrategy;
+import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.KeyValueGroupedDataset;
+import scala.Tuple2;
class GroupByKeyTranslatorBatch<K, V>
- implements TransformTranslator<
- PTransform<PCollection<KV<K, V>>, PCollection<KV<K, Iterable<V>>>>> {
+ implements TransformTranslator<
+ PTransform<PCollection<KV<K, V>>, PCollection<KV<K, Iterable<V>>>>> {
+  /**
+   * Combiner that combines {@code T}s into a single {@code List<T>} containing all inputs.
+   *
+   * <p>For internal use to translate {@link GroupByKey}. For a large {@link PCollection} this is
+   * expected to crash!
+   *
+   * <p>This is copied from the dataflow runner code.
Review comment:
It looks like there are already several implementations of this
function. We should define it in a common place so we can reuse it. Possibly in
the runner libraries, though it looks like Python even defines the equivalent
function publicly:
https://beam.apache.org/releases/pydoc/2.31.0/apache_beam.transforms.combiners.html#apache_beam.transforms.combiners.ToList
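
A minimal sketch of the kind of shared helper the comment is asking for, assuming it were
extracted into some common runner-utility location (the class name Concatenate and its
placement are illustrative, not a decided API). It is the Java analogue of Python's
combiners.ToList: a CombineFn that collects every input element into one List.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.beam.sdk.coders.CannotProvideCoderException;
    import org.apache.beam.sdk.coders.Coder;
    import org.apache.beam.sdk.coders.CoderRegistry;
    import org.apache.beam.sdk.coders.ListCoder;
    import org.apache.beam.sdk.transforms.Combine;

    /**
     * Combines all inputs of type {@code T} into a single {@code List<T>}.
     * Hypothetical shared helper; name and package are assumptions for this sketch.
     */
    class Concatenate<T> extends Combine.CombineFn<T, List<T>, List<T>> {
      @Override
      public List<T> createAccumulator() {
        // Start with an empty list per bundle/key.
        return new ArrayList<>();
      }

      @Override
      public List<T> addInput(List<T> accumulator, T input) {
        // Append each element to the running list.
        accumulator.add(input);
        return accumulator;
      }

      @Override
      public List<T> mergeAccumulators(Iterable<List<T>> accumulators) {
        // Concatenate partial lists produced on different workers.
        List<T> result = createAccumulator();
        for (List<T> accumulator : accumulators) {
          result.addAll(accumulator);
        }
        return result;
      }

      @Override
      public List<T> extractOutput(List<T> accumulator) {
        return accumulator;
      }

      @Override
      public Coder<List<T>> getAccumulatorCoder(CoderRegistry registry, Coder<T> inputCoder)
          throws CannotProvideCoderException {
        return ListCoder.of(inputCoder);
      }
    }

With a helper like this in a common place, a runner's GroupByKey translation can be expressed
as Combine.perKey(new Concatenate<>()) instead of each runner keeping its own copy; as the
javadoc above warns, materializing all values per key in memory will not scale to very large
groups.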
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]