[ https://issues.apache.org/jira/browse/FLINK-3871?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15983210#comment-15983210 ]

ASF GitHub Bot commented on FLINK-3871:
---------------------------------------

Github user fhueske commented on a diff in the pull request:

    https://github.com/apache/flink/pull/3663#discussion_r113237296
  
    --- Diff: flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/util/serialization/AvroRowSerializationSchema.java ---
    @@ -0,0 +1,122 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.flink.streaming.util.serialization;
    +
    +import java.io.ByteArrayOutputStream;
    +import java.io.IOException;
    +import java.util.List;
    +import org.apache.avro.Schema;
    +import org.apache.avro.generic.GenericData;
    +import org.apache.avro.generic.GenericRecord;
    +import org.apache.avro.io.DatumWriter;
    +import org.apache.avro.io.Encoder;
    +import org.apache.avro.io.EncoderFactory;
    +import org.apache.avro.reflect.ReflectDatumWriter;
    +import org.apache.avro.specific.SpecificData;
    +import org.apache.avro.specific.SpecificRecord;
    +import org.apache.avro.util.Utf8;
    +import org.apache.flink.types.Row;
    +import org.apache.flink.util.Preconditions;
    +
    +/**
    + * Serialization schema that serializes {@link Row} over {@link SpecificRecord} into Avro bytes.
    + */
    +public class AvroRowSerializationSchema implements SerializationSchema<Row> {
    +
    +   /**
    +    * Avro serialization schema.
    +    */
    +   private final Schema schema;
    +
    +   /**
    +    * Writer to serialize Avro record into a byte array.
    +    */
    +   private final DatumWriter<GenericRecord> datumWriter;
    +
    +   /**
    +    * Output stream to serialize records into byte array.
    +    */
    +   private final ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
    +
    +   /**
    +    * Low-level class for serialization of Avro values.
    +    */
    +   private final Encoder encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null);
    +
    +   /**
    +    * Creates an Avro serialization schema for the given record class.
    +    *
    +    * @param recordClazz Avro record class used to serialize Flink's row to Avro's record
    +    */
    +   @SuppressWarnings("unchecked")
    +   public AvroRowSerializationSchema(Class<? extends SpecificRecord> recordClazz) {
    +           Preconditions.checkNotNull(recordClazz, "Avro record class must not be null.");
    +           this.schema = SpecificData.get().getSchema(recordClazz);
    +           this.datumWriter = new ReflectDatumWriter<>(schema);
    +   }
    +
    +   @Override
    +   @SuppressWarnings("unchecked")
    +   public byte[] serialize(Row row) {
    +           // convert to record
    +           final Object record = convertToRecord(schema, row);
    +
    +           // write
    +           try {
    +                   arrayOutputStream.reset();
    +                   datumWriter.write((GenericRecord) record, encoder);
    +                   encoder.flush();
    +                   return arrayOutputStream.toByteArray();
    +           } catch (IOException e) {
    +                   throw new RuntimeException("Failed to serialize Row.", 
e);
    +           }
    +   }
    +
    +   /**
    +    * Converts a (nested) Flink Row into Avro's {@link GenericRecord}.
    +    * Strings are converted into Avro's {@link Utf8} fields.
    +    */
    +   private static Object convertToRecord(Schema schema, Object rowObj) {
    +           if (rowObj instanceof Row) {
    +                   // records can be wrapped in a union
    +                   if (schema.getType() == Schema.Type.UNION) {
    +                           final List<Schema> types = schema.getTypes();
    +                           if (types.size() == 2 && types.get(0).getType() 
== Schema.Type.NULL && types.get(1).getType() == Schema.Type.RECORD) {
    --- End diff --
    
    This limitation exists because the Table API cannot handle UNION types either, right?
    Isn't this the same as having a nullable record field?
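    
    For reference, that union shape is exactly how Avro models a nullable record field. A minimal, hypothetical sketch (the Inner record and its field name are made up for illustration):
    
        import java.util.List;
        
        import org.apache.avro.Schema;
        import org.apache.avro.SchemaBuilder;
        
        public class NullableUnionExample {
            public static void main(String[] args) {
                // A nullable record field is modeled as a 2-type union: ["null", Inner].
                Schema nullableRecord = SchemaBuilder.unionOf()
                        .nullType()
                        .and()
                        .record("Inner").fields()
                                .requiredString("name")
                        .endRecord()
                        .endUnion();
        
                // The condition from the diff above recognizes exactly this shape.
                List<Schema> types = nullableRecord.getTypes();
                boolean isNullableRecord = types.size() == 2
                        && types.get(0).getType() == Schema.Type.NULL
                        && types.get(1).getType() == Schema.Type.RECORD;
                System.out.println(isNullableRecord); // prints: true
            }
        }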


> Add Kafka TableSource with Avro serialization
> ---------------------------------------------
>
>                 Key: FLINK-3871
>                 URL: https://issues.apache.org/jira/browse/FLINK-3871
>             Project: Flink
>          Issue Type: New Feature
>          Components: Table API & SQL
>            Reporter: Fabian Hueske
>            Assignee: Ivan Mushketyk
>
> Add a Kafka TableSource which supports Avro serialized data.
> The KafkaAvroTableSource should support two modes:
> # SpecificRecord Mode: In this case the user specifies a class that was 
> code-generated by Avro from a schema. Flink treats these classes as regular 
> POJOs. Hence, they are also natively supported by the Table API and SQL. 
> Classes generated by Avro contain their Schema in a static field. The schema 
> should be used to automatically derive field names and types. Hence, no 
> additional information is required other than the name of the class.
> # GenericRecord Mode: In this case the user specifies an Avro Schema. The 
> schema is used to deserialize the data into a GenericRecord which must be 
> translated into a possibly nested {{Row}} based on the schema information. 
> Again, the Avro Schema is used to automatically derive the field names and 
> types. This mode is less efficient than the SpecificRecord mode because the 
> {{GenericRecord}} needs to be converted into a {{Row}} (see the sketch below).
> This feature depends on FLINK-5280, i.e., support for nested data in 
> {{TableSource}}.
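
For reference, the GenericRecord-to-{{Row}} translation described above could look roughly like the following minimal sketch (a hypothetical helper, not the actual implementation; union-typed fields and the full Avro-to-Flink type mapping are ignored for brevity):

    import java.util.List;

    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.flink.types.Row;

    public class GenericRecordToRow {

        // Recursively copies an Avro GenericRecord into a Flink Row based on
        // the Avro schema; nested records become nested Rows.
        public static Row convertToRow(Schema schema, GenericRecord record) {
            final List<Schema.Field> fields = schema.getFields();
            final Row row = new Row(fields.size());
            for (int i = 0; i < fields.size(); i++) {
                final Object value = record.get(i);
                if (value instanceof GenericRecord) {
                    row.setField(i, convertToRow(fields.get(i).schema(), (GenericRecord) value));
                } else {
                    row.setField(i, value);
                }
            }
            return row;
        }
    }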



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)
