[ https://issues.apache.org/jira/browse/FLINK-18354?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17139144#comment-17139144 ]

Yangyingbo commented on FLINK-18354:
------------------------------------

Also, you can write your own Tuple2 that extends
org.apache.flink.api.java.tuple.Tuple2

and implements GenericRecord, Comparable<GenericData.Record>,

and then pass your own Tuple2 class when constructing the TupleTypeInfo.
{code:java}
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

/**
 * A Flink Tuple2 that is also an Avro GenericRecord, so it can be handed to
 * ParquetAvroWriters.forGenericRecord(...) without a ClassCastException.
 */
public class Tuple2<T0, T1> extends org.apache.flink.api.java.tuple.Tuple2<T0, T1>
        implements GenericRecord, Comparable<GenericData.Record> {

    private Object[] values = new Object[2];
    private Schema schema = null;

    public Tuple2() {
        super();
    }

    public Tuple2(Schema schema) {
        super();
        if (schema == null || !Schema.Type.RECORD.equals(schema.getType())) {
            throw new AvroRuntimeException("Not a record schema: " + schema);
        }
        this.schema = schema;
        this.values = new Object[schema.getFields().size()];
    }

    public Tuple2(Schema schema, T0 value0, T1 value1) {
        super(value0, value1);
        if (schema == null || !Schema.Type.RECORD.equals(schema.getType())) {
            throw new AvroRuntimeException("Not a record schema: " + schema);
        }
        this.schema = schema;
        this.values = new Object[] {value0, value1};
    }

    public Tuple2(T0 value0, T1 value1) {
        super(value0, value1);
        this.values = new Object[] {value0, value1};
    }

    @Override
    public int compareTo(GenericData.Record o) {
        return GenericData.get().compare(this, o, schema);
    }

    // GenericRecord: access by field name (requires a schema).
    @Override
    public void put(String key, Object v) {
        Schema.Field field = schema.getField(key);
        if (field == null) {
            throw new AvroRuntimeException("Not a valid schema field: " + key);
        }
        values[field.pos()] = v;
    }

    @Override
    public Object get(String key) {
        Schema.Field field = schema.getField(key);
        if (field == null) {
            return null;
        }
        return values[field.pos()];
    }

    // IndexedRecord: positional access; this is what the Parquet writer uses.
    @Override
    public void put(int i, Object v) {
        values[i] = v;
    }

    @Override
    public Object get(int i) {
        return values[i];
    }

    @Override
    public Schema getSchema() {
        return schema;
    }

    // Keep the inherited tuple fields (f0/f1) and the Avro values array in sync.
    @Override
    public <T> void setField(T value, int pos) {
        super.setField(value, pos);
        this.values[pos] = value;
    }

    @Override
    public void setFields(T0 value0, T1 value1) {
        super.setFields(value0, value1);
        this.values[0] = value0;
        this.values[1] = value1;
    }

    public static <T0, T1> Tuple2<T0, T1> of(T0 value0, T1 value1) {
        return new Tuple2<>(value0, value1);
    }
}
{code}
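For illustration, a minimal sketch of how the subclass behaves as a Flink tuple and an Avro record at the same time (the "id"/"time" record schema mirrors the sink schema built in the next snippet; imports are assumed to match):

{code:java}
// Minimal usage sketch: one object satisfies all three APIs.
ArrayList<org.apache.avro.Schema.Field> fields = new ArrayList<>();
fields.add(new org.apache.avro.Schema.Field("id",
        org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
        "id", JsonProperties.NULL_VALUE));
fields.add(new org.apache.avro.Schema.Field("time",
        org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
        "time", JsonProperties.NULL_VALUE));
org.apache.avro.Schema schema =
        org.apache.avro.Schema.createRecord("pi", "doc", "flink.parquet", false, fields);

Tuple2<String, String> t = new Tuple2<>(schema, "id-1", "2020-06-22");
t.put("time", "2020-06-23");   // GenericRecord: access by field name
Object time = t.get(1);        // IndexedRecord: positional access
String id = t.f0;              // Flink tuple: plain field access
{code}

One caveat of this design: put(...) only updates the internal values array, while setField/setFields also keep the inherited f0/f1 in sync, so mutating records by name can leave the tuple fields stale.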
 
{code:java}
        // Build the TupleTypeInfo against the GenericRecord-capable Tuple2
        // instead of GenericData.Record:
//        TupleTypeInfo tupleTypeInfo = new TupleTypeInfo(GenericData.Record.class,
//                BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
        TupleTypeInfo tupleTypeInfo = new TupleTypeInfo(Tuple2.class,
                BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
        DataStream testDataStream = flinkTableEnv.toAppendStream(test, tupleTypeInfo);
        testDataStream.print().setParallelism(1);

        // Avro schema for the Parquet sink.
        ArrayList<org.apache.avro.Schema.Field> fields = new ArrayList<>();
        fields.add(new org.apache.avro.Schema.Field("id",
                org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
                "id", JsonProperties.NULL_VALUE));
        fields.add(new org.apache.avro.Schema.Field("time",
                org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
                "time", JsonProperties.NULL_VALUE));
        org.apache.avro.Schema parquetSinkSchema = org.apache.avro.Schema.createRecord(
                "pi", "flinkParquetSink", "flink.parquet", true, fields);
        String fileSinkPath = "./xxx.text/rs6/";

        StreamingFileSink<GenericRecord> parquetSink = StreamingFileSink
                .forBulkFormat(new Path(fileSinkPath),
                        ParquetAvroWriters.forGenericRecord(parquetSinkSchema))
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                .build();
        testDataStream.addSink(parquetSink).setParallelism(1);
        flinkTableEnv.execute("ReadFromKafkaConnectorWriteToLocalFileJava");
{code}
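Why this resolves the ClassCastException: parquet-avro reads every record field through GenericData.getField, which (as the stack trace below shows) casts the record to IndexedRecord and reads it positionally, roughly like this simplified sketch of Avro's implementation:

{code:java}
// Simplified from org.apache.avro.generic.GenericData, for illustration only:
public Object getField(Object record, String name, int position) {
    return ((IndexedRecord) record).get(position);
}
{code}

A plain org.apache.flink.api.java.tuple.Tuple2 is not an IndexedRecord, hence the cast failure; the Tuple2 subclass above implements GenericRecord, a sub-interface of IndexedRecord, so the cast succeeds and the writer reads values via get(int).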

> when use ParquetAvroWriters.forGenericRecord(Schema schema) error  
> java.lang.ClassCastException: org.apache.flink.api.java.tuple.Tuple2 cannot 
> be cast to org.apache.avro.generic.IndexedRecord
> -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: FLINK-18354
>                 URL: https://issues.apache.org/jira/browse/FLINK-18354
>             Project: Flink
>          Issue Type: Bug
>          Components: API / DataStream, Formats (JSON, Avro, Parquet, ORC, 
> SequenceFile)
>    Affects Versions: 1.10.0
>            Reporter: Yangyingbo
>            Priority: Major
>
> When I use ParquetAvroWriters.forGenericRecord(Schema schema) to write data to
> Parquet, the following error occurs.
> My code:
>  
> {code:java}
> // transform to a DataStream
> // TupleTypeInfo tupleTypeInfo = new TupleTypeInfo(GenericData.Record.class,
> //         BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
> TupleTypeInfo tupleTypeInfo = new TupleTypeInfo(
>         BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
> DataStream testDataStream = flinkTableEnv.toAppendStream(test, tupleTypeInfo);
> testDataStream.print().setParallelism(1);
>
> ArrayList<org.apache.avro.Schema.Field> fields = new ArrayList<org.apache.avro.Schema.Field>();
> fields.add(new org.apache.avro.Schema.Field("id",
>         org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
>         "id", JsonProperties.NULL_VALUE));
> fields.add(new org.apache.avro.Schema.Field("time",
>         org.apache.avro.Schema.create(org.apache.avro.Schema.Type.STRING),
>         "time", JsonProperties.NULL_VALUE));
> org.apache.avro.Schema parquetSinkSchema = org.apache.avro.Schema.createRecord(
>         "pi", "flinkParquetSink", "flink.parquet", true, fields);
> String fileSinkPath = "./xxx.text/rs6/";
>
> StreamingFileSink<GenericRecord> parquetSink = StreamingFileSink
>         .forBulkFormat(new Path(fileSinkPath),
>                 ParquetAvroWriters.forGenericRecord(parquetSinkSchema))
>         .withRollingPolicy(OnCheckpointRollingPolicy.build())
>         .build();
> testDataStream.addSink(parquetSink).setParallelism(1);
> flinkTableEnv.execute("ReadFromKafkaConnectorWriteToLocalFileJava");
> {code}
> And this is the error:
> {code:java}
> 09:29:50,283 INFO  org.apache.flink.runtime.taskmanager.Task                     - Sink: Unnamed (1/1) (79505cb6ab2df38886663fd99461315a) switched from RUNNING to FAILED.
> java.lang.ClassCastException: org.apache.flink.api.java.tuple.Tuple2 cannot be cast to org.apache.avro.generic.IndexedRecord
>     at org.apache.avro.generic.GenericData.getField(GenericData.java:697)
>     at org.apache.parquet.avro.AvroWriteSupport.writeRecordFields(AvroWriteSupport.java:188)
>     at org.apache.parquet.avro.AvroWriteSupport.write(AvroWriteSupport.java:165)
>     at org.apache.parquet.hadoop.InternalParquetRecordWriter.write(InternalParquetRecordWriter.java:128)
>     at org.apache.parquet.hadoop.ParquetWriter.write(ParquetWriter.java:299)
>     at org.apache.flink.formats.parquet.ParquetBulkWriter.addElement(ParquetBulkWriter.java:52)
>     at org.apache.flink.streaming.api.functions.sink.filesystem.BulkPartWriter.write(BulkPartWriter.java:50)
>     at org.apache.flink.streaming.api.functions.sink.filesystem.Bucket.write(Bucket.java:214)
>     at org.apache.flink.streaming.api.functions.sink.filesystem.Buckets.onElement(Buckets.java:274)
>     at org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.invoke(StreamingFileSink.java:445)
>     at org.apache.flink.streaming.api.operators.StreamSink.processElement(StreamSink.java:56)
>     at org.apache.flink.streaming.runtime.tasks.OneInputStreamTask$StreamTaskNetworkOutput.emitRecord(OneInputStreamTask.java:173)
>     at org.apache.flink.streaming.runtime.io.StreamTaskNetworkInput.processElement(StreamTaskNetworkInput.java:151)
>     at org.apache.flink.streaming.runtime.io.StreamTaskNetworkInput.emitNext(StreamTaskNetworkInput.java:128)
>     at org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processInput(StreamOneInputProcessor.java:69)
>     at org.apache.flink.streaming.runtime.tasks.StreamTask.processInput(StreamTask.java:311)
>     at org.apache.flink.streaming.runtime.tasks.mailbox.MailboxProcessor.runMailboxLoop(MailboxProcessor.java:187)
>     at org.apache.flink.streaming.runtime.tasks.StreamTask.runMailboxLoop(StreamTask.java:487)
>     at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:470)
>     at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:707)
>     at org.apache.flink.runtime.taskmanager.Task.run(Task.java:532)
>     at java.lang.Thread.run(Thread.java:748)
> 09:29:50,284 INFO  org.apache.flink.runtime.taskmanager.Task                     - Freeing task resources for Sink: Unnamed (1/1) (79505cb6ab2df38886663fd99461315a).
> 09:29:50,285 INFO  org.apache.flink.runtime.taskmanager.Task                     - Ensuring all FileSystem streams are closed for task Sink: Unnamed (1/1) (79505cb6ab2df38886663fd99461315a) [FAILED]
> 09:29:50,289 INFO  org.apache.flink.runtime.taskexecutor.TaskExecutor            - Un-registering task and sending final execution state FAILED to JobManager for task Sink: Unnamed (1/1) 79505cb6ab2df38886663fd99461315a.
> 09:29:50,293 INFO  org.apache.flink.runtime.executiongraph.ExecutionGraph        - Sink: Unnamed (1/1) (79505cb6ab2df38886663fd99461315a) switched from RUNNING to FAILED.
> java.lang.ClassCastException: org.apache.flink.api.java.tuple.Tuple2 cannot be cast to org.apache.avro.generic.IndexedRecord
>     at org.apache.avro.generic.GenericData.getField(GenericData.java:697)
>     at org.apache.parquet.avro.AvroWriteSupport.writeRecordFields(AvroWriteSupport.java:188)
>     at org.apache.parquet.avro.AvroWriteSupport.write(AvroWriteSupport.java:165)
> {code}
>  



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
