lvyanquan commented on code in PR #3495: URL: https://github.com/apache/flink-cdc/pull/3495#discussion_r1692402071
########## flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/main/java/org/apache/flink/cdc/connectors/elasticsearch/serializer/ElasticsearchEventSerializer.java: ########## @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.cdc.connectors.elasticsearch.serializer; + +import org.apache.flink.api.connector.sink2.Sink; +import org.apache.flink.api.connector.sink2.SinkWriter; +import org.apache.flink.cdc.common.data.RecordData; +import org.apache.flink.cdc.common.event.*; +import org.apache.flink.cdc.common.schema.Column; +import org.apache.flink.cdc.common.schema.Schema; +import org.apache.flink.cdc.common.types.*; +import org.apache.flink.cdc.common.utils.Preconditions; +import org.apache.flink.cdc.common.utils.SchemaUtils; +import org.apache.flink.connector.base.sink.writer.ElementConverter; + +import co.elastic.clients.elasticsearch.core.bulk.BulkOperationVariant; +import co.elastic.clients.elasticsearch.core.bulk.DeleteOperation; +import co.elastic.clients.elasticsearch.core.bulk.IndexOperation; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.IOException; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** A serializer for Event to BulkOperationVariant. */ +public class ElasticsearchEventSerializer implements ElementConverter<Event, BulkOperationVariant> { + private final ObjectMapper objectMapper = new ObjectMapper(); + private final Map<TableId, Schema> schemaMaps = new HashMap<>(); + + /** Format DATE type data. */ + public static final DateTimeFormatter DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd"); + + /** Format timestamp-related type data. */ + public static final DateTimeFormatter DATE_TIME_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS"); + + /** ZoneId from pipeline config to support timestamp with local time zone. 
*/ + private final ZoneId pipelineZoneId; + + public ElasticsearchEventSerializer(ZoneId zoneId) { + this.pipelineZoneId = zoneId; + } + + @Override + public BulkOperationVariant apply(Event event, SinkWriter.Context context) { + try { + if (event instanceof DataChangeEvent) { + return applyDataChangeEvent((DataChangeEvent) event); + } else if (event instanceof SchemaChangeEvent) { + IndexOperation<Map<String, Object>> indexOperation = + applySchemaChangeEvent((SchemaChangeEvent) event); + if (indexOperation != null) { + return indexOperation; + } + } + } catch (IOException e) { + throw new RuntimeException("Failed to serialize event", e); + } + return null; + } + + private IndexOperation<Map<String, Object>> applySchemaChangeEvent( + SchemaChangeEvent schemaChangeEvent) throws IOException { + TableId tableId = schemaChangeEvent.tableId(); + if (schemaChangeEvent instanceof CreateTableEvent) { + Schema schema = ((CreateTableEvent) schemaChangeEvent).getSchema(); + schemaMaps.put(tableId, schema); + return createSchemaIndexOperation(tableId, schema); + } else if (schemaChangeEvent instanceof AddColumnEvent + || schemaChangeEvent instanceof DropColumnEvent) { + if (!schemaMaps.containsKey(tableId)) { + throw new RuntimeException("Schema of " + tableId + " does not exist."); + } + Schema updatedSchema = + SchemaUtils.applySchemaChangeEvent(schemaMaps.get(tableId), schemaChangeEvent); + schemaMaps.put(tableId, updatedSchema); + return createSchemaIndexOperation(tableId, updatedSchema); + } else { + if (!schemaMaps.containsKey(tableId)) { + throw new RuntimeException("Schema of " + tableId + " does not exist."); + } + Schema updatedSchema = + SchemaUtils.applySchemaChangeEvent(schemaMaps.get(tableId), schemaChangeEvent); + schemaMaps.put(tableId, updatedSchema); + } + return null; + } + + private IndexOperation<Map<String, Object>> createSchemaIndexOperation( + TableId tableId, Schema schema) { + Map<String, Object> schemaMap = new HashMap<>(); + schemaMap.put( + "columns", + schema.getColumns().stream() + .map(Column::asSummaryString) + .collect(Collectors.toList())); + schemaMap.put("primaryKeys", schema.primaryKeys()); + schemaMap.put("options", schema.options()); + + return new IndexOperation.Builder<Map<String, Object>>() + .index(tableId.toString()) + .id(tableId.getTableName()) + .document(schemaMap) + .build(); + } + + private BulkOperationVariant applyDataChangeEvent(DataChangeEvent event) + throws JsonProcessingException { + TableId tableId = event.tableId(); + Schema schema = schemaMaps.get(tableId); + Preconditions.checkNotNull(schema, event.tableId() + " does not exist."); + Map<String, Object> valueMap; + OperationType op = event.op(); + + Object[] uniqueId = + generateUniqueId( + op == OperationType.DELETE ? 
event.before() : event.after(), schema);
+        String id = Arrays.stream(uniqueId).map(Object::toString).collect(Collectors.joining("_"));
+
+        switch (op) {
+            case INSERT:
+            case REPLACE:
+            case UPDATE:
+                valueMap = serializeRecord(event.after(), schema, pipelineZoneId);
+                return new IndexOperation.Builder<>()
+                        .index(tableId.toString())
+                        .id(id)
+                        .document(valueMap)
+                        .build();
+            case DELETE:
+                return new DeleteOperation.Builder().index(tableId.toString()).id(id).build();
+            default:
+                throw new UnsupportedOperationException("Unsupported Operation " + op);
+        }
+    }
+
+    private Object[] generateUniqueId(RecordData recordData, Schema schema) {
+        List<String> primaryKeys = schema.primaryKeys();
+        return primaryKeys.stream()
+                .map(
+                        primaryKey -> {
+                            Column column =
+                                    schema.getColumns().stream()
+                                            .filter(col -> col.getName().equals(primaryKey))
+                                            .findFirst()
+                                            .orElseThrow(
+                                                    () ->
+                                                            new IllegalStateException(
+                                                                    "Primary key column not found: "
+                                                                            + primaryKey));
+                            int index = schema.getColumns().indexOf(column);
+                            return getFieldValue(recordData, column.getType(), index);
+                        })
+                .toArray();
+    }
+
+    private Object getFieldValue(RecordData recordData, DataType dataType, int index) {
+        switch (dataType.getTypeRoot()) {
+            case BOOLEAN:
+                return recordData.getBoolean(index);
+            case TINYINT:
+                return recordData.getByte(index);
+            case SMALLINT:
+                return recordData.getShort(index);
+            case INTEGER:
+            case DATE:
+            case TIME_WITHOUT_TIME_ZONE:
+                return recordData.getInt(index);
+            case BIGINT:
+                return recordData.getLong(index);
+            case FLOAT:
+                return recordData.getFloat(index);
+            case DOUBLE:
+                return recordData.getDouble(index);
+            case CHAR:
+            case VARCHAR:
+                return recordData.getString(index);
+            default:

Review Comment:
   It looks like the DECIMAL type was missed; do you plan to support it?
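   For reference, a minimal sketch of what the missing branch could delegate to, assuming `RecordData#getDecimal` and `DecimalData#toBigDecimal` mirror Flink's `RowData` API and that `DecimalType` exposes `getPrecision()`/`getScale()` (the helper name is hypothetical):

   ```java
   import java.math.BigDecimal;

   import org.apache.flink.cdc.common.data.RecordData;
   import org.apache.flink.cdc.common.types.DecimalType;

   /** Hypothetical helper for a DECIMAL branch in getFieldValue(). */
   static BigDecimal readDecimal(RecordData recordData, DecimalType type, int index) {
       // DECIMAL fields must be read back with their declared precision and scale.
       return recordData.getDecimal(index, type.getPrecision(), type.getScale())
               .toBigDecimal();
   }
   ```

   The switch would then gain a `case DECIMAL:` that casts `dataType` to `DecimalType` and delegates to this helper; returning a `BigDecimal` preserves precision and leaves the storage decision to the index mapping.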
##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/main/java/org/apache/flink/cdc/connectors/elasticsearch/serializer/ElasticsearchEventSerializer.java:
##########
@@ -0,0 +1,230 @@
+    public Map<String, Object> serializeRecord(
+            RecordData recordData, Schema schema, ZoneId pipelineZoneId) {
+        List<Column> columns = schema.getColumns();
+        Map<String, Object> record = new HashMap<>();
+        Preconditions.checkState(
+                columns.size() == recordData.getArity(),
+                "Column size does not match the data size.");
+
+        for (int i = 0; i < recordData.getArity(); i++) {
+            Column column = columns.get(i);
+            ColumnType columnType = ColumnType.valueOf(column.getType().getTypeRoot().name());
+            ElasticsearchRowConverter.SerializationConverter converter =

Review Comment:
   Can we cache the list of converters per TableId, so that the converters are not rebuilt every time we encounter an already-known TableId?
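   A minimal sketch of such a cache, assuming a hypothetical `createConverter(Column, ZoneId)` factory in place of whatever `ElasticsearchRowConverter` actually exposes:

   ```java
   import java.time.ZoneId;
   import java.util.HashMap;
   import java.util.List;
   import java.util.Map;
   import java.util.stream.Collectors;

   import org.apache.flink.cdc.common.event.TableId;
   import org.apache.flink.cdc.common.schema.Schema;

   // Per-table converter chains, built once and reused for subsequent records.
   private final Map<TableId, List<ElasticsearchRowConverter.SerializationConverter>>
           converterCache = new HashMap<>();

   private List<ElasticsearchRowConverter.SerializationConverter> convertersFor(
           TableId tableId, Schema schema, ZoneId zoneId) {
       return converterCache.computeIfAbsent(
               tableId,
               id ->
                       schema.getColumns().stream()
                               .map(column -> createConverter(column, zoneId)) // hypothetical factory
                               .collect(Collectors.toList()));
   }
   ```

   Whatever shape the cache takes, it needs to be invalidated together with `schemaMaps` in `applySchemaChangeEvent` (e.g. `converterCache.remove(tableId)` on every schema update); otherwise stale converters would still be applied after an `AddColumnEvent` or `DropColumnEvent`.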
##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/main/java/org/apache/flink/cdc/connectors/elasticsearch/v2/Elasticsearch8AsyncWriter.java:
##########
@@ -0,0 +1,217 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package org.apache.flink.cdc.connectors.elasticsearch.v2;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.connector.base.sink.throwable.FatalExceptionClassifier;
+import org.apache.flink.connector.base.sink.writer.AsyncSinkWriter;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.connector.base.sink.writer.config.AsyncSinkWriterConfiguration;
+import org.apache.flink.metrics.Counter;
+import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
+import co.elastic.clients.elasticsearch.core.BulkRequest;
+import co.elastic.clients.elasticsearch.core.BulkResponse;
+import co.elastic.clients.elasticsearch.core.bulk.BulkOperation;
+import co.elastic.clients.elasticsearch.core.bulk.BulkResponseItem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.ConnectException;
+import java.net.NoRouteToHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Elasticsearch8AsyncWriter is an Apache Flink Async Sink Writer that submits Operations into an
+ * Elasticsearch cluster.
+ *
+ * @param <InputT> type of Operations
+ */
+public class Elasticsearch8AsyncWriter<InputT> extends AsyncSinkWriter<InputT, Operation> {

Review Comment:
   Do we support writing to ES7 using this writer?

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
