lvyanquan commented on code in PR #3495:
URL: https://github.com/apache/flink-cdc/pull/3495#discussion_r1712867361


##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/test/java/org/apache/flink/cdc/connectors/elasticsearch/sink/Elasticsearch6DataSinkITCaseTest.java:
##########
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.flink.cdc.connectors.elasticsearch.sink;
+
+import org.apache.flink.api.common.restartstrategy.RestartStrategies;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.cdc.common.event.Event;
+import org.apache.flink.cdc.common.event.TableId;
+import org.apache.flink.cdc.common.sink.FlinkSinkProvider;
+import org.apache.flink.cdc.connectors.elasticsearch.config.ElasticsearchSinkOptions;
+import org.apache.flink.cdc.connectors.elasticsearch.sink.utils.ElasticsearchTestUtils;

Review Comment:
   This class (`ElasticsearchTestUtils`) was not uploaded.



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/main/java/org/apache/flink/cdc/connectors/elasticsearch/config/ElasticsearchSinkOptions.java:
##########
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.elasticsearch.config;
+
+import org.apache.flink.cdc.connectors.elasticsearch.v2.NetworkConfig;
+
+import org.apache.http.HttpHost;
+
+import java.io.Serializable;
+import java.util.List;
+
+/** Elasticsearch DataSink Options reference {@link ElasticsearchSinkOptions}. */
+public class ElasticsearchSinkOptions implements Serializable {
+
+    private final int maxBatchSize;
+    private final int maxInFlightRequests;
+    private final int maxBufferedRequests;
+    private final long maxBatchSizeInBytes;
+    private final long maxTimeInBufferMS;
+    private final long maxRecordSizeInBytes;
+    private final NetworkConfig networkConfig;
+    private final int version; // newly added field

Review Comment:
   These comments still exist.



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-elasticsearch/src/main/java/org/apache/flink/cdc/connectors/elasticsearch/serializer/ElasticsearchEventSerializer.java:
##########
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.elasticsearch.serializer;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.cdc.common.data.RecordData;
+import org.apache.flink.cdc.common.event.AddColumnEvent;
+import org.apache.flink.cdc.common.event.CreateTableEvent;
+import org.apache.flink.cdc.common.event.DataChangeEvent;
+import org.apache.flink.cdc.common.event.DropColumnEvent;
+import org.apache.flink.cdc.common.event.Event;
+import org.apache.flink.cdc.common.event.OperationType;
+import org.apache.flink.cdc.common.event.RenameColumnEvent;
+import org.apache.flink.cdc.common.event.SchemaChangeEvent;
+import org.apache.flink.cdc.common.event.TableId;
+import org.apache.flink.cdc.common.schema.Column;
+import org.apache.flink.cdc.common.schema.Schema;
+import org.apache.flink.cdc.common.utils.Preconditions;
+import org.apache.flink.cdc.common.utils.SchemaUtils;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+
+import co.elastic.clients.elasticsearch.core.bulk.BulkOperationVariant;
+import co.elastic.clients.elasticsearch.core.bulk.DeleteOperation;
+import co.elastic.clients.elasticsearch.core.bulk.IndexOperation;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.io.IOException;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+/** A serializer for Event to BulkOperationVariant. */
+public class ElasticsearchEventSerializer implements ElementConverter<Event, BulkOperationVariant> {
+    private final ObjectMapper objectMapper = new ObjectMapper();
+    private final Map<TableId, Schema> schemaMaps = new HashMap<>();
+    private final ConcurrentHashMap<TableId, List<ElasticsearchRowConverter.SerializationConverter>>
+            converterCache = new ConcurrentHashMap<>();
+
+    /** Format DATE type data. */
+    public static final DateTimeFormatter DATE_FORMATTER =
+            DateTimeFormatter.ofPattern("yyyy-MM-dd");
+
+    /** Format timestamp-related type data. */
+    public static final DateTimeFormatter DATE_TIME_FORMATTER =
+            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS");
+
+    /** ZoneId from pipeline config to support timestamp with local time zone. */
+    private final ZoneId pipelineZoneId;
+
+    public ElasticsearchEventSerializer(ZoneId zoneId) {
+        this.pipelineZoneId = zoneId;
+    }
+
+    @Override
+    public BulkOperationVariant apply(Event event, SinkWriter.Context context) {
+        try {
+            if (event instanceof DataChangeEvent) {
+                return createBulkOperationVariant((DataChangeEvent) event);
+            } else if (event instanceof SchemaChangeEvent) {
+                IndexOperation<Map<String, Object>> indexOperation =
+                        applySchemaChangeEvent((SchemaChangeEvent) event);
+                if (indexOperation != null) {
+                    return indexOperation;
+                }
+            }
+        } catch (IOException e) {
+            throw new RuntimeException("Failed to serialize event", e);
+        }
+        return null;
+    }
+
+    private IndexOperation<Map<String, Object>> applySchemaChangeEvent(
+            SchemaChangeEvent schemaChangeEvent) throws IOException {
+        TableId tableId = schemaChangeEvent.tableId();
+        if (schemaChangeEvent instanceof CreateTableEvent) {
+            Schema schema = ((CreateTableEvent) schemaChangeEvent).getSchema();
+            schemaMaps.put(tableId, schema);
+            // Cache new converters
+            getOrCreateConverters(tableId, schema);
+        } else if (schemaChangeEvent instanceof AddColumnEvent
+                || schemaChangeEvent instanceof DropColumnEvent
+                || schemaChangeEvent instanceof RenameColumnEvent) {
+            if (!schemaMaps.containsKey(tableId)) {
+                throw new RuntimeException("Schema of " + tableId + " does not exist.");
+            }
+            Schema updatedSchema =
+                    SchemaUtils.applySchemaChangeEvent(schemaMaps.get(tableId), schemaChangeEvent);
+            schemaMaps.put(tableId, updatedSchema);
+            // Update cached converters
+            getOrCreateConverters(tableId, updatedSchema);
+        } else {
+            if (!schemaMaps.containsKey(tableId)) {
+                throw new RuntimeException("Schema of " + tableId + " does not exist.");
+            }
+            Schema updatedSchema =
+                    SchemaUtils.applySchemaChangeEvent(schemaMaps.get(tableId), schemaChangeEvent);
+            schemaMaps.put(tableId, updatedSchema);
+            // Update cached converters
+            getOrCreateConverters(tableId, updatedSchema);
+        }
+        return null;
+    }
+
+    private BulkOperationVariant createBulkOperationVariant(DataChangeEvent event)
+            throws JsonProcessingException {
+        TableId tableId = event.tableId();
+        Schema schema = schemaMaps.get(tableId);
+        Preconditions.checkNotNull(schema, event.tableId() + " does not exist.");
+        // Ensure converters are cached
+        getOrCreateConverters(tableId, schema);
+        Map<String, Object> valueMap;
+        OperationType op = event.op();
+        Object[] uniqueId =
+                generateUniqueId(
+                        op == OperationType.DELETE ? event.before() : event.after(),
+                        schema,
+                        tableId);
+        String id = Arrays.stream(uniqueId).map(Object::toString).collect(Collectors.joining("_"));
+        switch (op) {
+            case INSERT:
+            case REPLACE:
+            case UPDATE:
+                valueMap = serializeRecord(tableId, event.after(), schema, pipelineZoneId);
+                return new IndexOperation.Builder<>()
+                        .index(tableId.toString())
+                        .id(id)
+                        .document(valueMap)
+                        .build();
+            case DELETE:
+                return new DeleteOperation.Builder().index(tableId.toString()).id(id).build();
+            default:
+                throw new UnsupportedOperationException("Unsupported Operation " + op);
+        }
+    }
+
+    private Object[] generateUniqueId(RecordData recordData, Schema schema, TableId tableId) {
+        List<String> primaryKeys = schema.primaryKeys();
+        List<ElasticsearchRowConverter.SerializationConverter> converters =
+                converterCache.get(tableId);
+        Preconditions.checkNotNull(converters, "No converters found for table: " + tableId);
+
+        return primaryKeys.stream()
+                .map(
+                        primaryKey -> {
+                            Column column =
+                                    schema.getColumns().stream()
+                                            .filter(col -> col.getName().equals(primaryKey))
+                                            .findFirst()
+                                            .orElseThrow(
+                                                    () ->
+                                                            new IllegalStateException(
+                                                                    "Primary key column not found: "
+                                                                            + primaryKey));
+                            int index = schema.getColumns().indexOf(column);

Review Comment:
   It's better to use a for loop here so the column list is only traversed once; `filter(...).findFirst()` followed by `indexOf(column)` walks `schema.getColumns()` twice for every primary key.
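   
   An untested single-pass sketch of what I mean, reusing the names already in this method (`schema`, `primaryKey`, `Column`):
   
   ```java
   // Find the primary key column and its index in one traversal,
   // instead of filter(...).findFirst() followed by indexOf(column).
   List<Column> columns = schema.getColumns();
   Column column = null;
   int index = -1;
   for (int i = 0; i < columns.size(); i++) {
       if (columns.get(i).getName().equals(primaryKey)) {
           column = columns.get(i);
           index = i;
           break;
       }
   }
   if (column == null) {
       throw new IllegalStateException("Primary key column not found: " + primaryKey);
   }
   ```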



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
