whhe commented on code in PR #3360:
URL: https://github.com/apache/flink-cdc/pull/3360#discussion_r1637472845


##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-oceanbase/src/main/java/org/apache/flink/cdc/connectors/oceanbase/sink/OceanBaseEventSerializationSchema.java:
##########
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.oceanbase.sink;
+
+import org.apache.flink.cdc.common.data.RecordData;
+import org.apache.flink.cdc.common.event.CreateTableEvent;
+import org.apache.flink.cdc.common.event.DataChangeEvent;
+import org.apache.flink.cdc.common.event.Event;
+import org.apache.flink.cdc.common.event.OperationType;
+import org.apache.flink.cdc.common.event.SchemaChangeEvent;
+import org.apache.flink.cdc.common.event.TableId;
+import org.apache.flink.cdc.common.schema.Column;
+import org.apache.flink.cdc.common.schema.Schema;
+import org.apache.flink.cdc.common.utils.Preconditions;
+import org.apache.flink.cdc.common.utils.SchemaUtils;
+
+import org.apache.flink.shaded.guava31.com.google.common.collect.Lists;
+
+import com.oceanbase.connector.flink.table.DataChangeRecord;
+import com.oceanbase.connector.flink.table.Record;
+import com.oceanbase.connector.flink.table.RecordSerializationSchema;
+import com.oceanbase.connector.flink.table.TableInfo;
+
+import java.time.ZoneId;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/** A serializer that converts {@link Event} to {@link Record}. */
+public class OceanBaseEventSerializationSchema implements RecordSerializationSchema<Event> {
+
+    private final Map<TableId, Schema> schemaMaps = new HashMap<>();
+
+    /** ZoneId from pipeline config to support timestamp with local time zone. */
+    public final ZoneId pipelineZoneId;
+
+    public OceanBaseEventSerializationSchema(ZoneId zoneId) {
+        pipelineZoneId = zoneId;
+    }
+
+    @Override
+    public Record serialize(Event event) {
+        if (event instanceof DataChangeEvent) {
+            return applyDataChangeEvent((DataChangeEvent) event);
+        } else if (event instanceof SchemaChangeEvent) {
+            SchemaChangeEvent schemaChangeEvent = (SchemaChangeEvent) event;
+            TableId tableId = schemaChangeEvent.tableId();
+            if (event instanceof CreateTableEvent) {
+                schemaMaps.put(tableId, ((CreateTableEvent) event).getSchema());
+            } else {
+                if (!schemaMaps.containsKey(tableId)) {
+                    throw new RuntimeException("schema of " + tableId + " does not exist.");
+                }
+                schemaMaps.put(
+                        tableId,
+                        SchemaUtils.applySchemaChangeEvent(
+                                schemaMaps.get(tableId), schemaChangeEvent));
+            }
+        }
+        return null;
+    }
+
+    private Record applyDataChangeEvent(DataChangeEvent event) {
+        TableId tableId = event.tableId();
+        Schema schema = schemaMaps.get(tableId);
+        Preconditions.checkNotNull(schema, event.tableId() + " does not exist");
+        Object[] values;
+        OperationType op = event.op();
+        boolean isDelete = false;
+        switch (op) {
+            case INSERT:
+            case UPDATE:
+            case REPLACE:
+                values = serializerRecord(event.after(), schema);
+                break;
+            case DELETE:
+                values = serializerRecord(event.before(), schema);
+                isDelete = true;
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported operation " + op);
+        }
+        return buildDataChangeRecord(tableId, schema, values, isDelete);
+    }
+
+    private DataChangeRecord buildDataChangeRecord(
+            TableId tableId, Schema schema, Object[] values, boolean isDelete) {
+        com.oceanbase.connector.flink.table.TableId oceanBaseTableId =
+                new com.oceanbase.connector.flink.table.TableId(
+                        tableId.getSchemaName(), tableId.getTableName());

Review Comment:
   It would be better to add a null check for `tableId.getSchemaName()` here.
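   
   A minimal sketch of the suggested check, reusing the `Preconditions` class this file already imports (the error message text is only illustrative):
   
   ```java
   com.oceanbase.connector.flink.table.TableId oceanBaseTableId =
           new com.oceanbase.connector.flink.table.TableId(
                   // checkNotNull returns its argument, so the check can wrap the constructor argument
                   Preconditions.checkNotNull(
                           tableId.getSchemaName(),
                           "Schema name of " + tableId + " should not be null."),
                   tableId.getTableName());
   ```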



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-oceanbase/src/main/java/org/apache/flink/cdc/connectors/oceanbase/sink/OceanBaseDataSinkOptions.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.oceanbase.sink;
+
+import org.apache.flink.cdc.common.configuration.ConfigOption;
+import org.apache.flink.cdc.common.configuration.ConfigOptions;
+
+import com.alipay.oceanbase.rpc.protocol.payload.impl.direct_load.ObLoadDupActionType;
+
+import java.time.Duration;
+
+/** Options for {@link OceanBaseDataSink}. */
+public class OceanBaseDataSinkOptions {
+    // ------------------------------------------------------------------------------------------
+    // Options for sink connector
+    // ------------------------------------------------------------------------------------------
+    public static final ConfigOption<String> URL =
+            ConfigOptions.key("url")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("The connection URL.");
+
+    public static final ConfigOption<String> USERNAME =
+            ConfigOptions.key("username")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("The username.");
+
+    public static final ConfigOption<String> PASSWORD =
+            ConfigOptions.key("password")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("The password.");
+
+    public static final ConfigOption<String> DRIVER_CLASS_NAME =
+            ConfigOptions.key("driver-class-name")
+                    .stringType()
+                    .defaultValue("com.mysql.cj.jdbc.Driver")
+                    .withDescription(
+                            "JDBC driver class name, use 
'com.mysql.cj.jdbc.Driver' by default.");
+
+    public static final ConfigOption<String> DRUID_PROPERTIES =
+            ConfigOptions.key("druid-properties")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("Properties for specific connection 
pool.");
+
+    public static final ConfigOption<Boolean> MEMSTORE_CHECK_ENABLED =
+            ConfigOptions.key("memstore-check.enabled")
+                    .booleanType()
+                    .defaultValue(true)
+                    .withDescription("Whether enable memstore check. Default 
value is 'true'");
+
+    public static final ConfigOption<Double> MEMSTORE_THRESHOLD =
+            ConfigOptions.key("memstore-check.threshold")
+                    .doubleType()
+                    .defaultValue(0.9)
+                    .withDescription(
+                            "Memory usage threshold ratio relative to the 
limit value. Default value is '0.9'.");
+
+    public static final ConfigOption<Duration> MEMSTORE_CHECK_INTERVAL =
+            ConfigOptions.key("memstore-check.interval")
+                    .durationType()
+                    .defaultValue(Duration.ofSeconds(30))
+                    .withDescription(
+                            "The check interval, over this time, the writer 
will check if memstore reaches threshold. Default value is '30s'.");
+
+    public static final ConfigOption<Boolean> PARTITION_ENABLED =
+            ConfigOptions.key("partition.enabled")
+                    .booleanType()
+                    .defaultValue(false)
+                    .withDescription(
+                            "Whether to enable partition calculation and flush 
records by partitions. Default value is 'false'.");
+
+    public static final ConfigOption<Boolean> DIRECT_LOAD_ENABLED =
+            ConfigOptions.key("direct-load.enabled")

Review Comment:
   It would be better to mark the 'direct-load' related options as `@Experimental`.
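   
   A sketch of what this could look like, assuming the `@Experimental` annotation from `org.apache.flink.cdc.common.annotation`; since the diff is truncated here, the option body below is abbreviated and the default value and description are placeholders rather than the PR's actual values:
   
   ```java
   import org.apache.flink.cdc.common.annotation.Experimental;
   
   @Experimental
   public static final ConfigOption<Boolean> DIRECT_LOAD_ENABLED =
           ConfigOptions.key("direct-load.enabled")
                   .booleanType()
                   .defaultValue(false) // assumed default, for illustration only
                   .withDescription("Whether to enable direct load. Default value is 'false'.");
   ```
   
   The same annotation would then go on the other `direct-load.*` options as well.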


