This is an automated email from the ASF dual-hosted git repository.
wanghailin pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/seatunnel.git
The following commit(s) were added to refs/heads/dev by this push:
new 9fb5499295 [Bugfix][Connector-v2] fix file sink
`isPartitionFieldWriteInFile` occurred exception when no columns are given
(#5508)
9fb5499295 is described below
commit 9fb5499295ad3281aac0ed49264cb070a0d3f8fa
Author: wei zhao <[email protected]>
AuthorDate: Wed Oct 25 11:59:27 2023 +0800
[Bugfix][Connector-v2] fix file sink `isPartitionFieldWriteInFile` occurred
exception when no columns are given (#5508)
---
.../seatunnel/file/sink/config/FileSinkConfig.java | 6 ++-
.../seatunnel/file/writer/FileSinkConfigTest.java | 50 ++++++++++++++++++++++
.../src/test/resources/test_write_hdfs.conf | 28 ++++++++++++
3 files changed, 83 insertions(+), 1 deletion(-)
diff --git
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java
index 87b005fec6..20ce8a5286 100644
---
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java
+++
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java
@@ -32,6 +32,7 @@ import org.apache.commons.lang3.StringUtils;
import lombok.Data;
import lombok.NonNull;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -83,7 +84,10 @@ public class FileSinkConfig extends BaseFileSinkConfig
implements PartitionConfi
// if the config sink_columns is empty, all fields in
SeaTunnelRowTypeInfo will be written
if (CollectionUtils.isEmpty(this.sinkColumnList)) {
- this.sinkColumnList =
Arrays.asList(seaTunnelRowTypeInfo.getFieldNames());
+ // construct a new ArrayList, because the list generated by
`Arrays.asList` does not support
+ // remove and add operations.
+ this.sinkColumnList =
+ new
ArrayList<>(Arrays.asList(seaTunnelRowTypeInfo.getFieldNames()));
}
if (config.hasPath(BaseSinkConfig.PARTITION_BY.key())) {
diff --git
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/FileSinkConfigTest.java
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/FileSinkConfigTest.java
new file mode 100644
index 0000000000..3b12aad0ec
--- /dev/null
+++
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/FileSinkConfigTest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.file.writer;
+
+import org.apache.seatunnel.shade.com.typesafe.config.Config;
+import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory;
+
+import org.apache.seatunnel.api.table.type.BasicType;
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import
org.apache.seatunnel.connectors.seatunnel.file.sink.config.FileSinkConfig;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.io.File;
+import java.net.URL;
+import java.nio.file.Paths;
+
+public class FileSinkConfigTest {
+
+ @Test
+ public void testConfigInit() throws Exception {
+ URL conf =
OrcReadStrategyTest.class.getResource("/test_write_hdfs.conf");
+ Assertions.assertNotNull(conf);
+ String confPath = Paths.get(conf.toURI()).toString();
+ Config config = ConfigFactory.parseFile(new File(confPath));
+
+ SeaTunnelRowType rowType =
+ new SeaTunnelRowType(
+ new String[] {"data", "ts"},
+ new SeaTunnelDataType[] {BasicType.STRING_TYPE,
BasicType.STRING_TYPE});
+ Assertions.assertDoesNotThrow(() -> new FileSinkConfig(config,
rowType));
+ }
+}
diff --git
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/test_write_hdfs.conf
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/test_write_hdfs.conf
new file mode 100644
index 0000000000..cfa6bb9e23
--- /dev/null
+++
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/test_write_hdfs.conf
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+{
+ fs.defaultFS = "hdfs://hadoop01:9000"
+ have_partition = true
+ partition_by = ["ts"]
+ partition_dir_expression = "${v0}"
+ is_partition_field_write_in_file = false
+ path = "/data/test"
+ file_format_type = "json"
+ batch_size=10
+}
+