This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.0 by this push:
new ae5ed0f7ab1 branch-4.0: [fix](sql function) Fix the unix_timestamp return value type in Nereids #57424 (#57485)
ae5ed0f7ab1 is described below
commit ae5ed0f7ab13894742dedc7a28e681d31a805119
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Fri Oct 31 09:36:18 2025 +0800
branch-4.0: [fix](sql function) Fix the unix_timestamp return value type in Nereids #57424 (#57485)
Cherry-picked from #57424
Co-authored-by: Refrain <[email protected]>
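The patch sets the new query option NewVersionUnixTimestamp to true in the FE paths that build TQueryOptions for Nereids plans (stream load planning, the coordinator context, the Nereids coordinator, and the http_stream path in FrontendServiceImpl), presumably so the BE evaluates unix_timestamp() with the return type derived by the Nereids planner. As a minimal sketch only (not part of this commit), the Java snippet below inspects the column type that SELECT unix_timestamp() reports over plain JDBC; the class name, connection URL, and credentials are placeholders, and it assumes a MySQL JDBC driver on the classpath plus a locally running Doris FE. It prints the reported type rather than asserting a specific value.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.Statement;

    public class UnixTimestampTypeCheck {
        public static void main(String[] args) throws Exception {
            // Placeholder connection details; adjust to the FE under test.
            String url = "jdbc:mysql://127.0.0.1:9030/information_schema";
            try (Connection conn = DriverManager.getConnection(url, "root", "");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("SELECT unix_timestamp()")) {
                ResultSetMetaData meta = rs.getMetaData();
                // Print the type the server reports for the no-argument form;
                // this derived return type is what the fix is about.
                System.out.println("unix_timestamp() column type: "
                        + meta.getColumnTypeName(1));
            }
        }
    }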
---
.../nereids/load/NereidsStreamLoadPlanner.java | 1 +
.../org/apache/doris/qe/CoordinatorContext.java | 1 +
.../org/apache/doris/qe/NereidsCoordinator.java | 1 +
.../apache/doris/service/FrontendServiceImpl.java | 1 +
.../data/datatype_p0/date/data_by_line.json | 3 +
.../date/test_unix_timestamp_func_load.groovy | 95 ++++++++++++++++++++++
6 files changed, 102 insertions(+)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/load/NereidsStreamLoadPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/load/NereidsStreamLoadPlanner.java
index a576a8113f4..81c99d8c9d9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/load/NereidsStreamLoadPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/load/NereidsStreamLoadPlanner.java
@@ -311,6 +311,7 @@ public class NereidsStreamLoadPlanner {
? taskInfo.isMemtableOnSinkNode()
: false;
queryOptions.setEnableMemtableOnSinkNode(enableMemtableOnSinkNode);
+ queryOptions.setNewVersionUnixTimestamp(true);
params.setQueryOptions(queryOptions);
TQueryGlobals queryGlobals = new TQueryGlobals();
queryGlobals.setNowString(TimeUtils.getDatetimeFormatWithTimeZone().format(LocalDateTime.now()));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/CoordinatorContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/CoordinatorContext.java
index c03253341fc..1c60b218777 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/CoordinatorContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/CoordinatorContext.java
@@ -306,6 +306,7 @@ public class CoordinatorContext {
queryOptions.setEnableProfile(enableProfile);
queryOptions.setProfileLevel(2);
queryOptions.setBeExecVersion(Config.be_exec_version);
+ queryOptions.setNewVersionUnixTimestamp(true);
TQueryGlobals queryGlobals = new TQueryGlobals();
queryGlobals.setNowString(TimeUtils.getDatetimeFormatWithTimeZone().format(LocalDateTime.now()));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/NereidsCoordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/NereidsCoordinator.java
index 04455f11248..f695ee2d024 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/NereidsCoordinator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/NereidsCoordinator.java
@@ -487,6 +487,7 @@ public class NereidsCoordinator extends Coordinator {
this.coordinatorContext.setJobProcessor(jobProc);
// Set this field to true to avoid data entering the normal cache LRU queue
this.coordinatorContext.queryOptions.setDisableFileCache(true);
+ this.coordinatorContext.queryOptions.setNewVersionUnixTimestamp(true);
}
private void setForQuery() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
index dd3a6836246..c12e4eddedf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
@@ -2176,6 +2176,7 @@ public class FrontendServiceImpl implements FrontendService.Iface {
: false;
coord.getQueryOptions().setEnableMemtableOnSinkNode(isEnableMemtableOnSinkNode);
}
+ // coord.getQueryOptions().setNewVersionUnixTimestamp(true);
httpStreamParams.setParams(coord.getStreamLoadPlan());
} catch (UserException e) {
LOG.warn("exec sql error", e);
diff --git a/regression-test/data/datatype_p0/date/data_by_line.json b/regression-test/data/datatype_p0/date/data_by_line.json
new file mode 100644
index 00000000000..ced110a3f64
--- /dev/null
+++ b/regression-test/data/datatype_p0/date/data_by_line.json
@@ -0,0 +1,3 @@
+{"a": "foo", "b": "bar"}
+{"a": "baz", "b": "qux"}
+
diff --git a/regression-test/suites/datatype_p0/date/test_unix_timestamp_func_load.groovy b/regression-test/suites/datatype_p0/date/test_unix_timestamp_func_load.groovy
new file mode 100644
index 00000000000..b3d405e4c36
--- /dev/null
+++ b/regression-test/suites/datatype_p0/date/test_unix_timestamp_func_load.groovy
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_unix_timestamp_func_load") {
+ def s3BucketName = getS3BucketName()
+ def s3Endpoint = getS3Endpoint()
+ def s3Region = getS3Region()
+ def ak = getS3AK()
+ def sk = getS3SK()
+
+ sql """
+ CREATE TABLE IF NOT EXISTS t (
+ id BIGINT
+ )
+ PROPERTIES("replication_num" = "1");
+ """
+
+ try {
+ streamLoad {
+ table "t"
+ set 'format', 'json'
+ set 'columns', 'a,b,id=unix_timestamp()'
+ file 'data_by_line.json'
+ time 10000
+
+ check { result, exception, startTime, endTime ->
+ if (exception != null) {
+ throw exception
+ }
+ log.info("Stream load result: ${result}".toString())
+ def json = parseJson(result)
+ assertEquals("success", json.Status.toLowerCase())
+ }
+ }
+
+ def res1 = sql """
+ insert into t
+ select unix_timestamp();
+ """
+ log.info("select frm s3 result: ${res1}".toString())
+ assertTrue(res1.size() == 1)
+
+ def label = "s3_load_default_" + UUID.randomUUID().toString().replaceAll("-", "")
+ sql """
+ LOAD LABEL ${label} (
+ DATA INFILE("s3://${s3BucketName}/load/data_by_line.json")
+ INTO TABLE t
+ FORMAT AS "json"
+ (a, b)
+ SET (
+ id = unix_timestamp()
+ )
+ )
+ WITH S3 (
+ "s3.access_key" = "${ak}",
+ "s3.secret_key" = "${sk}",
+ "s3.endpoint" = "${s3Endpoint}",
+ "s3.region" = "${s3Region}"
+ );
+ """
+
+ // Wait for load to complete
+ def max_try_time = 60000
+ while (max_try_time > 0) {
+ def result = sql "SHOW LOAD WHERE label = '${label}'"
+ if (result[0][2] == "FINISHED") {
+ break
+ } else if (result[0][2] == "CANCELLED") {
+ throw new Exception("Load job cancelled: " + result[0][7])
+ }
+ Thread.sleep(1000)
+ max_try_time -= 1000
+ if (max_try_time <= 0) {
+ throw new Exception("Load job timeout")
+ }
+ }
+ } finally {
+ try_sql("DROP TABLE IF EXISTS t")
+ }
+
+}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]