This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 460605ae3c8 [branch-2.1] pick some prs (#39860)
460605ae3c8 is described below

commit 460605ae3c87232ad3b837582f428e7cf8737c63
Author: zclllhhjj <[email protected]>
AuthorDate: Sat Aug 24 17:26:42 2024 +0800

    [branch-2.1] pick some prs (#39860)
    
    ## Proposed changes
    
    Issue Number: close #xxx
    
    https://github.com/apache/doris/pull/38385 optimize datetime parsing
    https://github.com/apache/doris/pull/38978 make stream load failure messages clearer and disable some errors' stacktraces by default
    https://github.com/apache/doris/pull/39255 fix random function coredump
    https://github.com/apache/doris/pull/39324 fix function corr inconsistency with the docs
    https://github.com/apache/doris/pull/39449 check auto partition nullity when creating partitions
    https://github.com/apache/doris/pull/39695 make DynamicPartitionScheduler pick up interval changes immediately
    https://github.com/apache/doris/pull/39754 add partition expr checks on table creation
---
 be/src/common/status.h                             |  44 ++++---
 be/src/http/action/http_stream.cpp                 |  23 ++--
 be/src/http/action/stream_load.cpp                 |  51 ++++----
 .../aggregate_function_corr.cpp                    |   3 +-
 be/src/vec/functions/function_cast.h               |  81 +++++-------
 be/src/vec/functions/random.cpp                    |  11 +-
 be/src/vec/runtime/vdatetime_value.cpp             |  29 +++--
 .../java/org/apache/doris/common/ConfigBase.java   |   4 +
 .../main/java/org/apache/doris/catalog/Env.java    |  25 +++-
 .../apache/doris/datasource/InternalCatalog.java   | 138 ++++++++++++++++-----
 .../apache/doris/httpv2/rest/SetConfigAction.java  |   3 +-
 .../trees/expressions/functions/scalar/Random.java |  11 +-
 .../trees/plans/commands/CreateTableCommand.java   |   7 +-
 .../org/apache/doris/catalog/CreateTableTest.java  |  29 +++--
 .../datasource/hive/HiveDDLAndDMLPlanTest.java     |   2 +-
 .../trees/plans/CreateTableCommandTest.java        |  49 ++++----
 .../nereids_function_p0/agg_function/test_corr.out |  18 +++
 .../load_p0/stream_load/test_stream_load.groovy    |   4 +-
 .../agg_function/test_corr.groovy                  |   7 ++
 .../suites/nereids_p0/system/test_query_sys.groovy |  21 ++++
 .../auto_partition/test_auto_list_partition.groovy |  16 +++
 .../test_auto_range_partition.groovy               |  75 +++++++++++
 .../suites/partition_p0/test_null_partition.groovy |  84 +++++++++++++
 23 files changed, 536 insertions(+), 199 deletions(-)

diff --git a/be/src/common/status.h b/be/src/common/status.h
index 8847bb7c087..b2754bc3c57 100644
--- a/be/src/common/status.h
+++ b/be/src/common/status.h
@@ -306,8 +306,8 @@ extern ErrorCodeState error_states[MAX_ERROR_CODE_DEFINE_NUM];
 class ErrorCodeInitializer {
 public:
     ErrorCodeInitializer(int temp) : signal_value(temp) {
-        for (int i = 0; i < MAX_ERROR_CODE_DEFINE_NUM; ++i) {
-            error_states[i].error_code = 0;
+        for (auto& error_state : error_states) {
+            error_state.error_code = 0;
         }
 #define M(NAME, ENABLESTACKTRACE)                                  \
     error_states[TStatusCode::NAME].stacktrace = ENABLESTACKTRACE; \
@@ -330,7 +330,7 @@ public:
 #undef M
     }
 
-    void check_init() {
+    void check_init() const {
         //the signal value is 0, it means the global error states not inited, it's logical error
         // DO NOT use dcheck here, because dcheck depend on glog, and glog maybe not inited at this time.
         if (signal_value == 0) {
@@ -435,41 +435,49 @@ public:
         return status;
     }
 
-    static Status OK() { return Status(); }
+    static Status OK() { return {}; }
 
+// default have stacktrace. could disable manually.
 #define ERROR_CTOR(name, code)                                                       \
     template <bool stacktrace = true, typename... Args>                              \
     static Status name(std::string_view msg, Args&&... args) {                       \
         return Error<ErrorCode::code, stacktrace>(msg, std::forward<Args>(args)...); \
     }
 
+// default have no stacktrace. could enable manually.
+#define ERROR_CTOR_NOSTACK(name, code)                                               \
+    template <bool stacktrace = false, typename... Args>                             \
+    static Status name(std::string_view msg, Args&&... args) {                       \
+        return Error<ErrorCode::code, stacktrace>(msg, std::forward<Args>(args)...); \
+    }
+
     ERROR_CTOR(PublishTimeout, PUBLISH_TIMEOUT)
     ERROR_CTOR(MemoryAllocFailed, MEM_ALLOC_FAILED)
     ERROR_CTOR(BufferAllocFailed, BUFFER_ALLOCATION_FAILED)
-    ERROR_CTOR(InvalidArgument, INVALID_ARGUMENT)
-    ERROR_CTOR(InvalidJsonPath, INVALID_JSON_PATH)
+    ERROR_CTOR_NOSTACK(InvalidArgument, INVALID_ARGUMENT)
+    ERROR_CTOR_NOSTACK(InvalidJsonPath, INVALID_JSON_PATH)
     ERROR_CTOR(MinimumReservationUnavailable, MINIMUM_RESERVATION_UNAVAILABLE)
     ERROR_CTOR(Corruption, CORRUPTION)
     ERROR_CTOR(IOError, IO_ERROR)
     ERROR_CTOR(NotFound, NOT_FOUND)
-    ERROR_CTOR(AlreadyExist, ALREADY_EXIST)
+    ERROR_CTOR_NOSTACK(AlreadyExist, ALREADY_EXIST)
     ERROR_CTOR(NotSupported, NOT_IMPLEMENTED_ERROR)
-    ERROR_CTOR(EndOfFile, END_OF_FILE)
+    ERROR_CTOR_NOSTACK(EndOfFile, END_OF_FILE)
     ERROR_CTOR(InternalError, INTERNAL_ERROR)
-    ERROR_CTOR(WaitForRf, PIP_WAIT_FOR_RF)
-    ERROR_CTOR(WaitForScannerContext, PIP_WAIT_FOR_SC)
+    ERROR_CTOR_NOSTACK(WaitForRf, PIP_WAIT_FOR_RF)
+    ERROR_CTOR_NOSTACK(WaitForScannerContext, PIP_WAIT_FOR_SC)
     ERROR_CTOR(RuntimeError, RUNTIME_ERROR)
-    ERROR_CTOR(Cancelled, CANCELLED)
+    ERROR_CTOR_NOSTACK(Cancelled, CANCELLED)
     ERROR_CTOR(MemoryLimitExceeded, MEM_LIMIT_EXCEEDED)
     ERROR_CTOR(RpcError, THRIFT_RPC_ERROR)
     ERROR_CTOR(TimedOut, TIMEOUT)
-    ERROR_CTOR(TooManyTasks, TOO_MANY_TASKS)
+    ERROR_CTOR_NOSTACK(TooManyTasks, TOO_MANY_TASKS)
     ERROR_CTOR(Uninitialized, UNINITIALIZED)
     ERROR_CTOR(Aborted, ABORTED)
-    ERROR_CTOR(DataQualityError, DATA_QUALITY_ERROR)
-    ERROR_CTOR(NotAuthorized, NOT_AUTHORIZED)
+    ERROR_CTOR_NOSTACK(DataQualityError, DATA_QUALITY_ERROR)
+    ERROR_CTOR_NOSTACK(NotAuthorized, NOT_AUTHORIZED)
     ERROR_CTOR(HttpError, HTTP_ERROR)
-    ERROR_CTOR(NeedSendAgain, NEED_SEND_AGAIN)
+    ERROR_CTOR_NOSTACK(NeedSendAgain, NEED_SEND_AGAIN)
     ERROR_CTOR(CgroupError, CGROUP_ERROR)
 #undef ERROR_CTOR
 
@@ -577,13 +585,13 @@ public:
         return error_st_;
     }
 
+    AtomicStatus(const AtomicStatus&) = delete;
+    void operator=(const AtomicStatus&) = delete;
+
 private:
     std::atomic_int16_t error_code_ = 0;
     Status error_st_;
     mutable std::mutex mutex_;
-
-    AtomicStatus(const AtomicStatus&) = delete;
-    void operator=(const AtomicStatus&) = delete;
 };
 
 inline std::ostream& operator<<(std::ostream& ostr, const Status& status) {
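
The ERROR_CTOR / ERROR_CTOR_NOSTACK split above only changes the default of the
`stacktrace` template parameter, so call sites can still override it explicitly
(the diff itself uses Status::InternalError<false>(...)). Below is a minimal
standalone sketch of the pattern; MockStatus and the factory names are
illustrative stand-ins, not Doris's real Status class:

    #include <iostream>
    #include <string>
    #include <utility>

    struct MockStatus {
        std::string msg;
        template <bool stacktrace>
        static MockStatus error(std::string m) {
            if constexpr (stacktrace) m += " [stacktrace attached]";
            return MockStatus{std::move(m)};
        }
    };

    // stacktrace on by default; opt out per call with name<false>(...)
    #define ERROR_CTOR(name)                                    \
        template <bool stacktrace = true>                       \
        static MockStatus name(std::string m) {                 \
            return MockStatus::error<stacktrace>(std::move(m)); \
        }

    // stacktrace off by default; opt in per call with name<true>(...)
    #define ERROR_CTOR_NOSTACK(name)                            \
        template <bool stacktrace = false>                      \
        static MockStatus name(std::string m) {                 \
            return MockStatus::error<stacktrace>(std::move(m)); \
        }

    struct Status {
        ERROR_CTOR(InternalError)
        ERROR_CTOR_NOSTACK(InvalidArgument)
    };

    int main() {
        std::cout << Status::InvalidArgument("bad arg").msg << "\n";       // quiet by default
        std::cout << Status::InvalidArgument<true>("bad arg").msg << "\n"; // forced stacktrace
        std::cout << Status::InternalError<false>("oops").msg << "\n";     // suppressed
    }
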
diff --git a/be/src/http/action/http_stream.cpp b/be/src/http/action/http_stream.cpp
index b07166a3717..7dd85653002 100644
--- a/be/src/http/action/http_stream.cpp
+++ b/be/src/http/action/http_stream.cpp
@@ -18,9 +18,7 @@
 #include "http/action/http_stream.h"
 
 #include <cstddef>
-#include <deque>
 #include <future>
-#include <shared_mutex>
 #include <sstream>
 
 // use string iequal
@@ -31,7 +29,6 @@
 #include <thrift/protocol/TDebugProtocol.h>
 
 #include "common/config.h"
-#include "common/consts.h"
 #include "common/logging.h"
 #include "common/status.h"
 #include "common/utils.h"
@@ -42,7 +39,6 @@
 #include "http/http_common.h"
 #include "http/http_headers.h"
 #include "http/http_request.h"
-#include "http/http_response.h"
 #include "http/utils.h"
 #include "io/fs/stream_load_pipe.h"
 #include "olap/storage_engine.h"
@@ -57,9 +53,7 @@
 #include "runtime/stream_load/stream_load_executor.h"
 #include "runtime/stream_load/stream_load_recorder.h"
 #include "util/byte_buffer.h"
-#include "util/debug_util.h"
 #include "util/doris_metrics.h"
-#include "util/load_util.h"
 #include "util/metrics.h"
 #include "util/string_util.h"
 #include "util/thrift_rpc_helper.h"
@@ -132,7 +126,7 @@ Status HttpStreamAction::_handle(HttpRequest* http_req, std::shared_ptr<StreamLo
     if (ctx->body_bytes > 0 && ctx->receive_bytes != ctx->body_bytes) {
         LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes
                      << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id;
-        return Status::InternalError("receive body don't equal with body bytes");
+        return Status::Error<ErrorCode::NETWORK_ERROR>("receive body don't equal with body bytes");
     }
     RETURN_IF_ERROR(ctx->body_sink->finish());
 
@@ -195,7 +189,7 @@ Status HttpStreamAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
     // auth information
     if (!parse_basic_auth(*http_req, &ctx->auth)) {
         LOG(WARNING) << "parse basic authorization failed." << ctx->brief();
-        return Status::InternalError("no valid Basic authorization");
+        return Status::NotAuthorized("no valid Basic authorization");
     }
 
     // TODO(zs) : need Need to request an FE to obtain information such as format
@@ -207,8 +201,10 @@ Status HttpStreamAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
         // csv max body size
         if (ctx->body_bytes > csv_max_body_bytes) {
             LOG(WARNING) << "body exceed max size." << ctx->brief();
-            return Status::InternalError("body exceed max size: {}, data: {}", 
csv_max_body_bytes,
-                                         ctx->body_bytes);
+            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                    "body size {} exceed BE's conf `streaming_load_max_mb` {}. 
increase it if you "
+                    "are sure this load is reasonable",
+                    ctx->body_bytes, csv_max_body_bytes);
         }
     }
 
@@ -380,7 +376,8 @@ Status HttpStreamAction::_handle_group_commit(HttpRequest* req,
     std::string group_commit_mode = req->header(HTTP_GROUP_COMMIT);
     if (!group_commit_mode.empty() && !iequal(group_commit_mode, "sync_mode") &&
         !iequal(group_commit_mode, "async_mode") && !iequal(group_commit_mode, "off_mode")) {
-        return Status::InternalError("group_commit can only be [async_mode, sync_mode, off_mode]");
+        return Status::InvalidArgument(
+                "group_commit can only be [async_mode, sync_mode, off_mode]");
     }
     if (config::wait_internal_group_commit_finish) {
         group_commit_mode = "sync_mode";
@@ -393,7 +390,7 @@ Status HttpStreamAction::_handle_group_commit(HttpRequest* req,
         ss << "This http load content length <0 (" << content_length
            << "), please check your content length.";
         LOG(WARNING) << ss.str();
-        return Status::InternalError(ss.str());
+        return Status::InvalidArgument(ss.str());
     }
     // allow chunked stream load in flink
     auto is_chunk =
@@ -415,7 +412,7 @@ Status HttpStreamAction::_handle_group_commit(HttpRequest* req,
     auto partitions = !req->header(HTTP_PARTITIONS).empty();
     if (!partial_columns && !partitions && !temp_partitions && !ctx->two_phase_commit) {
         if (!config::wait_internal_group_commit_finish && !ctx->label.empty()) {
-            return Status::InternalError("label and group_commit can't be set at the same time");
+            return Status::InvalidArgument("label and group_commit can't be set at the same time");
         }
         ctx->group_commit = true;
         if (iequal(group_commit_mode, "async_mode")) {
diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp
index feea93446c8..3f32655cf14 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -26,15 +26,13 @@
 #include <gen_cpp/PaloInternalService_types.h>
 #include <gen_cpp/PlanNodes_types.h>
 #include <gen_cpp/Types_types.h>
-#include <stdint.h>
-#include <stdlib.h>
 #include <sys/time.h>
 #include <thrift/protocol/TDebugProtocol.h>
-#include <time.h>
 
-#include <algorithm>
+#include <cstdint>
+#include <cstdlib>
+#include <ctime>
 #include <future>
-#include <map>
 #include <sstream>
 #include <stdexcept>
 #include <utility>
@@ -120,7 +118,7 @@ void StreamLoadAction::handle(HttpRequest* req) {
             _exec_env->stream_load_executor()->rollback_txn(ctx.get());
             ctx->need_rollback = false;
         }
-        if (ctx->body_sink.get() != nullptr) {
+        if (ctx->body_sink != nullptr) {
             ctx->body_sink->cancel(ctx->status.to_string());
         }
     }
@@ -144,7 +142,7 @@ Status StreamLoadAction::_handle(std::shared_ptr<StreamLoadContext> ctx) {
     if (ctx->body_bytes > 0 && ctx->receive_bytes != ctx->body_bytes) {
         LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes
                      << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id;
-        return Status::InternalError<false>("receive body don't equal with body bytes");
+        return Status::Error<ErrorCode::NETWORK_ERROR>("receive body don't equal with body bytes");
     }
 
     // if we use non-streaming, MessageBodyFileSink.finish will close the file
@@ -208,7 +206,7 @@ int StreamLoadAction::on_header(HttpRequest* req) {
             _exec_env->stream_load_executor()->rollback_txn(ctx.get());
             ctx->need_rollback = false;
         }
-        if (ctx->body_sink.get() != nullptr) {
+        if (ctx->body_sink != nullptr) {
             ctx->body_sink->cancel(ctx->status.to_string());
         }
         auto str = ctx->to_json();
@@ -230,13 +228,13 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
     // auth information
     if (!parse_basic_auth(*http_req, &ctx->auth)) {
         LOG(WARNING) << "parse basic authorization failed." << ctx->brief();
-        return Status::InternalError<false>("no valid Basic authorization");
+        return Status::NotAuthorized("no valid Basic authorization");
     }
 
     // get format of this put
     if (!http_req->header(HTTP_COMPRESS_TYPE).empty() &&
         iequal(http_req->header(HTTP_FORMAT_KEY), "JSON")) {
-        return Status::InternalError<false>("compress data of JSON format is 
not supported.");
+        return Status::NotSupported("compress data of JSON format is not 
supported.");
     }
     std::string format_str = http_req->header(HTTP_FORMAT_KEY);
     if (iequal(format_str, BeConsts::CSV_WITH_NAMES) ||
@@ -252,8 +250,8 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
     LoadUtil::parse_format(format_str, http_req->header(HTTP_COMPRESS_TYPE), &ctx->format,
                            &ctx->compress_type);
     if (ctx->format == TFileFormatType::FORMAT_UNKNOWN) {
-        return Status::InternalError<false>("unknown data format, format={}",
-                                            http_req->header(HTTP_FORMAT_KEY));
+        return Status::Error<ErrorCode::DATA_FILE_TYPE_ERROR>("unknown data format, format={}",
+                                                              http_req->header(HTTP_FORMAT_KEY));
     }
 
     // check content length
@@ -271,16 +269,18 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
         // json max body size
         if ((ctx->format == TFileFormatType::FORMAT_JSON) &&
             (ctx->body_bytes > json_max_body_bytes) && !read_json_by_line) {
-            return Status::InternalError<false>(
-                    "The size of this batch exceed the max size [{}]  of json type data "
-                    " data [ {} ]. Split the file, or use 'read_json_by_line'",
-                    json_max_body_bytes, ctx->body_bytes);
+            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                    "json body size {} exceed BE's conf `streaming_load_json_max_mb` {}. increase "
+                    "it if you are sure this load is reasonable",
+                    ctx->body_bytes, json_max_body_bytes);
         }
         // csv max body size
         else if (ctx->body_bytes > csv_max_body_bytes) {
             LOG(WARNING) << "body exceed max size." << ctx->brief();
-            return Status::InternalError<false>("body exceed max size: {}, 
data: {}",
-                                                csv_max_body_bytes, 
ctx->body_bytes);
+            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                    "body size {} exceed BE's conf `streaming_load_max_mb` {}. 
increase it if you "
+                    "are sure this load is reasonable",
+                    ctx->body_bytes, csv_max_body_bytes);
         }
     } else {
 #ifndef BE_TEST
@@ -298,13 +298,13 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<Strea
                   !ctx->is_chunked_transfer))) {
         LOG(WARNING) << "content_length is empty and transfer-encoding!=chunked, please set "
                         "content_length or transfer-encoding=chunked";
-        return Status::InternalError<false>(
+        return Status::InvalidArgument(
                 "content_length is empty and transfer-encoding!=chunked, 
please set content_length "
                 "or transfer-encoding=chunked");
     } else if (UNLIKELY(!http_req->header(HttpHeaders::CONTENT_LENGTH).empty() 
&&
                         ctx->is_chunked_transfer)) {
         LOG(WARNING) << "please do not set both content_length and 
transfer-encoding";
-        return Status::InternalError<false>(
+        return Status::InvalidArgument(
                 "please do not set both content_length and transfer-encoding");
     }
 
@@ -428,7 +428,7 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req,
     if (!http_req->header(HTTP_LINE_DELIMITER).empty()) {
         request.__set_line_delimiter(http_req->header(HTTP_LINE_DELIMITER));
     }
-    if (!http_req->header(HTTP_ENCLOSE).empty() && http_req->header(HTTP_ENCLOSE).size() > 0) {
+    if (!http_req->header(HTTP_ENCLOSE).empty() && !http_req->header(HTTP_ENCLOSE).empty()) {
         const auto& enclose_str = http_req->header(HTTP_ENCLOSE);
         if (enclose_str.length() != 1) {
             return Status::InvalidArgument("enclose must be single-char, 
actually is {}",
@@ -436,7 +436,7 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req,
         }
         request.__set_enclose(http_req->header(HTTP_ENCLOSE)[0]);
     }
-    if (!http_req->header(HTTP_ESCAPE).empty() && http_req->header(HTTP_ESCAPE).size() > 0) {
+    if (!http_req->header(HTTP_ESCAPE).empty() && !http_req->header(HTTP_ESCAPE).empty()) {
         const auto& escape_str = http_req->header(HTTP_ESCAPE);
         if (escape_str.length() != 1) {
             return Status::InvalidArgument("escape must be single-char, 
actually is {}",
@@ -717,7 +717,7 @@ Status StreamLoadAction::_handle_group_commit(HttpRequest* req,
     std::string group_commit_mode = req->header(HTTP_GROUP_COMMIT);
     if (!group_commit_mode.empty() && !iequal(group_commit_mode, "sync_mode") &&
         !iequal(group_commit_mode, "async_mode") && !iequal(group_commit_mode, "off_mode")) {
-        return Status::InternalError<false>(
+        return Status::InvalidArgument(
                 "group_commit can only be [async_mode, sync_mode, off_mode]");
     }
     if (config::wait_internal_group_commit_finish) {
@@ -731,7 +731,7 @@ Status StreamLoadAction::_handle_group_commit(HttpRequest* req,
         ss << "This stream load content length <0 (" << content_length
            << "), please check your content length.";
         LOG(WARNING) << ss.str();
-        return Status::InternalError<false>(ss.str());
+        return Status::InvalidArgument(ss.str());
     }
     // allow chunked stream load in flink
     auto is_chunk = !req->header(HttpHeaders::TRANSFER_ENCODING).empty() &&
@@ -752,8 +752,7 @@ Status StreamLoadAction::_handle_group_commit(HttpRequest* req,
     auto partitions = !req->header(HTTP_PARTITIONS).empty();
     if (!partial_columns && !partitions && !temp_partitions && !ctx->two_phase_commit) {
         if (!config::wait_internal_group_commit_finish && !ctx->label.empty()) {
-            return Status::InternalError<false>(
-                    "label and group_commit can't be set at the same time");
+            return Status::InvalidArgument("label and group_commit can't be set at the same time");
         }
         ctx->group_commit = true;
         if (iequal(group_commit_mode, "async_mode")) {
diff --git a/be/src/vec/aggregate_functions/aggregate_function_corr.cpp b/be/src/vec/aggregate_functions/aggregate_function_corr.cpp
index fb84e92e0e6..8237f588298 100644
--- a/be/src/vec/aggregate_functions/aggregate_function_corr.cpp
+++ b/be/src/vec/aggregate_functions/aggregate_function_corr.cpp
@@ -68,7 +68,8 @@ struct CorrMoment {
     }
 
     T get() const {
-        if ((m0 * x2 - x1 * x1) * (m0 * y2 - y1 * y1) == 0) [[unlikely]] {
+        // avoid float error(silent nan) when x or y is constant
+        if (m0 * x2 <= x1 * x1 || m0 * y2 <= y1 * y1) [[unlikely]] {
             return 0;
         }
         return (m0 * xy - x1 * y1) / sqrt((m0 * x2 - x1 * x1) * (m0 * y2 - y1 * y1));
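
For reference, CorrMoment::get() computes Pearson correlation as
(m0 * xy - x1 * y1) / sqrt((m0 * x2 - x1 * x1) * (m0 * y2 - y1 * y1)). When x
(or y) is a constant column, m0 * x2 - x1 * x1 is zero in exact arithmetic, but
in floating point it can come out as a tiny negative residual; the old `== 0`
test then misses it and sqrt() silently yields NaN. A standalone illustration of
the failure mode (the values are illustrative; whether the residual is negative
depends on the inputs):

    #include <cmath>
    #include <cstdio>

    int main() {
        double m0 = 3;                        // row count
        double x = 0.1;                       // constant x column value
        double x1 = x + x + x;                // sum(x)
        double x2 = x * x + x * x + x * x;    // sum(x * x)
        double var_term = m0 * x2 - x1 * x1;  // exactly 0 in real arithmetic
        std::printf("var_term = %.17g\n", var_term);  // may be a tiny nonzero residual
        // if the residual is negative, the old `== 0` check lets it reach sqrt()
        std::printf("sqrt = %g\n", std::sqrt(var_term));  // nan for negative input
        // the patched guard catches zero and negative residuals alike
        std::printf("guard fires: %d\n", m0 * x2 <= x1 * x1 ? 1 : 0);
    }
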
diff --git a/be/src/vec/functions/function_cast.h b/be/src/vec/functions/function_cast.h
index acb267b83e5..54f6a834a9e 100644
--- a/be/src/vec/functions/function_cast.h
+++ b/be/src/vec/functions/function_cast.h
@@ -1008,9 +1008,9 @@ struct NameToDateTime {
     static constexpr auto name = "toDateTime";
 };
 
-template <typename DataType, typename Additions = void*, typename FromDataType = void*>
+template <typename DataType, typename FromDataType = void*>
 bool try_parse_impl(typename DataType::FieldType& x, ReadBuffer& rb, FunctionContext* context,
-                    Additions additions [[maybe_unused]] = Additions()) {
+                    UInt32 scale [[maybe_unused]] = 0) {
     if constexpr (IsDateTimeType<DataType>) {
         return try_read_datetime_text(x, rb, context->state()->timezone_obj());
     }
@@ -1024,7 +1024,6 @@ bool try_parse_impl(typename DataType::FieldType& x, ReadBuffer& rb, FunctionCon
     }
 
     if constexpr (IsDateTimeV2Type<DataType>) {
-        UInt32 scale = additions;
         return try_read_datetime_v2_text(x, rb, context->state()->timezone_obj(), scale);
     }
 
@@ -1062,7 +1061,6 @@ bool try_parse_impl(typename DataType::FieldType& x, ReadBuffer& rb, FunctionCon
 
 template <typename DataType, typename Additions = void*>
 StringParser::ParseResult try_parse_decimal_impl(typename DataType::FieldType& x, ReadBuffer& rb,
-                                                 const cctz::time_zone& local_time_zone,
                                                  Additions additions
                                                  [[maybe_unused]] = Additions()) {
     if constexpr (IsDataTypeDecimalV2<DataType>) {
@@ -1491,15 +1489,9 @@ private:
     const char* name;
 };
 
-struct NameCast {
-    static constexpr auto name = "CAST";
-};
-
-template <typename FromDataType, typename ToDataType, typename Name>
-struct ConvertThroughParsing {
-    static_assert(std::is_same_v<FromDataType, DataTypeString>,
-                  "ConvertThroughParsing is only applicable for String or 
FixedString data types");
-
+// always from DataTypeString
+template <typename ToDataType, typename Name>
+struct StringParsing {
     using ToFieldType = typename ToDataType::FieldType;
 
     static bool is_all_read(ReadBuffer& in) { return in.eof(); }
@@ -1512,48 +1504,38 @@ struct ConvertThroughParsing {
                                            ColumnDecimal<ToFieldType>, ColumnVector<ToFieldType>>;
 
         const IColumn* col_from = block.get_by_position(arguments[0]).column.get();
-        const ColumnString* col_from_string = check_and_get_column<ColumnString>(col_from);
+        const auto* col_from_string = check_and_get_column<ColumnString>(col_from);
 
-        if (std::is_same_v<FromDataType, DataTypeString> && !col_from_string) {
+        if (!col_from_string) {
             return Status::RuntimeError("Illegal column {} of first argument 
of function {}",
                                         col_from->get_name(), Name::name);
         }
 
-        size_t size = input_rows_count;
+        size_t row = input_rows_count;
         typename ColVecTo::MutablePtr col_to = nullptr;
 
         if constexpr (IsDataTypeDecimal<ToDataType>) {
             UInt32 scale = ((PrecisionScaleArg)additions).scale;
             ToDataType::check_type_scale(scale);
-            col_to = ColVecTo::create(size, scale);
+            col_to = ColVecTo::create(row, scale);
         } else {
-            col_to = ColVecTo::create(size);
+            col_to = ColVecTo::create(row);
         }
 
         typename ColVecTo::Container& vec_to = col_to->get_data();
 
         ColumnUInt8::MutablePtr col_null_map_to;
         ColumnUInt8::Container* vec_null_map_to [[maybe_unused]] = nullptr;
-        col_null_map_to = ColumnUInt8::create(size);
+        col_null_map_to = ColumnUInt8::create(row);
         vec_null_map_to = &col_null_map_to->get_data();
 
-        const ColumnString::Chars* chars = nullptr;
-        const IColumn::Offsets* offsets = nullptr;
-        size_t fixed_string_size = 0;
-
-        if constexpr (std::is_same_v<FromDataType, DataTypeString>) {
-            chars = &col_from_string->get_chars();
-            offsets = &col_from_string->get_offsets();
-        }
+        const ColumnString::Chars* chars = &col_from_string->get_chars();
+        const IColumn::Offsets* offsets = &col_from_string->get_offsets();
 
         size_t current_offset = 0;
-        for (size_t i = 0; i < size; ++i) {
-            size_t next_offset = std::is_same_v<FromDataType, DataTypeString>
-                                         ? (*offsets)[i]
-                                         : (current_offset + fixed_string_size);
-            size_t string_size = std::is_same_v<FromDataType, DataTypeString>
-                                         ? next_offset - current_offset
-                                         : fixed_string_size;
+        for (size_t i = 0; i < row; ++i) {
+            size_t next_offset = (*offsets)[i];
+            size_t string_size = next_offset - current_offset;
 
             ReadBuffer read_buffer(&(*chars)[current_offset], string_size);
 
@@ -1561,8 +1543,7 @@ struct ConvertThroughParsing {
             if constexpr (IsDataTypeDecimal<ToDataType>) {
                 ToDataType::check_type_precision((PrecisionScaleArg(additions).precision));
                 StringParser::ParseResult res = try_parse_decimal_impl<ToDataType>(
-                        vec_to[i], read_buffer, context->state()->timezone_obj(),
-                        PrecisionScaleArg(additions));
+                        vec_to[i], read_buffer, PrecisionScaleArg(additions));
                 parsed = (res == StringParser::PARSE_SUCCESS ||
                           res == StringParser::PARSE_OVERFLOW ||
                           res == StringParser::PARSE_UNDERFLOW);
@@ -1572,8 +1553,8 @@ struct ConvertThroughParsing {
                parsed = try_parse_impl<ToDataType>(vec_to[i], read_buffer, context,
                                                    type->get_scale());
            } else {
-                parsed = try_parse_impl<ToDataType, void*, FromDataType>(vec_to[i], read_buffer,
-                                                                         context);
+                parsed =
+                        try_parse_impl<ToDataType, DataTypeString>(vec_to[i], read_buffer, context);
             }
             (*vec_null_map_to)[i] = !parsed || !is_all_read(read_buffer);
             current_offset = next_offset;
@@ -1587,25 +1568,27 @@ struct ConvertThroughParsing {
 
 template <typename Name>
 struct ConvertImpl<DataTypeString, DataTypeDecimal<Decimal32>, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeDecimal<Decimal32>, Name> {};
+        : StringParsing<DataTypeDecimal<Decimal32>, Name> {};
 template <typename Name>
 struct ConvertImpl<DataTypeString, DataTypeDecimal<Decimal64>, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeDecimal<Decimal64>, Name> {};
+        : StringParsing<DataTypeDecimal<Decimal64>, Name> {};
 template <typename Name>
 struct ConvertImpl<DataTypeString, DataTypeDecimal<Decimal128V2>, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeDecimal<Decimal128V2>, Name> {};
+        : StringParsing<DataTypeDecimal<Decimal128V2>, Name> {};
 template <typename Name>
 struct ConvertImpl<DataTypeString, DataTypeDecimal<Decimal128V3>, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeDecimal<Decimal128V3>, Name> {};
+        : StringParsing<DataTypeDecimal<Decimal128V3>, Name> {};
 template <typename Name>
 struct ConvertImpl<DataTypeString, DataTypeDecimal<Decimal256>, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeDecimal<Decimal256>, Name> {};
+        : StringParsing<DataTypeDecimal<Decimal256>, Name> {};
 template <typename Name>
-struct ConvertImpl<DataTypeString, DataTypeIPv4, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeIPv4, Name> {};
+struct ConvertImpl<DataTypeString, DataTypeIPv4, Name> : StringParsing<DataTypeIPv4, Name> {};
 template <typename Name>
-struct ConvertImpl<DataTypeString, DataTypeIPv6, Name>
-        : ConvertThroughParsing<DataTypeString, DataTypeIPv6, Name> {};
+struct ConvertImpl<DataTypeString, DataTypeIPv6, Name> : StringParsing<DataTypeIPv6, Name> {};
+
+struct NameCast {
+    static constexpr auto name = "CAST";
+};
 
 template <typename ToDataType, typename Name>
 class FunctionConvertFromString : public IFunction {
@@ -1638,8 +1621,8 @@ public:
         const IDataType* from_type = block.get_by_position(arguments[0]).type.get();
 
         if (check_and_get_data_type<DataTypeString>(from_type)) {
-            return ConvertThroughParsing<DataTypeString, ToDataType, Name>::execute(
-                    context, block, arguments, result, input_rows_count);
+            return StringParsing<ToDataType, Name>::execute(context, block, arguments, result,
+                                                            input_rows_count);
         }
 
         return Status::RuntimeError(
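
The renamed StringParsing keeps the cast-from-string convention: a row converts
only if parsing succeeds AND the whole input is consumed (is_all_read), and a
failed row sets the null map instead of failing the query. A rough standalone
analogue of that parsed/is_all_read logic, using std::from_chars rather than
Doris's parsers:

    #include <charconv>
    #include <cstdio>
    #include <string_view>
    #include <vector>

    int main() {
        std::vector<std::string_view> in = {"42", "4.2", "42abc", ""};
        std::vector<int> out(in.size());
        std::vector<bool> null_map(in.size());
        for (size_t i = 0; i < in.size(); ++i) {
            const char* begin = in[i].data();
            const char* end = begin + in[i].size();
            auto [ptr, ec] = std::from_chars(begin, end, out[i]);
            bool parsed = (ec == std::errc());
            bool all_read = (ptr == end);  // mirrors is_all_read(): buffer fully consumed
            null_map[i] = !parsed || !all_read;
            std::printf("%-6s -> %s\n", begin, null_map[i] ? "NULL" : "parsed");
        }
    }
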
diff --git a/be/src/vec/functions/random.cpp b/be/src/vec/functions/random.cpp
index 564a51d932c..d1c3bab66c8 100644
--- a/be/src/vec/functions/random.cpp
+++ b/be/src/vec/functions/random.cpp
@@ -70,15 +70,19 @@ public:
                 // This is a call to RandSeed, initialize the seed
                 // TODO: should we support non-constant seed?
                 if (!context->is_col_constant(0)) {
-                    return Status::InvalidArgument("Seed argument to rand() 
must be constant.");
+                    return Status::InvalidArgument("The param of rand function 
must be literal");
                 }
                 uint32_t seed = 0;
                 if (!context->get_constant_col(0)->column_ptr->is_null_at(0)) {
                     seed = context->get_constant_col(0)->column_ptr->get64(0);
                 }
                 generator->seed(seed);
-            } else {
-                // 0 or 2 args
+            } else if (context->get_num_args() == 2) {
+                if (!context->is_col_constant(0) || !context->is_col_constant(1)) {
+                    return Status::InvalidArgument("The param of rand function must be literal");
+                }
+                generator->seed(std::random_device()());
+            } else { // zero args
                 generator->seed(std::random_device()());
             }
         }
@@ -109,6 +113,7 @@ private:
                 context->get_function_state(FunctionContext::THREAD_LOCAL));
         DCHECK(generator != nullptr);
 
+        // checked in open()
        Int64 min = assert_cast<const ColumnInt64*>(
                            assert_cast<const ColumnConst*>(
                                    block.get_by_position(arguments[0]).column.get())
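
The constant-argument checks above exist because the generator is seeded exactly
once, in open(), before any row is processed; a non-literal seed or bound has no
single value at that point. A standalone sketch of that seed-once contract
(std::mt19937_64 and RandFunctionState are illustrative stand-ins for the BE's
per-thread generator state):

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <random>

    struct RandFunctionState {
        std::mt19937_64 gen;
        // runs once per instance, so the seed must already be known here
        void open(std::optional<uint64_t> constant_seed) {
            gen.seed(constant_seed ? *constant_seed : std::random_device{}());
        }
        double next() { return std::uniform_real_distribution<double>(0.0, 1.0)(gen); }
    };

    int main() {
        RandFunctionState seeded;
        seeded.open(uint64_t{42});    // like rand(42): reproducible stream
        std::cout << seeded.next() << "\n";

        RandFunctionState unseeded;
        unseeded.open(std::nullopt);  // like rand(): nondeterministic seed
        std::cout << unseeded.next() << "\n";
    }
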
diff --git a/be/src/vec/runtime/vdatetime_value.cpp b/be/src/vec/runtime/vdatetime_value.cpp
index b82f706e2eb..2e9efcd55a7 100644
--- a/be/src/vec/runtime/vdatetime_value.cpp
+++ b/be/src/vec/runtime/vdatetime_value.cpp
@@ -55,6 +55,15 @@ uint8_t mysql_week_mode(uint32_t mode) {
     return mode;
 }
 
+static bool check_space(char ch) {
+    // \t, \n, \v, \f, \r are 9~13, respectively.
+    return UNLIKELY(ch == ' ' || (ch >= 9 && ch <= 13));
+}
+
+static bool check_date_punct(char ch) {
+    return UNLIKELY(!(isdigit(ch) || isalpha(ch)));
+}
+
 static bool time_zone_begins(const char* ptr, const char* end) {
     return *ptr == '+' || (*ptr == '-' && ptr + 3 < end && *(ptr + 3) == ':') ||
            (isalpha(*ptr) && *ptr != 'T');
@@ -102,7 +111,7 @@ bool VecDateTimeValue::from_date_str_base(const char* date_str, int len,
 
     _neg = false;
     // Skip space character
-    while (ptr < end && isspace(*ptr)) {
+    while (ptr < end && check_space(*ptr)) {
         ptr++;
     }
     if (ptr == end || !isdigit(*ptr)) {
@@ -200,8 +209,8 @@ bool VecDateTimeValue::from_date_str_base(const char* date_str, int len,
             continue;
         }
         // escape separator
-        while (ptr < end && (ispunct(*ptr) || isspace(*ptr))) {
-            if (isspace(*ptr)) {
+        while (ptr < end && (check_date_punct(*ptr) || check_space(*ptr))) {
+            if (check_space(*ptr)) {
                 if (((1 << field_idx) & allow_space_mask) == 0) {
                     return false;
                 }
@@ -1233,7 +1242,7 @@ bool VecDateTimeValue::from_date_format_str(const char* format, int format_len,
     auto [year, month, day, hour, minute, second] = std::tuple {0, 0, 0, 0, 0, 0};
     while (ptr < end && val < val_end) {
         // Skip space character
-        while (val < val_end && isspace(*val)) {
+        while (val < val_end && check_space(*val)) {
             val++;
         }
         if (val >= val_end) {
@@ -1498,7 +1507,7 @@ bool VecDateTimeValue::from_date_format_str(const char* format, int format_len,
             default:
                 return false;
             }
-        } else if (!isspace(*ptr)) {
+        } else if (!check_space(*ptr)) {
             if (*ptr != *val) {
                 return false;
             }
@@ -1983,13 +1992,13 @@ bool DateV2Value<T>::from_date_str(const char* date_str, int len, int scale /* =
                                    bool convert_zero) {
     return from_date_str_base(date_str, len, scale, nullptr, convert_zero);
 }
-// when we parse
 template <typename T>
 bool DateV2Value<T>::from_date_str(const char* date_str, int len,
                                    const cctz::time_zone& local_time_zone, int scale /* = -1*/,
                                    bool convert_zero) {
     return from_date_str_base(date_str, len, scale, &local_time_zone, convert_zero);
 }
+// if local_time_zone is null, only be able to parse time without timezone
 template <typename T>
 bool DateV2Value<T>::from_date_str_base(const char* date_str, int len, int scale,
                                         const cctz::time_zone* local_time_zone, bool convert_zero) {
@@ -2001,7 +2010,7 @@ bool DateV2Value<T>::from_date_str_base(const char* date_str, int len, int scale
     int32_t date_len[MAX_DATE_PARTS] = {0};
 
     // Skip space character
-    while (ptr < end && isspace(*ptr)) {
+    while (ptr < end && check_space(*ptr)) {
         ptr++;
     }
     if (ptr == end || !isdigit(*ptr)) {
@@ -2149,8 +2158,8 @@ bool DateV2Value<T>::from_date_str_base(const char* date_str, int len, int scale
             continue;
         }
         // escape separator
-        while (ptr < end && (ispunct(*ptr) || isspace(*ptr))) {
-            if (isspace(*ptr)) {
+        while (ptr < end && (check_date_punct(*ptr) || check_space(*ptr))) {
+            if (check_space(*ptr)) {
                 if (((1 << field_idx) & allow_space_mask) == 0) {
                     return false;
                 }
@@ -2282,7 +2291,7 @@ bool DateV2Value<T>::from_date_format_str(const char* format, int format_len, co
     auto [year, month, day, hour, minute, second, microsecond] = std::tuple {0, 0, 0, 0, 0, 0, 0};
     while (ptr < end && val < val_end) {
         // Skip space character
-        while (val < val_end && isspace(*val)) {
+        while (val < val_end && check_space(*val)) {
             val++;
         }
         if (val >= val_end) {
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/ConfigBase.java b/fe/fe-common/src/main/java/org/apache/doris/common/ConfigBase.java
index 3306f314f7d..da828552f4d 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/ConfigBase.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/ConfigBase.java
@@ -123,6 +123,10 @@ public class ConfigBase {
         }
     }
 
+    public static Field getField(String name) {
+        return confFields.get(name);
+    }
+
     public void initCustom(String customConfFile) throws Exception {
         this.customConfFile = customConfFile;
         File file = new File(customConfFile);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
index 36590775a52..3f39ca4867f 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
@@ -291,6 +291,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
@@ -328,6 +329,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 /**
@@ -553,6 +555,10 @@ public class Env {
 
     private final SplitSourceManager splitSourceManager;
 
+    // if a config is relative to a daemon thread. record the relation here. we will proactively change interval of it.
+    private final Map<String, Supplier<MasterDaemon>> configtoThreads = ImmutableMap
+            .of("dynamic_partition_check_interval_seconds", this::getDynamicPartitionScheduler);
+
     public List<TFrontendInfo> getFrontendInfos() {
         List<TFrontendInfo> res = new ArrayList<>();
 
@@ -5538,13 +5544,30 @@ public class Env {
         globalFunctionMgr.replayDropFunction(functionSearchDesc);
     }
 
+    /**
+     * we can't set callback which is in fe-core to config items which are in fe-common. so wrap them here. it's not so
+     * good but is best for us now.
+     */
+    public void setMutableConfigwithCallback(String key, String value) throws ConfigException {
+        ConfigBase.setMutableConfig(key, value);
+        if (configtoThreads.get(key) != null) {
+            try {
+                configtoThreads.get(key).get().setInterval(Config.getField(key).getLong(null) * 1000L);
+                configtoThreads.get(key).get().interrupt();
+                LOG.info("set config " + key + " to " + value);
+            } catch (IllegalAccessException e) {
+                LOG.warn("set config " + key + " failed: " + e.getMessage());
+            }
+        }
+    }
+
     public void setConfig(AdminSetConfigStmt stmt) throws Exception {
         Map<String, String> configs = stmt.getConfigs();
         Preconditions.checkState(configs.size() == 1);
 
         for (Map.Entry<String, String> entry : configs.entrySet()) {
             try {
-                ConfigBase.setMutableConfig(entry.getKey(), entry.getValue());
+                setMutableConfigwithCallback(entry.getKey(), entry.getValue());
             } catch (ConfigException e) {
                 throw new DdlException(e.getMessage());
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index 296c787a54b..732776832a2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -44,6 +44,8 @@ import org.apache.doris.analysis.KeysDesc;
 import org.apache.doris.analysis.LiteralExpr;
 import org.apache.doris.analysis.PartitionDesc;
 import org.apache.doris.analysis.PartitionKeyDesc;
+import org.apache.doris.analysis.PartitionKeyDesc.PartitionKeyValueType;
+import org.apache.doris.analysis.PartitionValue;
 import org.apache.doris.analysis.QueryStmt;
 import org.apache.doris.analysis.RecoverDbStmt;
 import org.apache.doris.analysis.RecoverPartitionStmt;
@@ -2117,6 +2119,108 @@ public class InternalCatalog implements CatalogIf<Database> {
         throw new AnalysisException("Cannot find column `" + name + "` in table's columns");
     }
 
+    private boolean findAllowNullforSlotRef(List<Column> baseSchema, SlotRef slot) throws AnalysisException {
+        for (Column col : baseSchema) {
+            if (col.nameEquals(slot.getColumnName(), true)) {
+                return col.isAllowNull();
+            }
+        }
+        throw new AnalysisException("Unknown partition column name:" + slot.getColumnName());
+    }
+
+    private void checkNullityEqual(ArrayList<Boolean> partitionSlotNullables, List<PartitionValue> item)
+            throws AnalysisException {
+        // for MAX_VALUE or somethings
+        if (item == null) {
+            return;
+        }
+        for (int i = 0; i < item.size(); i++) {
+            try {
+                if (!partitionSlotNullables.get(i) && item.get(i).isNullPartition()) {
+                    throw new AnalysisException("Can't have null partition is for NOT NULL partition "
+                            + "column in partition expr's index " + i);
+                }
+            } catch (IndexOutOfBoundsException e) {
+                throw new AnalysisException("partition item's size out of partition columns: " + e.getMessage());
+            }
+        }
+    }
+
+    private void checkPartitionNullity(List<Column> baseSchema, PartitionDesc partitionDesc,
+            SinglePartitionDesc partition)
+            throws AnalysisException {
+        // in creating OlapTable, expr.desc is null. so we should find the column ourself.
+        ArrayList<Expr> partitionExprs = partitionDesc.getPartitionExprs();
+        ArrayList<Boolean> partitionSlotNullables = new ArrayList<Boolean>();
+        for (Expr expr : partitionExprs) {
+            if (expr instanceof SlotRef) {
+                partitionSlotNullables.add(findAllowNullforSlotRef(baseSchema, (SlotRef) expr));
+            } else if (expr instanceof FunctionCallExpr) {
+                partitionSlotNullables.add(Expr.isNullable(((FunctionCallExpr) expr).getFn(), expr.getChildren()));
+            } else {
+                throw new AnalysisException("Unknown partition expr type:" + expr.getExprName());
+            }
+        }
+
+        if (partition.getPartitionKeyDesc().getPartitionType() == PartitionKeyValueType.IN) {
+            List<List<PartitionValue>> inValues = partition.getPartitionKeyDesc().getInValues();
+            for (List<PartitionValue> item : inValues) {
+                checkNullityEqual(partitionSlotNullables, item);
+            }
+        } else if (partition.getPartitionKeyDesc().getPartitionType() == PartitionKeyValueType.LESS_THAN) {
+            // only upper
+            List<PartitionValue> upperValues = partition.getPartitionKeyDesc().getUpperValues();
+            checkNullityEqual(partitionSlotNullables, upperValues);
+        } else {
+            // fixed. upper and lower
+            List<PartitionValue> lowerValues = partition.getPartitionKeyDesc().getLowerValues();
+            List<PartitionValue> upperValues = partition.getPartitionKeyDesc().getUpperValues();
+            checkNullityEqual(partitionSlotNullables, lowerValues);
+            checkNullityEqual(partitionSlotNullables, upperValues);
+        }
+    }
+
+    private void checkLegalityofPartitionExprs(CreateTableStmt stmt, PartitionDesc partitionDesc)
+            throws AnalysisException {
+        for (Expr expr : partitionDesc.getPartitionExprs()) {
+            if (expr instanceof FunctionCallExpr) { // test them
+                if (!partitionDesc.isAutoCreatePartitions() || partitionDesc.getType() != PartitionType.RANGE) {
+                    throw new AnalysisException("only Auto Range Partition support FunctionCallExpr");
+                }
+
+                FunctionCallExpr func = (FunctionCallExpr) expr;
+                ArrayList<Expr> children = func.getChildren();
+                Type[] childTypes = new Type[children.size()];
+                for (int i = 0; i < children.size(); i++) {
+                    if (children.get(i) instanceof LiteralExpr) {
+                        childTypes[i] = children.get(i).getType();
+                    } else if (children.get(i) instanceof SlotRef) {
+                        childTypes[i] = getChildTypeByName(children.get(i).getExprName(), stmt);
+                    } else {
+                        throw new AnalysisException(String.format(
+                                "partition expr %s has unrecognized parameter in slot %d", func.getExprName(), i));
+                    }
+                }
+                Function fn = null;
+                try {
+                    fn = func.getBuiltinFunction(func.getFnName().getFunction(), childTypes,
+                            Function.CompareMode.IS_INDISTINGUISHABLE); // only for test
+                } catch (Exception e) {
+                    throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!");
+                }
+                if (fn == null) {
+                    throw new AnalysisException("partition expr " + func.getExprName() + " is illegal!");
+                }
+            } else if (expr instanceof SlotRef) {
+                if (partitionDesc.isAutoCreatePartitions() && partitionDesc.getType() == PartitionType.RANGE) {
+                    throw new AnalysisException("Auto Range Partition need FunctionCallExpr");
+                }
+            } else {
+                throw new AnalysisException("partition expr " + expr.getExprName() + " is illegal!");
+            }
+        }
+    }
+
     // Create olap table and related base index synchronously.
     private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserException {
         String tableName = stmt.getTableName();
@@ -2162,43 +2266,21 @@ public class InternalCatalog implements CatalogIf<Database> {
         // create partition info
         PartitionDesc partitionDesc = stmt.getPartitionDesc();
 
-        // check legality of partiton exprs
         ConnectContext ctx = ConnectContext.get();
         Env env = Env.getCurrentEnv();
+
+        // check legality of partiton exprs.
         if (ctx != null && env != null && partitionDesc != null && partitionDesc.getPartitionExprs() != null) {
-            for (Expr expr : partitionDesc.getPartitionExprs()) {
-                if (expr != null && expr instanceof FunctionCallExpr) { // test them
-                    FunctionCallExpr func = (FunctionCallExpr) expr;
-                    ArrayList<Expr> children = func.getChildren();
-                    Type[] childTypes = new Type[children.size()];
-                    for (int i = 0; i < children.size(); i++) {
-                        if (children.get(i) instanceof LiteralExpr) {
-                            childTypes[i] = children.get(i).getType();
-                        } else if (children.get(i) instanceof SlotRef) {
-                            childTypes[i] = getChildTypeByName(children.get(i).getExprName(), stmt);
-                        } else {
-                            throw new AnalysisException(String.format(
-                                    "partition expr %s has unrecognized 
parameter in slot %d", func.getExprName(), i));
-                        }
-                    }
-                    Function fn = null;
-                    try {
-                        fn = func.getBuiltinFunction(func.getFnName().getFunction(), childTypes,
-                                Function.CompareMode.IS_INDISTINGUISHABLE); // only for test
-                    } catch (Exception e) {
-                        throw new AnalysisException("partition expr " + 
func.getExprName() + " is illegal!");
-                    }
-                    if (fn == null) {
-                        throw new AnalysisException("partition expr " + 
func.getExprName() + " is illegal!");
-                    }
-                }
-            }
+            checkLegalityofPartitionExprs(stmt, partitionDesc);
         }
 
         PartitionInfo partitionInfo = null;
         Map<String, Long> partitionNameToId = Maps.newHashMap();
         if (partitionDesc != null) {
             for (SinglePartitionDesc desc : partitionDesc.getSinglePartitionDescs()) {
+                // check legality of nullity of partition items.
+                checkPartitionNullity(baseSchema, partitionDesc, desc);
+
                 long partitionId = idGeneratorBuffer.getNextId();
                 partitionNameToId.put(desc.getPartitionName(), partitionId);
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java
index d6b539269a9..d9351ec5978 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/SetConfigAction.java
@@ -17,6 +17,7 @@
 
 package org.apache.doris.httpv2.rest;
 
+import org.apache.doris.catalog.Env;
 import org.apache.doris.common.ConfigBase;
 import org.apache.doris.common.ConfigException;
 import org.apache.doris.common.DdlException;
@@ -93,7 +94,7 @@ public class SetConfigAction extends RestBaseController {
             try {
                 if (confValue != null && confValue.length == 1) {
                     try {
-                        ConfigBase.setMutableConfig(confKey, confValue[0]);
+                        Env.getCurrentEnv().setMutableConfigwithCallback(confKey, confValue[0]);
                     } catch (ConfigException e) {
                         throw new DdlException(e.getMessage());
                     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Random.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Random.java
index a7f3a360a6a..5045d85c919 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Random.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/Random.java
@@ -65,10 +65,17 @@ public class Random extends ScalarFunction
      */
     public Random(Expression lchild, Expression rchild) {
         super("random", lchild, rchild);
+    }
+
+    @Override
+    public void checkLegalityBeforeTypeCoercion() {
         // align with original planner behavior, refer to:
         // org/apache/doris/analysis/Expr.getBuiltinFunction()
-        Preconditions.checkState(lchild instanceof Literal && rchild instanceof Literal,
-                "The param of rand function must be literal");
+        for (Expression child : children()) {
+            if (!child.isLiteral()) {
+                throw new AnalysisException("The param of rand function must 
be literal ");
+            }
+        }
     }
 
     /**
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java
index 8541dc29d71..00382ea457a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/CreateTableCommand.java
@@ -97,11 +97,8 @@ public class CreateTableCommand extends Command implements ForwardWithSync {
                 LOG.debug("Nereids start to execute the create table command, query id: {}, tableName: {}",
                         ctx.queryId(), createTableInfo.getTableName());
             }
-            try {
-                Env.getCurrentEnv().createTable(createTableStmt);
-            } catch (Exception e) {
-                throw new AnalysisException(e.getMessage(), e.getCause());
-            }
+
+            Env.getCurrentEnv().createTable(createTableStmt);
             return;
         }
         LogicalPlan query = ctasQuery.get();
diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java
index 0c73fee2319..a2867c5d96e 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/CreateTableTest.java
@@ -356,17 +356,16 @@ public class CreateTableTest extends TestWithFeService {
 
         // single partition column with multi keys
         ExceptionChecker
-                .expectThrowsWithMsg(IllegalArgumentException.class, "partition key desc list size[2] is not equal to partition column size[1]",
-                        () -> createTable("create table test.tbl10\n"
-                                + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n"
-                                + "partition by list(k1)\n"
-                                + "(\n"
-                                + "partition p1 values in (\"1\", \"3\", \"5\"),\n"
-                                + "partition p2 values in (\"2\", \"4\", \"6\"),\n"
-                                + "partition p3 values in ((\"7\", \"8\"))\n"
-                                + ")\n"
-                                + "distributed by hash(k2) buckets 1\n"
-                                + "properties('replication_num' = '1');"));
+                .expectThrowsWithMsg(AnalysisException.class,
+                                        "partition item's size out of partition columns: Index: 1, Size: 1",
+                                        () -> createTable("create table test.tbl10\n"
+                                        + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n"
+                                        + "partition by list(k1)\n" + "(\n"
+                                        + "partition p1 values in (\"1\", \"3\", \"5\"),\n"
+                                        + "partition p2 values in (\"2\", \"4\", \"6\"),\n"
+                                        + "partition p3 values in ((\"7\", \"8\"))\n" + ")\n"
+                                        + "distributed by hash(k2) buckets 1\n"
+                                        + "properties('replication_num' = '1');"));
 
         // multi partition columns with single key
         ExceptionChecker
@@ -383,7 +382,7 @@ public class CreateTableTest extends TestWithFeService {
 
         // multi partition columns with multi keys
         ExceptionChecker
-                .expectThrowsWithMsg(IllegalArgumentException.class, "partition key desc list size[3] is not equal to partition column size[2]",
+                .expectThrowsWithMsg(AnalysisException.class, "partition item's size out of partition columns: Index: 2, Size: 2",
                         () -> createTable("create table test.tbl12\n"
                                 + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n"
                                 + "partition by list(k1, k2)\n"
@@ -906,8 +905,8 @@ public class CreateTableTest extends TestWithFeService {
 
     @Test
     public void testCreateTableWithNerieds() throws Exception {
-        ExceptionChecker.expectThrowsWithMsg(org.apache.doris.nereids.exceptions.AnalysisException.class,
-                "Failed to check min load replica num",
+        ExceptionChecker.expectThrowsWithMsg(org.apache.doris.common.DdlException.class,
+                                "Failed to check min load replica num",
                 () -> createTable("create table test.tbl_min_load_replica_num_2_nereids\n"
                         + "(k1 int, k2 int)\n"
                         + "duplicate key(k1)\n"
@@ -948,7 +947,7 @@ public class CreateTableTest extends TestWithFeService {
                         + "distributed by hash(k1) buckets 10", true));
 
         createDatabaseWithSql("create database db2 
properties('replication_num' = '4')");
-        
ExceptionChecker.expectThrowsWithMsg(org.apache.doris.nereids.exceptions.AnalysisException.class,
+        ExceptionChecker.expectThrowsWithMsg(DdlException.class,
                 "replication num should be less than the number of available 
backends. "
                         + "replication num is 4, available backend num is 3",
                 () -> createTable("create table db2.tbl_4_replica\n"
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveDDLAndDMLPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveDDLAndDMLPlanTest.java
index a40a4fed385..ab4e5fd0fc5 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveDDLAndDMLPlanTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/hive/HiveDDLAndDMLPlanTest.java
@@ -339,7 +339,7 @@ public class HiveDDLAndDMLPlanTest extends TestWithFeService {
                 + "PROPERTIES (\n"
                 + "  'location'='hdfs://loc/db/tbl',\n"
                 + "  'file_format'='orc')";
-        ExceptionChecker.expectThrowsWithMsg(org.apache.doris.nereids.exceptions.AnalysisException.class,
+        ExceptionChecker.expectThrowsWithMsg(org.apache.doris.common.UserException.class,
                "errCode = 2, detailMessage = errCode = 2,"
                        + " detailMessage = Create hive bucket table need set enable_create_hive_bucket_table to true",
                 () -> createTable(createBucketedTableErr, true));
diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/plans/CreateTableCommandTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/plans/CreateTableCommandTest.java
index dc45e3de0fe..741faea4a13 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/plans/CreateTableCommandTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/trees/plans/CreateTableCommandTest.java
@@ -241,12 +241,12 @@ public class CreateTableCommandTest extends TestWithFeService {
 
     @Test
     public void testAbnormal() throws ConfigException {
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.DdlException.class,
                 "Unknown properties: {aa=bb}",
                 () -> createTable("create table test.atbl1\n" + "(k1 int, k2 
float)\n" + "duplicate key(k1)\n"
                         + "distributed by hash(k1) buckets 1\n" + 
"properties('replication_num' = '1','aa'='bb'); "));
 
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.DdlException.class,
                 "Floating point type should not be used in distribution 
column",
                 () -> createTable("create table test.atbl1\n" + "(k1 int, k2 
float)\n" + "duplicate key(k1)\n"
                         + "distributed by hash(k2) buckets 1\n" + 
"properties('replication_num' = '1'); "));
@@ -257,18 +257,18 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + "partition by range(k3)\n" + "(partition p1 values less than(\"10\"))\n"
                        + "distributed by hash(k2) buckets 1\n" + "properties('replication_num' = '1'); "));
 
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.DdlException.class,
                 "Varchar should not in the middle of short keys",
                 () -> createTable("create table test.atbl3\n" + "(k1 
varchar(40), k2 int, k3 int)\n"
                         + "duplicate key(k1, k2, k3)\n" + "distributed by 
hash(k1) buckets 1\n"
                         + "properties('replication_num' = '1', 'short_key' = 
'3');"));
 
-        checkThrow(AnalysisException.class, "Short key is too large. should 
less than: 3",
+        checkThrow(org.apache.doris.common.DdlException.class, "Short key is 
too large. should less than: 3",
                 () -> createTable("create table test.atbl4\n" + "(k1 int, k2 
int, k3 int)\n"
                         + "duplicate key(k1, k2, k3)\n" + "distributed by 
hash(k1) buckets 1\n"
                         + "properties('replication_num' = '1', 'short_key' = 
'4');"));
 
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.DdlException.class,
                 "replication num should be less than the number of available 
backends. replication num is 3, available backend num is 1",
                 () -> createTable("create table test.atbl5\n" + "(k1 int, k2 
int, k3 int)\n"
                         + "duplicate key(k1, k2, k3)\n" + "distributed by 
hash(k1) buckets 1\n"
@@ -278,48 +278,49 @@ public class CreateTableCommandTest extends TestWithFeService {
                () -> createTable("create table test.atbl6\n" + "(k1 int, k2 int)\n" + "duplicate key(k1)\n"
                        + "distributed by hash(k2) buckets 1\n" + "properties('replication_num' = '1'); "));
 
-        checkThrow(AnalysisException.class, "Table 'atbl6' already exists",
+        checkThrow(org.apache.doris.common.DdlException.class, "Table 'atbl6' 
already exists",
                 () -> createTable("create table test.atbl6\n" + "(k1 int, k2 
int, k3 int)\n"
                         + "duplicate key(k1, k2, k3)\n" + "distributed by 
hash(k1) buckets 1\n"
                         + "properties('replication_num' = '1');"));
 
         ConfigBase.setMutableConfig("disable_storage_medium_check", "false");
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.DdlException.class,
                 "Failed to find enough backend, please check the replication 
num,replication tag and storage medium.\n"
                         + "Create failed replications:\n"
                         + "replication tag: {\"location\" : \"default\"}, 
replication num: 1, storage medium: SSD",
                 () -> createTable("create table test.tb7(key1 int, key2 
varchar(10)) distributed by hash(key1) \n"
                         + "buckets 1 properties('replication_num' = '1', 
'storage_medium' = 'ssd');"));
 
-        checkThrow(AnalysisException.class, "sequence column only support 
UNIQUE_KEYS",
+        checkThrow(org.apache.doris.common.DdlException.class, "sequence 
column only support UNIQUE_KEYS",
                 () -> createTable("create table test.atbl8\n" + "(k1 
varchar(40), k2 int, v1 int sum)\n"
                         + "aggregate key(k1, k2)\n"
                         + "partition by range(k2)\n" + "(partition p1 values 
less than(\"10\"))\n"
                         + "distributed by hash(k2) buckets 1\n" + 
"properties('replication_num' = '1',\n"
                         + "'function_column.sequence_type' = 'int');"));
 
-        checkThrow(AnalysisException.class, "sequence type only support 
integer types and date types",
+        checkThrow(org.apache.doris.common.DdlException.class,
+                "sequence type only support integer types and date types",
                 () -> createTable("create table test.atbl8\n" + "(k1 
varchar(40), k2 int, v1 int)\n"
                         + "unique key(k1, k2)\n"
                         + "partition by range(k2)\n" + "(partition p1 values 
less than(\"10\"))\n"
                         + "distributed by hash(k2) buckets 1\n" + 
"properties('replication_num' = '1',\n"
                         + "'function_column.sequence_type' = 'double');"));
 
-        checkThrow(AnalysisException.class, "The sequence_col and 
sequence_type cannot be set at the same time",
+        checkThrow(org.apache.doris.common.DdlException.class, "The 
sequence_col and sequence_type cannot be set at the same time",
                 () -> createTable("create table test.atbl8\n" + "(k1 
varchar(40), k2 int, v1 int)\n"
                         + "unique key(k1, k2)\n"
                         + "partition by range(k2)\n" + "(partition p1 values 
less than(\"10\"))\n"
                         + "distributed by hash(k2) buckets 1\n" + 
"properties('replication_num' = '1',\n"
                         + "'function_column.sequence_type' = 'int', 
'function_column.sequence_col' = 'v1');"));
 
-        checkThrow(AnalysisException.class, "The specified sequence column[v3] 
not exists",
+        checkThrow(org.apache.doris.common.DdlException.class, "The specified 
sequence column[v3] not exists",
                 () -> createTable("create table test.atbl8\n" + "(k1 
varchar(40), k2 int, v1 int)\n"
                         + "unique key(k1, k2)\n"
                         + "partition by range(k2)\n" + "(partition p1 values 
less than(\"10\"))\n"
                         + "distributed by hash(k2) buckets 1\n" + 
"properties('replication_num' = '1',\n"
                         + "'function_column.sequence_col' = 'v3');"));
 
-        checkThrow(AnalysisException.class, "Sequence type only support 
integer types and date types",
+        checkThrow(org.apache.doris.common.DdlException.class, "Sequence type 
only support integer types and date types",
                 () -> createTable("create table test.atbl8\n" + "(k1 
varchar(40), k2 int, v1 int)\n"
                         + "unique key(k1, k2)\n"
                         + "partition by range(k2)\n" + "(partition p1 values 
less than(\"10\"))\n"
@@ -341,7 +342,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                + "properties('replication_num' = '1');"));

        // single partition column with multi keys
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.AnalysisException.class,
                "partition key desc list size[2] is not equal to partition column size[1]",
                () -> createTable("create table test.tbl10\n"
                        + "(k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int)\n"
@@ -355,7 +356,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + "properties('replication_num' = '1');"));

        // multi partition columns with single key
-        checkThrow(AnalysisException.class,
+        checkThrow(IllegalArgumentException.class,
                "partition key desc list size[1] is not equal to partition column size[2]",
                () -> createTable("create table test.tbl11\n"
                        + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n"
@@ -368,7 +369,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + "properties('replication_num' = '1');"));

        // multi partition columns with multi keys
-        checkThrow(AnalysisException.class,
+        checkThrow(org.apache.doris.common.AnalysisException.class,
                "partition key desc list size[3] is not equal to partition column size[2]",
                () -> createTable("create table test.tbl12\n"
                        + "(k1 int not null, k2 varchar(128) not null, k3 int, v1 int, v2 int)\n"
@@ -453,7 +454,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + "PROPERTIES(\"replication_num\" = \"1\");"));

        // range: partition content != partition key type
-        checkThrow(AnalysisException.class, "Invalid number format: beijing",
+        checkThrow(org.apache.doris.common.DdlException.class, "Invalid number format: beijing",
                () -> createTable("CREATE TABLE test.tbl17 (\n"
                        + "    k1 int, k2 varchar(128), k3 int, v1 int, v2 int\n"
                         + ")\n"
@@ -466,7 +467,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + "PROPERTIES(\"replication_num\" = \"1\");"));

        // list: partition content != partition key type
-        checkThrow(AnalysisException.class, "Invalid number format: beijing",
+        checkThrow(org.apache.doris.common.DdlException.class, "Invalid number format: beijing",
                () -> createTable("CREATE TABLE test.tbl18 (\n"
                        + "    k1 int not null, k2 varchar(128), k3 int, v1 int, v2 int\n"
                         + ")\n"
@@ -482,7 +483,7 @@ public class CreateTableCommandTest extends TestWithFeService {
         * dynamic partition table
         */
        // list partition with dynamic properties
-        checkThrow(AnalysisException.class, "Only support dynamic partition properties on range partition table",
+        checkThrow(org.apache.doris.common.DdlException.class, "Only support dynamic partition properties on range partition table",
                 () -> createTable("CREATE TABLE test.tbl19\n"
                         + "(\n"
                         + "    k1 DATE not null\n"
@@ -500,7 +501,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + ");\n"));

        // no partition table with dynamic properties
-        checkThrow(AnalysisException.class, "Only support dynamic partition properties on range partition table",
+        checkThrow(org.apache.doris.common.DdlException.class, "Only support dynamic partition properties on range partition table",
                 () -> createTable("CREATE TABLE test.tbl20\n"
                         + "(\n"
                         + "    k1 DATE\n"
@@ -558,7 +559,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + " 'data_sort.sort_type' = 'lexical');"));

        // create z-order sort table, default col_num
-        checkThrow(AnalysisException.class, "only support lexical method now!",
+        checkThrow(org.apache.doris.common.AnalysisException.class, "only support lexical method now!",
                () -> createTable(
                        "create table test.zorder_tbl2\n" + "(k1 varchar(40), k2 int, k3 int)\n"
                                 + "duplicate key(k1, k2, k3)\n"
@@ -567,7 +568,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                                + " 'data_sort.sort_type' = 'zorder');"));

        // create z-order sort table, define sort_col_num
-        checkThrow(AnalysisException.class, "only support lexical method now!",
+        checkThrow(org.apache.doris.common.AnalysisException.class, "only support lexical method now!",
                () -> createTable(
                        "create table test.zorder_tbl3\n" + "(k1 varchar(40), k2 int, k3 int)\n"
                                 + "duplicate key(k1, k2, k3)\n"
@@ -576,7 +577,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                                + " 'data_sort.sort_type' = 'zorder',"
                                + " 'data_sort.col_num' = '2');"));
        // create z-order sort table, only 1 sort column
-        checkThrow(AnalysisException.class, "only support lexical method now!",
+        checkThrow(org.apache.doris.common.AnalysisException.class, "only support lexical method now!",
                () -> createTable("create table test.zorder_tbl4\n" + "(k1 varchar(40), k2 int, k3 int)\n"
                        + "duplicate key(k1, k2, k3)\n"
                        + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n"
@@ -584,7 +585,7 @@ public class CreateTableCommandTest extends TestWithFeService {
                        + " 'data_sort.sort_type' = 'zorder',"
                        + " 'data_sort.col_num' = '1');"));
        // create z-order sort table, sort column is empty
-        checkThrow(AnalysisException.class, "only support lexical method now!",
+        checkThrow(org.apache.doris.common.AnalysisException.class, "only support lexical method now!",
                () -> createTable("create table test.zorder_tbl4\n" + "(k1 varchar(40), k2 int, k3 int)\n"
                        + "duplicate key(k1, k2, k3)\n"
                        + "partition by range(k2)\n" + "(partition p1 values less than(\"10\"))\n"
@@ -691,7 +692,7 @@ public class CreateTableCommandTest extends TestWithFeService {

    @Test
    public void testCreateTableWithInMemory() {
-        checkThrow(AnalysisException.class, "Not support set 'in_memory'='true' now!",
+        checkThrow(org.apache.doris.common.AnalysisException.class, "Not support set 'in_memory'='true' now!",
                () -> createTable("create table test.test_inmemory(k1 INT, k2 INT) duplicate key (k1) "
                        + "distributed by hash(k1) buckets 1 properties('replication_num' = '1','in_memory'='true');"));
     }
diff --git a/regression-test/data/nereids_function_p0/agg_function/test_corr.out b/regression-test/data/nereids_function_p0/agg_function/test_corr.out
index 71293030ba8..735c3f9eeab 100644
--- a/regression-test/data/nereids_function_p0/agg_function/test_corr.out
+++ b/regression-test/data/nereids_function_p0/agg_function/test_corr.out
@@ -35,3 +35,21 @@
 0.0
 0.0
 
+-- !sql_const1 --
+0.0
+
+-- !sql_const2 --
+0.0
+
+-- !sql_const3 --
+0.0
+
+-- !sql_const4 --
+0.0
+
+-- !sql_const5 --
+0.0
+
+-- !sql_const6 --
+0.0
+
diff --git a/regression-test/suites/load_p0/stream_load/test_stream_load.groovy b/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
index 574e2e1466e..860ff10e14e 100644
--- a/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
@@ -1648,7 +1648,7 @@ suite("test_stream_load", "p0") {
        log.info("test chunked transfer result: ${out}".toString())
        def json = parseJson(out)
        assertEquals("fail", json.Status.toLowerCase())
-       assertTrue(json.Message.contains("[INTERNAL_ERROR]please do not set 
both content_length and transfer-encoding"))
+       assertTrue(json.Message.contains("please do not set both content_length 
and transfer-encoding"))
     } finally {
        sql """ DROP TABLE IF EXISTS ${tableName16} FORCE"""
     }
@@ -1678,7 +1678,7 @@ suite("test_stream_load", "p0") {
         log.info("test chunked transfer result: ${out}".toString())
         def json = parseJson(out)
         assertEquals("fail", json.Status.toLowerCase())
-        assertTrue(json.Message.contains("[INTERNAL_ERROR]content_length is 
empty and transfer-encoding!=chunked, please set content_length or 
transfer-encoding=chunked"))
+        assertTrue(json.Message.contains("content_length is empty and 
transfer-encoding!=chunked, please set content_length or 
transfer-encoding=chunked"))
     } finally {
         sql """ DROP TABLE IF EXISTS ${tableName16} FORCE"""
     }
diff --git a/regression-test/suites/nereids_function_p0/agg_function/test_corr.groovy b/regression-test/suites/nereids_function_p0/agg_function/test_corr.groovy
index c752e8cb133..50cd3cac79a 100644
--- a/regression-test/suites/nereids_function_p0/agg_function/test_corr.groovy
+++ b/regression-test/suites/nereids_function_p0/agg_function/test_corr.groovy
@@ -86,4 +86,11 @@ suite("test_corr") {
     qt_sql1 "select corr(non_nullable(x), non_nullable(y)) ans from test_corr 
group by id order by ans"
     qt_sql2 "select corr(x, non_nullable(y)) ans from test_corr group by id 
order by ans"
     qt_sql3 "select corr(non_nullable(x), y) ans from test_corr group by id 
order by ans"
+
+    qt_sql_const1 "select corr(x,1) from test_corr"
+    qt_sql_const2 "select corr(x,1e100) from test_corr"
+    qt_sql_const3 "select corr(x,1e-100) from test_corr"
+    qt_sql_const4 "select corr(1,y) from test_corr"
+    qt_sql_const5 "select corr(1e100,y) from test_corr"
+    qt_sql_const6 "select corr(1e-100,y) from test_corr"
 }
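
A note on the qt_sql_const cases above: when one argument of corr is a constant, its variance is zero, so the result is pinned to 0.0 (see the new test_corr.out entries) rather than a NaN from the 0/0 division. A minimal sketch of the same checks, assuming a hypothetical table t(x, y) with a few numeric rows:

    -- each query should return 0.0, matching the qt_sql_const expectations
    SELECT corr(x, 1)     FROM t;   -- constant second argument
    SELECT corr(1e100, y) FROM t;   -- extreme constant must not overflow to NaN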
diff --git a/regression-test/suites/nereids_p0/system/test_query_sys.groovy b/regression-test/suites/nereids_p0/system/test_query_sys.groovy
index 85d612b9c17..e0e68f909fd 100644
--- a/regression-test/suites/nereids_p0/system/test_query_sys.groovy
+++ b/regression-test/suites/nereids_p0/system/test_query_sys.groovy
@@ -48,4 +48,25 @@ suite("test_query_sys", "query,p0") {
     sql "set enable_nereids_planner=true"
     def v2 = sql "select version()"
     assertEquals(v1, v2)
+
+    test {
+        sql "select random(random());"
+        exception "The param of rand function must be literal"
+    }
+
+    sql "set enable_nereids_planner=false"
+    sql """
+        CREATE TABLE IF NOT EXISTS `test_random` (
+        fcst_emp varchar(128) NOT NULL
+        ) ENGINE=OLAP
+        DISTRIBUTED BY HASH(`fcst_emp`)
+        PROPERTIES(
+        "replication_num" = "1",
+        "compression" = "LZ4" );
+    """
+    sql """ insert into test_random values('123,1233,4123,3131'); """
+    test {
+        sql "select random(1,array_size(split_by_string(fcst_emp,','))) from 
test_random;"
+        exception "The param of rand function must be literal"
+    }
 }
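
The two negative cases above pin the rule that the parameters of rand()/random() must be constant literals; a bound derived from a column, such as array_size(split_by_string(...)), is rejected at analysis time on both planners. A hedged sketch of calls the check is expected to accept:

    SELECT random();        -- no arguments: uniform double in [0, 1)
    SELECT random(1, 10);   -- both bounds are literals, so the literal check passes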
diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy
index 7868f1ffb9a..5855ecc06e1 100644
--- a/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy
+++ b/regression-test/suites/partition_p0/auto_partition/test_auto_list_partition.groovy
@@ -310,4 +310,20 @@ suite("test_auto_list_partition") {
     result12 = sql "show partitions from 
stream_load_list_test_table_string_key"
     logger.info("${result12}")
     assertEquals(result12.size(), 4)
+
+    sql "drop table if exists awh_test_list_auto"
+    test {
+        sql """
+            CREATE TABLE awh_test_list_auto (
+                DATE_ID BIGINT NOT NULL COMMENT 'DATE_ID',
+                LAST_UPLOAD_TIME DATETIME COMMENT 'LAST_UPLOAD_TIME'
+            )
+            AUTO PARTITION BY LIST (sum(DATE_ID))()
+            DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+            PROPERTIES (
+                "replication_num" = "1"
+            );
+        """
+        exception "auto create partition only support slotRef in list 
partitions."
+    }
 }
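
The new negative case rejects an aggregate expression in AUTO PARTITION BY LIST: only a bare column reference (a slotRef) is accepted there. For contrast, a minimal sketch of the accepted shape, using a hypothetical table name:

    CREATE TABLE awh_test_list_auto_ok (
        DATE_ID BIGINT NOT NULL,
        LAST_UPLOAD_TIME DATETIME
    )
    AUTO PARTITION BY LIST (DATE_ID)()          -- plain column, no function call
    DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
    PROPERTIES ("replication_num" = "1");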
diff --git a/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy b/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
index f52dc2945f0..1ba228bbd54 100644
--- a/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
+++ b/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
@@ -16,6 +16,8 @@
 // under the License.
 
 suite("test_auto_range_partition") {
+    sql "set enable_fallback_to_original_planner=false"
+
     sql "drop table if exists range_table1"
     sql """
         CREATE TABLE `range_table1` (
@@ -166,4 +168,77 @@ suite("test_auto_range_partition") {
     sql " insert into isit select * from isit_src "
     sql " sync "
     qt_sql " select * from isit order by k "
+
+    sql "drop table if exists awh_test_range_auto"
+    test {
+        sql """
+            CREATE TABLE awh_test_range_auto (
+                DATE_ID BIGINT NOT NULL,
+                LAST_UPLOAD_TIME DATETIME
+            )
+            AUTO PARTITION BY RANGE (DATE_ID)()
+            DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+            PROPERTIES (
+                "replication_num" = "1"
+            );
+        """
+        exception "Auto Range Partition need FunctionCallExpr"
+    }
+    test {
+        sql """
+            CREATE TABLE awh_test_range_auto (
+                DATE_ID BIGINT NOT NULL,
+                LAST_UPLOAD_TIME DATETIME
+            )
+            AUTO PARTITION BY RANGE (date(DATE_ID))()
+            DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+            PROPERTIES (
+                "replication_num" = "1"
+            );
+        """
+        exception "auto create partition only support function call expr is"
+    }
+    test {
+        sql """
+            CREATE TABLE awh_test_range_auto (
+                DATE_ID BIGINT NOT NULL,
+                LAST_UPLOAD_TIME DATETIME
+            )
+            AUTO PARTITION BY RANGE (date_trunc(DATE_ID))()
+            DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+            PROPERTIES (
+                "replication_num" = "1"
+            );
+        """
+        exception "partition expr date_trunc is illegal!"
+    }
+    test {
+        sql """
+            CREATE TABLE awh_test_range_auto (
+                DATE_ID BIGINT NOT NULL,
+                LAST_UPLOAD_TIME DATETIME
+            )
+            AUTO PARTITION BY RANGE (date_trunc(DATE_ID, 'year'))()
+            DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+            PROPERTIES (
+                "replication_num" = "1"
+            );
+        """
+        exception "partition expr date_trunc is illegal!"
+    }
+    sql """
+        CREATE TABLE awh_test_range_auto (
+            DATE_ID BIGINT NOT NULL,
+            LAST_UPLOAD_TIME DATETIME NOT NULL
+        )
+        AUTO PARTITION BY RANGE (date_trunc(LAST_UPLOAD_TIME, 'yeear'))()
+        DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
+        PROPERTIES (
+            "replication_num" = "1"
+        );
+    """
+    test {
+        sql "insert into awh_test_range_auto values (1,'20201212')"
+        exception "date_trunc function second param only support argument is"
+    }
 }
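
Taken together, the new cases require AUTO PARTITION BY RANGE to wrap a partition column in a two-argument date_trunc call; a misspelled unit such as 'yeear' still passes creation and is only caught on insert, which the final case pins. A sketch of the fully valid shape, using a hypothetical table name:

    CREATE TABLE awh_test_range_auto_ok (
        DATE_ID BIGINT NOT NULL,
        LAST_UPLOAD_TIME DATETIME NOT NULL
    )
    AUTO PARTITION BY RANGE (date_trunc(LAST_UPLOAD_TIME, 'year'))()
    DISTRIBUTED BY HASH(DATE_ID) BUCKETS AUTO
    PROPERTIES ("replication_num" = "1");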
diff --git a/regression-test/suites/partition_p0/test_null_partition.groovy b/regression-test/suites/partition_p0/test_null_partition.groovy
new file mode 100644
index 00000000000..c5ad9d049de
--- /dev/null
+++ b/regression-test/suites/partition_p0/test_null_partition.groovy
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_null_partition") {
+    sql "set enable_fallback_to_original_planner=false"
+    sql "set allow_partition_column_nullable = true;"
+
+    sql " drop table if exists test_null "
+    test {
+        sql """
+            CREATE TABLE `test_null` (
+            `k0` BIGINT NOT NULL,
+            `k1` BIGINT NOT NULL
+            )
+            partition by list (k0, k1) (
+            PARTITION `pX` values in ((NULL, 1))
+            )
+            PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1"
+            );
+        """
+        exception "Can't have null partition is for NOT NULL partition column 
in partition expr's index 0"
+    }
+    
+    test {
+        sql """
+            CREATE TABLE `test_null` (
+            `k0` BIGINT NOT NULL,
+            `k1` BIGINT NOT NULL
+            )
+            partition by list (k0, k1) (
+            PARTITION `pX` values in ((1, 2), (1, NULL))
+            )
+            PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1"
+            );
+        """
+        exception "Can't have null partition is for NOT NULL partition column 
in partition expr's index 1"
+    }
+
+    sql " drop table if exists OK "
+    sql """
+        CREATE TABLE `OK` (
+        `k0` BIGINT NULL,
+        `k1` BIGINT NOT NULL
+        )
+        partition by list (k0, k1) (
+        PARTITION `pX` values in ((NULL, 1), (NULL, 2), (NULL, 3))
+        )
+        PROPERTIES (
+        "replication_allocation" = "tag.location.default: 1"
+        );
+    """
+
+    test {
+        sql """
+            CREATE TABLE `test_null` (
+            `k0` BIGINT NULL,
+            `k1` BIGINT NOT NULL
+            )
+            partition by list (k0, k1) (
+            PARTITION `pX` values in ((NULL, 1), (NULL, 2), (NULL, 3), (4, NULL))
+            )
+            PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1"
+            );
+        """
+        exception "Can't have null partition is for NOT NULL partition column 
in partition expr's index 1"
+    }
+}
\ No newline at end of file
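
This suite pins the nullity check added for partition creation: a NULL list value is legal only at positions whose partition column is nullable, and the error names the offending index. Assuming the OK table above has been created, a row with NULL in the nullable k0 should route to partition pX as declared; a minimal usage sketch:

    -- expected to be accepted: k0 is nullable and (NULL, 1) is declared in pX
    INSERT INTO OK VALUES (NULL, 1);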


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
