This is an automated email from the ASF dual-hosted git repository.
kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.0 by this push:
new 3cdc10f06c8 [opt](docker)optimize ES docker compose (#30068) (#30363)
3cdc10f06c8 is described below
commit 3cdc10f06c8c991eacbb5b20e99198e3369db922
Author: qiye <[email protected]>
AuthorDate: Thu Jan 25 17:00:34 2024 +0800
[opt](docker)optimize ES docker compose (#30068) (#30363)
---
.../elasticsearch/config/es6/elasticsearch.yml | 62 +++++
.../elasticsearch/config/es6/log4j2.properties | 205 ++++++++++++++
.../elasticsearch/config/es7/log4j2.properties | 299 +++++++++++++++++++++
.../elasticsearch/config/es8/log4j2.properties | 273 +++++++++++++++++++
.../docker-compose/elasticsearch/es.yaml.tpl | 28 +-
.../elasticsearch/scripts/index/es6_test1.json | 2 +-
.../elasticsearch/scripts/index/es6_test2.json | 2 +-
docker/thirdparties/run-thirdparties-docker.sh | 8 +
.../data/external_table_p0/es/test_es_query.out | 42 +--
.../es/test_es_query_no_http_url.out | 2 +-
10 files changed, 894 insertions(+), 29 deletions(-)
diff --git a/docker/thirdparties/docker-compose/elasticsearch/config/es6/elasticsearch.yml b/docker/thirdparties/docker-compose/elasticsearch/config/es6/elasticsearch.yml
new file mode 100755
index 00000000000..6cf99d51b8f
--- /dev/null
+++ b/docker/thirdparties/docker-compose/elasticsearch/config/es6/elasticsearch.yml
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+cluster:
+  name: elasticsearch6
+  routing:
+    allocation:
+      disk:
+        threshold_enabled: true
+        watermark:
+          flood_stage: 200mb
+          low: 500mb
+          high: 300mb
+
+node:
+  name: 2bf5838228d8
+  master: true
+  data: true
+  ingest: true
+
+path:
+  data: /var/lib/elasticsearch/data
+  logs: /var/lib/elasticsearch/logs
+
+network:
+  host: 0.0.0.0
+
+
+http:
+  compression: true
+  cors:
+    enabled: false
+
+
+bootstrap:
+  memory_lock: false
+
+discovery:
+  zen:
+    minimum_master_nodes: 1
+
+
+action:
+  destructive_requires_name: false
+
+xpack:
+  security:
+    enabled: false
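
Note on the cluster.routing.allocation.disk block above: these watermarks are absolute free-space thresholds (low 500mb, high 300mb, flood_stage 200mb), deliberately tiny so the single-node test cluster keeps allocating shards even on nearly-full CI disks. A quick way to confirm they took effect, assuming the host port that ${DOCKER_ES_6_EXTERNAL_PORT} maps to in es.env (19200 here is only an example):

    curl -s "localhost:19200/_cluster/settings?include_defaults=true&flat_settings=true" \
        | grep "cluster.routing.allocation.disk"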
diff --git a/docker/thirdparties/docker-compose/elasticsearch/config/es6/log4j2.properties b/docker/thirdparties/docker-compose/elasticsearch/config/es6/log4j2.properties
new file mode 100755
index 00000000000..3ab4e253748
--- /dev/null
+++ b/docker/thirdparties/docker-compose/elasticsearch/config/es6/log4j2.properties
@@ -0,0 +1,205 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
+
+
+appender.audit_rolling.type = RollingFile
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.log
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+                "@timestamp":"%d{ISO8601}"\
+                %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+                %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+                %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+                %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+                %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+                %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+                %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+                %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.roles":%map{user.roles}}\
+                %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+                %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+                %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+                %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+                %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+                %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
+                %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+                %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
+                %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+                %varsNotEmpty{, "indices":%map{indices}}\
+                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+                %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
+                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+                %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
+                }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "event.type" a received REST request is translated into one or more
transport requests. This indicates which processing layer generated the event
"rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed",
"access_granted", "run_as_granted", etc.
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is
impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the
subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.run_by.realm" the realm name of the impersonating subject
("user.run_by.name")
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the
realm name the impersonated user is looked up from
+# "user.roles" the roles array of the user; these are the roles that are
granting privileges
+# "origin.type" it is "rest" if the event is originating (is in relation to) a
REST request; possible other values are "transport" and "ip_filter"
+# "origin.address" the remote address and port of the first network hop, i.e.
a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an
"authentication_successful"; the subject is not yet authenticated
+# "url.path" the URI component between the port and the query string; it is
percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is
percent (URL) encoded
+# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT,
DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
+# "request.body" the content of the request body entity, JSON escaped
+# "request.id" a synthentic identifier for the incoming request, this is
unique per incoming request, and consistent across all audit events generated
by that request
+# "action" an action is the most granular operation that is authorized and
this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is
the name of the request class, similar to how rest requests are identified by
the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header,
as a verbatim string value (not an array)
+# "transport.profile" name of the transport profile in case this is a
"connection_granted" or "connection_denied" event
+# "rule" name of the applied rulee if the "origin.type" is "ip_filter"
+# "event.category" fixed value "elasticsearch-audit"
+
+appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.log
+appender.audit_rolling.policies.type = Policies
+appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.audit_rolling.policies.time.interval = 1
+appender.audit_rolling.policies.time.modulate = true
+
+appender.deprecated_audit_rolling.type = RollingFile
+appender.deprecated_audit_rolling.name = deprecated_audit_rolling
+appender.deprecated_audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access.log
+appender.deprecated_audit_rolling.layout.type = PatternLayout
+appender.deprecated_audit_rolling.layout.pattern = [%d{ISO8601}] %m%n
+appender.deprecated_audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access-%d{yyyy-MM-dd}.log
+appender.deprecated_audit_rolling.policies.type = Policies
+appender.deprecated_audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.deprecated_audit_rolling.policies.time.interval = 1
+appender.deprecated_audit_rolling.policies.time.modulate = true
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xpack_security_audit_deprecated_logfile.name = org.elasticsearch.xpack.security.audit.logfile.DeprecatedLoggingAuditTrail
+# set this to "off" instead of "info" to disable the deprecated appender
+# in the 6.x releases both the new and the previous appenders are enabled
+# for the logfile auditing
+logger.xpack_security_audit_deprecated_logfile.level = info
+logger.xpack_security_audit_deprecated_logfile.appenderRef.deprecated_audit_rolling.ref = deprecated_audit_rolling
+logger.xpack_security_audit_deprecated_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
\ No newline at end of file
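
For reference on the rollover arithmetic in this file: the server log rolls daily or at 128MB, and the Delete action prunes rolled .log.gz files once they accumulate past 2GB, so the ./logs/es6/ bind mount added below should stay bounded across long regression runs. A hypothetical post-run sanity check:

    du -sh docker/thirdparties/docker-compose/elasticsearch/logs/es6/
    ls docker/thirdparties/docker-compose/elasticsearch/logs/es6/*.log.gz 2>/dev/null | wc -l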
diff --git a/docker/thirdparties/docker-compose/elasticsearch/config/es7/log4j2.properties b/docker/thirdparties/docker-compose/elasticsearch/config/es7/log4j2.properties
new file mode 100755
index 00000000000..0e033f21dce
--- /dev/null
+++ b/docker/thirdparties/docker-compose/elasticsearch/config/es7/log4j2.properties
@@ -0,0 +1,299 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+######## Server JSON ############################
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_old.policies.type = Policies
+appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_old.policies.time.interval = 1
+appender.rolling_old.policies.time.modulate = true
+appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_old.policies.size.size = 128MB
+appender.rolling_old.strategy.type = DefaultRolloverStrategy
+appender.rolling_old.strategy.fileIndex = nomax
+appender.rolling_old.strategy.action.type = Delete
+appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling_old.strategy.action.condition.type = IfFileName
+appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+rootLogger.appenderRef.rolling_old.ref = rolling_old
+
+######## Deprecation JSON #######################
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
+appender.deprecation_rolling.layout.type = ESJsonLayout
+appender.deprecation_rolling.layout.type_name = deprecation.elasticsearch
+appender.deprecation_rolling.layout.esmessagefields=x-opaque-id,key,category,elasticsearch.elastic_product_origin
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
+
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+appender.header_warning.type = HeaderWarningAppender
+appender.header_warning.name = header_warning
+#################################################
+######## Deprecation - old style pattern #######
+appender.deprecation_rolling_old.type = RollingFile
+appender.deprecation_rolling_old.name = deprecation_rolling_old
+appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling_old.layout.type = PatternLayout
+appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name] [%product_origin]%marker %m%n
+appender.deprecation_rolling_old.filter.rate_limit.type = RateLimitingFilter
+
+appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _deprecation-%i.log.gz
+appender.deprecation_rolling_old.policies.type = Policies
+appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling_old.policies.size.size = 1GB
+appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling_old.strategy.max = 4
+#################################################
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old
+logger.deprecation.appenderRef.header_warning.ref = header_warning
+logger.deprecation.additivity = false
+
+######## Search slowlog JSON ####################
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog.json
+appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
+appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
+
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog-%i.json.gz
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.size.size = 1GB
+appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling.strategy.max = 4
+#################################################
+######## Search slowlog - old style pattern ####
+appender.index_search_slowlog_rolling_old.type = RollingFile
+appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old
+appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog.log
+appender.index_search_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog-%i.log.gz
+appender.index_search_slowlog_rolling_old.policies.type = Policies
+appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling_old.strategy.max = 4
+#################################################
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old
+logger.index_search_slowlog_rolling.additivity = false
+
+######## Indexing slowlog JSON ##################
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.json
+appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
+appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source
+
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.json.gz
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling.strategy.max = 4
+#################################################
+######## Indexing slowlog - old style pattern ##
+appender.index_indexing_slowlog_rolling_old.type = RollingFile
+appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old
+appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.log.gz
+appender.index_indexing_slowlog_rolling_old.policies.type = Policies
+appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling_old.strategy.max = 4
+#################################################
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
+logger.index_indexing_slowlog.additivity = false
+
+
+appender.audit_rolling.type = RollingFile
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+                "type":"audit", \
+                "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
+                %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+                %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+                %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+                %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+                %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+                %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+                %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
+                %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+                %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.roles":%map{user.roles}}\
+                %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
+                %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
+                %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
+                %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
+                %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+                %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+                %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+                %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+                %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+                %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
+                %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+                %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
+                %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+                %varsNotEmpty{, "indices":%map{indices}}\
+                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+                %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
+                %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
+                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+                %varsNotEmpty{, "put":%map{put}}\
+                %varsNotEmpty{, "delete":%map{delete}}\
+                %varsNotEmpty{, "change":%map{change}}\
+                %varsNotEmpty{, "create":%map{create}}\
+                %varsNotEmpty{, "invalidate":%map{invalidate}}\
+                }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "origin.type" a received REST request is translated into one or more
transport requests. This indicates which processing layer generated the event
"rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed",
"access_granted", "run_as_granted", etc.
+# "authentication.type" one of "realm", "api_key", "token", "anonymous" or
"internal"
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is
impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the
subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.run_by.realm" the realm name of the impersonating subject
("user.run_by.name")
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the
realm name the impersonated user is looked up from
+# "user.roles" the roles array of the user; these are the roles that are
granting privileges
+# "apikey.id" this field is present if and only if the "authentication.type"
is "api_key"
+# "apikey.name" this field is present if and only if the "authentication.type"
is "api_key"
+# "authentication.token.name" this field is present if and only if the
authenticating credential is a service account token
+# "authentication.token.type" this field is present if and only if the
authenticating credential is a service account token
+# "event.type" informs about what internal system generated the event;
possible values are "rest", "transport", "ip_filter" and
"security_config_change"
+# "origin.address" the remote address and port of the first network hop, i.e.
a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an
"authentication_successful"; the subject is not yet authenticated
+# "url.path" the URI component between the port and the query string; it is
percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is
percent (URL) encoded
+# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT,
DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
+# "request.body" the content of the request body entity, JSON escaped
+# "request.id" a synthetic identifier for the incoming request, this is unique
per incoming request, and consistent across all audit events generated by that
request
+# "action" an action is the most granular operation that is authorized and
this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is
the name of the request class, similar to how rest requests are identified by
the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "trace_id" an identifier conveyed by the part of "traceparent" request header
+# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header,
as a verbatim string value (not an array)
+# "transport.profile" name of the transport profile in case this is a
"connection_granted" or "connection_denied" event
+# "rule" name of the applied rule if the "origin.type" is "ip_filter"
+# the "put", "delete", "change", "create", "invalidate" fields are only present
+# when the "event.type" is "security_config_change" and contain the security
config change (as an object) taking effect
+
+appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.json
+appender.audit_rolling.policies.type = Policies
+appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.audit_rolling.policies.time.interval = 1
+appender.audit_rolling.policies.time.modulate = true
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
\ No newline at end of file
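
Unlike the 6.x file, this 7.x config writes the server log twice: as JSON via ESJsonLayout (*_server.json) and in the old pattern layout (*.log). Combined with the ES_LOG_STYLE: "file" environment variable added in es.yaml.tpl below, both land in the mounted ./logs/es7/ directory; a sketch for spot-checking the JSON variant after startup:

    tail -n 5 docker/thirdparties/docker-compose/elasticsearch/logs/es7/*_server.json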
diff --git a/docker/thirdparties/docker-compose/elasticsearch/config/es8/log4j2.properties b/docker/thirdparties/docker-compose/elasticsearch/config/es8/log4j2.properties
new file mode 100755
index 00000000000..10a7f36c9cd
--- /dev/null
+++ b/docker/thirdparties/docker-compose/elasticsearch/config/es8/log4j2.properties
@@ -0,0 +1,273 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+status = error
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+######## Server JSON ############################
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
+appender.rolling.layout.type = ECSJsonLayout
+appender.rolling.layout.dataset = elasticsearch.server
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_old.policies.type = Policies
+appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_old.policies.time.interval = 1
+appender.rolling_old.policies.time.modulate = true
+appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_old.policies.size.size = 128MB
+appender.rolling_old.strategy.type = DefaultRolloverStrategy
+appender.rolling_old.strategy.fileIndex = nomax
+appender.rolling_old.strategy.action.type = Delete
+appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling_old.strategy.action.condition.type = IfFileName
+appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+rootLogger.appenderRef.rolling_old.ref = rolling_old
+
+######## Deprecation JSON #######################
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
+appender.deprecation_rolling.layout.type = ECSJsonLayout
+# Intentionally follows a different pattern to above
+appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
+
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+appender.header_warning.type = HeaderWarningAppender
+appender.header_warning.name = header_warning
+#################################################
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.header_warning.ref = header_warning
+logger.deprecation.additivity = false
+
+######## Search slowlog JSON ####################
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog.json
+appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog
+
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog-%i.json.gz
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.size.size = 1GB
+appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling.strategy.max = 4
+#################################################
+
+#################################################
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+######## Indexing slowlog JSON ##################
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.json
+appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog
+
+
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.json.gz
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling.strategy.max = 4
+#################################################
+
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
+
+
+logger.com_amazonaws.name = com.amazonaws
+logger.com_amazonaws.level = warn
+
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
+logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error
+
+logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
+logger.com_amazonaws_metrics_AwsSdkMetrics.level = error
+
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
+logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error
+
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
+logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error
+
+
+appender.audit_rolling.type = RollingFile
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+                "type":"audit", \
+                "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
+                %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
+                %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
+                %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+                %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+                %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+                %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+                %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+                %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+                %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
+                %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+                %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+                %varsNotEmpty{, "user.roles":%map{user.roles}}\
+                %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
+                %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
+                %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
+                %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
+                %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+                %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+                %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+                %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+                %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+                %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
+                %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+                %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
+                %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+                %varsNotEmpty{, "indices":%map{indices}}\
+                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+                %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
+                %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
+                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+                %varsNotEmpty{, "put":%map{put}}\
+                %varsNotEmpty{, "delete":%map{delete}}\
+                %varsNotEmpty{, "change":%map{change}}\
+                %varsNotEmpty{, "create":%map{create}}\
+                %varsNotEmpty{, "invalidate":%map{invalidate}}\
+                }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "origin.type" a received REST request is translated into one or more
transport requests. This indicates which processing layer generated the event
"rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed",
"access_granted", "run_as_granted", etc.
+# "authentication.type" one of "realm", "api_key", "token", "anonymous" or
"internal"
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is
impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the
subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.run_by.realm" the realm name of the impersonating subject
("user.run_by.name")
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the
realm name the impersonated user is looked up from
+# "user.roles" the roles array of the user; these are the roles that are
granting privileges
+# "apikey.id" this field is present if and only if the "authentication.type"
is "api_key"
+# "apikey.name" this field is present if and only if the "authentication.type"
is "api_key"
+# "authentication.token.name" this field is present if and only if the
authenticating credential is a service account token
+# "authentication.token.type" this field is present if and only if the
authenticating credential is a service account token
+# "event.type" informs about what internal system generated the event;
possible values are "rest", "transport", "ip_filter" and
"security_config_change"
+# "origin.address" the remote address and port of the first network hop, i.e.
a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an
"authentication_successful"; the subject is not yet authenticated
+# "url.path" the URI component between the port and the query string; it is
percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is
percent (URL) encoded
+# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT,
DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
+# "request.body" the content of the request body entity, JSON escaped
+# "request.id" a synthetic identifier for the incoming request, this is unique
per incoming request, and consistent across all audit events generated by that
request
+# "action" an action is the most granular operation that is authorized and
this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is
the name of the request class, similar to how rest requests are identified by
the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "trace_id" an identifier conveyed by the part of "traceparent" request header
+# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header,
as a verbatim string value (not an array)
+# "transport.profile" name of the transport profile in case this is a
"connection_granted" or "connection_denied" event
+# "rule" name of the applied rule if the "origin.type" is "ip_filter"
+# the "put", "delete", "change", "create", "invalidate" fields are only present
+# when the "event.type" is "security_config_change" and contain the security
config change (as an object) taking effect
+
+appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}-%i.json.gz
+appender.audit_rolling.policies.type = Policies
+appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.audit_rolling.policies.time.interval = 1
+appender.audit_rolling.policies.time.modulate = true
+appender.audit_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.audit_rolling.policies.size.size = 1GB
+appender.audit_rolling.strategy.type = DefaultRolloverStrategy
+appender.audit_rolling.strategy.fileIndex = nomax
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
\ No newline at end of file
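
The 8.x file differs from the 7.x one mainly in layout (ECSJsonLayout with dataset names instead of ESJsonLayout type_name), the extra com.amazonaws.* noise-suppression loggers, and an audit appender that now also rolls on size (1GB) and compresses to .json.gz. A hypothetical check of what the audit appender accumulates during a run:

    ls -lh docker/thirdparties/docker-compose/elasticsearch/logs/es8/*_audit-*.json.gz 2>/dev/null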
diff --git a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
index 92ce5cd0170..cee7c2748a8 100644
--- a/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
+++ b/docker/thirdparties/docker-compose/elasticsearch/es.yaml.tpl
@@ -20,6 +20,7 @@ version: "3.9"
 services:
   doris--es_6:
     # es official not provide 6.x image for arm/v8, use compatible image.
+    # https://github.com/dockhippie/elasticsearch/tree/master/v6.8
     image: webhippie/elasticsearch:6.8
     ports:
       - ${DOCKER_ES_6_EXTERNAL_PORT}:9200
@@ -27,13 +28,16 @@ services:
       ELASTICSEARCH_CLUSTER_NAME: "elasticsearch6"
       ES_JAVA_OPTS: "-Xms256m -Xmx256m"
       discovery.type: "single-node"
-      ELASTICSEARCH_XPACK_SECURITY_ENABLED: "false"
+      ELASTICSEARCH_SKIP_TEMPLATES: "true"
     volumes:
-      - ./data/es6/:/usr/share/elasticsearch/data
+      - ./data/es6/:/var/lib/elasticsearch/data
+      - ./logs/es6/:/var/lib/elasticsearch/logs
+      - ./config/es6/log4j2.properties:/etc/elasticsearch/log4j2.properties
+      - ./config/es6/elasticsearch.yml:/etc/elasticsearch/elasticsearch.yml
     networks:
       - doris--es
     healthcheck:
-      test: [ "CMD", "curl", "localhost:9200" ]
+      test: [ "CMD", "curl", "localhost:9200/_cluster/health?wait_for_status=green" ]
       interval: 30s
       timeout: 10s
       retries: 100
@@ -46,12 +50,19 @@ services:
       ES_JAVA_OPTS: "-Xms256m -Xmx256m"
       discovery.type: "single-node"
       xpack.security.enabled: "false"
+      cluster.routing.allocation.disk.threshold_enabled: true
+      cluster.routing.allocation.disk.watermark.low: 500mb
+      cluster.routing.allocation.disk.watermark.high: 300mb
+      cluster.routing.allocation.disk.watermark.flood_stage: 200mb
+      ES_LOG_STYLE: "file"
     volumes:
       - ./data/es7/:/usr/share/elasticsearch/data
+      - ./logs/es7/:/usr/share/elasticsearch/logs
+      - ./config/es7/log4j2.properties:/usr/share/elasticsearch/log4j2.properties
     networks:
       - doris--es
     healthcheck:
-      test: [ "CMD", "curl", "localhost:9200" ]
+      test: [ "CMD", "curl", "localhost:9200/_cluster/health?wait_for_status=green" ]
       interval: 30s
       timeout: 10s
       retries: 100
@@ -64,12 +75,19 @@ services:
ES_JAVA_OPTS: "-Xms256m -Xmx256m"
discovery.type: "single-node"
xpack.security.enabled: "false"
+ cluster.routing.allocation.disk.threshold_enabled: true
+ cluster.routing.allocation.disk.watermark.low: 500mb
+ cluster.routing.allocation.disk.watermark.high: 300mb
+ cluster.routing.allocation.disk.watermark.flood_stage: 200mb
+ ES_LOG_STYLE: "file"
volumes:
- ./data/es8/:/usr/share/elasticsearch/data
+ - ./logs/es8/:/usr/share/elasticsearch/logs
+      - ./config/es8/log4j2.properties:/usr/share/elasticsearch/log4j2.properties
networks:
- doris--es
healthcheck:
- test: [ "CMD", "curl", "localhost:9200" ]
+      test: [ "CMD", "curl", "localhost:9200/_cluster/health?wait_for_status=green" ]
interval: 30s
timeout: 10s
retries: 100
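A note on the healthcheck change above: a plain "curl localhost:9200" succeeds as soon as the HTTP layer answers, while the new URL blocks until the cluster itself reports green health, so dependent test scripts only proceed against a fully initialized node. A minimal manual probe along the same lines (the --fail flag and the explicit timeout parameter are assumptions added for interactive use, not part of the compose file):

    # Sketch only: readiness probe run by hand against a local node.
    # curl exits non-zero via --fail if the health request times out with 408.
    curl --fail -s "localhost:9200/_cluster/health?wait_for_status=green&timeout=30s"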
diff --git a/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test1.json b/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test1.json
index 97c5f537c5b..26dbdb98203 100755
--- a/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test1.json
+++ b/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test1.json
@@ -46,7 +46,7 @@
"type": "long"
},
"c_unsigned_long": {
- "type": "unsigned_long"
+ "type": "long"
},
"c_float": {
"type": "float"
diff --git a/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test2.json b/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test2.json
index 1dec9e7ff44..e1feb6664b2 100755
--- a/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test2.json
+++ b/docker/thirdparties/docker-compose/elasticsearch/scripts/index/es6_test2.json
@@ -49,7 +49,7 @@
"type": "long"
},
"c_unsigned_long": {
- "type": "unsigned_long"
+ "type": "long"
},
"c_float": {
"type": "float"
diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh
index e94ddbca9f8..a3d925ca4a7 100755
--- a/docker/thirdparties/run-thirdparties-docker.sh
+++ b/docker/thirdparties/run-thirdparties-docker.sh
@@ -180,6 +180,14 @@ if [[ "${RUN_ES}" -eq 1 ]]; then
sudo mkdir -p "${ROOT}"/docker-compose/elasticsearch/data/es8/
sudo rm -rf "${ROOT}"/docker-compose/elasticsearch/data/es8/*
sudo chmod -R 777 "${ROOT}"/docker-compose/elasticsearch/data
+ sudo mkdir -p "${ROOT}"/docker-compose/elasticsearch/logs/es6/
+ sudo rm -rf "${ROOT}"/docker-compose/elasticsearch/logs/es6/*
+ sudo mkdir -p "${ROOT}"/docker-compose/elasticsearch/logs/es7/
+ sudo rm -rf "${ROOT}"/docker-compose/elasticsearch/logs/es7/*
+ sudo mkdir -p "${ROOT}"/docker-compose/elasticsearch/logs/es8/
+ sudo rm -rf "${ROOT}"/docker-compose/elasticsearch/logs/es8/*
+ sudo chmod -R 777 "${ROOT}"/docker-compose/elasticsearch/logs
+ sudo chmod -R 777 "${ROOT}"/docker-compose/elasticsearch/config
    sudo docker compose -f "${ROOT}"/docker-compose/elasticsearch/es.yaml --env-file "${ROOT}"/docker-compose/elasticsearch/es.env up -d --remove-orphans
fi
fi
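The directory preparation added above repeats the same steps per ES version; an equivalent loop-based sketch, assuming the same ${ROOT} layout the script already uses:

    # Sketch only: recreate empty per-version log directories before compose up.
    for v in es6 es7 es8; do
        sudo mkdir -p "${ROOT}/docker-compose/elasticsearch/logs/${v}/"
        sudo rm -rf "${ROOT}/docker-compose/elasticsearch/logs/${v:?}/"*
    done
    sudo chmod -R 777 "${ROOT}/docker-compose/elasticsearch/logs"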
diff --git a/regression-test/data/external_table_p0/es/test_es_query.out b/regression-test/data/external_table_p0/es/test_es_query.out
index cc8be6d8a36..3b995832c4d 100644
--- a/regression-test/data/external_table_p0/es/test_es_query.out
+++ b/regression-test/data/external_table_p0/es/test_es_query.out
@@ -39,47 +39,47 @@ I'm not null or empty
2022-08-08 2022-08-11T12:10:10 2022-08-11T12:10:10 2022-08-11T12:10:10 2022-08-11T11:10:10
-- !sql_6_02 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
-- !sql_6_03 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] I'm not null or empty string3 text3_4*5 5.0 2022-08-08T00:00 3333 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] I'm not null or empty string3 text3_4*5 5.0 2022-08-08T00:00 3333 2022-08-08T20:10:10
-- !sql_6_04 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
-- !sql_6_05 --
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
-- !sql_6_06 --
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
-true 1 128 32768 -1 0 1.0 1 1 1 2020-01-01T00:00 2020-01-01 12:00:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
+true 1 128 32768 -1 0 1.0 1.0 1.0 1.0 2020-01-01 2020-01-01T12:00 a d 192.168.0.1 {"name":"Andy","age":18}
-- !sql_6_07 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
-- !sql_6_08 --
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-- !sql_6_09 --
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
+[1, 0, 1, 1] [1, -2, -3, 4] [128, 129, -129, -130] [32768, 32769, -32769, -32770] [-1, 0, 1, 2] [0, 1, 2, 3] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] ["a", "b", "c"] ["d", "e", "f"] ["192.168.0.1", "127.0.0.1"] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"]
-- !sql_6_10 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
-- !sql_6_11 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] string2 text2 4.0 2022-08-08T00:00 2222 2022-08-08T12:10:10
-- !sql_6_12 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] I'm not null or empty string3 text3_4*5 5.0 2022-08-08T00:00 3333 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] I'm not null or empty string3 text3_4*5 5.0 2022-08-08T00:00 3333 2022-08-08T20:10:10
-- !sql_6_13 --
2022-08-08T20:10:10
diff --git a/regression-test/data/external_table_p0/es/test_es_query_no_http_url.out b/regression-test/data/external_table_p0/es/test_es_query_no_http_url.out
index ceedce57b18..edab82a4056 100644
--- a/regression-test/data/external_table_p0/es/test_es_query_no_http_url.out
+++ b/regression-test/data/external_table_p0/es/test_es_query_no_http_url.out
@@ -6,7 +6,7 @@
["2020-01-01", "2020-01-02"] [-1, 0, 1, 2] [0, 1, 2, 3] ["d", "e", "f"]
[128, 129, -129, -130] ["192.168.0.1", "127.0.0.1"] string1 [1, 2, 3, 4]
2022-08-08 2022-08-08T12:10:10 text#1 ["2020-01-01", "2020-01-02"]
3.14 [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] ["a", "b", "c"]
["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] 2022-08-08T12:10:10
2022-08-08T12:10:10 2022-08-08T20:10:10 [1, -2, -3, 4] [1, 0, 1, 1]
[32768, 32769, -32769, -32770]
-- !sql61 --
-[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01 00:00:00", "2020-01-02 00:00:00"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
+[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 text#1 3.14 2022-08-08T00:00 12345 2022-08-08T20:10:10
-- !sql71 --
[1, 0, 1, 1] [1, -2, -3, 4] ["2020-01-01", "2020-01-02"] ["2020-01-01 12:00:00", "2020-01-02 13:01:01"] [1, 2, 3, 4] [1, 1.1, 1.2, 1.3] [1, 2, 3, 4] [32768, 32769, -32769, -32770] ["192.168.0.1", "127.0.0.1"] ["a", "b", "c"] [-1, 0, 1, 2] ["{"name":"Andy","age":18}", "{"name":"Tim","age":28}"] [1, 2, 3, 4] [128, 129, -129, -130] ["d", "e", "f"] [0, 1, 2, 3] \N string1 2022-08-08T20:10:10 text#1 3.14 2022-08-08T00:00 2022-08-08T12:10:10 1659931810000 2022-08-08T12:10:10 2022-08-08T20:10:10 12345
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]