This is an automated email from the ASF dual-hosted git repository.
tanxinyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new bd3094265bb Replenish effective mode in iotdb-system.properties
(#12706)
bd3094265bb is described below
commit bd3094265bb4bd18e7cb076784be0ca2aff9216f
Author: shuwenwei <[email protected]>
AuthorDate: Wed Jun 12 18:13:34 2024 +0800
Replenish effective mode in iotdb-system.properties (#12706)
---
.../java/org/apache/iotdb/db/conf/IoTDBConfig.java | 10 +-----
.../org/apache/iotdb/db/conf/IoTDBDescriptor.java | 5 ---
.../db/queryengine/plan/parser/ASTVisitor.java | 4 +--
.../apache/iotdb/db/utils/TypeInferenceUtils.java | 21 ++++--------
.../resources/conf/generate_properties.bat | 3 +-
.../resources/conf/iotdb-system.properties | 40 ++++++++++++++++++----
.../iotdb/commons/conf/ConfigurationFileUtils.java | 17 +++++----
7 files changed, 52 insertions(+), 48 deletions(-)
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index ea42d9f4d73..421d8c5f0ed 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -73,7 +73,7 @@ public class IoTDBConfig {
/* Names of Watermark methods */
public static final String WATERMARK_GROUPED_LSB = "GroupBasedLSBMethod";
- public static final String CONFIG_NAME = "iotdb-datanode.properties";
+ public static final String CONFIG_NAME = "iotdb-system.properties";
private static final Logger logger =
LoggerFactory.getLogger(IoTDBConfig.class);
private static final String MULTI_DIR_STRATEGY_PREFIX =
"org.apache.iotdb.db.storageengine.rescon.disk.strategy.";
@@ -2184,14 +2184,6 @@ public class IoTDBConfig {
this.avgSeriesPointNumberThreshold = avgSeriesPointNumberThreshold;
}
- public long getCrossCompactionFileSelectionTimeBudget() {
- return crossCompactionFileSelectionTimeBudget;
- }
-
- void setCrossCompactionFileSelectionTimeBudget(long
crossCompactionFileSelectionTimeBudget) {
- this.crossCompactionFileSelectionTimeBudget =
crossCompactionFileSelectionTimeBudget;
- }
-
public boolean isRpcThriftCompressionEnable() {
return rpcThriftCompressionEnable;
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 9040ab125a1..81c60b9b809 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -591,11 +591,6 @@ public class IoTDBDescriptor {
conf.setChunkBufferPoolEnable(
Boolean.parseBoolean(properties.getProperty("chunk_buffer_pool_enable")));
}
- conf.setCrossCompactionFileSelectionTimeBudget(
- Long.parseLong(
- properties.getProperty(
- "cross_compaction_file_selection_time_budget",
-
Long.toString(conf.getCrossCompactionFileSelectionTimeBudget()))));
conf.setMergeIntervalSec(
Long.parseLong(
properties.getProperty(
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
index 261f3bb7ed9..68f865d362c 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java
@@ -3257,8 +3257,8 @@ public class ASTVisitor extends
IoTDBSqlParserBaseVisitor<Statement> {
Integer.parseInt(ctx.INTEGER_LITERAL() == null ? "-1" :
ctx.INTEGER_LITERAL().getText());
Map<String, String> configItems = new HashMap<>();
for (IoTDBSqlParser.SetConfigurationEntryContext entry :
ctx.setConfigurationEntry()) {
- String key = entry.STRING_LITERAL(0).getText().replace("\"", "");
- String value = entry.STRING_LITERAL(1).getText().replace("\"", "");
+ String key =
parseStringLiteral(entry.STRING_LITERAL(0).getText()).trim();
+ String value =
parseStringLiteral(entry.STRING_LITERAL(1).getText()).trim();
configItems.put(key, value);
}
setConfigurationStatement.setNodeId(nodeId);
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/TypeInferenceUtils.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/TypeInferenceUtils.java
index 5665fd9aa5a..165ea4b8b0f 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/TypeInferenceUtils.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/utils/TypeInferenceUtils.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.utils;
import org.apache.iotdb.commons.path.MeasurementPath;
+import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.sql.SemanticException;
import org.apache.iotdb.db.queryengine.plan.analyze.ExpressionUtils;
@@ -40,17 +41,7 @@ import java.util.List;
public class TypeInferenceUtils {
- private static final TSDataType booleanStringInferType =
- IoTDBDescriptor.getInstance().getConfig().getBooleanStringInferType();
-
- private static final TSDataType integerStringInferType =
- IoTDBDescriptor.getInstance().getConfig().getIntegerStringInferType();
-
- private static final TSDataType floatingStringInferType =
- IoTDBDescriptor.getInstance().getConfig().getFloatingStringInferType();
-
- private static final TSDataType nanStringInferType =
- IoTDBDescriptor.getInstance().getConfig().getNanStringInferType();
+ private static final IoTDBConfig CONF =
IoTDBDescriptor.getInstance().getConfig();
private TypeInferenceUtils() {}
@@ -108,18 +99,18 @@ public class TypeInferenceUtils {
} else if (inferType) {
String strValue = value.toString();
if (isBoolean(strValue)) {
- return booleanStringInferType;
+ return CONF.getBooleanStringInferType();
} else if (isNumber(strValue)) {
if (isLong(StringUtils.trim(strValue))) {
- return integerStringInferType;
+ return CONF.getIntegerStringInferType();
} else {
- return floatingStringInferType;
+ return CONF.getFloatingStringInferType();
}
} else if ("null".equals(strValue) || "NULL".equals(strValue)) {
return null;
// "NaN" is returned if the NaN Literal is given in Parser
} else if ("NaN".equals(strValue)) {
- return nanStringInferType;
+ return CONF.getNanStringInferType();
} else if (isBlob(strValue)) {
return TSDataType.BLOB;
} else {
diff --git
a/iotdb-core/node-commons/src/assembly/resources/conf/generate_properties.bat
b/iotdb-core/node-commons/src/assembly/resources/conf/generate_properties.bat
index f158d7fe6d2..1e270fea64b 100644
---
a/iotdb-core/node-commons/src/assembly/resources/conf/generate_properties.bat
+++
b/iotdb-core/node-commons/src/assembly/resources/conf/generate_properties.bat
@@ -61,4 +61,5 @@ for /f "usebackq tokens=*" %%i in ("%target_template_file%")
do (
echo !line!>>"%target_properties_file%"
)
endlocal
-)
\ No newline at end of file
+)
+powershell -Command "(Get-Content '%target_properties_file%') -join \"`n\" |
Set-Content -NoNewline '%target_properties_file%'"
\ No newline at end of file
diff --git
a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
index 9e32106da52..38f3f7c00f0 100644
---
a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
+++
b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
@@ -257,6 +257,7 @@ dn_seed_config_node=127.0.0.1:10710
# dn_rpc_thrift_compression_enable=false
# if true, a snappy based compression method will be called before sending
data by the network
+# effectiveMode: restart
# Datatype: boolean
# this feature is under development, set this as false before it is done.
# dn_rpc_advanced_compression_enable=false
@@ -431,6 +432,7 @@ dn_seed_config_node=127.0.0.1:10710
# If it is relative, system will save the data in the relative path directory
it indicates under the IoTDB folder.
# If there are more than one directory, please separate them by commas ",".
# Note: If pipe_consensus_receiver_file_dirs is assigned an empty
string(i.e.,zero-size), it will be handled as a relative path.
+# effectiveMode: restart
# For windows platform
# If its prefix is a drive specifier followed by "\\", or if its prefix is
"\\\\", then the path is absolute. Otherwise, it is relative.
#
pipe_consensus_receiver_file_dirs=data\\datanode\\system\\pipe\\consensus\\receiver
@@ -537,6 +539,7 @@ dn_metric_prometheus_reporter_port=9092
####################
# Used for indicate cluster name and distinguish different cluster.
+# effectiveMode: first_start
# Datatype: string
cluster_name=defaultCluster
@@ -719,6 +722,7 @@ data_replication_factor=1
# primitive_array_size=64
# Ratio of compaction memory for chunk metadata maintains in memory when doing
compaction
+# effectiveMode: restart
# Datatype: double
# chunk_metadata_size_proportion=0.1
@@ -741,6 +745,7 @@ data_replication_factor=1
# reject_proportion=0.8
# Ratio of memory for the DevicePathCache. DevicePathCache is the deviceId
cache, keep only one copy of the same deviceId in memory
+# effectiveMode: restart
# Datatype: double
# device_path_cache_proportion=0.05
@@ -812,12 +817,15 @@ data_replication_factor=1
# Policy of DataNodeSchemaCache eviction.
# Support FIFO and LRU policy. FIFO takes low cache update overhead. LRU takes
high cache hit rate.
+# effectiveMode: restart
+# Datatype: String
# datanode_schema_cache_eviction_policy=FIFO
# This configuration parameter sets the maximum number of time series allowed
in the cluster.
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new
time series.
# -1 means unlimited
+# effectiveMode: restart
# Datatype: int
# cluster_timeseries_limit_threshold=-1
@@ -825,6 +833,7 @@ data_replication_factor=1
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new
time series.
# -1 means unlimited
+# effectiveMode: restart
# Datatype: int
# cluster_device_limit_threshold=-1
@@ -832,6 +841,7 @@ data_replication_factor=1
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new
databases.
# -1 means unlimited.
+# effectiveMode: restart
# Datatype: int
# database_limit_threshold = -1
@@ -989,10 +999,12 @@ data_replication_factor=1
# query_thread_count=0
# How many pipeline drivers will be created for one fragment instance. When <=
0, use CPU core number / 2.
+# effectiveMode: restart
# Datatype: int
# degree_of_query_parallelism=0
# The threshold of count map size when calculating the MODE aggregation
function
+# effectiveMode: restart
# Datatype: int
# mode_map_size_threshold=10000
@@ -1002,6 +1014,7 @@ data_replication_factor=1
# batch_size=100000
# The memory for external sort in sort operator, when the data size is smaller
than sort_buffer_size_in_bytes, the sort operator will use in-memory sort.
+# effectiveMode: restart
# Datatype: long
# sort_buffer_size_in_bytes=1048576
@@ -1021,6 +1034,7 @@ data_replication_factor=1
# timestamp_precision=ms
# When the timestamp precision check is enabled, the timestamps those are over
13 digits for ms precision, or over 16 digits for us precision are not allowed
to be inserted.
+# effectiveMode: first_start
# Datatype: Boolean
# timestamp_precision_check_enabled=true
@@ -1033,12 +1047,14 @@ data_replication_factor=1
# default_ttl_in_ms=-1
# The maximum number of TTL rules stored in the system, the default is 1000.
+# effectiveMode: restart
# Negative value means the threshold is unlimited.
# Datatype: int
# ttl_rule_capacity=1000
# The interval of TTL check task in each database. The TTL check task will
inspect and select files with a higher volume of expired data for compaction.
Default is 2 hours.
# Notice: It is not recommended to change it too small, as it will affect the
read and write performance of the system.
+# effectiveMode: restart
# Unit: ms
# Datatype: int
# ttl_check_interval=7200000
@@ -1046,11 +1062,13 @@ data_replication_factor=1
# The maximum expiring time of devices that have a ttl. Default is 1 month.
# If the data elapsed time (current timestamp minus the maximum data timestamp
of the device in the file) of such devices exceeds this value, then the file
will be cleaned by compaction.
# Notice: It is not recommended to change it too small, as it will affect the
read and write performance of the system.
+# effectiveMode: restart
# Unit: ms
# Datatype: int
# max_expired_time=2592000000
# The expired device ratio. If the ratio of expired devices in one file
exceeds this value, then expired data of this file will be cleaned by
compaction.
+# effectiveMode: restart
# Datatype: float
# expired_data_ratio=0.3
@@ -1062,6 +1080,7 @@ data_replication_factor=1
# Add a switch to enable separate sequence and unsequence data.
# If it is true, then data will be separated into seq and unseq data dir. If
it is false, then all data will be written into unseq data dir.
+# effectiveMode: restart
# Datatype: boolean
# enable_separate_data=true
@@ -1208,6 +1227,7 @@ data_replication_factor=1
# compaction_priority=BALANCE
# The size of candidate compaction task queue.
+# effectiveMode: restart
# Datatype: int
# candidate_compaction_task_queue_size=50
@@ -1263,13 +1283,6 @@ data_replication_factor=1
# Datatype: int
# min_cross_compaction_unseq_file_level=1
-# If one merge file selection runs for more than this time, it will be ended
and its current
-# selection will be used as final selection.
-# When < 0, it means time is unbounded.
-# effectiveMode: restart
-# Datatype: long, Unit: ms
-# cross_compaction_file_selection_time_budget=30000
-
# How many threads will be set up to perform compaction, 10 by default.
# Set to 1 when less than or equal to 0.
# effectiveMode: hot_reload
@@ -1623,6 +1636,7 @@ data_replication_factor=1
# For Windows platform
# If its prefix is a drive specifier followed by "\\", or if its prefix is
"\\\\", then the path is
# absolute. Otherwise, it is relative.
+# effectiveMode: first_start
# pipe_lib_dir=ext\\pipe
# For Linux platform
# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
@@ -1630,34 +1644,41 @@ data_replication_factor=1
# The maximum number of threads that can be used to execute the pipe subtasks
in PipeSubtaskExecutor.
# The actual value will be min(pipe_subtask_executor_max_thread_num, max(1,
CPU core number / 2)).
+# effectiveMode: restart
# Datatype: int
# pipe_subtask_executor_max_thread_num=5
# The connection timeout (in milliseconds) for the thrift client.
+# effectiveMode: restart
# Datatype: int
# pipe_sink_timeout_ms=900000
# The maximum number of selectors that can be used in the sink.
# Recommend to set this value to less than or equal to
pipe_sink_max_client_number.
+# effectiveMode: restart
# Datatype: int
# pipe_sink_selector_number=4
# The maximum number of clients that can be used in the sink.
+# effectiveMode: restart
# Datatype: int
# pipe_sink_max_client_number=16
# Whether to enable receiving pipe data through air gap.
# The receiver can only return 0 or 1 in tcp mode to indicate whether the data
is received successfully.
+# effectiveMode: restart
# Datatype: Boolean
# pipe_air_gap_receiver_enabled=false
# The port for the server to receive pipe data through air gap.
# Datatype: int
+# effectiveMode: restart
# pipe_air_gap_receiver_port=9780
# The total bytes that all pipe sinks can transfer per second.
# When given a value less than or equal to 0, it means no limit.
# default value is -1, which means no limit.
+# effectiveMode: hot_reload
# Datatype: double
# pipe_all_sinks_rate_limit_bytes_per_second=-1
@@ -1755,12 +1776,14 @@ data_replication_factor=1
# data_region_ratis_preserve_logs_num_when_purge=1000
# Raft Log disk size control
+# effectiveMode: restart
# Datatype: int
# config_node_ratis_log_max_size = 2147483648
# schema_region_ratis_log_max_size = 2147483648
# data_region_ratis_log_max_size = 21474836480
# Raft periodic snapshot interval, time unit is second
+# effectiveMode: restart
# Datatype: int
# config_node_ratis_periodic_snapshot_interval=86400
# schema_region_ratis_periodic_snapshot_interval=86400
@@ -1770,6 +1793,7 @@ data_replication_factor=1
### Fast IoTConsensus Configuration
####################
# Default event buffer size for connector and receiver in pipe consensus
+# effectiveMode: restart
# DataType: int
# fast_iot_consensus_pipeline_size=5
@@ -1831,6 +1855,7 @@ data_replication_factor=1
####################
# The thread count which can be used for model inference operation.
+# effectiveMode: restart
# Datatype: int
# model_inference_execution_thread_count=5
@@ -1847,6 +1872,7 @@ data_replication_factor=1
# The maximum bytes per second of disk write throughput when loading tsfile.
# When given a value less than or equal to 0, it means no limit.
# Default value is -1, which means no limit.
+# effectiveMode: hot_reload
# Datatype: int
# load_write_throughput_bytes_per_second=-1
diff --git
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/ConfigurationFileUtils.java
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/ConfigurationFileUtils.java
index 0bd422d4620..acccffb98bc 100644
---
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/ConfigurationFileUtils.java
+++
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/ConfigurationFileUtils.java
@@ -82,6 +82,7 @@ public class ConfigurationFileUtils {
"dn_data_region_consensus_port",
"dn_seed_config_node",
"dn_session_timeout_threshold",
+ "cluster_name",
"config_node_consensus_protocol_class",
"schema_replication_factor",
"data_replication_factor",
@@ -94,7 +95,8 @@ public class ConfigurationFileUtils {
"tag_attribute_total_size",
"timestamp_precision",
"iotdb_server_encrypt_decrypt_provider",
- "iotdb_server_encrypt_decrypt_provider_parameter"));
+ "iotdb_server_encrypt_decrypt_provider_parameter",
+ "pipe_lib_dir"));
public static void checkAndMayUpdate(
URL systemUrl, URL configNodeUrl, URL dataNodeUrl, URL commonUrl)
@@ -158,7 +160,7 @@ public class ConfigurationFileUtils {
BufferedReader reader = new BufferedReader(isr)) {
String line;
while ((line = reader.readLine()) != null) {
- content.append(line).append(System.lineSeparator());
+ content.append(line).append("\n");
}
} catch (IOException e) {
logger.warn("Failed to read configuration template", e);
@@ -192,7 +194,7 @@ public class ConfigurationFileUtils {
StringBuilder contentsOfNewConfigurationFile = new StringBuilder();
for (String currentLine : lines) {
if (currentLine.trim().isEmpty() || currentLine.trim().startsWith("#")) {
-
contentsOfNewConfigurationFile.append(currentLine).append(System.lineSeparator());
+ contentsOfNewConfigurationFile.append(currentLine).append("\n");
continue;
}
int equalsIndex = currentLine.indexOf('=');
@@ -201,17 +203,14 @@ public class ConfigurationFileUtils {
String key = currentLine.substring(0, equalsIndex).trim();
String value = currentLine.substring(equalsIndex + 1).trim();
if (!newConfigItems.containsKey(key)) {
-
contentsOfNewConfigurationFile.append(currentLine).append(System.lineSeparator());
+ contentsOfNewConfigurationFile.append(currentLine).append("\n");
continue;
}
if (newConfigItems.getProperty(key).equals(value)) {
-
contentsOfNewConfigurationFile.append(currentLine).append(System.lineSeparator());
+ contentsOfNewConfigurationFile.append(currentLine).append("\n");
newConfigItems.remove(key);
} else {
- contentsOfNewConfigurationFile
- .append("#")
- .append(currentLine)
- .append(System.lineSeparator());
+
contentsOfNewConfigurationFile.append("#").append(currentLine).append("\n");
}
}
}