This is an automated email from the ASF dual-hosted git repository.
qiaojialin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new 56bed8c [IOTDB-1326] make config files and IoTDBConfig consistent
(#3071)
56bed8c is described below
commit 56bed8ca514a1e7c12dc854b33c3291c2ab1c8fe
Author: xuesyn <[email protected]>
AuthorDate: Tue Apr 27 10:04:07 2021 +0800
[IOTDB-1326] make config files and IoTDBConfig consistent (#3071)
---
.../resources/conf/iotdb-cluster.properties | 8 +-
.../resources/conf/iotdb-engine.properties | 137 ++++++++++++++++++++-
.../java/org/apache/iotdb/db/conf/IoTDBConfig.java | 56 +++++----
3 files changed, 171 insertions(+), 30 deletions(-)
diff --git a/cluster/src/assembly/resources/conf/iotdb-cluster.properties
b/cluster/src/assembly/resources/conf/iotdb-cluster.properties
index 5fb1457..1ce4a43 100644
--- a/cluster/src/assembly/resources/conf/iotdb-cluster.properties
+++ b/cluster/src/assembly/resources/conf/iotdb-cluster.properties
@@ -88,7 +88,7 @@ default_replica_num=1
# so the leader will send logs(snapshot) to the follower,
# NOTICE, it may cost minutes of time to send a snapshot,
# so this parameter should be larger than the snapshot cost time.
-# catch_up_timeout_ms=300000
+# catch_up_timeout_ms=60000
# whether to use batch append entries in log catch up
# use_batch_in_catch_up=true
@@ -96,12 +96,12 @@ default_replica_num=1
# the minimum number of committed logs in memory, after each log deletion, at
most such number of logs
# will remain in memory. Increasing the number will reduce the chance to use
snapshot in catch-ups,
# but will also increase the memory footprint
-# min_num_of_logs_in_mem=1000
+# min_num_of_logs_in_mem=100
# maximum number of committed logs in memory, when reached, a log deletion
will be triggered.
# Increasing the number will reduce the chance to use snapshot in catch-ups,
but will also increase
# memory footprint
-# max_num_of_logs_in_mem=2000
+# max_num_of_logs_in_mem=1000
# maximum memory size of committed logs in memory, when reached, a log
deletion will be triggered.
# Increasing the number will reduce the chance to use snapshot in catch-ups,
but will also increase
@@ -156,7 +156,7 @@ default_replica_num=1
# whether enable use persist log on disk to catch up when no logs found in
memory, if set false,
# will use snapshot to catch up when no logs found in memory.
-# enable_use_persist_log_on_disk_to_catch_up=true
+# enable_use_persist_log_on_disk_to_catch_up=false
# The number of logs read on the disk at one time, which is mainly used to
control the memory usage.
# This value multiplied by the log size is about the amount of memory used to
read logs from the disk at one time.
diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties
b/server/src/assembly/resources/conf/iotdb-engine.properties
index f909b39..5299d78 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -21,31 +21,41 @@
### Web Page Configuration
####################
+# Datatype: boolean
# enable_metric_service=false
+# Datatype: int
# metrics_port=8181
+# Datatype: int
# query_cache_size_in_metric=50
####################
### RPC Configuration
####################
+# Datatype: String
rpc_address=0.0.0.0
+# Datatype: int
rpc_port=6667
+# Datatype: boolean
# rpc_thrift_compression_enable=false
# if true, a snappy based compression method will be called before sending
data by the network
+# Datatype: boolean
# rpc_advanced_compression_enable=false
+# Datatype: int
# rpc_max_concurrent_client_num=65535
# thrift max frame size, 64MB by default
+# Datatype: int
# thrift_max_frame_size=67108864
# thrift init buffer size
+# Datatype: int
# thrift_init_buffer_size=1024
####################
@@ -53,19 +63,23 @@ rpc_port=6667
####################
# Is insert ahead log enable
+# Datatype: boolean
# enable_wal=true
# Add a switch to drop out-of-order data
# Out-of-order data will impact the aggregation query a lot. Users may not
care about discarding some out-of-order data.
+# Datatype: boolean
# enable_discard_out_of_order_data=false
# When a certain amount of insert ahead log is reached, it will be flushed to
disk
# It is possible to lose at most flush_wal_threshold operations
+# Datatype: int
# flush_wal_threshold=10000
# The cycle when insert ahead log is periodically forced to be written to
disk(in milliseconds)
# If force_wal_period_in_ms = 0 it means force insert ahead log to be written
to disk after each refreshment
# Set this parameter to 0 may slow down the ingestion on slow disk.
+# Datatype: long
# force_wal_period_in_ms=100
####################
@@ -125,40 +139,52 @@ rpc_port=6667
# TSFile storage file system. Currently, Tsfile are supported to be stored in
LOCAL file system or HDFS.
+# Datatype: FSType
# tsfile_storage_fs=LOCAL
# If using HDFS, the absolute file path of Hadoop core-site.xml should be
configured
+# Datatype: String
# core_site_path=/etc/hadoop/conf/core-site.xml
# If using HDFS, the absolute file path of Hadoop hdfs-site.xml should be
configured
+# Datatype: String
# hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml
# If using HDFS, hadoop ip can be configured. If there are more than one
hdfs_ip, Hadoop HA is used
+# Datatype: String
# hdfs_ip=localhost
# If using HDFS, hadoop port can be configured
+# Datatype: String
# hdfs_port=9000
# If there are more than one hdfs_ip, Hadoop HA is used. Below are
configuration for HA
# If using Hadoop HA, nameservices of hdfs can be configured
+# Datatype: String
# dfs_nameservices=hdfsnamespace
# If using Hadoop HA, namenodes under dfs nameservices can be configured
+# Datatype: String
# dfs_ha_namenodes=nn1,nn2
# If using Hadoop HA, automatic failover can be enabled or disabled
+# Datatype: boolean
# dfs_ha_automatic_failover_enabled=true
# If using Hadoop HA and enabling automatic failover, the proxy provider can
be configured
+# Datatype: String
#
dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
# If using kerberos to authenticate hdfs, this should be true
+# Datatype: boolean
# hdfs_use_kerberos=false
# Full path of kerberos keytab file
+# Datatype: String
# kerberos_keytab_file_path=/path
# Kerberos principal
+# Datatype: String
# kerberos_principal=your principal
@@ -168,6 +194,7 @@ rpc_port=6667
# Use this value to set timestamp precision as "ms", "us" or "ns".
# Once the precision has been set, it can not be changed.
+# Datatype: String
timestamp_precision=ms
# Default TTL for storage groups that are not set TTL by statements, in ms. If
not set (default),
@@ -175,69 +202,86 @@ timestamp_precision=ms
# Notice: if this property is changed, previous created storage group which
are not set TTL will
# also be affected. And negative values are accepted, which means you can only
insert future
# data.
-# default_ttl=36000000
+# Datatype: long
+# default_ttl=0
# The size of the log buffer in each log node (in bytes). Due to the double
buffer mechanism,
# if WAL is enabled and the size of the inserted plan is greater than one-half
of this parameter,
# then the insert plan will be rejected by WAL.
# If it sets a value smaller than 0, use the default value 16777216
+# Datatype: int
# wal_buffer_size=16777216
# When a TsFile's file size (in byte) exceeds this, the TsFile is forced
closed.
# It may cause memTable size smaller if it is a large value
+# Datatype: long
# tsfile_size_threshold=1
# Size of log buffer in each metadata operation plan(in byte).
# If the size of a metadata operation plan is larger than this parameter, then
it will be rejected by MManager
# If it sets a value smaller than 0, use the default value 1024*1024
+# Datatype: int
# mlog_buffer_size=1048576
# When a memTable's size (in byte) exceeds this, the memtable is flushed to
disk. The default threshold is 1 GB.
+# Datatype: long
# memtable_size_threshold=1073741824
# When the average point number of timeseries in memtable exceeds this, the
memtable is flushed to disk. The default threshold is 10000.
+# Datatype: int
# avg_series_point_number_threshold=10000
# How many threads can concurrently flush. When <= 0, use CPU core number.
+# Datatype: int
# concurrent_flush_thread=0
# How many threads can concurrently query. When <= 0, use CPU core number.
+# Datatype: int
# concurrent_query_thread=0
# whether take over the memory management by IoTDB rather than JVM when
serializing memtable as bytes in memory
# (i.e., whether use ChunkBufferPool), value true, false
+# Datatype: boolean
# chunk_buffer_pool_enable=false
# The amount of data iterate each time in server (the number of data strips,
that is, the number of different timestamps.)
+# Datatype: int
# batch_size=100000
# max size for tag and attribute of one time series
# the unit is byte
+# Datatype: int
# tag_attribute_total_size=700
# In one insert (one device, one timestamp, multiple measurements),
# if enable partial insert, one measurement failure will not impact other
measurements
+# Datatype: boolean
# enable_partial_insert=true
# Whether to enable MTree snapshot. Default false from 0.11.0 on.
+# Datatype: boolean
# enable_mtree_snapshot=false
# The least interval line numbers of mlog.txt when creating a checkpoint and
saving snapshot of MTree.
# Only take effect when enable_mtree_snapshot=true. Unit: line numbers
+# Datatype: int
# mtree_snapshot_interval=100000
# Threshold interval time of MTree modification. Unit: second. Default: 1
hour(3600 seconds)
# If the last modification time is less than this threshold, MTree snapshot
will not be created
# Only take effect when enable_mtree_snapshot=true.
+# Datatype: int
# mtree_snapshot_threshold_time=3600
# number of virtual storage groups per user-defined storage group
# a virtual storage group is the unit of parallelism in memory as all
ingestions in one virtual storage group are serialized
# recommended value is [virtual storage group number] = [CPU core number] /
[user-defined storage group number]
+# Datatype: int
# virtual_storage_group_num = 1
# Level of TimeIndex, which records the start time and end time of
TsFileResource. Currently,
# DEVICE_TIME_INDEX and FILE_TIME_INDEX are supported, and could not be
changed after first set.
+# Datatype: TimeIndexLevel
# time_index_level=DEVICE_TIME_INDEX
####################
@@ -245,6 +289,7 @@ timestamp_precision=ms
####################
# Whether to enable memory control
+# Datatype: boolean
# enable_mem_control=true
# Memory Allocation Ratio: Write, Read, Schema and Free Memory.
@@ -253,39 +298,49 @@ timestamp_precision=ms
# write_read_schema_free_memory_proportion=4:3:1:2
# primitive array size (length of each array) in array pool
+# Datatype: int
# primitive_array_size=128
# Ratio of write memory for invoking flush disk, 0.4 by default
# If you have extremely high write load (like batch=1000), it can be set lower
than the default value like 0.2
+# Datatype: double
# flush_proportion=0.4
# Ratio of write memory allocated for buffered arrays, 0.6 by default
+# Datatype: double
# buffered_arrays_memory_proportion=0.6
# Ratio of write memory for rejecting insertion, 0.8 by default
# If you have extremely high write load (like batch=1000) and the physical
memory size is large enough,
# it can be set higher than the default value like 0.9
+# Datatype: double
# reject_proportion=0.8
# If memory (in byte) of storage group increased more than this threshold,
report to system. The default value is 16MB
+# Datatype: long
# storage_group_report_threshold=16777216
# allowed max numbers of deduplicated path in one query
# it's just an advised value, the real limitation will be the smaller one
between this and the one we calculated
+# Datatype: int
# max_deduplicated_path_num=1000
# When an inserting is rejected, waiting period (in ms) to check system again,
50 by default.
# If the insertion has been rejected and the read load is low, it can be set
larger.
+# Datatype: int
# check_period_when_insert_blocked=50
# When the waiting time (in ms) of an inserting exceeds this, throw an
exception. 10000 by default.
# If the insertion has been rejected and the read load is low, it can be set
larger
+# Datatype: int
# max_waiting_time_when_insert_blocked=10000
# estimated metadata size (in byte) of one timeseries in Mtree
+# Datatype: int
# estimated_series_size=300
# size of ioTaskQueue. The default value is 10
+# Datatype: int
# io_task_queue_size_for_flushing=10
####################
@@ -294,6 +349,7 @@ timestamp_precision=ms
# When there exists old version(0.9.x/v1) data, how many thread will be set up
to perform upgrade tasks, 1 by default.
# Set to 1 when less than or equal to 0.
+# Datatype: int
# upgrade_thread_num=1
@@ -302,36 +358,43 @@ timestamp_precision=ms
####################
# the default time period that used in fill query, -1 by default means
infinite past time, in ms
+# Datatype: int
# default_fill_interval=-1
####################
### Merge Configurations
####################
# LEVEL_COMPACTION, NO_COMPACTION
+# Datatype: CompactionStrategy
# compaction_strategy=LEVEL_COMPACTION
# Works when the compaction_strategy is LEVEL_COMPACTION.
# Whether to merge unseq files into seq files or not.
+# Datatype: boolean
# enable_unseq_compaction=true
# Works when the compaction_strategy is LEVEL_COMPACTION.
# The max seq file num of each level.
# When the num of files in one level exceeds this,
# the files in this level will merge to one and put to upper level.
+# Datatype: int
# seq_file_num_in_each_level=6
# Works when the compaction_strategy is LEVEL_COMPACTION.
# The max num of seq level.
+# Datatype: int
# seq_level_num=3
# Works when compaction_strategy is LEVEL_COMPACTION.
# The max unseq file num of each level.
# When the num of files in one level exceeds this,
# the files in this level will merge to one and put to upper level.
+# Datatype: int
# unseq_file_num_in_each_level=10
# Works when the compaction_strategy is LEVEL_COMPACTION.
# The max num of unseq level.
+# Datatype: int
# unseq_level_num=1
# Works when the compaction_strategy is LEVEL_COMPACTION.
@@ -340,46 +403,56 @@ timestamp_precision=ms
# merged with its succeeding chunks even if it is not overflowed, until the
merged chunks reach
# this threshold and the new chunk will be flushed.
# When less than 0, this mechanism is disabled.
+# Datatype: int
# merge_chunk_point_number=100000
# Works when the compaction_strategy is LEVEL_COMPACTION.
# When point number of a page reaches this, use "append merge" instead of
"deserialize merge".
+# Datatype: int
# merge_page_point_number=100
# How many threads will be set up to perform unseq merge chunk sub-tasks, 4 by
default.
# Set to 1 when less than or equal to 0.
+# Datatype: int
# merge_chunk_subthread_num=4
# If one merge file selection runs for more than this time, it will be ended
and its current
# selection will be used as final selection. Unit: millis.
# When < 0, it means time is unbounded.
+# Datatype: long
# merge_fileSelection_time_budget=30000
# How much memory may be used in ONE merge task (in byte), 10% of maximum JVM
memory by default.
# This is only a rough estimation, starting from a relatively small value to
avoid OOM.
# Each new merge thread may take such memory, so merge_thread_num *
merge_memory_budget is the
# total memory estimation of merge.
-# merge_memory_budget=2147483648
+# Datatype: long
+# merge_memory_budget=268435456
# When set to true, if some crashed merges are detected during system
rebooting, such merges will
# be continued, otherwise, the unfinished parts of such merges will not be
continued while the
# finished parts still remains as they are.
# If you are feeling the rebooting is too slow, set this to false, false by
default
+# Datatype: boolean
# continue_merge_after_reboot=false
# When set to true, all unseq merges becomes full merge (the whole SeqFiles
are re-written despite how
# much they are overflowed). This may increase merge overhead depending on how
much the SeqFiles
# are overflowed.
+# Datatype: boolean
# force_full_merge=false
# How many threads will be set up to perform compaction, 10 by default.
# Set to 1 when less than or equal to 0.
+# Datatype: int
# compaction_thread_num=10
# The limit of write throughput merge can reach per second
+# Datatype: int
# merge_write_throughput_mb_per_sec=8
# The max executing time of query. unit: ms
+# Datatype: int
# query_timeout_threshold=60000
####################
@@ -387,6 +460,7 @@ timestamp_precision=ms
####################
# whether to cache meta data(ChunkMetadata and TimeSeriesMetadata) or not.
+# Datatype: boolean
# meta_data_cache_enable=true
# Read memory Allocation Ratio: ChunkCache, TimeSeriesMetadataCache, memory
used for constructing QueryDataSet and Free Memory Used in Query.
# The parameter form is a:b:c:d, where a, b, c and d are integers. for
example: 1:1:1:1 , 1:2:3:4
@@ -394,6 +468,7 @@ timestamp_precision=ms
# cache size for MManager.
# This cache is used to improve insert speed where all path check and
TSDataType will be cached in MManager with corresponding Path.
+# Datatype: int
# metadata_node_cache_size=300000
####################
@@ -401,6 +476,7 @@ timestamp_precision=ms
####################
# Whether to enable LAST cache
+# Datatype: boolean
# enable_last_cache=true
####################
@@ -408,31 +484,37 @@ timestamp_precision=ms
####################
# Set enable_stat_monitor true(or false) to enable(or disable) the StatMonitor
that stores statistics info.
+# Datatype: boolean
# enable_stat_monitor=false
# Set enable_monitor_series_write true (or false) to enable (or disable) the
writing monitor time series
+# Datatype: boolean
# enable_monitor_series_write=false
####################
### WAL Direct Buffer Pool Configuration
####################
# the interval to trim the wal pool
+# Datatype: long
# wal_pool_trim_interval_ms=10000
# the max number of wal bytebuffer can be allocated for each time partition,
if there is no unseq data you can set it to 4.
# it should be an even number
+# Datatype: int
# max_wal_bytebuffer_num_for_each_partition=6
####################
### External sort Configuration
####################
# Is external sort enable
+# Datatype: boolean
# enable_external_sort=true
# The maximum number of simultaneous chunk reading for a single time series.
# If the num of simultaneous chunk reading is greater than
external_sort_threshold, external sorting is used.
# When external_sort_threshold increases, the number of chunks sorted at the
same time in memory may increase and this will occupy more memory.
# When external_sort_threshold decreases, triggering external sorting will
increase the time-consuming.
+# Datatype: int
# external_sort_threshold=1000
@@ -441,15 +523,18 @@ timestamp_precision=ms
####################
# Whether to open the sync_server_port for receiving data from sync client,
the default is closed
+# Datatype: boolean
# is_sync_enable=false
# Sync server port to listen
+# Datatype: int
# sync_server_port=5555
# White IP list of Sync client.
# Please use the form of network segment to present the range of IP, for
example: 192.168.0.0/16
# If there are more than one IP segment, please separate them by commas
# The default is to allow all IP to sync
+# Datatype: String
# ip_white_list=0.0.0.0/0
####################
@@ -457,12 +542,16 @@ timestamp_precision=ms
####################
# Is stat performance of sub-module enable
+# Datatype: boolean
# enable_performance_stat=false
# The interval of display statistic result in ms.
+# Datatype: long
# performance_stat_display_interval=60000
# The memory used for performance_stat in kb.
+# Datatype: int
# performance_stat_memory_in_kb=20
# Is performance tracing enable
+# Datatype: boolean
# enable_performance_tracing=false
# Uncomment following fields to configure the tracing root directory.
@@ -475,9 +564,13 @@ timestamp_precision=ms
####################
### Configurations for watermark module
####################
+# Datatype: boolean
# watermark_module_opened=false
+# Datatype: String
# watermark_secret_key=IoTDB*2019@Beijing
+# Datatype: String
# watermark_bit_string=100101110100
+# Datatype: String
# watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
@@ -486,60 +579,76 @@ timestamp_precision=ms
####################
# Whether creating schema automatically is enabled
+# Datatype: boolean
# enable_auto_create_schema=true
# Storage group level when creating schema automatically is enabled
# e.g. root.sg0.d1.s2
# we will set root.sg0 as the storage group if storage group level is 1
+# Datatype: int
# default_storage_group_level=1
# ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT
# register time series as which type when receiving boolean string "true" or
"false"
+# Datatype: TSDataType
# boolean_string_infer_type=BOOLEAN
# register time series as which type when receiving an integer string "67"
+# Datatype: TSDataType
# integer_string_infer_type=FLOAT
# register time series as which type when receiving an integer string and
using float may lose precision
# num > 2 ^ 24
+# Datatype: TSDataType
# long_string_infer_type=DOUBLE
# register time series as which type when receiving a floating number string
"6.7"
+# Datatype: TSDataType
# floating_string_infer_type=FLOAT
# register time series as which type when receiving the Literal NaN. Values
can be DOUBLE, FLOAT or TEXT
+# Datatype: TSDataType
# nan_string_infer_type=DOUBLE
# BOOLEAN encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_boolean_encoding=RLE
# INT32 encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_int32_encoding=RLE
# INT64 encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_int64_encoding=RLE
# FLOAT encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_float_encoding=GORILLA
# DOUBLE encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_double_encoding=GORILLA
# TEXT encoding when creating schema automatically is enabled
+# Datatype: TSEncoding
# default_text_encoding=PLAIN
####################
### Configurations for tsfile-format
####################
+# Datatype: int [xsy]
# group_size_in_byte=134217728
# The memory size for each series writer to pack page, default value is 64KB
+# Datatype: int [xsy]
# page_size_in_byte=65536
# The maximum number of data points in a page, default 1024*1024
+# Datatype: int [xsy]
# max_number_of_points_in_page=1048576
# Data type configuration
@@ -547,9 +656,11 @@ timestamp_precision=ms
# time_series_data_type=INT64
# Max size limitation of input string
+# Datatype: int [xsy]
# max_string_length=128
# Floating-point precision
+# Datatype: int [xsy]
# float_precision=2
# Encoder configuration
@@ -567,12 +678,15 @@ timestamp_precision=ms
# compressor=SNAPPY
# Maximum degree of a metadataIndex node, default value is 256
+# Datatype: int [xsy]
# max_degree_of_index_node=256
# time interval in minute for calculating query frequency
+# Datatype: int
# frequency_interval_in_minute=1
# time cost(ms) threshold for slow query
+# Datatype: long
# slow_query_threshold=5000
####################
@@ -580,21 +694,27 @@ timestamp_precision=ms
####################
# whether to enable the mqtt service.
+# Datatype: boolean
# enable_mqtt_service=false
# the mqtt service binding host.
+# Datatype: String
# mqtt_host=0.0.0.0
# the mqtt service binding port.
+# Datatype: int
# mqtt_port=1883
# the handler pool size for handling the mqtt messages.
+# Datatype: int
# mqtt_handler_pool_size=1
# the mqtt message payload formatter.
+# Datatype: String
# mqtt_payload_formatter=json
# max length of mqtt message in byte
+# Datatype: int
# mqtt_max_message_size=1048576
####################
@@ -617,10 +737,12 @@ timestamp_precision=ms
# Used to estimate the memory usage of text fields in a UDF query.
# It is recommended to set this value to be slightly larger than the average
length of all text
# records.
+# Datatype: int
# udf_initial_byte_array_length_for_memory_control=48
# How much memory may be used in ONE UDF query (in MB).
# The upper limit is 20% of allocated memory for read.
+# Datatype: float
# udf_memory_budget_in_mb=30.0
# UDF memory allocation ratio.
@@ -643,6 +765,7 @@ timestamp_precision=ms
# The size of log buffer for every trigger management operation plan. If the
size of a trigger
# management operation plan is larger than this parameter, the trigger
management operation plan
# will be rejected by TriggerManager.
+# Datatype: int
# tlog_buffer_size=1048576
# Uncomment the following field to configure the trigger root directory.
@@ -655,11 +778,13 @@ timestamp_precision=ms
# trigger_root_dir=ext/trigger
# How many threads can be used for evaluating sliding windows. When <= 0, use
CPU core number.
+# Datatype: int
# concurrent_window_evaluation_thread=0
# Max number of window evaluation tasks that can be pending for execution.
When <= 0, the value is
# 64 by default.
-# max_pending_window_evaluation_tasks = 64
+# Datatype: int
+# max_pending_window_evaluation_tasks=64
####################
### Index Configuration
@@ -673,19 +798,25 @@ timestamp_precision=ms
# index_root_dir=data/index
# Is index enable
+# Datatype: boolean
# enable_index=false
# How many threads can concurrently build index. When <= 0, use CPU core
number.
+# Datatype: int
# concurrent_index_build_thread=0
# the default size of sliding window used for the subsequence matching in
index framework
+# Datatype: int
# default_index_window_range=10
# buffer parameter for index processor.
+# Datatype: long
# index_buffer_size=134217728
# whether enable data partition. If disabled, all data belongs to partition 0
+# Datatype: boolean
# enable_partition=false
# time range for partitioning data inside each storage group, the unit is
second
+# Datatype: long
# partition_interval=604800
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 92d1cf3..ee83180 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -90,7 +90,7 @@ public class IoTDBConfig {
/** the mqtt message payload formatter. */
private String mqttPayloadFormatter = "json";
- /** max mqtt message size */
+ /** max mqtt message size. Unit: byte */
private int mqttMaxMessageSize = 1048576;
/** Rpc binding address. */
@@ -131,13 +131,13 @@ public class IoTDBConfig {
/** Reject proportion for system */
private double rejectProportion = 0.8;
- /** If storage group increased more than this threshold, report to system. */
+ /** If storage group increased more than this threshold, report to system.
Unit: byte */
private long storageGroupSizeReportThreshold = 16 * 1024 * 1024L;
- /** When inserting rejected, waiting period to check system again */
+ /** When inserting rejected, waiting period to check system again. Unit:
millisecond */
private int checkPeriodWhenInsertBlocked = 50;
- /** When inserting rejected exceeds this, throw an exception */
+ /** When inserting rejected exceeds this, throw an exception. Unit:
millisecond */
private int maxWaitingTimeWhenInsertBlockedInMs = 10000;
/** Is the write ahead log enable. */
private boolean enableWal = true;
@@ -157,33 +157,37 @@ public class IoTDBConfig {
/**
* The cycle when write ahead log is periodically forced to be written to
disk(in milliseconds) If
- * set this parameter to 0 it means call channel.force(true) after every
each insert
+ * set this parameter to 0 it means call channel.force(true) after every
each insert. Unit:
+ * millisecond
*/
private long forceWalPeriodInMs = 100;
/**
* The size of the log buffer in each log node (in bytes). Due to the double
buffer mechanism, if
* WAL is enabled and the size of the inserted plan is greater than one-half
of this parameter,
- * then the insert plan will be rejected by WAL.
+ * then the insert plan will be rejected by WAL. Unit: byte
*/
private int walBufferSize = 16 * 1024 * 1024;
private int maxWalBytebufferNumForEachPartition = 6;
+ /** Unit: millisecond */
private long walPoolTrimIntervalInMS = 10_000;
+ /** Unit: byte */
private int estimatedSeriesSize = 300;
/**
* Size of log buffer for every MetaData operation. If the size of a
MetaData operation plan is
* larger than this parameter, then the MetaData operation plan will be
rejected by MManager.
+ * Unit: byte
*/
private int mlogBufferSize = 1024 * 1024;
/**
* The size of log buffer for every trigger management operation plan. If
the size of a trigger
* management operation plan is larger than this parameter, the trigger
management operation plan
- * will be rejected by TriggerManager.
+ * will be rejected by TriggerManager. Unit: byte
*/
private int tlogBufferSize = 1024 * 1024;
@@ -272,7 +276,7 @@ public class IoTDBConfig {
* common buffer size. With the memory-control mechanism, the occupied
memory of all raw data and
* index structures will be counted. If the memory buffer size reaches this
threshold, the indexes
* will be flushed to the disk file. As a result, data in one series may be
divided into more than
- * one part and indexed separately.
+ * one part and indexed separately. Unit: byte
*/
private long indexBufferSize = 128 * 1024 * 1024L;
@@ -285,14 +289,14 @@ public class IoTDBConfig {
/** index directory. */
private String indexRootFolder = "data" + File.separator + "index";
- /** When a TsFile's file size (in byte) exceed this, the TsFile is forced
closed. */
+ /** When a TsFile's file size (in byte) exceed this, the TsFile is forced
closed. Unit: byte */
private long tsFileSizeThreshold = 1L;
- /** When a memTable's size (in byte) exceeds this, the memtable is flushed
to disk. */
+ /** When a memTable's size (in byte) exceeds this, the memtable is flushed
to disk. Unit: byte */
private long memtableSizeThreshold = 1024 * 1024 * 1024L;
/** When average series point number reaches this, flush the memtable to
disk */
- private int avgSeriesPointNumberThreshold = 100000;
+ private int avgSeriesPointNumberThreshold = 10000;
/**
* Work when tsfile_manage_strategy is level_strategy. When merge point
number reaches this, merge
@@ -372,18 +376,21 @@ public class IoTDBConfig {
/** Is this IoTDB instance a receiver of sync or not. */
private boolean isSyncEnable = false;
+
/** If this IoTDB instance is a receiver of sync, set the server port. */
private int syncServerPort = 5555;
+
/**
* Set the language version when loading file including error information,
default value is "EN"
*/
private String languageVersion = "EN";
private String ipWhiteList = "0.0.0.0/0";
- /** Examining period of cache file reader : 100 seconds. */
+
+ /** Examining period of cache file reader : 100 seconds. Unit: millisecond */
private long cacheFileReaderClearPeriod = 100000;
- /** the max executing time of query in ms. */
+ /** the max executing time of query in ms. Unit: millisecond */
private int queryTimeoutThreshold = 60000;
/** Replace implementation class of JDBC service */
@@ -395,11 +402,12 @@ public class IoTDBConfig {
/** Is performance tracing enable. */
private boolean enablePerformanceTracing = false;
- /** The display of stat performance interval in ms. */
+ /** The display of stat performance interval in ms. Unit: millisecond */
private long performanceStatDisplayInterval = 60000;
- /** The memory used for stat performance. */
+ /** The memory used for stat performance. Unit: kilobyte */
private int performanceStatMemoryInKB = 20;
+
/** whether use chunkBufferPool. */
private boolean chunkBufferPoolEnable = false;
@@ -474,7 +482,8 @@ public class IoTDBConfig {
/**
* If one merge file selection runs for more than this time, it will be
ended and its current
- * selection will be used as final selection. Unit: millis. When < 0, it
means time is unbounded.
+ * selection will be used as final selection. When < 0, it means time is
unbounded. Unit:
+ * millisecond
*/
private long mergeFileSelectionTimeBudget = 30 * 1000L;
@@ -559,10 +568,10 @@ public class IoTDBConfig {
private int defaultFillInterval = -1;
/**
- * default TTL for storage groups that are not set TTL by statements, in ms
+ * default TTL for storage groups that are not set TTL by statements, in ms.
*
* <p>Notice: if this property is changed, previous created storage group
which are not set TTL
- * will also be affected.
+ * will also be affected. Unit: millisecond
*/
private long defaultTTL = Long.MAX_VALUE;
@@ -580,11 +589,11 @@ public class IoTDBConfig {
/**
* Threshold interval time of MTree modification. If the last modification
time is less than this
- * threshold, MTree snapshot will not be created. Unit: second. Default: 1
hour(3600 seconds)
+ * threshold, MTree snapshot will not be created. Default: 1 hour(3600
seconds) Unit: second
*/
private int mtreeSnapshotThresholdTime = 3600;
- /** Time range for partitioning data inside each storage group, the unit is
second */
+ /** Time range for partitioning data inside each storage group. Unit: second
*/
private long partitionInterval = 604800;
/**
@@ -637,14 +646,15 @@ public class IoTDBConfig {
// time in nanosecond precision when starting up
private long startUpNanosecond = System.nanoTime();
- private int thriftMaxFrameSize = RpcUtils.THRIFT_FRAME_MAX_SIZE;
+ /** Unit: byte */
+ private int thriftMaxFrameSize = 67108864;
private int thriftDefaultBufferSize = RpcUtils.THRIFT_DEFAULT_BUF_CAPACITY;
- /** time interval in minute for calculating query frequency */
+ /** time interval in minute for calculating query frequency. Unit: minute */
private int frequencyIntervalInMinute = 1;
- /** time cost(ms) threshold for slow query */
+ /** time cost(ms) threshold for slow query. Unit: millisecond */
private long slowQueryThreshold = 5000;
/**