This is an automated email from the ASF dual-hosted git repository.

chaow pushed a commit to branch uncomment_for_0.12
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 6974e81cec282e9382775fd0206a48d0e851921c
Author: chaow <[email protected]>
AuthorDate: Mon Apr 19 20:39:43 2021 +0800

    uncomment the less used configuration (#2993)
---
 .../resources/conf/iotdb-cluster.properties        |  58 ++---
 .../apache/iotdb/cluster/config/ClusterConfig.java |   2 +-
 .../cluster/client/async/AsyncClientPoolTest.java  |  37 ++-
 .../cluster/client/async/AsyncDataClientTest.java  |  18 ++
 .../client/async/AsyncDataHeartbeatClientTest.java |  18 ++
 .../cluster/client/async/AsyncMetaClientTest.java  |  18 ++
 .../client/async/AsyncMetaHeartbeatClientTest.java |  18 ++
 .../iotdb/cluster/log/LogDispatcherTest.java       |  10 +-
 .../cluster/log/snapshot/DataSnapshotTest.java     |   8 +
 .../resources/conf/iotdb-engine.properties         | 262 ++++++++++-----------
 10 files changed, 274 insertions(+), 175 deletions(-)

diff --git a/cluster/src/assembly/resources/conf/iotdb-cluster.properties 
b/cluster/src/assembly/resources/conf/iotdb-cluster.properties
index 73e7b42..079b124 100644
--- a/cluster/src/assembly/resources/conf/iotdb-cluster.properties
+++ b/cluster/src/assembly/resources/conf/iotdb-cluster.properties
@@ -39,7 +39,7 @@ internal_data_port=40010
 
 # whether open port for server module (for debug purpose)
 # if true, the rpc_port of the single server will be changed to rpc_port (in 
iotdb-engine.properties) + 1
-open_server_rpc_port=false
+# open_server_rpc_port=false
 
 # comma-separated {IP/DOMAIN}:internal_meta_port pairs, when used by 
start-node.sh(.bat),
 # this configuration means the nodes that will form the initial cluster,
@@ -57,60 +57,60 @@ seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007
 # compression settings for external clients, please modify 
'rpc_thrift_compression_enable' in
 # 'iotdb-engine.properties'.
 # WARNING: this must be consistent across all nodes in the cluster
-rpc_thrift_compression_enable=false
+# rpc_thrift_compression_enable=false
 
 # max client connections created by thrift
 # this configuration applies separately to data/meta/client connections and 
thus does not control
 # the number of global connections
-max_concurrent_client_num=10000
+# max_concurrent_client_num=10000
 
 # number of replications for one partition
 default_replica_num=3
 
 # cluster name to identify different clusters
 # all node's cluster_name in one cluster are the same
-cluster_name=default
+# cluster_name=default
 
 # connection time out (ms) among raft nodes
-connection_timeout_ms=20000
+# connection_timeout_ms=20000
 
 # write operation timeout threshold (ms), this is only for internal 
communications,
 # not for the whole operation.
-write_operation_timeout_ms=30000
+# write_operation_timeout_ms=30000
 
 # read operation timeout threshold (ms), this is only for internal 
communications,
 # not for the whole operation.
-read_operation_timeout_ms=30000
+# read_operation_timeout_ms=30000
 
 # catch up timeout threshold (ms), this is used for a follower behind the 
leader too much,
 # so the leader will send logs(snapshot) to the follower,
 # NOTICE, it may cost minutes of time to send a snapshot,
 # so this parameter should be larger than the snapshot cost time.
-catch_up_timeout_ms=300000
+# catch_up_timeout_ms=300000
 
 # whether to use batch append entries in log catch up
-use_batch_in_catch_up=true
+# use_batch_in_catch_up=true
 
 # the minimum number of committed logs in memory, after each log deletion, at 
most such number of logs
 # will remain in memory. Increasing the number will reduce the chance to use 
snapshot in catch-ups,
 # but will also increase the memory footprint
-min_num_of_logs_in_mem=1000
+# min_num_of_logs_in_mem=1000
 
 # maximum number of committed logs in memory, when reached, a log deletion 
will be triggered.
 # Increasing the number will reduce the chance to use snapshot in catch-ups, 
but will also increase
 # memory footprint
-max_num_of_logs_in_mem=2000
+# max_num_of_logs_in_mem=2000
 
 # maximum memory size of committed logs in memory, when reached, a log 
deletion will be triggered.
 # Increasing the number will reduce the chance to use snapshot in catch-ups, 
but will also increase
 # memory footprint, default is 512MB
-max_memory_size_for_raft_log=536870912
+# max_memory_size_for_raft_log=536870912
 
 # deletion check period of the submitted log
-log_deletion_check_interval_second=-1
+# log_deletion_check_interval_second=-1
 
 # Whether creating schema automatically is enabled, this will replace the one 
in iotdb-engine.properties
-enable_auto_create_schema=true
+# enable_auto_create_schema=true
 
 # consistency level, now three consistency levels are supported: strong, mid, 
and weak.
 # Strong consistency means the server will first try to synchronize with the 
leader to get the
@@ -118,56 +118,56 @@ enable_auto_create_schema=true
 # While mid consistency means the server will first try to synchronize with 
the leader,
 # but if failed(timeout), it will give up and just use current data it has 
cached before;
 # Weak consistency does not synchronize with the leader and simply use the 
local data
-consistency_level=mid
+# consistency_level=mid
 
 # Whether to use asynchronous server
-is_use_async_server=false
+# is_use_async_server=false
 
 # Whether to use asynchronous applier
-is_use_async_applier=true
+# is_use_async_applier=true
 
 # is raft log persistence enabled
-is_enable_raft_log_persistence=true
+# is_enable_raft_log_persistence=true
 
 # When a certain amount of raft log is reached, it will be flushed to disk
 # It is possible to lose at most flush_raft_log_threshold operations
-flush_raft_log_threshold=10000
+# flush_raft_log_threshold=10000
 
 # Size of log buffer in each RaftMember's LogManager(in byte).
-raft_log_buffer_size=16777216
+# raft_log_buffer_size=16777216
 
 # The maximum value of the raft log index stored in the memory per raft group,
 # These indexes are used to index the location of the log on the disk
-max_raft_log_index_size_in_memory=10000
+# max_raft_log_index_size_in_memory=10000
 
 # The maximum size of the raft log saved on disk for each file (in bytes) of 
each raft group.
 # The default size is 1GB
-max_raft_log_persist_data_size_per_file=1073741824
+# max_raft_log_persist_data_size_per_file=1073741824
 
 # The maximum number of persistent raft log files on disk per raft group,
 # So each raft group's log takes up disk space approximately equals
 # max_raft_log_persist_data_size_per_file*max_number_of_persist_raft_log_files
-max_number_of_persist_raft_log_files=5
+# max_number_of_persist_raft_log_files=5
 
 # The maximum number of logs saved on the disk
-max_persist_raft_log_number_on_disk=1000000
+# max_persist_raft_log_number_on_disk=1000000
 
 # whether enable use persist log on disk to catch up when no logs found in 
memory, if set false,
 # will use snapshot to catch up when no logs found in memory.
-enable_use_persist_log_on_disk_to_catch_up=true
+# enable_use_persist_log_on_disk_to_catch_up=true
 
 # The number of logs read on the disk at one time, which is mainly used to 
control the memory usage.
 # This value multiplied by the log size is about the amount of memory used to 
read logs from the disk at one time.
-max_number_of_logs_per_fetch_on_disk=1000
+# max_number_of_logs_per_fetch_on_disk=1000
 
 # When consistency level is set to mid, query will fail if the log lag exceeds 
max_read_log_lag
 # This default value is 1000
-max_read_log_lag=1000
+# max_read_log_lag=1000
 
 # Max number of clients in a ClientPool of a member for one node.
-max_client_pernode_permember_number=1000
+# max_client_pernode_permember_number=1000
 
 # If the number of connections created for a node exceeds  
`max_client_pernode_permember_number`,
 # we need to wait so much time for other connections to be released until 
timeout,
 # or a new connection will be created.
-wait_client_timeout_ms=5000
\ No newline at end of file
+# wait_client_timeout_ms=5000
\ No newline at end of file
diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
index 11cedc8..241dc82 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java
@@ -45,7 +45,7 @@ public class ClusterConfig {
 
   @ClusterConsistent private String clusterName = "default";
 
-  @ClusterConsistent private boolean useAsyncServer = true;
+  @ClusterConsistent private boolean useAsyncServer = false;
 
   private boolean useAsyncApplier = true;
 
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncClientPoolTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncClientPoolTest.java
index 7a4e57a..6913f50 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncClientPoolTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncClientPoolTest.java
@@ -8,12 +8,15 @@ import 
org.apache.iotdb.cluster.client.async.AsyncDataClient.FactoryAsync;
 import org.apache.iotdb.cluster.common.TestAsyncClient;
 import org.apache.iotdb.cluster.common.TestAsyncClientFactory;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
 import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient;
 
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TBinaryProtocol.Factory;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mock;
 
@@ -30,6 +33,20 @@ import static org.junit.Assert.assertNull;
 
 public class AsyncClientPoolTest {
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
+  @Before
+  public void setUp() {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
+  }
+
+  @After
+  public void tearDown() {
+    config.setUseAsyncServer(isAsyncServer);
+  }
+
   @Mock private AsyncClientFactory testAsyncClientFactory;
 
   @Test
@@ -93,8 +110,8 @@ public class AsyncClientPoolTest {
 
   @Test
   public void testMaxClient() throws IOException {
-    int maxClientNum = 
ClusterDescriptor.getInstance().getConfig().getMaxClientPerNodePerMember();
-    
ClusterDescriptor.getInstance().getConfig().setMaxClientPerNodePerMember(5);
+    int maxClientNum = config.getMaxClientPerNodePerMember();
+    config.setMaxClientPerNodePerMember(5);
     testAsyncClientFactory = new TestAsyncClientFactory();
     AsyncClientPool asyncClientPool = new 
AsyncClientPool(testAsyncClientFactory);
 
@@ -114,15 +131,14 @@ public class AsyncClientPoolTest {
     t.start();
     t.interrupt();
     assertNull(reference.get());
-    
ClusterDescriptor.getInstance().getConfig().setMaxClientPerNodePerMember(maxClientNum);
+    config.setMaxClientPerNodePerMember(maxClientNum);
   }
 
   @Test
   public void testWaitClient() throws IOException {
-    int maxClientPerNodePerMember =
-        
ClusterDescriptor.getInstance().getConfig().getMaxClientPerNodePerMember();
+    int maxClientPerNodePerMember = config.getMaxClientPerNodePerMember();
     try {
-      
ClusterDescriptor.getInstance().getConfig().setMaxClientPerNodePerMember(10);
+      config.setMaxClientPerNodePerMember(10);
       testAsyncClientFactory = new TestAsyncClientFactory();
       AsyncClientPool asyncClientPool = new 
AsyncClientPool(testAsyncClientFactory);
 
@@ -154,18 +170,15 @@ public class AsyncClientPoolTest {
       }
       assertNotNull(client);
     } finally {
-      ClusterDescriptor.getInstance()
-          .getConfig()
-          .setMaxClientPerNodePerMember(maxClientPerNodePerMember);
+      config.setMaxClientPerNodePerMember(maxClientPerNodePerMember);
     }
   }
 
   @Test
   public void testWaitClientTimeOut() throws IOException {
-    int maxClientPerNodePerMember =
-        
ClusterDescriptor.getInstance().getConfig().getMaxClientPerNodePerMember();
+    int maxClientPerNodePerMember = config.getMaxClientPerNodePerMember();
     try {
-      
ClusterDescriptor.getInstance().getConfig().setMaxClientPerNodePerMember(1);
+      config.setMaxClientPerNodePerMember(1);
       testAsyncClientFactory = new TestAsyncClientFactory();
       AsyncClientPool asyncClientPool = new 
AsyncClientPool(testAsyncClientFactory);
 
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java
index 1694446..bc3e057 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java
@@ -6,6 +6,8 @@ package org.apache.iotdb.cluster.client.async;
 
 import 
org.apache.iotdb.cluster.client.async.AsyncDataClient.SingleManagerFactory;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.RaftServer;
 
@@ -14,6 +16,8 @@ import org.apache.thrift.async.AsyncMethodCallback;
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.protocol.TBinaryProtocol.Factory;
 import org.apache.thrift.transport.TNonblockingSocket;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -25,6 +29,20 @@ import static org.junit.Assert.assertTrue;
 
 public class AsyncDataClientTest {
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
+  @Before
+  public void setUp() {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
+  }
+
+  @After
+  public void tearDown() {
+    config.setUseAsyncServer(isAsyncServer);
+  }
+
   @Test
   public void test() throws IOException, TException {
     AsyncClientPool asyncClientPool = new AsyncClientPool(new 
SingleManagerFactory(new Factory()));
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClientTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClientTest.java
index 631e487..392c69e 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClientTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataHeartbeatClientTest.java
@@ -21,16 +21,34 @@ package org.apache.iotdb.cluster.client.async;
 
 import 
org.apache.iotdb.cluster.client.async.AsyncDataHeartbeatClient.FactoryAsync;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient;
 
 import junit.framework.TestCase;
 import org.apache.thrift.protocol.TBinaryProtocol.Factory;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
 
 public class AsyncDataHeartbeatClientTest extends TestCase {
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
+  @Before
+  public void setUp() {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
+  }
+
+  @After
+  public void tearDown() {
+    config.setUseAsyncServer(isAsyncServer);
+  }
+
   @Test
   public void test() throws IOException {
     FactoryAsync factoryAsync = new FactoryAsync(new Factory());
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java
index 7ceccb8..15d53de 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java
@@ -6,6 +6,8 @@ package org.apache.iotdb.cluster.client.async;
 
 import org.apache.iotdb.cluster.client.async.AsyncMetaClient.FactoryAsync;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.server.RaftServer;
 
@@ -14,6 +16,8 @@ import org.apache.thrift.async.AsyncMethodCallback;
 import org.apache.thrift.async.TAsyncClientManager;
 import org.apache.thrift.protocol.TBinaryProtocol.Factory;
 import org.apache.thrift.transport.TNonblockingSocket;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -25,6 +29,20 @@ import static org.junit.Assert.assertTrue;
 
 public class AsyncMetaClientTest {
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
+  @Before
+  public void setUp() {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
+  }
+
+  @After
+  public void tearDown() {
+    config.setUseAsyncServer(isAsyncServer);
+  }
+
   @Test
   public void test() throws IOException, TException {
     AsyncClientPool asyncClientPool = new AsyncClientPool(new FactoryAsync(new 
Factory()));
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClientTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClientTest.java
index 1f4aef4..345b9fe 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClientTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaHeartbeatClientTest.java
@@ -21,16 +21,34 @@ package org.apache.iotdb.cluster.client.async;
 
 import 
org.apache.iotdb.cluster.client.async.AsyncMetaHeartbeatClient.FactoryAsync;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient;
 
 import org.apache.thrift.protocol.TBinaryProtocol.Factory;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
 
 public class AsyncMetaHeartbeatClientTest {
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
+  @Before
+  public void setUp() {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
+  }
+
+  @After
+  public void tearDown() {
+    config.setUseAsyncServer(isAsyncServer);
+  }
+
   @Test
   public void test() throws IOException {
     FactoryAsync factoryAsync = new FactoryAsync(new Factory());
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java 
b/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java
index a0dc673..281e084 100644
--- a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java
+++ b/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java
@@ -108,7 +108,10 @@ public class LogDispatcherTest {
               @Override
               public long appendEntry(AppendEntryRequest request) throws 
TException {
                 try {
-                  return mockedAppendEntry(request);
+                  if (!downNode.contains(node)) {
+                    return mockedAppendEntry(request);
+                  }
+                  return -1;
                 } catch (UnknownLogTypeException e) {
                   throw new TException(e);
                 }
@@ -117,7 +120,10 @@ public class LogDispatcherTest {
               @Override
               public long appendEntries(AppendEntriesRequest request) throws 
TException {
                 try {
-                  return mockedAppendEntries(request);
+                  if (!downNode.contains(node)) {
+                    return mockedAppendEntries(request);
+                  }
+                  return -1;
                 } catch (UnknownLogTypeException e) {
                   throw new TException(e);
                 }
diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java
 
b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java
index 951b56d..504fd6a 100644
--- 
a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java
+++ 
b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java
@@ -25,6 +25,8 @@ import org.apache.iotdb.cluster.common.TestDataGroupMember;
 import org.apache.iotdb.cluster.common.TestLogManager;
 import org.apache.iotdb.cluster.common.TestMetaGroupMember;
 import org.apache.iotdb.cluster.common.TestUtils;
+import org.apache.iotdb.cluster.config.ClusterConfig;
+import org.apache.iotdb.cluster.config.ClusterDescriptor;
 import org.apache.iotdb.cluster.coordinator.Coordinator;
 import org.apache.iotdb.cluster.rpc.thrift.Node;
 import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient;
@@ -59,8 +61,13 @@ public abstract class DataSnapshotTest {
   int failureCnt;
   boolean addNetFailure = false;
 
+  private final ClusterConfig config = 
ClusterDescriptor.getInstance().getConfig();
+  private boolean isAsyncServer;
+
   @Before
   public void setUp() throws MetadataException, StartupException {
+    isAsyncServer = config.isUseAsyncServer();
+    config.setUseAsyncServer(true);
     dataGroupMember =
         new TestDataGroupMember() {
           @Override
@@ -171,6 +178,7 @@ public abstract class DataSnapshotTest {
 
   @After
   public void tearDown() throws Exception {
+    config.setUseAsyncServer(isAsyncServer);
     metaGroupMember.closeLogManager();
     dataGroupMember.closeLogManager();
     metaGroupMember.stop();
diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties 
b/server/src/assembly/resources/conf/iotdb-engine.properties
index 500ab73..521c0b9 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -21,11 +21,11 @@
 ### Web Page Configuration
 ####################
 
-enable_metric_service=false
+# enable_metric_service=false
 
-metrics_port=8181
+# metrics_port=8181
 
-query_cache_size_in_metric=50
+# query_cache_size_in_metric=50
 
 ####################
 ### RPC Configuration
@@ -35,38 +35,38 @@ rpc_address=0.0.0.0
 
 rpc_port=6667
 
-rpc_thrift_compression_enable=false
+# rpc_thrift_compression_enable=false
 
 # if true, a snappy based compression method will be called before sending 
data by the network
-rpc_advanced_compression_enable=false
+# rpc_advanced_compression_enable=false
 
-rpc_max_concurrent_client_num=65535
+# rpc_max_concurrent_client_num=65535
 
 # thrift max frame size, 64MB by default
-thrift_max_frame_size=67108864
+# thrift_max_frame_size=67108864
 
 # thrift init buffer size
-thrift_init_buffer_size=1024
+# thrift_init_buffer_size=1024
 
 ####################
 ### Write Ahead Log Configuration
 ####################
 
 # Is insert ahead log enable
-enable_wal=true
+# enable_wal=true
 
 # Add a switch to drop out-of-order data
 # Out-of-order data will impact the aggregation query a lot. Users may not 
care about discarding some out-of-order data.
-enable_discard_out_of_order_data=false
+# enable_discard_out_of_order_data=false
 
 # When a certain amount of insert ahead log is reached, it will be flushed to 
disk
 # It is possible to lose at most flush_wal_threshold operations
-flush_wal_threshold=10000
+# flush_wal_threshold=10000
 
 # The cycle when insert ahead log is periodically forced to be written to 
disk(in milliseconds)
 # If force_wal_period_in_ms = 0 it means force insert ahead log to be written 
to disk after each refreshment
 # Set this parameter to 0 may slow down the ingestion on slow disk.
-force_wal_period_in_ms=100
+# force_wal_period_in_ms=100
 
 ####################
 ### Directory Configuration
@@ -125,41 +125,41 @@ force_wal_period_in_ms=100
 
 
 # TSFile storage file system. Currently, Tsfile are supported to be stored in 
LOCAL file system or HDFS.
-tsfile_storage_fs=LOCAL
+# tsfile_storage_fs=LOCAL
 
 # If using HDFS, the absolute file path of Hadoop core-site.xml should be 
configured
-core_site_path=/etc/hadoop/conf/core-site.xml
+# core_site_path=/etc/hadoop/conf/core-site.xml
 
 # If using HDFS, the absolute file path of Hadoop hdfs-site.xml should be 
configured
-hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml
+# hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml
 
 # If using HDFS, hadoop ip can be configured. If there are more than one 
hdfs_ip, Hadoop HA is used
-hdfs_ip=localhost
+# hdfs_ip=localhost
 
 # If using HDFS, hadoop port can be configured
-hdfs_port=9000
+# hdfs_port=9000
 
 # If there are more than one hdfs_ip, Hadoop HA is used. Below are 
configuration for HA
 # If using Hadoop HA, nameservices of hdfs can be configured
-dfs_nameservices=hdfsnamespace
+# dfs_nameservices=hdfsnamespace
 
 # If using Hadoop HA, namenodes under dfs nameservices can be configured
-dfs_ha_namenodes=nn1,nn2
+# dfs_ha_namenodes=nn1,nn2
 
 # If using Hadoop HA, automatic failover can be enabled or disabled
-dfs_ha_automatic_failover_enabled=true
+# dfs_ha_automatic_failover_enabled=true
 
 # If using Hadoop HA and enabling automatic failover, the proxy provider can 
be configured
-dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+# 
dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
 
 # If using kerberos to authenticate hdfs, this should be true
-hdfs_use_kerberos=false
+# hdfs_use_kerberos=false
 
 # Full path of kerberos keytab file
-kerberos_keytab_file_path=/path
+# kerberos_keytab_file_path=/path
 
 # Kerberos principal
-kerberos_principal=your principal
+# kerberos_principal=your principal
 
 
 ####################
@@ -181,112 +181,112 @@ timestamp_precision=ms
 # if WAL is enabled and the size of the inserted plan is greater than one-half 
of this parameter,
 # then the insert plan will be rejected by WAL.
 # If it sets a value smaller than 0, use the default value 16777216
-wal_buffer_size=16777216
+# wal_buffer_size=16777216
 
 # When a TsFile's file size (in byte) exceeds this, the TsFile is forced 
closed.
 # It may cause memTable size smaller if it is a large value
-tsfile_size_threshold=1
+# tsfile_size_threshold=1
 
 # Size of log buffer in each metadata operation plan(in byte).
 # If the size of a metadata operation plan is larger than this parameter, then 
it will be rejected by MManager
 # If it sets a value smaller than 0, use the default value 1024*1024
-mlog_buffer_size=1048576
+# mlog_buffer_size=1048576
 
 # When a memTable's size (in byte) exceeds this, the memtable is flushed to 
disk. The default threshold is 1 GB.
-memtable_size_threshold=1073741824
+# memtable_size_threshold=1073741824
 
 # When the average point number of timeseries in memtable exceeds this, the 
memtable is flushed to disk. The default threshold is 10000.
-avg_series_point_number_threshold=10000
+# avg_series_point_number_threshold=10000
 
 # How many threads can concurrently flush. When <= 0, use CPU core number.
-concurrent_flush_thread=0
+# concurrent_flush_thread=0
 
 # How many threads can concurrently query. When <= 0, use CPU core number.
-concurrent_query_thread=0
+# concurrent_query_thread=0
 
 # whether take over the memory management by IoTDB rather than JVM when 
serializing memtable as bytes in memory
 # (i.e., whether use ChunkBufferPool), value true, false
-chunk_buffer_pool_enable=false
+# chunk_buffer_pool_enable=false
 
 # The amount of data iterate each time in server (the number of data strips, 
that is, the number of different timestamps.)
-batch_size=100000
+# batch_size=100000
 
 # max size for tag and attribute of one time series
 # the unit is byte
-tag_attribute_total_size=700
+# tag_attribute_total_size=700
 
 # In one insert (one device, one timestamp, multiple measurements),
 # if enable partial insert, one measurement failure will not impact other 
measurements
-enable_partial_insert=true
+# enable_partial_insert=true
 
 # Whether to enable MTree snapshot. Default false from 0.11.0 on.
-enable_mtree_snapshot=false
+# enable_mtree_snapshot=false
 
 # The least interval line numbers of mlog.txt when creating a checkpoint and 
saving snapshot of MTree.
 # Only take effect when enable_mtree_snapshot=true. Unit: line numbers
-mtree_snapshot_interval=100000
+# mtree_snapshot_interval=100000
 
 # Threshold interval time of MTree modification. Unit: second. Default: 1 
hour(3600 seconds)
 # If the last modification time is less than this threshold, MTree snapshot 
will not be created
 # Only take effect when enable_mtree_snapshot=true.
-mtree_snapshot_threshold_time=3600
+# mtree_snapshot_threshold_time=3600
 
 # number of virtual storage groups per user-defined storage group
 # a virtual storage group is the unit of parallelism in memory as all 
ingestions in one virtual storage group are serialized
 # recommended value is [virtual storage group number] = [CPU core number] / 
[user-defined storage group number]
-virtual_storage_group_num = 1
+# virtual_storage_group_num = 1
 
 # Level of TimeIndex, which records the start time and end time of 
TsFileResource. Currently,
 # DEVICE_TIME_INDEX and FILE_TIME_INDEX are supported, and could not be 
changed after first set.
-time_index_level=DEVICE_TIME_INDEX
+# time_index_level=DEVICE_TIME_INDEX
 
 ####################
 ### Memory Control Configuration
 ####################
 
 # Whether to enable memory control
-enable_mem_control=true
+# enable_mem_control=true
 
 # Memory Allocation Ratio: Write, Read, Schema and Free Memory.
 # The parameter form is a:b:c:d, where a, b, c and d are integers. for 
example: 1:1:1:1 , 6:2:1:1
 # If you have high level of writing pressure and low level of reading 
pressure, please adjust it to for example 6:1:1:2 
-write_read_schema_free_memory_proportion=4:3:1:2
+# write_read_schema_free_memory_proportion=4:3:1:2
 
 # primitive array size (length of each array) in array pool
-primitive_array_size=128
+# primitive_array_size=128
 
 # Ratio of write memory for invoking flush disk, 0.4 by default
 # If you have extremely high write load (like batch=1000), it can be set lower 
than the default value like 0.2
-flush_proportion=0.4
+# flush_proportion=0.4
 
 # Ratio of write memory allocated for buffered arrays, 0.6 by default
-buffered_arrays_memory_proportion=0.6
+# buffered_arrays_memory_proportion=0.6
 
 # Ratio of write memory for rejecting insertion, 0.8 by default
 # If you have extremely high write load (like batch=1000) and the physical 
memory size is large enough, 
 # it can be set higher than the default value like 0.9
-reject_proportion=0.8
+# reject_proportion=0.8
 
 # If memory (in byte) of storage group increased more than this threshold, 
report to system. The default value is 16MB
-storage_group_report_threshold=16777216
+# storage_group_report_threshold=16777216
 
 # allowed max numbers of deduplicated path in one query
 # it's just an advised value, the real limitation will be the smaller one 
between this and the one we calculated
-max_deduplicated_path_num=1000
+# max_deduplicated_path_num=1000
 
 # When an inserting is rejected, waiting period (in ms) to check system again, 
50 by default.
 # If the insertion has been rejected and the read load is low, it can be set 
larger.
-check_period_when_insert_blocked=50
+# check_period_when_insert_blocked=50
 
 # When the waiting time (in ms) of an inserting exceeds this, throw an 
exception. 10000 by default.
 # If the insertion has been rejected and the read load is low, it can be set 
larger
-max_waiting_time_when_insert_blocked=10000
+# max_waiting_time_when_insert_blocked=10000
 
 # estimated metadata size (in byte) of one timeseries in Mtree
-estimated_series_size=300
+# estimated_series_size=300
 
 # size of ioTaskQueue. The default value is 10
-io_task_queue_size_for_flushing=10
+# io_task_queue_size_for_flushing=10
 
 ####################
 ### Upgrade Configurations
@@ -294,7 +294,7 @@ io_task_queue_size_for_flushing=10
 
 # When there exists old version(0.9.x/v1) data, how many threads will be set up 
to perform upgrade tasks, 1 by default.
 # Set to 1 when less than or equal to 0.
-upgrade_thread_num=1
+# upgrade_thread_num=1
 
 
 ####################
@@ -302,37 +302,37 @@ upgrade_thread_num=1
 ####################
 
 # the default time period that used in fill query, -1 by default means 
infinite past time, in ms
-default_fill_interval=-1
+# default_fill_interval=-1
 
 ####################
 ### Merge Configurations
 ####################
 # LEVEL_COMPACTION, NO_COMPACTION
-compaction_strategy=LEVEL_COMPACTION
+# compaction_strategy=LEVEL_COMPACTION
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # Whether to merge unseq files into seq files or not.
-enable_unseq_compaction=true
+# enable_unseq_compaction=true
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # The max seq file num of each level.
 # When the num of files in one level exceeds this,
 # the files in this level will merge to one and put to upper level.
-seq_file_num_in_each_level=6
+# seq_file_num_in_each_level=6
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # The max num of seq level.
-seq_level_num=3
+# seq_level_num=3
 
 # Works when compaction_strategy is LEVEL_COMPACTION.
 # The max unseq file num of each level.
 # When the num of files in one level exceeds this,
 # the files in this level will merge to one and put to upper level.
-unseq_file_num_in_each_level=10
+# unseq_file_num_in_each_level=10
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # The max num of unseq level.
-unseq_level_num=1
+# unseq_level_num=1
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # When the average point number of chunks in the target file reaches this, 
merge the file to the top level.
@@ -340,20 +340,20 @@ unseq_level_num=1
 # merged with its succeeding chunks even if it is not overflowed, until the 
merged chunks reach
 # this threshold and the new chunk will be flushed.
 # When less than 0, this mechanism is disabled.
-merge_chunk_point_number=100000
+# merge_chunk_point_number=100000
 
 # Works when the compaction_strategy is LEVEL_COMPACTION.
 # When point number of a page reaches this, use "append merge" instead of 
"deserialize merge".
-merge_page_point_number=100
+# merge_page_point_number=100
 
 # How many threads will be set up to perform unseq merge chunk sub-tasks, 4 by 
default.
 # Set to 1 when less than or equal to 0.
-merge_chunk_subthread_num=4
+# merge_chunk_subthread_num=4
 
 # If one merge file selection runs for more than this time, it will be ended 
and its current
 # selection will be used as final selection. Unit: millis.
 # When < 0, it means time is unbounded.
-merge_fileSelection_time_budget=30000
+# merge_fileSelection_time_budget=30000
 
 # How much memory may be used in ONE merge task (in byte), 10% of maximum JVM 
memory by default.
 # This is only a rough estimation, starting from a relatively small value to 
avoid OOM.
@@ -365,75 +365,75 @@ merge_fileSelection_time_budget=30000
 # be continued, otherwise, the unfinished parts of such merges will not be 
continued while the
 # finished parts still remains as they are.
 # If you are feeling the rebooting is too slow, set this to false, false by 
default
-continue_merge_after_reboot=false
+# continue_merge_after_reboot=false
 
 # When set to true, all unseq merges become full merges (the whole SeqFiles 
are re-written despite how
 # much they are overflowed). This may increase merge overhead depending on how 
much the SeqFiles
 # are overflowed.
-force_full_merge=false
+# force_full_merge=false
 
 # How many threads will be set up to perform compaction, 10 by default.
 # Set to 1 when less than or equal to 0.
-compaction_thread_num=10
+# compaction_thread_num=10
 
 # The limit of write throughput merge can reach per second
-merge_write_throughput_mb_per_sec=8
+# merge_write_throughput_mb_per_sec=8
 
 # The max executing time of query. unit: ms
-query_timeout_threshold=60000
+# query_timeout_threshold=60000
 
 ####################
 ### Metadata Cache Configuration
 ####################
 
 # whether to cache meta data(ChunkMetadata and TimeSeriesMetadata) or not.
-meta_data_cache_enable=true
+# meta_data_cache_enable=true
 # Read memory Allocation Ratio: ChunkCache, TimeSeriesMetadataCache, memory 
used for constructing QueryDataSet and Free Memory Used in Query.
 # The parameter form is a:b:c:d, where a, b, c and d are integers. for 
example: 1:1:1:1 , 1:2:3:4
-chunk_timeseriesmeta_free_memory_proportion=1:2:3:4
+# chunk_timeseriesmeta_free_memory_proportion=1:2:3:4
 
 # cache size for MManager.
 # This cache is used to improve insert speed where all path check and 
TSDataType will be cached in MManager with corresponding Path.
-metadata_node_cache_size=300000
+# metadata_node_cache_size=300000
 
 ####################
 ### LAST Cache Configuration
 ####################
 
 # Whether to enable LAST cache
-enable_last_cache=true
+# enable_last_cache=true
 
 ####################
 ### Statistics Monitor configuration
 ####################
 
 # Set enable_stat_monitor true(or false) to enable(or disable) the StatMonitor 
that stores statistics info.
-enable_stat_monitor=false
+# enable_stat_monitor=false
 
 # Set enable_monitor_series_write true (or false) to enable (or disable) the 
writing monitor time series
-enable_monitor_series_write=false
+# enable_monitor_series_write=false
 
 ####################
 ### WAL Direct Buffer Pool Configuration
 ####################
 # the interval to trim the wal pool
-wal_pool_trim_interval_ms=10000
+# wal_pool_trim_interval_ms=10000
 
 # the max number of wal bytebuffer can be allocated for each time partition, 
if there is no unseq data you can set it to 4.
 # it should be an even number
-max_wal_bytebuffer_num_for_each_partition=6
+# max_wal_bytebuffer_num_for_each_partition=6
 
 ####################
 ### External sort Configuration
 ####################
 # Is external sort enable
-enable_external_sort=true
+# enable_external_sort=true
 
 # The maximum number of simultaneous chunk reading for a single time series.
 # If the num of simultaneous chunk reading is greater than 
external_sort_threshold, external sorting is used.
 # When external_sort_threshold increases, the number of chunks sorted at the 
same time in memory may increase and this will occupy more memory.
 # When external_sort_threshold decreases, triggering external sorting will 
increase the time-consuming.
-external_sort_threshold=1000
+# external_sort_threshold=1000
 
 
 ####################
@@ -441,29 +441,29 @@ external_sort_threshold=1000
 ####################
 
 # Whether to open the sync_server_port for receiving data from sync client, 
the default is closed
-is_sync_enable=false
+# is_sync_enable=false
 
 # Sync server port to listen
-sync_server_port=5555
+# sync_server_port=5555
 
 # White IP list of Sync client.
 # Please use the form of network segment to present the range of IP, for 
example: 192.168.0.0/16
 # If there are more than one IP segment, please separate them by commas
 # The default is to allow all IP to sync
-ip_white_list=0.0.0.0/0
+# ip_white_list=0.0.0.0/0
 
 ####################
 ### performance statistic configuration
 ####################
 
 # Is stat performance of sub-module enable
-enable_performance_stat=false
+# enable_performance_stat=false
 # The interval of display statistic result in ms.
-performance_stat_display_interval=60000
+# performance_stat_display_interval=60000
 # The memory used for performance_stat in kb.
-performance_stat_memory_in_kb=20
+# performance_stat_memory_in_kb=20
 # Is performance tracing enable
-enable_performance_tracing=false
+# enable_performance_tracing=false
 
 # Uncomment following fields to configure the tracing root directory.
 # For Windows platform, the index is as follows:
@@ -475,10 +475,10 @@ enable_performance_tracing=false
 ####################
 ### Configurations for watermark module
 ####################
-watermark_module_opened=false
-watermark_secret_key=IoTDB*2019@Beijing
-watermark_bit_string=100101110100
-watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
+# watermark_module_opened=false
+# watermark_secret_key=IoTDB*2019@Beijing
+# watermark_bit_string=100101110100
+# watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
 
 
 ####################
@@ -486,116 +486,116 @@ 
watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5)
 ####################
 
 # Whether creating schema automatically is enabled
-enable_auto_create_schema=true
+# enable_auto_create_schema=true
 
 # Storage group level when creating schema automatically is enabled
 # e.g. root.sg0.d1.s2
 #      we will set root.sg0 as the storage group if storage group level is 1
-default_storage_group_level=1
+# default_storage_group_level=1
 
 # ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT
 
 # register time series as which type when receiving boolean string "true" or 
"false"
-boolean_string_infer_type=BOOLEAN
+# boolean_string_infer_type=BOOLEAN
 
 # register time series as which type when receiving an integer string "67"
-integer_string_infer_type=FLOAT
+# integer_string_infer_type=FLOAT
 
 # register time series as which type when receiving an integer string and 
using float may lose precision
 # num > 2 ^ 24
-long_string_infer_type=DOUBLE
+# long_string_infer_type=DOUBLE
 
 # register time series as which type when receiving a floating number string 
"6.7"
-floating_string_infer_type=FLOAT
+# floating_string_infer_type=FLOAT
 
 # register time series as which type when receiving the Literal NaN. Values 
can be DOUBLE, FLOAT or TEXT
-nan_string_infer_type=DOUBLE
+# nan_string_infer_type=DOUBLE
 
 
 # BOOLEAN encoding when creating schema automatically is enabled
-default_boolean_encoding=RLE
+# default_boolean_encoding=RLE
 
 # INT32 encoding when creating schema automatically is enabled
-default_int32_encoding=RLE
+# default_int32_encoding=RLE
 
 # INT64 encoding when creating schema automatically is enabled
-default_int64_encoding=RLE
+# default_int64_encoding=RLE
 
 # FLOAT encoding when creating schema automatically is enabled
-default_float_encoding=GORILLA
+# default_float_encoding=GORILLA
 
 # DOUBLE encoding when creating schema automatically is enabled
-default_double_encoding=GORILLA
+# default_double_encoding=GORILLA
 
 # TEXT encoding when creating schema automatically is enabled
-default_text_encoding=PLAIN
+# default_text_encoding=PLAIN
 
 ####################
 ### Configurations for tsfile-format
 ####################
 
-group_size_in_byte=134217728
+# group_size_in_byte=134217728
 
 # The memory size for each series writer to pack page, default value is 64KB
-page_size_in_byte=65536
+# page_size_in_byte=65536
 
 # The maximum number of data points in a page, default 1024*1024
-max_number_of_points_in_page=1048576
+# max_number_of_points_in_page=1048576
 
 # Data type configuration
 # Data type for input timestamp, supports INT32 or INT64
-time_series_data_type=INT64
+# time_series_data_type=INT64
 
 # Max size limitation of input string
-max_string_length=128
+# max_string_length=128
 
 # Floating-point precision
-float_precision=2
+# float_precision=2
 
 # Encoder configuration
 # Encoder of time series, supports TS_2DIFF, PLAIN and RLE(run-length 
encoding), REGULAR and default value is TS_2DIFF
-time_encoder=TS_2DIFF
+# time_encoder=TS_2DIFF
 
 # Encoder of value series. default value is PLAIN.
 # For int, long data type, also supports TS_2DIFF and RLE(run-length encoding) 
and GORILLA.
 # For float, double data type, also supports TS_2DIFF, RLE(run-length 
encoding) and GORILLA.
 # For text data type, only supports PLAIN.
-value_encoder=PLAIN
+# value_encoder=PLAIN
 
 # Compression configuration
 # Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value 
is SNAPPY
-compressor=SNAPPY
+# compressor=SNAPPY
 
 # Maximum degree of a metadataIndex node, default value is 256
-max_degree_of_index_node=256
+# max_degree_of_index_node=256
 
 # time interval in minute for calculating query frequency
-frequency_interval_in_minute=1
+# frequency_interval_in_minute=1
 
 # time cost(ms) threshold for slow query
-slow_query_threshold=5000
+# slow_query_threshold=5000
 
 ####################
 ### MQTT Broker Configuration
 ####################
 
 # whether to enable the mqtt service.
-enable_mqtt_service=false
+# enable_mqtt_service=false
 
 # the mqtt service binding host.
-mqtt_host=0.0.0.0
+# mqtt_host=0.0.0.0
 
 # the mqtt service binding port.
-mqtt_port=1883
+# mqtt_port=1883
 
 # the handler pool size for handing the mqtt messages.
-mqtt_handler_pool_size=1
+# mqtt_handler_pool_size=1
 
 # the mqtt message payload formatter.
-mqtt_payload_formatter=json
+# mqtt_payload_formatter=json
 
 # max length of mqtt message in byte
-mqtt_max_message_size=1048576
+# mqtt_max_message_size=1048576
 
 ####################
 ### Authorization Configuration
@@ -603,7 +603,7 @@ mqtt_max_message_size=1048576
 
 #which class to use for authorization. By default, it is LocalFileAuthorizer.
 #Another choice is org.apache.iotdb.db.auth.authorizer.OpenIdAuthorizer
-authorizer_provider_class=org.apache.iotdb.db.auth.authorizer.LocalFileAuthorizer
+# 
authorizer_provider_class=org.apache.iotdb.db.auth.authorizer.LocalFileAuthorizer
 
 
 #If OpenIdAuthorizer is enabled, then openID_url must be set.
@@ -617,15 +617,15 @@ 
authorizer_provider_class=org.apache.iotdb.db.auth.authorizer.LocalFileAuthorize
 # Used to estimate the memory usage of text fields in a UDF query.
 # It is recommended to set this value to be slightly larger than the average 
length of all text
 # records.
-udf_initial_byte_array_length_for_memory_control=48
+# udf_initial_byte_array_length_for_memory_control=48
 
 # How much memory may be used in ONE UDF query (in MB).
 # The upper limit is 20% of allocated memory for read.
-udf_memory_budget_in_mb=30.0
+# udf_memory_budget_in_mb=30.0
 
 # UDF memory allocation ratio.
 # The parameter form is a:b:c, where a, b, and c are integers.
-udf_reader_transformer_collector_memory_proportion=1:1:1
+# udf_reader_transformer_collector_memory_proportion=1:1:1
 
 # Uncomment following fields to configure the udf root directory.
 # For Windows platform, the index is as follows:
@@ -643,19 +643,19 @@ udf_reader_transformer_collector_memory_proportion=1:1:1
 # index_root_dir=data/index
 
 # Is index enable
-enable_index=false
+# enable_index=false
 
 # How many threads can concurrently build index. When <= 0, use CPU core 
number.
-concurrent_index_build_thread=0
+# concurrent_index_build_thread=0
 
 # the default size of sliding window used for the subsequence matching in 
index framework
-default_index_window_range=10
+# default_index_window_range=10
 
 # buffer parameter for index processor.
-index_buffer_size=134217728
+# index_buffer_size=134217728
 
 # whether enable data partition. If disabled, all data belongs to partition 0
-enable_partition=false
+# enable_partition=false
 
 # time range for partitioning data inside each storage group, the unit is 
second
-partition_interval=604800
+# partition_interval=604800

Reply via email to