You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by ro...@apache.org on 2021/11/19 07:00:21 UTC
[iotdb] 01/01: modify config items
This is an automated email from the ASF dual-hosted git repository.
rong pushed a commit to branch xianyi
in repository https://gitbox.apache.org/repos/asf/iotdb.git
commit 6877d9a99315918566734216b599c8880926c28b
Author: Steve Yurong Su <ro...@apache.org>
AuthorDate: Fri Nov 19 14:59:15 2021 +0800
modify config items
---
.../resources/conf/iotdb-engine.properties | 30 +++++++++++-----------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties b/server/src/assembly/resources/conf/iotdb-engine.properties
index 3e2da93..48a6d2f 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -65,7 +65,7 @@ rpc_port=6667
# Is the insert ahead (write-ahead) log enabled
# Datatype: boolean
-# enable_wal=true
+enable_wal=false
# Add a switch to drop out-of-order data
# Out-of-order data will impact the aggregation query a lot. Users may not care about discarding some out-of-order data.
@@ -278,7 +278,7 @@ timestamp_precision=ms
# When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. The default threshold is 10000.
# Datatype: int
-# avg_series_point_number_threshold=10000
+avg_series_point_number_threshold=10000000
# How many threads can concurrently flush. When <= 0, use CPU core number.
# Datatype: int
@@ -353,7 +353,7 @@ timestamp_precision=ms
# primitive array size (length of each array) in array pool
# Datatype: int
-# primitive_array_size=32
+primitive_array_size=1024
# Ratio of write memory for invoking flush disk, 0.4 by default
# If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2
@@ -381,7 +381,7 @@ timestamp_precision=ms
# allowed max numbers of deduplicated path in one query
# it's just an advised value, the real limitation will be the smaller one between this and the one we calculated
# Datatype: int
-# max_deduplicated_path_num=1000
+max_deduplicated_path_num=1000000
# When an insertion is rejected, waiting period (in ms) to check the system again, 50 by default.
# If the insertion has been rejected and the read load is low, it can be set larger.
@@ -423,15 +423,15 @@ timestamp_precision=ms
####################
# sequence space compaction: only compact the sequence files
# Datatype: boolean
-# enable_seq_space_compaction=true
+enable_seq_space_compaction=false
# unsequence space compaction: only compact the unsequence files
# Datatype: boolean
-# enable_unseq_space_compaction=true
+enable_unseq_space_compaction=false
# cross space compaction: compact the unsequence files into the overlapped sequence files
# Datatype: boolean
-# enable_cross_space_compaction=true
+enable_cross_space_compaction=false
# the strategy of inner space compaction task
# Options: inplace_compaction
@@ -528,7 +528,7 @@ timestamp_precision=ms
# The max executing time of query. unit: ms
# Datatype: int
-# query_timeout_threshold=60000
+query_timeout_threshold=600000000
####################
### Metadata Cache Configuration
@@ -539,7 +539,7 @@ timestamp_precision=ms
# meta_data_cache_enable=true
# Read memory Allocation Ratio: BloomFilterCache, ChunkCache, TimeSeriesMetadataCache, memory used for constructing QueryDataSet and Free Memory Used in Query.
# The parameter form is a:b:c:d:e, where a, b, c, d and e are integers. for example: 1:1:1:1:1 , 1:100:200:300:400
-# chunk_timeseriesmeta_free_memory_proportion=1:100:200:300:400
+chunk_timeseriesmeta_free_memory_proportion=1000:100:200:300:400
# cache size for MManager.
# This cache is used to improve insert speed where all path check and TSDataType will be cached in MManager with corresponding Path.
@@ -552,7 +552,7 @@ timestamp_precision=ms
# Whether to enable LAST cache
# Datatype: boolean
-# enable_last_cache=true
+enable_last_cache=false
####################
### Statistics Monitor configuration
@@ -814,11 +814,11 @@ timestamp_precision=ms
# How much memory may be used in ONE UDF query (in MB).
# The upper limit is 20% of allocated memory for read.
# Datatype: float
-# udf_memory_budget_in_mb=30.0
+udf_memory_budget_in_mb=10240.0
# UDF memory allocation ratio.
# The parameter form is a:b:c, where a, b, and c are integers.
-# udf_reader_transformer_collector_memory_proportion=1:1:1
+udf_reader_transformer_collector_memory_proportion=1:3:2
# Uncomment the following field to configure the udf root directory.
# For Window platform
@@ -850,12 +850,12 @@ timestamp_precision=ms
# How many threads can be used for evaluating sliding windows. When <= 0, use CPU core number.
# Datatype: int
-# concurrent_window_evaluation_thread=0
+concurrent_window_evaluation_thread=0
# Max number of window evaluation tasks that can be pending for execution. When <= 0, the value is
# 64 by default.
# Datatype: int
-# max_pending_window_evaluation_tasks=64
+max_pending_window_evaluation_tasks=10240
####################
### Continuous Query Configuration
@@ -882,7 +882,7 @@ timestamp_precision=ms
# The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements.
# When <= 0, use 10000.
# Datatype: int
-# select_into_insert_tablet_plan_row_limit=10000
+select_into_insert_tablet_plan_row_limit=10000000
####################
### Index Configuration