Posted to commits@couchdb.apache.org by va...@apache.org on 2020/01/16 06:00:29 UTC

[couchdb] 01/01: Add a few missing settings to the default.ini file

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch add-defaults-config-ini-file
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 7fe47863ec36d482e5ad77dc45d9b929d76dc382
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Thu Jan 16 00:53:01 2020 -0500

    Add a few missing settings to the default.ini file
    
    Some rexi and reshard parameters
    
    Issue: https://github.com/apache/couchdb/issues/2457
---
 rel/overlay/etc/default.ini | 47 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index a301987..1a5b77a 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -216,6 +216,12 @@ port = 6984
 ; buffer_count = 2000
 ; server_per_node = true
 ; stream_limit = 5
+;
+; Use a single message to kill a group of remote workers. This is
+; mostly an upgrade clause to allow operating in a mixed cluster of
+; 2.x and 3.x nodes. After upgrading, switch it to true to save some
+; network bandwidth.
+;use_kill_all = false
 
 ; [global_changes]
 ; max_event_delay = 25
@@ -578,3 +584,44 @@ compaction = false
 ; CouchDB will use the value of `max_limit` instead. If neither is
 ; defined, the default is 2000 as stated here.
 ; max_limit_partitions = 2000
+
+[reshard]
+; Maximum number of resharding jobs allowed. New jobs cannot be added
+; until some old ones are deleted. Finished jobs are not removed
+; automatically; they have to be removed by hand.
+;max_jobs = 48
+;
+; Time to wait before retrying a failed shard splitting phase.
+;retry_interval_sec = 10
+;
+;
+; Every time a job switches from one phase to the next, it adds an
+; entry to the history. This setting specifies the maximum length of
+; that history.
+;max_history = 20
+;
+; Require users to specify a node and/or range argument when creating
+; shard splitting jobs. The reason is to prevent users from
+; inadvertently splitting all ranges on all the nodes and adding too
+; much load to the cluster.
+;require_node_param = false
+;require_range_param = false
+;
+; How long a shard splitting job should wait for the source shard to
+; close after the shard splitting has finished.
+;source_close_timeout_sec = 600
+;
+; How long to wait for the shard map update phase. This might have to
+; be increased if there is quite a bit of load or if the connection
+; between nodes is slow or unreliable.
+;update_shardmap_timeout_sec = 60
+;
+; Whether to delete the source shard after shard splitting has
+; finished. Disabling this is mainly for debugging or for cases when
+; extra safety is needed.
+;delete_source = true
+;
+; How many times to retry a failed shard splitting job phase. For
+; example, if the initial copy fails, the default value will retry the
+; initial copy one extra time before failing the whole job.
+;max_retries = 1
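
A minimal usage sketch for these settings, separate from the commit itself. The node address 127.0.0.1:5984, admin credentials adm:pass, and database name mydb below are placeholders, and the values shown are illustrative rather than recommendations. The new defaults can be overridden in local.ini or changed at runtime through the configuration endpoint, and the [reshard] settings govern jobs created through the _reshard API:

    ; local.ini -- override a couple of the new defaults
    [rexi]
    ; once every node in the cluster runs 3.x, a single kill_all
    ; message saves some network bandwidth
    use_kill_all = true

    [reshard]
    ; allow fewer concurrent resharding jobs than the default of 48
    max_jobs = 24
    ; force callers to name a node when creating split jobs
    require_node_param = true

    # The same values can be set at runtime through the config API
    curl -X PUT http://adm:pass@127.0.0.1:5984/_node/_local/_config/reshard/max_jobs -d '"24"'

    # The [reshard] settings above apply to jobs created like this
    curl -X POST http://adm:pass@127.0.0.1:5984/_reshard/jobs \
         -H 'Content-Type: application/json' \
         -d '{"type": "split", "db": "mydb"}'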