You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by va...@apache.org on 2020/09/15 20:14:11 UTC

[couchdb] 14/16: Update and cleanup default.ini replicator entries

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit b718d3380cbc8dc5799f36e19e1a94b8c2e00c2d
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Fri Aug 28 04:36:05 2020 -0400

    Update and cleanup default.ini replicator entries
    
    Update settings with defaults. Also comment out values which are already set to
    default in the code.
---
 rel/overlay/etc/default.ini | 78 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 59 insertions(+), 19 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index b837082..712150b 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -436,55 +436,99 @@ compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to d
 compressible_types = text/*, application/javascript, application/json, application/xml
 
 [replicator]
-; Random jitter applied on replication job startup (milliseconds)
-startup_jitter = 5000
-; Number of actively running replications
-max_jobs = 500
-;Scheduling interval in milliseconds. During each reschedule cycle
-interval = 60000
+; Number of actively running replications per replication backend node
+;max_jobs = 500
+
+; Scheduling interval in seconds
+;interval_sec = 15
+
 ; Maximum number of replications to start and stop during rescheduling.
-max_churn = 20
+;max_churn = 100
+
+; Max number of acceptors running per node. If there are available job slots
+; left, then up to this many acceptors are kept open.
+;max_acceptors = 2
+
+; The amount of jitter (in milliseconds) to apply to replication job acceptors.
+; This will allow multiple acceptors to avoid generating too many transaction
+; conflicts on busy clusters.
+;accept_jitter = 2000
+
+; Minimum time in seconds replication jobs will be left running before being
+; rotated when all the schedule slots are filled. This might be useful if
+; max_jobs is very low, but jobs should be left running long enough to make at
+; least some progress before being replaced
+;min_run_time_sec = 60
+
+; Health threshold is the minimum amount of time an unhealthy job should run
+; without crashing before it is considered to be healthy again.
+;health_threshold_sec = 120
+
+; These are applied when jobs are penalized after repeatedly crashing. On
+; first error the minimum value is applied. Then the penalty is doubled, but
+; only up to the maximum value.
+;min_backoff_penalty_sec = 32
+;max_backoff_penalty_sec = 172800
+
+; How many per-job history events to keep
+;max_history = 10
+
 ; More worker processes can give higher network throughput but can also
 ; imply more disk and network IO.
-worker_processes = 4
+;worker_processes = 4
+
 ; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
 ; also reduce the total amount of used RAM memory.
-worker_batch_size = 500
+;worker_batch_size = 500
+
 ; Maximum number of HTTP connections per replication.
-http_connections = 20
+;http_connections = 20
+
 ; HTTP connection timeout per replication.
 ; Even for very fast/reliable networks it might need to be increased if a remote
 ; database is too busy.
-connection_timeout = 30000
+;connection_timeout = 30000
+
 ; Request timeout
 ;request_timeout = infinity
 ; If a request fails, the replicator will retry it up to N times.
-retries_per_request = 5
+;retries_per_request = 5
+
 ; Use checkpoints
 ;use_checkpoints = true
+
 ; Checkpoint interval
 ;checkpoint_interval = 30000
+
 ; Some socket options that might boost performance in some scenarios:
 ;       {nodelay, boolean()}
 ;       {sndbuf, integer()}
 ;       {recbuf, integer()}
 ;       {priority, integer()}
 ; See the `inet` Erlang module's man page for the full list of options.
-socket_options = [{keepalive, true}, {nodelay, false}]
+;socket_options = [{keepalive, true}, {nodelay, false}]
+
 ; Path to a file containing the user's certificate.
 ;cert_file = /full/path/to/server_cert.pem
+
 ; Path to file containing user's private PEM encoded key.
 ;key_file = /full/path/to/server_key.pem
+
 ; String containing the user's password. Only used if the private keyfile is password protected.
 ;password = somepassword
+
 ; Set to true to validate peer certificates.
-verify_ssl_certificates = false
+;verify_ssl_certificates = false
+
 ; File containing a list of peer trusted certificates (in the PEM format).
 ;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+
 ; Maximum peer certificate depth (must be set even if certificate validation is off).
-ssl_certificate_max_depth = 3
+;ssl_certificate_max_depth = 3
+
 ; Maximum document ID length for replication.
 ;max_document_id_length = infinity
+
 ; How much time to wait before retrying after a missing doc exception. This
 ; exception happens if the document was seen in the changes feed, but internal
 ; replication hasn't caught up yet, and fetching document's revisions
@@ -494,10 +538,6 @@ ssl_certificate_max_depth = 3
 ; avoid crashing the whole replication job, which would consume more resources
 ; and add log noise.
 ;missing_doc_retry_msec = 2000
-; Wait this many seconds after startup before attaching changes listeners
-; cluster_start_period = 5
-; Re-check cluster state at least every cluster_quiet_period seconds
-; cluster_quiet_period = 60
 
 ; List of replicator client authentication plugins to try. Plugins will be
 ; tried in order. The first to initialize successfully will be used for that