Posted to commits@couchdb.apache.org by va...@apache.org on 2023/04/13 19:50:27 UTC

[couchdb] branch 3.3.x updated (d42531b6d -> 791eb7c1c)

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a change to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git


    from d42531b6d chore(configure): be more defensive about SpiderMonkey location (#4407)
     new 453e3c52f Fix replication _scheduler/docs total_rows
     new 3dda4bd73 Allow _local doc writes to the replicator dbs
     new 96578cb18 Ensure design docs are uploaded individually when replicating with _bulk_get
     new 87ceb1b01 Bump recon to 2.5.3
     new 05659effc Upgrade hash algorithm for proxy auth (#4438)
     new 71f46314a allow configurable timeouts for _view and _search
     new 736833293 Remove json2 JS script and the try except around seal
     new 28bdca6e5 Avoid re-compiling filter view functions
     new a93b46b08 fix(doc): reverse definition of `all_nodes` and `cluster_nodes` to match reality
     new 6bc813ce5 Fix list ordering and indentation in "Search" docs (#4476)
     new 6d2ab3ea7 docs(_find): Remove redundancy from sample `_explain` response
     new 95372279e Make remsh work with quoted cookie
     new f7a9b3527 Employ `make python-black-update`
     new ae791731c Update Erlang 24 for CI
     new 791eb7c1c Bump Erlang 24 and 25 in CI

The 15 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 build-aux/Jenkinsfile.full                         |   2 +-
 build-aux/Jenkinsfile.pr                           |   2 +-
 build-aux/show-test-results.py                     |   4 +-
 dev/run                                            |   2 -
 rebar.config.script                                |   2 +-
 rel/overlay/bin/remsh                              |  19 +-
 share/server/filter.js                             |   5 -
 share/server/json2.js                              | 482 ---------------------
 share/server/loop.js                               |   5 +-
 share/server/util.js                               |  12 +-
 src/chttpd/test/eunit/chttpd_auth_tests.erl        |  87 ++++
 src/couch/src/couch_httpd_auth.erl                 |  15 +-
 src/couch_replicator/src/couch_replicator_docs.erl |   2 +
 .../src/couch_replicator_httpd_util.erl            |   8 +-
 .../src/couch_replicator_worker.erl                |  22 +-
 .../test/eunit/couch_replicator_bulk_get_tests.erl |  79 +++-
 .../couch_replicator_scheduler_docs_tests.erl      | 158 +++++++
 .../test/eunit/couch_replicator_test_helper.erl    |  10 +-
 src/docs/ext/configdomain.py                       |   2 -
 src/docs/src/api/database/find.rst                 |   1 -
 src/docs/src/api/server/authn.rst                  |  16 +-
 src/docs/src/config/auth.rst                       |  16 +-
 src/docs/src/ddocs/search.rst                      |  90 ++--
 src/docs/src/setup/cluster.rst                     |   2 +-
 src/dreyfus/src/dreyfus_fabric_search.erl          |   4 +-
 src/fabric/src/fabric_util.erl                     |   2 +-
 src/fabric/src/fabric_view_map.erl                 |   2 +-
 src/fabric/src/fabric_view_reduce.erl              |   2 +-
 src/mango/test/06-text-default-field-test.py       |   3 -
 src/mango/test/07-text-custom-field-list-test.py   |   2 -
 src/mango/test/user_docs.py                        |   2 +-
 support/build_js.escript                           |   2 -
 32 files changed, 456 insertions(+), 606 deletions(-)
 delete mode 100644 share/server/json2.js
 create mode 100644 src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl


[couchdb] 12/15: Make remsh work with quoted cookie

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 95372279e0df17e4b6ef69771a6864d55a8ac88b
Author: jiahuili <Ji...@ibm.com>
AuthorDate: Mon Mar 27 20:16:47 2023 -0500

    Make remsh work with quoted cookie
    
    Allow spaces and other special characters in cookies.
    
    Test:
      First set the cookie in `vm.args`, then run the script below
      e.g.: `-setcookie 'a b\n\t\xd#{}()[]$&^!-=+?|//c\\d\\\e\\\\f'`
         or `-setcookie "a b\n\t\xd#{}()[]$&^!-=+?|//c\\d\\\e\\\\f"`
    
      ```
      make release
      cd rel/couchdb
      ./bin/couchdb
      ./bin/remsh
      ```
---
 rel/overlay/bin/remsh | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/rel/overlay/bin/remsh b/rel/overlay/bin/remsh
index de37d6cc2..1804336b5 100755
--- a/rel/overlay/bin/remsh
+++ b/rel/overlay/bin/remsh
@@ -49,10 +49,14 @@ NODE="${NODE:-$DEFAULT_NODE}"
 
 # If present, extract cookie from ERL_FLAGS
 # This is used by the CouchDB Dockerfile and Helm chart
-COOKIE=$(echo "$ERL_FLAGS" | sed 's/^.*setcookie \([^ ][^ ]*\).*$/\1/g')
+COOKIE=$(echo "$ERL_FLAGS" | sed -r '
+  s/.*-setcookie[ ]*['\''](.*)['\''].*/\1/
+  s/.*-setcookie[ ]*["](.*)["].*/\1/
+  s/.*-setcookie[ ]*([^ ]*).*/\1/
+')
 if test -f "$ARGS_FILE"; then
 # else attempt to extract from vm.args
-  ARGS_FILE_COOKIE=$(awk '$1=="-setcookie"{print $2}' "$ARGS_FILE")
+  ARGS_FILE_COOKIE=$(awk '$1=="-setcookie"{st=index($0," "); print substr($0,st+1)}' "$ARGS_FILE" | tr -d \" | tr -d \')
   COOKIE="${COOKIE:-$ARGS_FILE_COOKIE}"
 fi
 
@@ -111,7 +115,12 @@ fi
 
 # If present, strip -name or -setcookie from ERL_FLAGS
 # to avoid conflicts with the cli parameters
-ERL_FLAGS_CLEAN=$(echo "$ERL_FLAGS" | sed 's/-setcookie \([^ ][^ ]*\)//g' | sed 's/-name \([^ ][^ ]*\)//g')
+ERL_FLAGS_CLEAN=$(echo "$ERL_FLAGS" | sed -r '
+  s/-setcookie[ ]*['\''].*['\'']//
+  s/-setcookie[ ]*["].*["]//
+  s/-setcookie[ ]*[^ ]*//
+  s/-name[ ]*[^ ]*//
+')
 
 if [ -z "${COOKIE}" ]; then
     echo "No Erlang cookie could be found, please specify with -c" >&2
@@ -120,11 +129,11 @@ fi
 
 if [ -z "$TLSCONF" ]; then
   exec env ERL_FLAGS="$ERL_FLAGS_CLEAN" "$BINDIR/erl" -boot "$ROOTDIR/releases/$APP_VSN/start_clean" \
-      -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie $COOKIE \
+      -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie "$COOKIE" \
       "$@"
 else
   exec env ERL_FLAGS="$ERL_FLAGS_CLEAN" "$BINDIR/erl" -boot "$ROOTDIR/releases/$APP_VSN/start_clean" \
-      -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie $COOKIE \
+      -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie "$COOKIE" \
       -proto_dist inet_tls -ssl_dist_optfile $TLSCONF \
       "$@"
 fi
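
As an aside, a minimal shell sketch (hypothetical ERL_FLAGS value) of why the
multi-pattern sed script matters: the old single pattern stopped at the first
space, while the new one recovers the whole quoted cookie.

```
ERL_FLAGS="-name x@y -setcookie 'a b c'"

# Old extraction stops at the first space inside the quoted cookie:
echo "$ERL_FLAGS" | sed 's/^.*setcookie \([^ ][^ ]*\).*$/\1/g'
# prints: 'a

# New extraction tries single-quoted, double-quoted, then bare cookies in turn:
echo "$ERL_FLAGS" | sed -r "
  s/.*-setcookie[ ]*['](.*)['].*/\1/
  s/.*-setcookie[ ]*[\"](.*)[\"].*/\1/
  s/.*-setcookie[ ]*([^ ]*).*/\1/
"
# prints: a b c
```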


[couchdb] 01/15: Fix replication _scheduler/docs total_rows

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 453e3c52fbaa898c632725ad12e607dd784af1db
Author: Jay Doane <ja...@apache.org>
AuthorDate: Mon Jan 9 21:34:29 2023 -0800

    Fix replication _scheduler/docs total_rows
    
    The total_rows property was decremented by one to account for the
    automatically created VDU design doc that was included in the total. Now
    that a BDU has replaced the VDU [1], total_rows is one less than it
    should be.
    
    This removes the decrement so that total_rows equals the actual doc
    count.
    
    [1] https://github.com/apache/couchdb/pull/4274
---
 .../src/couch_replicator_httpd_util.erl            |  8 +--
 .../couch_replicator_scheduler_docs_tests.erl      | 77 ++++++++++++++++++++++
 2 files changed, 78 insertions(+), 7 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_httpd_util.erl b/src/couch_replicator/src/couch_replicator_httpd_util.erl
index ddcc179d4..17efee3b3 100644
--- a/src/couch_replicator/src/couch_replicator_httpd_util.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd_util.erl
@@ -158,7 +158,7 @@ docs_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
     Parts =
         case couch_util:get_value(total, Meta) of
             undefined -> [];
-            Total -> [io_lib:format("\"total_rows\":~p", [adjust_total(Total)])]
+            Total -> [io_lib:format("\"total_rows\":~p", [Total])]
         end ++
             case couch_util:get_value(offset, Meta) of
                 undefined -> [];
@@ -193,9 +193,3 @@ row_to_json(Row) ->
     Doc0 = couch_util:get_value(doc, Row),
     Doc1 = update_db_name(Doc0),
     ?JSON_ENCODE(Doc1).
-
-%% Adjust Total as there is an automatically created validation design doc
-adjust_total(Total) when is_integer(Total), Total > 0 ->
-    Total - 1;
-adjust_total(Total) when is_integer(Total) ->
-    0.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl
new file mode 100644
index 000000000..bb71bf305
--- /dev/null
+++ b/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl
@@ -0,0 +1,77 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_scheduler_docs_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+scheduler_docs_test_() ->
+    {
+        foreach,
+        fun() ->
+            Ctx = couch_replicator_test_helper:test_setup(),
+            ok = config:set("replicator", "cluster_start_period", "0", false),
+            Opts = [{q, 1}, {n, 1}, ?ADMIN_CTX],
+            case fabric:create_db(<<"_replicator">>, Opts) of
+                ok -> ok;
+                {error, file_exists} -> ok
+            end,
+            Ctx
+        end,
+        fun(Ctx) ->
+            ok = config:delete("replicator", "cluster_start_period"),
+            ok = fabric:delete_db(<<"_replicator">>, [?ADMIN_CTX]),
+            couch_replicator_test_helper:test_teardown(Ctx)
+        end,
+        [
+            ?TDEF_FE(t_scheduler_docs_total_rows, 10)
+        ]
+    }.
+
+t_scheduler_docs_total_rows({_Ctx, {Source, Target}}) ->
+    SourceUrl = couch_replicator_test_helper:cluster_db_url(Source),
+    TargetUrl = couch_replicator_test_helper:cluster_db_url(Target),
+    RepDoc = jiffy:encode(
+        {[
+            {<<"source">>, SourceUrl},
+            {<<"target">>, TargetUrl}
+        ]}
+    ),
+    RepDocUrl = couch_replicator_test_helper:cluster_db_url(
+        list_to_binary("/_replicator/" ++ ?docid())
+    ),
+    {ok, 201, _, _} = test_request:put(binary_to_list(RepDocUrl), [], RepDoc),
+    SchedulerDocsUrl =
+        couch_replicator_test_helper:cluster_db_url(<<"/_scheduler/docs">>),
+    Body = test_util:wait(
+        fun() ->
+            case test_request:get(binary_to_list(SchedulerDocsUrl), []) of
+                {ok, 200, _, JsonBody} ->
+                    Decoded = jiffy:decode(JsonBody, [return_maps]),
+                    case maps:get(<<"docs">>, Decoded) of
+                        [] ->
+                            wait;
+                        _ ->
+                            Decoded
+                    end;
+                _ ->
+                    wait
+            end
+        end,
+        10000,
+        1000
+    ),
+    Docs = maps:get(<<"docs">>, Body),
+    TotalRows = maps:get(<<"total_rows">>, Body),
+    ?assertEqual(TotalRows, length(Docs)),
+    ok.
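
For context, a hedged sketch (hypothetical host, credentials, and output) of
what the fixed endpoint returns: total_rows now matches the length of docs,
with no adjustment for the old VDU design doc.

```
curl -s http://adm:pass@127.0.0.1:5984/_scheduler/docs
# {"total_rows":1,"offset":0,"docs":[{"database":"_replicator","doc_id":"...", ...}]}
```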


[couchdb] 07/15: Remove json2 JS script and the try except around seal

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 7368332932c6781b27049563757a8f8601d80e13
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Thu Feb 2 11:02:40 2023 -0500

    Remove json2 JS script and the try except around seal
    
     * Remove json2: all supported runtimes have the built-in JSON object now.
    
     * Remove the try/except around seal, which worked around breakage in
       1.9.x: we don't support 1.9.x, and seal works in 1.8.x and in 60+ (at
       least; we don't support anything lower).
    
    1.8.5
    ```
    % js --help 2>&1 | grep 185
      185:  JavaScript 1.8.5 (default)
    % js
    js>  var a=[1,2,3]; Object.freeze(a); a[1]=100; a
    [1, 2, 3]
    ```
    
    78
    ```
    % js78
    js> var a=[1,2,3]; Object.freeze(a); a[1]=100; a
    [1, 2, 3]
    js>
    ```
    
    [1] https://bugzilla.mozilla.org/show_bug.cgi?id=449657#c0
---
 share/server/json2.js    | 482 -----------------------------------------------
 share/server/util.js     |   8 +-
 support/build_js.escript |   2 -
 3 files changed, 1 insertion(+), 491 deletions(-)

diff --git a/share/server/json2.js b/share/server/json2.js
deleted file mode 100644
index a1a3b170c..000000000
--- a/share/server/json2.js
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
-    http://www.JSON.org/json2.js
-    2010-03-20
-
-    Public Domain.
-
-    NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-    See http://www.JSON.org/js.html
-
-
-    This code should be minified before deployment.
-    See http://javascript.crockford.com/jsmin.html
-
-    USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
-    NOT CONTROL.
-
-
-    This file creates a global JSON object containing two methods: stringify
-    and parse.
-
-        JSON.stringify(value, replacer, space)
-            value       any JavaScript value, usually an object or array.
-
-            replacer    an optional parameter that determines how object
-                        values are stringified for objects. It can be a
-                        function or an array of strings.
-
-            space       an optional parameter that specifies the indentation
-                        of nested structures. If it is omitted, the text will
-                        be packed without extra whitespace. If it is a number,
-                        it will specify the number of spaces to indent at each
-                        level. If it is a string (such as '\t' or '&nbsp;'),
-                        it contains the characters used to indent at each level.
-
-            This method produces a JSON text from a JavaScript value.
-
-            When an object value is found, if the object contains a toJSON
-            method, its toJSON method will be called and the result will be
-            stringified. A toJSON method does not serialize: it returns the
-            value represented by the name/value pair that should be serialized,
-            or undefined if nothing should be serialized. The toJSON method
-            will be passed the key associated with the value, and this will be
-            bound to the value
-
-            For example, this would serialize Dates as ISO strings.
-
-                Date.prototype.toJSON = function (key) {
-                    function f(n) {
-                        // Format integers to have at least two digits.
-                        return n < 10 ? '0' + n : n;
-                    }
-
-                    return this.getUTCFullYear()   + '-' +
-                         f(this.getUTCMonth() + 1) + '-' +
-                         f(this.getUTCDate())      + 'T' +
-                         f(this.getUTCHours())     + ':' +
-                         f(this.getUTCMinutes())   + ':' +
-                         f(this.getUTCSeconds())   + 'Z';
-                };
-
-            You can provide an optional replacer method. It will be passed the
-            key and value of each member, with this bound to the containing
-            object. The value that is returned from your method will be
-            serialized. If your method returns undefined, then the member will
-            be excluded from the serialization.
-
-            If the replacer parameter is an array of strings, then it will be
-            used to select the members to be serialized. It filters the results
-            such that only members with keys listed in the replacer array are
-            stringified.
-
-            Values that do not have JSON representations, such as undefined or
-            functions, will not be serialized. Such values in objects will be
-            dropped; in arrays they will be replaced with null. You can use
-            a replacer function to replace those with JSON values.
-            JSON.stringify(undefined) returns undefined.
-
-            The optional space parameter produces a stringification of the
-            value that is filled with line breaks and indentation to make it
-            easier to read.
-
-            If the space parameter is a non-empty string, then that string will
-            be used for indentation. If the space parameter is a number, then
-            the indentation will be that many spaces.
-
-            Example:
-
-            text = JSON.stringify(['e', {pluribus: 'unum'}]);
-            // text is '["e",{"pluribus":"unum"}]'
-
-
-            text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
-            // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
-
-            text = JSON.stringify([new Date()], function (key, value) {
-                return this[key] instanceof Date ?
-                    'Date(' + this[key] + ')' : value;
-            });
-            // text is '["Date(---current time---)"]'
-
-
-        JSON.parse(text, reviver)
-            This method parses a JSON text to produce an object or array.
-            It can throw a SyntaxError exception.
-
-            The optional reviver parameter is a function that can filter and
-            transform the results. It receives each of the keys and values,
-            and its return value is used instead of the original value.
-            If it returns what it received, then the structure is not modified.
-            If it returns undefined then the member is deleted.
-
-            Example:
-
-            // Parse the text. Values that look like ISO date strings will
-            // be converted to Date objects.
-
-            myData = JSON.parse(text, function (key, value) {
-                var a;
-                if (typeof value === 'string') {
-                    a =
-/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
-                    if (a) {
-                        return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
-                            +a[5], +a[6]));
-                    }
-                }
-                return value;
-            });
-
-            myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
-                var d;
-                if (typeof value === 'string' &&
-                        value.slice(0, 5) === 'Date(' &&
-                        value.slice(-1) === ')') {
-                    d = new Date(value.slice(5, -1));
-                    if (d) {
-                        return d;
-                    }
-                }
-                return value;
-            });
-
-
-    This is a reference implementation. You are free to copy, modify, or
-    redistribute.
-*/
-
-/*jslint evil: true, strict: false */
-
-/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
-    call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
-    getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
-    lastIndex, length, parse, prototype, push, replace, slice, stringify,
-    test, toJSON, toString, valueOf
-*/
-
-
-// Create a JSON object only if one does not already exist. We create the
-// methods in a closure to avoid creating global variables.
-
-if (!this.JSON) {
-    this.JSON = {};
-}
-
-(function () {
-
-    function f(n) {
-        // Format integers to have at least two digits.
-        return n < 10 ? '0' + n : n;
-    }
-
-    if (typeof Date.prototype.toJSON !== 'function') {
-
-        Date.prototype.toJSON = function (key) {
-
-            return isFinite(this.valueOf()) ?
-                   this.getUTCFullYear()   + '-' +
-                 f(this.getUTCMonth() + 1) + '-' +
-                 f(this.getUTCDate())      + 'T' +
-                 f(this.getUTCHours())     + ':' +
-                 f(this.getUTCMinutes())   + ':' +
-                 f(this.getUTCSeconds())   + 'Z' : null;
-        };
-
-        String.prototype.toJSON =
-        Number.prototype.toJSON =
-        Boolean.prototype.toJSON = function (key) {
-            return this.valueOf();
-        };
-    }
-
-    var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
-        escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
-        gap,
-        indent,
-        meta = {    // table of character substitutions
-            '\b': '\\b',
-            '\t': '\\t',
-            '\n': '\\n',
-            '\f': '\\f',
-            '\r': '\\r',
-            '"' : '\\"',
-            '\\': '\\\\'
-        },
-        rep;
-
-
-    function quote(string) {
-
-// If the string contains no control characters, no quote characters, and no
-// backslash characters, then we can safely slap some quotes around it.
-// Otherwise we must also replace the offending characters with safe escape
-// sequences.
-
-        escapable.lastIndex = 0;
-        return escapable.test(string) ?
-            '"' + string.replace(escapable, function (a) {
-                var c = meta[a];
-                return typeof c === 'string' ? c :
-                    '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
-            }) + '"' :
-            '"' + string + '"';
-    }
-
-
-    function str(key, holder) {
-
-// Produce a string from holder[key].
-
-        var i,          // The loop counter.
-            k,          // The member key.
-            v,          // The member value.
-            length,
-            mind = gap,
-            partial,
-            value = holder[key];
-
-// If the value has a toJSON method, call it to obtain a replacement value.
-
-        if (value && typeof value === 'object' &&
-                typeof value.toJSON === 'function') {
-            value = value.toJSON(key);
-        }
-
-// If we were called with a replacer function, then call the replacer to
-// obtain a replacement value.
-
-        if (typeof rep === 'function') {
-            value = rep.call(holder, key, value);
-        }
-
-// What happens next depends on the value's type.
-
-        switch (typeof value) {
-        case 'string':
-            return quote(value);
-
-        case 'number':
-
-// JSON numbers must be finite. Encode non-finite numbers as null.
-
-            return isFinite(value) ? String(value) : 'null';
-
-        case 'boolean':
-        case 'null':
-
-// If the value is a boolean or null, convert it to a string. Note:
-// typeof null does not produce 'null'. The case is included here in
-// the remote chance that this gets fixed someday.
-
-            return String(value);
-
-// If the type is 'object', we might be dealing with an object or an array or
-// null.
-
-        case 'object':
-
-// Due to a specification blunder in ECMAScript, typeof null is 'object',
-// so watch out for that case.
-
-            if (!value) {
-                return 'null';
-            }
-
-// Make an array to hold the partial results of stringifying this object value.
-
-            gap += indent;
-            partial = [];
-
-// Is the value an array?
-
-            if (Object.prototype.toString.apply(value) === '[object Array]') {
-
-// The value is an array. Stringify every element. Use null as a placeholder
-// for non-JSON values.
-
-                length = value.length;
-                for (i = 0; i < length; i += 1) {
-                    partial[i] = str(i, value) || 'null';
-                }
-
-// Join all of the elements together, separated with commas, and wrap them in
-// brackets.
-
-                v = partial.length === 0 ? '[]' :
-                    gap ? '[\n' + gap +
-                            partial.join(',\n' + gap) + '\n' +
-                                mind + ']' :
-                          '[' + partial.join(',') + ']';
-                gap = mind;
-                return v;
-            }
-
-// If the replacer is an array, use it to select the members to be stringified.
-
-            if (rep && typeof rep === 'object') {
-                length = rep.length;
-                for (i = 0; i < length; i += 1) {
-                    k = rep[i];
-                    if (typeof k === 'string') {
-                        v = str(k, value);
-                        if (v) {
-                            partial.push(quote(k) + (gap ? ': ' : ':') + v);
-                        }
-                    }
-                }
-            } else {
-
-// Otherwise, iterate through all of the keys in the object.
-
-                for (k in value) {
-                    if (Object.hasOwnProperty.call(value, k)) {
-                        v = str(k, value);
-                        if (v) {
-                            partial.push(quote(k) + (gap ? ': ' : ':') + v);
-                        }
-                    }
-                }
-            }
-
-// Join all of the member texts together, separated with commas,
-// and wrap them in braces.
-
-            v = partial.length === 0 ? '{}' :
-                gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' +
-                        mind + '}' : '{' + partial.join(',') + '}';
-            gap = mind;
-            return v;
-        }
-    }
-
-// If the JSON object does not yet have a stringify method, give it one.
-
-    if (typeof JSON.stringify !== 'function') {
-        JSON.stringify = function (value, replacer, space) {
-
-// The stringify method takes a value and an optional replacer, and an optional
-// space parameter, and returns a JSON text. The replacer can be a function
-// that can replace values, or an array of strings that will select the keys.
-// A default replacer method can be provided. Use of the space parameter can
-// produce text that is more easily readable.
-
-            var i;
-            gap = '';
-            indent = '';
-
-// If the space parameter is a number, make an indent string containing that
-// many spaces.
-
-            if (typeof space === 'number') {
-                for (i = 0; i < space; i += 1) {
-                    indent += ' ';
-                }
-
-// If the space parameter is a string, it will be used as the indent string.
-
-            } else if (typeof space === 'string') {
-                indent = space;
-            }
-
-// If there is a replacer, it must be a function or an array.
-// Otherwise, throw an error.
-
-            rep = replacer;
-            if (replacer && typeof replacer !== 'function' &&
-                    (typeof replacer !== 'object' ||
-                     typeof replacer.length !== 'number')) {
-                throw new Error('JSON.stringify');
-            }
-
-// Make a fake root object containing our value under the key of ''.
-// Return the result of stringifying the value.
-
-            return str('', {'': value});
-        };
-    }
-
-
-// If the JSON object does not yet have a parse method, give it one.
-
-    if (typeof JSON.parse !== 'function') {
-        JSON.parse = function (text, reviver) {
-
-// The parse method takes a text and an optional reviver function, and returns
-// a JavaScript value if the text is a valid JSON text.
-
-            var j;
-
-            function walk(holder, key) {
-
-// The walk method is used to recursively walk the resulting structure so
-// that modifications can be made.
-
-                var k, v, value = holder[key];
-                if (value && typeof value === 'object') {
-                    for (k in value) {
-                        if (Object.hasOwnProperty.call(value, k)) {
-                            v = walk(value, k);
-                            if (v !== undefined) {
-                                value[k] = v;
-                            } else {
-                                delete value[k];
-                            }
-                        }
-                    }
-                }
-                return reviver.call(holder, key, value);
-            }
-
-
-// Parsing happens in four stages. In the first stage, we replace certain
-// Unicode characters with escape sequences. JavaScript handles many characters
-// incorrectly, either silently deleting them, or treating them as line endings.
-
-            text = String(text);
-            cx.lastIndex = 0;
-            if (cx.test(text)) {
-                text = text.replace(cx, function (a) {
-                    return '\\u' +
-                        ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
-                });
-            }
-
-// In the second stage, we run the text against regular expressions that look
-// for non-JSON patterns. We are especially concerned with '()' and 'new'
-// because they can cause invocation, and '=' because it can cause mutation.
-// But just to be safe, we want to reject all unexpected forms.
-
-// We split the second stage into 4 regexp operations in order to work around
-// crippling inefficiencies in IE's and Safari's regexp engines. First we
-// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
-// replace all simple value tokens with ']' characters. Third, we delete all
-// open brackets that follow a colon or comma or that begin the text. Finally,
-// we look to see that the remaining characters are only whitespace or ']' or
-// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
-
-            if (/^[\],:{}\s]*$/.
-test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@').
-replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']').
-replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
-
-// In the third stage we use the eval function to compile the text into a
-// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
-// in JavaScript: it can begin a block or an object literal. We wrap the text
-// in parens to eliminate the ambiguity.
-
-                j = eval('(' + text + ')');
-
-// In the optional fourth stage, we recursively walk the new structure, passing
-// each name/value pair to a reviver function for possible transformation.
-
-                return typeof reviver === 'function' ?
-                    walk({'': j}, '') : j;
-            }
-
-// If the text is not JSON parseable, then a SyntaxError is thrown.
-
-            throw new SyntaxError('JSON.parse');
-        };
-    }
-}());
diff --git a/share/server/util.js b/share/server/util.js
index f570acebd..aba56eaf2 100644
--- a/share/server/util.js
+++ b/share/server/util.js
@@ -117,13 +117,7 @@ var Couch = {
     };
   },
   recursivelySeal : function(obj) {
-    // seal() is broken in current Spidermonkey
-    try {
-      seal(obj);
-    } catch (x) {
-      // Sealing of arrays broken in some SpiderMonkey versions.
-      // https://bugzilla.mozilla.org/show_bug.cgi?id=449657
-    }
+    seal(obj);
     for (var propname in obj) {
       if (typeof obj[propname] == "object") {
         arguments.callee(obj[propname]);
diff --git a/support/build_js.escript b/support/build_js.escript
index 5ff45faed..194201a1e 100644
--- a/support/build_js.escript
+++ b/support/build_js.escript
@@ -34,7 +34,6 @@ main([]) ->
     end,
 
     JsFiles =  [
-        "share/server/json2.js",
         "share/server/dreyfus.js",
         "share/server/filter.js",
         "share/server/mimeparse.js",
@@ -47,7 +46,6 @@ main([]) ->
     ],
 
     CoffeeFiles = [
-        "share/server/json2.js",
         "share/server/dreyfus.js",
         "share/server/filter.js",
         "share/server/mimeparse.js",


[couchdb] 05/15: Upgrade hash algorithm for proxy auth (#4438)

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 05659effc7c6f080dd90e908ea693928f4f6722b
Author: Ronny Berndt <ro...@apache.org>
AuthorDate: Thu Feb 23 09:10:55 2023 +0100

    Upgrade hash algorithm for proxy auth (#4438)
    
    Proxy auth can now use one of the configured hash algorithms
    from chttpd_auth/hash_algorithms to decode authentication tokens.
---
 src/chttpd/test/eunit/chttpd_auth_tests.erl | 87 +++++++++++++++++++++++++++++
 src/couch/src/couch_httpd_auth.erl          | 15 +++--
 src/docs/src/api/server/authn.rst           | 16 +++---
 src/docs/src/config/auth.rst                | 16 ++++--
 4 files changed, 117 insertions(+), 17 deletions(-)

diff --git a/src/chttpd/test/eunit/chttpd_auth_tests.erl b/src/chttpd/test/eunit/chttpd_auth_tests.erl
index 7beda9bc7..7e7b94a12 100644
--- a/src/chttpd/test/eunit/chttpd_auth_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_auth_tests.erl
@@ -12,6 +12,9 @@
 
 -module(chttpd_auth_tests).
 
+-define(WORKING_HASHES, "sha256, sha512, sha, blake2s").
+-define(FAILING_HASHES, "md4, md5, ripemd160").
+
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 
@@ -24,6 +27,27 @@ setup() ->
 teardown(_Url) ->
     ok.
 
+setup_proxy_auth() ->
+    {StartCtx, ProxyCfgFile} = start_couch_with_cfg("{chttpd_auth, proxy_authentication_handler}"),
+    config:set("chttpd", "require_valid_user", "false", false),
+    config:set("chttpd_auth", "hash_algorithms", ?WORKING_HASHES, false),
+    config:set("chttpd_auth", "proxy_use_secret", "true", false),
+    config:set("chttpd_auth", "secret", "the_secret", false),
+    HashesShouldWork = re:split(?WORKING_HASHES, "\\s*,\\s*", [
+        trim, {return, binary}
+    ]),
+    HashesShouldFail = re:split(?FAILING_HASHES, "\\s*,\\s*", [trim, {return, binary}]),
+    SupportedHashAlgorithms = crypto:supports(hashs),
+    {{StartCtx, ProxyCfgFile}, HashesShouldWork, HashesShouldFail, SupportedHashAlgorithms}.
+
+teardown_proxy_auth({{Ctx, ProxyCfgFile}, _, _, _}) ->
+    ok = file:delete(ProxyCfgFile),
+    config:delete("chttpd_auth", "hash_algorithms", false),
+    config:delete("chttpd_auth", "secret", false),
+    config:delete("chttpd_auth", "proxy_use_secret", false),
+    config:delete("chttpd", "require_valid_user", false),
+    test_util:stop_couch(Ctx).
+
 require_valid_user_exception_test_() ->
     {
         "_up",
@@ -43,6 +67,20 @@ require_valid_user_exception_test_() ->
         }
     }.
 
+proxy_auth_test_() ->
+    {
+        "Testing hash algorithms for proxy auth",
+        {
+            setup,
+            fun setup_proxy_auth/0,
+            fun teardown_proxy_auth/1,
+            with([
+                ?TDEF(test_hash_algorithms_with_proxy_auth_should_work),
+                ?TDEF(test_hash_algorithms_with_proxy_auth_should_fail)
+            ])
+        }
+    }.
+
 set_require_user_false() ->
     ok = config:set("chttpd", "require_valid_user", "false", _Persist = false).
 
@@ -125,3 +163,52 @@ should_handle_require_valid_user_except_up_on_non_up_routes(_Url) ->
         set_require_user_except_for_up_true(),
         ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest))
     end).
+
+% Helper functions
+base_url() ->
+    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+    Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
+    "http://" ++ Addr ++ ":" ++ Port.
+
+append_to_cfg_chain(Cfg) ->
+    CfgDir = filename:dirname(lists:last(?CONFIG_CHAIN)),
+    CfgFile = filename:join([CfgDir, "chttpd_proxy_auth_cfg.ini"]),
+    CfgSect = io_lib:format("[chttpd]~nauthentication_handlers = ~s~n", [Cfg]),
+    ok = file:write_file(CfgFile, CfgSect),
+    ?CONFIG_CHAIN ++ [CfgFile].
+
+start_couch_with_cfg(Cfg) ->
+    CfgChain = append_to_cfg_chain(Cfg),
+    StartCtx = test_util:start_couch(CfgChain, [chttpd]),
+    ProxyCfgFile = lists:last(CfgChain),
+    {StartCtx, ProxyCfgFile}.
+
+% Test functions
+test_hash_algorithm([]) ->
+    ok;
+test_hash_algorithm([DefaultHashAlgorithm | DecodingHashAlgorithmsList] = _) ->
+    Secret = chttpd_util:get_chttpd_auth_config("secret"),
+    Token = couch_util:to_hex(couch_util:hmac(DefaultHashAlgorithm, Secret, "PROXY-USER")),
+    Headers = [
+        {"X-Auth-CouchDB-UserName", "PROXY-USER"},
+        {"X-Auth-CouchDB-Roles", "PROXY-USER-ROLE1, PROXY-USER-ROLE2"},
+        {"X-Auth-CouchDB-Token", Token}
+    ],
+    {ok, _, _, ReqBody} = test_request:get(base_url() ++ "/_session", Headers),
+    IsAuthenticatedViaProxy = couch_util:get_nested_json_value(
+        jiffy:decode(ReqBody), [<<"info">>, <<"authenticated">>]
+    ),
+    ?assertEqual(IsAuthenticatedViaProxy, <<"proxy">>),
+    test_hash_algorithm(DecodingHashAlgorithmsList).
+
+test_hash_algorithms_with_proxy_auth_should_work(
+    {_Ctx, WorkingHashes, _FailingHashes, SupportedHashAlgorithms} = _
+) ->
+    Hashes = couch_util:verify_hash_names(WorkingHashes, SupportedHashAlgorithms),
+    test_hash_algorithm(Hashes).
+
+test_hash_algorithms_with_proxy_auth_should_fail(
+    {_Ctx, _WorkingHashes, FailingHashes, SupportedHashAlgorithms} = _
+) ->
+    Hashes = couch_util:verify_hash_names(FailingHashes, SupportedHashAlgorithms),
+    ?assertThrow({not_found, _}, test_hash_algorithm(Hashes)).
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 4a7b217d1..72e0cd76f 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -201,18 +201,21 @@ proxy_auth_user(Req) ->
                         undefined ->
                             Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}};
                         Secret ->
-                            ExpectedToken = couch_util:to_hex(
-                                couch_util:hmac(sha, Secret, UserName)
-                            ),
-                            case header_value(Req, XHeaderToken) of
-                                Token when Token == ExpectedToken ->
+                            HashAlgorithms = couch_util:get_config_hash_algorithms(),
+                            Token = header_value(Req, XHeaderToken),
+                            VerifyTokens = fun(HashAlg) ->
+                                Hmac = couch_util:hmac(HashAlg, Secret, UserName),
+                                couch_passwords:verify(couch_util:to_hex(Hmac), Token)
+                            end,
+                            case lists:any(VerifyTokens, HashAlgorithms) of
+                                true ->
                                     Req#httpd{
                                         user_ctx = #user_ctx{
                                             name = ?l2b(UserName),
                                             roles = Roles
                                         }
                                     };
-                                _ ->
+                                false ->
                                     nil
                             end
                     end;
diff --git a/src/docs/src/api/server/authn.rst b/src/docs/src/api/server/authn.rst
index bffe0bf27..5d23ddb73 100644
--- a/src/docs/src/api/server/authn.rst
+++ b/src/docs/src/api/server/authn.rst
@@ -291,22 +291,24 @@ remotely authenticated user. By default, the client just needs to pass specific
 headers to CouchDB with related requests:
 
 - :config:option:`X-Auth-CouchDB-UserName <chttpd_auth/x_auth_username>`:
-  username;
+  username
 - :config:option:`X-Auth-CouchDB-Roles <chttpd_auth/x_auth_roles>`:
-  comma-separated (``,``) list of user roles;
+  comma-separated (``,``) list of user roles
 - :config:option:`X-Auth-CouchDB-Token <chttpd_auth/x_auth_token>`:
   authentication token. When
   :config:option:`proxy_use_secret <chttpd_auth/proxy_use_secret>`
   is set (which is strongly recommended!), this header provides an HMAC of the
   username to authenticate and the secret token to prevent requests from
-  untrusted sources. (Use the SHA1 of the username and sign with the secret)
+  untrusted sources. (Use one of the configured hash algorithms in
+  :config:option:`chttpd_auth/hash_algorithms <chttpd_auth/hash_algorithms>`
+  and sign the username with the secret)
 
 **Creating the token (example with openssl)**:
 
 .. code-block:: sh
 
-    echo -n "foo" | openssl dgst -sha1 -hmac "the_secret"
-    # (stdin)= 22047ebd7c4ec67dfbcbad7213a693249dbfbf86
+    echo -n "foo" | openssl dgst -sha256 -hmac "the_secret"
+    # (stdin)= 3f0786e96b20b0102b77f1a49c041be6977cfb3bf78c41a12adc121cd9b4e68a
 
 **Request**:
 
@@ -318,7 +320,7 @@ headers to CouchDB with related requests:
     Content-Type: application/json; charset=utf-8
     X-Auth-CouchDB-Roles: users,blogger
     X-Auth-CouchDB-UserName: foo
-    X-Auth-CouchDB-Token: 22047ebd7c4ec67dfbcbad7213a693249dbfbf86
+    X-Auth-CouchDB-Token: 3f0786e96b20b0102b77f1a49c041be6977cfb3bf78c41a12adc121cd9b4e68a
 
 **Response**:
 
@@ -351,7 +353,7 @@ headers to CouchDB with related requests:
         }
     }
 
-Note that you don't need to request :ref:`session <api/auth/session>`
+Note that you don't need to request a :ref:`session <api/auth/session>`
 to be authenticated by this method if all required HTTP headers are provided.
 
 .. _api/auth/jwt:
diff --git a/src/docs/src/config/auth.rst b/src/docs/src/config/auth.rst
index d43810054..7b9e5860f 100644
--- a/src/docs/src/config/auth.rst
+++ b/src/docs/src/config/auth.rst
@@ -196,14 +196,22 @@ Authentication Configuration
             [chttpd_auth]
             authentication_redirect = /_utils/session.html
 
-    .. config:option:: hash_algorithms :: Supported hash algorithms for cookie auth
+    .. config:option:: hash_algorithms :: Supported hash algorithms for cookie and \
+            proxy auth
 
         .. versionadded:: 3.3
 
-        Sets the HMAC hash algorithm used for cookie authentication. You can provide a
-        comma-separated list of hash algorithms. New cookie sessions or
+        .. note::
+            Until CouchDB version 3.3.1, :ref:`api/auth/proxy` used only the hash
+            algorithm ``sha1`` as validation of
+            :config:option:`X-Auth-CouchDB-Token <chttpd_auth/x_auth_token>`.
+
+        Sets the HMAC hash algorithm used for cookie and proxy authentication. You can
+        provide a comma-separated list of hash algorithms. New cookie sessions or
         session updates are calculated with the first hash algorithm. All values in the
-        list can be used to decode the cookie session. ::
+        list can be used to decode the cookie session and the token
+        :config:option:`X-Auth-CouchDB-Token <chttpd_auth/x_auth_token>` for
+        :ref:`api/auth/proxy`. ::
 
             [chttpd_auth]
             hash_algorithms = sha256, sha


[couchdb] 03/15: Ensure design docs are uploaded individually when replicating with _bulk_get

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 96578cb18a216af01d977939869e3d2bc6183f78
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Jan 11 23:51:05 2023 -0500

    Ensure design docs are uploaded individually when replicating with _bulk_get
    
    Previously, when replication jobs used _bulk_get, they didn't upload
    design docs individually the way they do when not using _bulk_get.
    
    Here we are preserving behavior that the replicator already had, without
    _bulk_get usage, for more than 2 years. It was introduced in #2426.
    Related to issues #2415 and #2413.
    
    Add tests to cover both the attachment and ddoc cases. meck:num_calls/3 is
    helpful, as it lets us assert precisely which API function was called and
    how many times.
---
 .../src/couch_replicator_worker.erl                | 22 ++++--
 .../test/eunit/couch_replicator_bulk_get_tests.erl | 79 +++++++++++++++++++++-
 2 files changed, 93 insertions(+), 8 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index d8f872388..46e4a6e94 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -297,17 +297,25 @@ queue_fetch_loop(#fetch_st{} = St) ->
         {changes, ChangesManager, Changes, ReportSeq} ->
             % Find missing revisions (POST to _revs_diff)
             {IdRevs, RdSt1} = find_missing(Changes, Target, Parent, RdSt),
-            {Docs, BgSt1} = bulk_get(UseBulkGet, Source, IdRevs, Parent, BgSt),
-            % Documents without attachments can be uploaded right away
-            BatchFun = fun({_, #doc{} = Doc}) ->
-                ok = gen_server:call(Parent, {batch_doc, Doc}, infinity)
+            % Filter out and handle design docs individually
+            DDocFilter = fun
+                ({<<?DESIGN_DOC_PREFIX, _/binary>>, _Rev}, _PAs) -> true;
+                ({_Id, _Rev}, _PAs) -> false
             end,
-            lists:foreach(BatchFun, lists:sort(maps:to_list(Docs))),
-            % Fetch individually if _bulk_get failed or there are attachments
+            DDocIdRevs = maps:filter(DDocFilter, IdRevs),
             FetchFun = fun({Id, Rev}, PAs) ->
                 ok = gen_server:call(Parent, {fetch_doc, {Id, [Rev], PAs}}, infinity)
             end,
-            maps:map(FetchFun, maps:without(maps:keys(Docs), IdRevs)),
+            maps:map(FetchFun, DDocIdRevs),
+            % IdRevs1 is all the docs without design docs. Bulk get those.
+            IdRevs1 = maps:without(maps:keys(DDocIdRevs), IdRevs),
+            {Docs, BgSt1} = bulk_get(UseBulkGet, Source, IdRevs1, Parent, BgSt),
+            BatchFun = fun({_, #doc{} = Doc}) ->
+                ok = gen_server:call(Parent, {batch_doc, Doc}, infinity)
+            end,
+            lists:foreach(BatchFun, lists:sort(maps:to_list(Docs))),
+            % Individually upload docs with attachments.
+            maps:map(FetchFun, maps:without(maps:keys(Docs), IdRevs1)),
             {ok, Stats} = gen_server:call(Parent, flush, infinity),
             ok = report_seq_done(Cp, ReportSeq, Stats),
             couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_bulk_get_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_bulk_get_tests.erl
index 2ecd0f4ee..f0d9569db 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_bulk_get_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_bulk_get_tests.erl
@@ -26,7 +26,11 @@ bulk_get_test_() ->
             fun couch_replicator_test_helper:test_teardown/1,
             [
                 ?TDEF_FE(use_bulk_get),
+                ?TDEF_FE(use_bulk_get_with_ddocs),
+                ?TDEF_FE(use_bulk_get_with_attachments),
                 ?TDEF_FE(dont_use_bulk_get),
+                ?TDEF_FE(dont_use_bulk_get_ddocs),
+                ?TDEF_FE(dont_use_bulk_get_attachments),
                 ?TDEF_FE(job_enable_overrides_global_disable),
                 ?TDEF_FE(global_disable_works)
             ]
@@ -39,7 +43,33 @@ use_bulk_get({_Ctx, {Source, Target}}) ->
     replicate(Source, Target, true),
     BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
     JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
     ?assertEqual(0, JustGets),
+    ?assertEqual(0, DocUpdates),
+    ?assert(BulkGets >= 1),
+    compare_dbs(Source, Target).
+
+use_bulk_get_with_ddocs({_Ctx, {Source, Target}}) ->
+    populate_db_ddocs(Source, ?DOC_COUNT),
+    meck:new(couch_replicator_api_wrap, [passthrough]),
+    replicate(Source, Target, true),
+    BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
+    JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
+    ?assertEqual(?DOC_COUNT, JustGets),
+    ?assertEqual(?DOC_COUNT, DocUpdates),
+    ?assert(BulkGets >= 1),
+    compare_dbs(Source, Target).
+
+use_bulk_get_with_attachments({_Ctx, {Source, Target}}) ->
+    populate_db_atts(Source, ?DOC_COUNT),
+    meck:new(couch_replicator_api_wrap, [passthrough]),
+    replicate(Source, Target, true),
+    BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
+    JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
+    ?assertEqual(?DOC_COUNT, JustGets),
+    ?assertEqual(?DOC_COUNT, DocUpdates),
     ?assert(BulkGets >= 1),
     compare_dbs(Source, Target).
 
@@ -49,10 +79,36 @@ dont_use_bulk_get({_Ctx, {Source, Target}}) ->
     replicate(Source, Target, false),
     BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
     JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
     ?assertEqual(0, BulkGets),
+    ?assertEqual(0, DocUpdates),
     ?assertEqual(?DOC_COUNT, JustGets),
     compare_dbs(Source, Target).
 
+dont_use_bulk_get_ddocs({_Ctx, {Source, Target}}) ->
+    populate_db_ddocs(Source, ?DOC_COUNT),
+    meck:new(couch_replicator_api_wrap, [passthrough]),
+    replicate(Source, Target, false),
+    BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
+    JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
+    ?assertEqual(0, BulkGets),
+    ?assertEqual(?DOC_COUNT, JustGets),
+    ?assertEqual(?DOC_COUNT, DocUpdates),
+    compare_dbs(Source, Target).
+
+dont_use_bulk_get_attachments({_Ctx, {Source, Target}}) ->
+    populate_db_atts(Source, ?DOC_COUNT),
+    meck:new(couch_replicator_api_wrap, [passthrough]),
+    replicate(Source, Target, false),
+    BulkGets = meck:num_calls(couch_replicator_api_wrap, bulk_get, 3),
+    JustGets = meck:num_calls(couch_replicator_api_wrap, open_doc_revs, 6),
+    DocUpdates = meck:num_calls(couch_replicator_api_wrap, update_doc, 4),
+    ?assertEqual(0, BulkGets),
+    ?assertEqual(?DOC_COUNT, JustGets),
+    ?assertEqual(?DOC_COUNT, DocUpdates),
+    compare_dbs(Source, Target).
+
 job_enable_overrides_global_disable({_Ctx, {Source, Target}}) ->
     populate_db(Source, ?DOC_COUNT),
     Persist = false,
@@ -78,10 +134,31 @@ global_disable_works({_Ctx, {Source, Target}}) ->
     compare_dbs(Source, Target).
 
 populate_db(DbName, DocCount) ->
-    Fun = fun(Id, Acc) -> [#doc{id = integer_to_binary(Id)} | Acc] end,
+    IdFun = fun(Id) -> integer_to_binary(Id) end,
+    Fun = fun(Id, Acc) -> [#doc{id = IdFun(Id)} | Acc] end,
+    Docs = lists:foldl(Fun, [], lists:seq(1, DocCount)),
+    {ok, _} = fabric:update_docs(DbName, Docs, [?ADMIN_CTX]).
+
+populate_db_ddocs(DbName, DocCount) ->
+    IdFun = fun(Id) -> <<"_design/", (integer_to_binary(Id))/binary>> end,
+    Fun = fun(Id, Acc) -> [#doc{id = IdFun(Id)} | Acc] end,
     Docs = lists:foldl(Fun, [], lists:seq(1, DocCount)),
     {ok, _} = fabric:update_docs(DbName, Docs, [?ADMIN_CTX]).
 
+populate_db_atts(DbName, DocCount) ->
+    IdFun = fun(Id) -> integer_to_binary(Id) end,
+    Fun = fun(Id, Acc) -> [#doc{id = IdFun(Id), atts = [att(<<"a">>)]} | Acc] end,
+    Docs = lists:foldl(Fun, [], lists:seq(1, DocCount)),
+    {ok, _} = fabric:update_docs(DbName, Docs, [?ADMIN_CTX]).
+
+att(Name) when is_binary(Name) ->
+    couch_att:new([
+        {name, Name},
+        {att_len, 1},
+        {type, <<"app/binary">>},
+        {data, <<"x">>}
+    ]).
+
 compare_dbs(Source, Target) ->
     couch_replicator_test_helper:cluster_compare_dbs(Source, Target).
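
A minimal erl-shell sketch of the new partitioning step, assuming
?DESIGN_DOC_PREFIX expands to "_design/" (values hypothetical): design docs
are filtered out for individual fetches, and the rest go through _bulk_get.

```
1> IdRevs = #{{<<"_design/d1">>, <<"1-a">>} => [], {<<"doc1">>, <<"1-b">>} => []}.
2> F = fun({<<"_design/", _/binary>>, _Rev}, _PAs) -> true; ({_Id, _Rev}, _PAs) -> false end.
3> maps:filter(F, IdRevs).  % fetched individually
#{{<<"_design/d1">>,<<"1-a">>} => []}
4> maps:without(maps:keys(maps:filter(F, IdRevs)), IdRevs).  % sent via _bulk_get
#{{<<"doc1">>,<<"1-b">>} => []}
```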
 


[couchdb] 14/15: Update Erlang 24 for CI

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ae791731ccadd16ec1fe5eafa6795726155d9bd9
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Fri Feb 24 13:25:42 2023 -0500

    Update Erlang 24 for CI
    
    Multi-arch images were updated in https://hub.docker.com/r/apache/couchdbci-debian/tags
---
 build-aux/Jenkinsfile.full | 2 +-
 build-aux/Jenkinsfile.pr   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
index efbf1db5a..e7abc9e47 100644
--- a/build-aux/Jenkinsfile.full
+++ b/build-aux/Jenkinsfile.full
@@ -14,7 +14,7 @@
 // the License.
 
 // Erlang version embedded in binary packages
-ERLANG_VERSION = '24.3.4.7'
+ERLANG_VERSION = '24.3.4.9'
 
 // Erlang version used for rebar in release process. CouchDB will not build from
 // the release tarball on Erlang versions older than this
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
index cf97c900f..4b14cc339 100644
--- a/build-aux/Jenkinsfile.pr
+++ b/build-aux/Jenkinsfile.pr
@@ -247,7 +247,7 @@ pipeline {
         axes {
           axis {
             name 'ERLANG_VERSION'
-            values '23.3.4.18', '24.3.4.7', '25.2'
+            values '23.3.4.18', '24.3.4.9', '25.2'
           }
           axis {
             name 'SM_VSN'


[couchdb] 09/15: fix(doc): reverse definition of `all_nodes` and `cluster_nodes` to match reality

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a93b46b08c22be15644cb8545ffc3f84c1caa6cc
Author: Jan Lehnardt <ja...@apache.org>
AuthorDate: Wed Mar 22 14:21:34 2023 +0100

    fix(doc): reverse definition of `all_nodes` and `cluster_nodes` to match reality
---
 src/docs/src/setup/cluster.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/docs/src/setup/cluster.rst b/src/docs/src/setup/cluster.rst
index ea6f6cd5c..186455e0f 100644
--- a/src/docs/src/setup/cluster.rst
+++ b/src/docs/src/setup/cluster.rst
@@ -277,7 +277,7 @@ should show all of the nodes in your cluster:
       ]
     }
 
-The ``all_nodes`` section is the list of *expected* nodes; the ``cluster_nodes``
+The ``cluster_nodes`` section is the list of *expected* nodes; the ``all_nodes``
 section is the list of *actually connected* nodes. Be sure the two lists match.
 
 Now your cluster is ready and available! You can send requests to any one of
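
With the corrected definitions, a hypothetical /_membership response for a
cluster where one expected node has not connected yet:

```
curl -s http://adm:pass@127.0.0.1:5984/_membership
# {
#   "all_nodes": ["couchdb@node1.example.com"],
#   "cluster_nodes": ["couchdb@node1.example.com", "couchdb@node2.example.com"]
# }
# cluster_nodes = expected members; all_nodes = actually connected nodes,
# so couchdb@node2.example.com is expected but not yet connected.
```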


[couchdb] 04/15: Bump recon to 2.5.3

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 87ceb1b01fa09974f008bddda868b264d9f31249
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Wed Jan 18 11:08:55 2023 -0500

    Bump recon to 2.5.3
    
    Changes since 2.5.2: https://github.com/ferd/recon/compare/2.5.2...2.5.3
---
 rebar.config.script | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rebar.config.script b/rebar.config.script
index 028aabe8f..2bdb17a49 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -156,7 +156,7 @@ DepDescs = [
 {jiffy,            "jiffy",            {tag, "CouchDB-1.0.9-2"}},
 {mochiweb,         "mochiweb",         {tag, "v3.1.1"}},
 {meck,             "meck",             {tag, "0.9.2"}},
-{recon,            "recon",            {tag, "2.5.2"}}
+{recon,            "recon",            {tag, "2.5.3"}}
 ].
 
 WithProper = lists:keyfind(with_proper, 1, CouchConfig) == {with_proper, true}.


[couchdb] 15/15: Bump Erlang 24 and 25 in CI

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 791eb7c1c5d8ac945814782649d878ff98116292
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Wed Mar 22 00:10:48 2023 -0400

    Bump Erlang 24 and 25 in CI
    
    It fixes an alias leak and prepares for new package updates.
---
 build-aux/Jenkinsfile.full | 2 +-
 build-aux/Jenkinsfile.pr   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
index e7abc9e47..daf4e2442 100644
--- a/build-aux/Jenkinsfile.full
+++ b/build-aux/Jenkinsfile.full
@@ -14,7 +14,7 @@
 // the License.
 
 // Erlang version embedded in binary packages
-ERLANG_VERSION = '24.3.4.9'
+ERLANG_VERSION = '24.3.4.10'
 
 // Erlang version used for rebar in release process. CouchDB will not build from
 // the release tarball on Erlang versions older than this
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
index 4b14cc339..4b749e1ac 100644
--- a/build-aux/Jenkinsfile.pr
+++ b/build-aux/Jenkinsfile.pr
@@ -247,7 +247,7 @@ pipeline {
         axes {
           axis {
             name 'ERLANG_VERSION'
-            values '23.3.4.18', '24.3.4.9', '25.2'
+            values '23.3.4.18', '24.3.4.10', '25.3'
           }
           axis {
             name 'SM_VSN'


[couchdb] 11/15: docs(_find): Remove redundancy from sample `_explain` response

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6d2ab3ea77c344597445c75fd9a213d9feaf562a
Author: Gabor Pali <ga...@ibm.com>
AuthorDate: Thu Apr 6 21:49:07 2023 +0200

    docs(_find): Remove redundancy from sample `_explain` response
    
    In `467e14ef`, the `use_index` field accidentally got duplicated in one of
    the sample responses.
---
 src/docs/src/api/database/find.rst | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/docs/src/api/database/find.rst b/src/docs/src/api/database/find.rst
index 0e511e6ad..6bd3bd963 100644
--- a/src/docs/src/api/database/find.rst
+++ b/src/docs/src/api/database/find.rst
@@ -1372,7 +1372,6 @@ it easier to take advantage of future improvements to query planning
                 }
             },
             "opts": {
-                "use_index": [],
                 "bookmark": "nil",
                 "limit": 2,
                 "skip": 0,


[couchdb] 02/15: Allow _local doc writes to the replicator dbs

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3dda4bd73579e4da1b686f2254d629cb3aefc9eb
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Mon Feb 6 15:04:33 2023 -0500

    Allow _local doc writes to the replicator dbs
    
    During the VDU -> BDU update we inadvertently blocked _local doc writes to
    _replicator dbs. This commit rectifies that.
    
    Add tests for _local, _design, and regular malformed docs for both the default
    _replicator db as well as for the prefixed version like $db/_replicator. For
    completeness, update the _scheduler/docs counts test to exercise both cases as
    well.
---
 src/couch_replicator/src/couch_replicator_docs.erl |   2 +
 .../couch_replicator_scheduler_docs_tests.erl      | 159 ++++++++++++++++-----
 .../test/eunit/couch_replicator_test_helper.erl    |  10 +-
 3 files changed, 130 insertions(+), 41 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 5fb86c4f5..85baa1c3e 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -225,6 +225,8 @@ save_rep_doc(DbName, Doc) ->
 -spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
 before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
     Doc;
+before_doc_update(#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
+    Doc;
 before_doc_update(#doc{} = Doc, _Db, ?REPLICATED_CHANGES) ->
     % Skip internal replicator updates
     Doc;
diff --git a/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl
index bb71bf305..7d868eb11 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_scheduler_docs_tests.erl
@@ -15,57 +15,95 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 
-scheduler_docs_test_() ->
+-define(JSON, {"Content-Type", "application/json"}).
+
+setup_replicator_db(Prefix) ->
+    RepDb =
+        case Prefix of
+            <<>> -> <<"_replicator">>;
+            <<_/binary>> -> <<Prefix/binary, "/_replicator">>
+        end,
+    Opts = [{q, 1}, {n, 1}, ?ADMIN_CTX],
+    case fabric:create_db(RepDb, Opts) of
+        ok -> ok;
+        {error, file_exists} -> ok
+    end,
+    RepDb.
+
+setup_main_replicator_db() ->
+    {Ctx, {Source, Target}} = couch_replicator_test_helper:test_setup(),
+    RepDb = setup_replicator_db(<<>>),
+    {Ctx, {RepDb, Source, Target}}.
+
+setup_prefixed_replicator_db() ->
+    {Ctx, {Source, Target}} = couch_replicator_test_helper:test_setup(),
+    RepDb = setup_replicator_db(?tempdb()),
+    {Ctx, {RepDb, Source, Target}}.
+
+teardown({Ctx, {RepDb, Source, Target}}) ->
+    ok = fabric:delete_db(RepDb, [?ADMIN_CTX]),
+    couch_replicator_test_helper:test_teardown({Ctx, {Source, Target}}).
+
+scheduler_docs_test_main_db_test_() ->
     {
         foreach,
-        fun() ->
-            Ctx = couch_replicator_test_helper:test_setup(),
-            ok = config:set("replicator", "cluster_start_period", "0", false),
-            Opts = [{q, 1}, {n, 1}, ?ADMIN_CTX],
-            case fabric:create_db(<<"_replicator">>, Opts) of
-                ok -> ok;
-                {error, file_exists} -> ok
-            end,
-            Ctx
-        end,
-        fun(Ctx) ->
-            ok = config:delete("replicator", "cluster_start_period"),
-            ok = fabric:delete_db(<<"_replicator">>, [?ADMIN_CTX]),
-            couch_replicator_test_helper:test_teardown(Ctx)
-        end,
+        fun setup_main_replicator_db/0,
+        fun teardown/1,
+        [
+            ?TDEF_FE(t_scheduler_docs_total_rows, 10)
+        ]
+    }.
+
+scheduler_docs_test_prefixed_db_test_() ->
+    {
+        foreach,
+        fun setup_prefixed_replicator_db/0,
+        fun teardown/1,
         [
             ?TDEF_FE(t_scheduler_docs_total_rows, 10)
         ]
     }.
 
-t_scheduler_docs_total_rows({_Ctx, {Source, Target}}) ->
+replicator_bdu_test_main_db_test_() ->
+    {
+        setup,
+        fun setup_prefixed_replicator_db/0,
+        fun teardown/1,
+        with([
+            ?TDEF(t_local_docs_can_be_written),
+            ?TDEF(t_design_docs_can_be_written),
+            ?TDEF(t_malformed_docs_are_rejected)
+        ])
+    }.
+
+replicator_bdu_test_prefixed_db_test_() ->
+    {
+        setup,
+        fun setup_prefixed_replicator_db/0,
+        fun teardown/1,
+        with([
+            ?TDEF(t_local_docs_can_be_written),
+            ?TDEF(t_design_docs_can_be_written),
+            ?TDEF(t_malformed_docs_are_rejected)
+        ])
+    }.
+
+t_scheduler_docs_total_rows({_Ctx, {RepDb, Source, Target}}) ->
     SourceUrl = couch_replicator_test_helper:cluster_db_url(Source),
     TargetUrl = couch_replicator_test_helper:cluster_db_url(Target),
-    RepDoc = jiffy:encode(
-        {[
-            {<<"source">>, SourceUrl},
-            {<<"target">>, TargetUrl}
-        ]}
-    ),
-    RepDocUrl = couch_replicator_test_helper:cluster_db_url(
-        list_to_binary("/_replicator/" ++ ?docid())
-    ),
-    {ok, 201, _, _} = test_request:put(binary_to_list(RepDocUrl), [], RepDoc),
+    RepDoc = #{<<"source">> => SourceUrl, <<"target">> => TargetUrl},
+    RepDocUrl = rep_doc_url(RepDb, ?docid()),
+    {201, _} = req(put, RepDocUrl, RepDoc),
     SchedulerDocsUrl =
-        couch_replicator_test_helper:cluster_db_url(<<"/_scheduler/docs">>),
+        case RepDb of
+            <<"_replicator">> -> url(<<"/_scheduler/docs">>);
+            <<_/binary>> -> url(<<"/_scheduler/docs/", RepDb/binary>>)
+        end,
     Body = test_util:wait(
         fun() ->
-            case test_request:get(binary_to_list(SchedulerDocsUrl), []) of
-                {ok, 200, _, JsonBody} ->
-                    Decoded = jiffy:decode(JsonBody, [return_maps]),
-                    case maps:get(<<"docs">>, Decoded) of
-                        [] ->
-                            wait;
-                        _ ->
-                            Decoded
-                    end;
-                _ ->
-                    wait
+            case req(get, SchedulerDocsUrl) of
+                {200, #{<<"docs">> := [_ | _]} = Decoded} -> Decoded;
+                {_, #{}} -> wait
             end
         end,
         10000,
@@ -75,3 +113,46 @@ t_scheduler_docs_total_rows({_Ctx, {Source, Target}}) ->
     TotalRows = maps:get(<<"total_rows">>, Body),
     ?assertEqual(TotalRows, length(Docs)),
     ok.
+
+t_local_docs_can_be_written({_Ctx, {RepDb, _, _}}) ->
+    DocUrl1 = rep_doc_url(RepDb, <<"_local/doc1">>),
+    ?assertMatch({201, _}, req(put, DocUrl1, #{})),
+    DocUrl2 = rep_doc_url(RepDb, <<"_local/doc2">>),
+    ?assertMatch({201, _}, req(put, DocUrl2, #{<<"foo">> => <<"bar">>})).
+
+t_design_docs_can_be_written({_Ctx, {RepDb, _, _}}) ->
+    DocUrl1 = rep_doc_url(RepDb, <<"_design/ddoc1">>),
+    ?assertMatch({201, _}, req(put, DocUrl1, #{})),
+    DocUrl2 = rep_doc_url(RepDb, <<"_design/ddoc2">>),
+    ?assertMatch({201, _}, req(put, DocUrl2, #{<<"foo">> => <<"bar">>})).
+
+t_malformed_docs_are_rejected({_Ctx, {RepDb, _, _}}) ->
+    % couch_replicator_parse holds most of the BDU validation logic
+    % Here we just test that the BDU works with a few basic cases
+    DocUrl1 = rep_doc_url(RepDb, <<"rep1">>),
+    ?assertMatch({403, _}, req(put, DocUrl1, #{})),
+    DocUrl2 = rep_doc_url(RepDb, <<"rep2">>),
+    ?assertMatch({403, _}, req(put, DocUrl2, #{<<"foo">> => <<"bar">>})).
+
+rep_doc_url(RepDb, DocId) when is_binary(RepDb) ->
+    rep_doc_url(binary_to_list(RepDb), DocId);
+rep_doc_url(RepDb, DocId) when is_binary(DocId) ->
+    rep_doc_url(RepDb, binary_to_list(DocId));
+rep_doc_url(RepDb, DocId) when is_list(RepDb), is_list(DocId) ->
+    UrlQuotedRepDb = mochiweb_util:quote_plus(RepDb),
+    url(UrlQuotedRepDb ++ "/" ++ DocId).
+
+url(UrlPath) ->
+    binary_to_list(couch_replicator_test_helper:cluster_db_url(UrlPath)).
+
+req(Method, Url) ->
+    Headers = [?JSON],
+    {ok, Code, _, Res} = test_request:request(Method, Url, Headers),
+    {Code, jiffy:decode(Res, [return_maps])}.
+
+req(Method, Url, #{} = Body) ->
+    req(Method, Url, jiffy:encode(Body));
+req(Method, Url, Body) ->
+    Headers = [?JSON],
+    {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
+    {Code, jiffy:decode(Res, [return_maps])}.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
index e28b4a28b..03c2f13be 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
@@ -154,6 +154,8 @@ cluster_url() ->
     Args = [?USERNAME, ?PASSWORD, Addr, Port],
     ?l2b(io_lib:format(Fmt, Args)).
 
+cluster_db_url(Path) when is_list(Path) ->
+    cluster_db_url(list_to_binary(Path));
 cluster_db_url(<<"/", _/binary>> = Path) ->
     <<(cluster_url())/binary, Path/binary>>;
 cluster_db_url(Path) ->
@@ -200,7 +202,9 @@ teardown_db(DbName) ->
 test_setup() ->
     Ctx = test_util:start_couch([fabric, mem3, chttpd, couch_replicator]),
     Hashed = couch_passwords:hash_admin_password(?PASSWORD),
-    ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist = false),
+    Persist = false,
+    ok = config:set("admins", ?USERNAME, ?b2l(Hashed), Persist),
+    ok = config:set("replicator", "cluster_start_period", "0", Persist),
     Source = setup_db(),
     Target = setup_db(),
     {Ctx, {Source, Target}}.
@@ -209,5 +213,7 @@ test_teardown({Ctx, {Source, Target}}) ->
     meck:unload(),
     teardown_db(Source),
     teardown_db(Target),
-    config:delete("admins", ?USERNAME, _Persist = false),
+    Persist = false,
+    config:delete("admins", ?USERNAME, Persist),
+    config:delete("replicator", "cluster_start_period", Persist),
     ok = test_util:stop_couch(Ctx).
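
As a usage illustration of the restored behaviour, here is a minimal Python
sketch (URL, credentials, and doc id are placeholders) that writes a _local
document to the default _replicator db; with the new before_doc_update clause
this succeeds with a 201, and the same applies to prefixed dbs like
$db/_replicator:

    import base64
    import json
    from urllib.request import Request, urlopen

    def put_json(url, body):
        # Rough Python analogue of the req/3 helper in the new tests.
        req = Request(url, data=json.dumps(body).encode(), method="PUT")
        req.add_header("Content-Type", "application/json")
        req.add_header("Authorization",
                       "Basic " + base64.b64encode(b"adm:password").decode())
        with urlopen(req) as resp:
            return resp.status, json.load(resp)

    # _local docs are not replication job definitions, so the BDU now
    # passes them through untouched. Before this fix, urlopen would have
    # raised HTTPError 403 here.
    status, body = put_json("http://127.0.0.1:5984/_replicator/_local/doc1",
                            {"foo": "bar"})
    print(status, body)  # expect: 201 {'ok': True, 'id': '_local/doc1', ...}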


[couchdb] 10/15: Fix list ordering and indentation in "Search" docs (#4476)

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6bc813ce58fe8231238af5b94686045389a9bcda
Author: Ronny Berndt <ro...@apache.org>
AuthorDate: Mon Mar 13 23:22:48 2023 +0100

    Fix list ordering and indentation in "Search" docs (#4476)
---
 src/docs/src/ddocs/search.rst | 90 +++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 46 deletions(-)

diff --git a/src/docs/src/ddocs/search.rst b/src/docs/src/ddocs/search.rst
index 42e0d9f22..98409f527 100644
--- a/src/docs/src/ddocs/search.rst
+++ b/src/docs/src/ddocs/search.rst
@@ -31,14 +31,12 @@ To create a search index, you add a JavaScript function to a design document in
 database. An index builds after processing one search request or after the server detects
 a document update. The ``index`` function takes the following parameters:
 
-1.  Field name - The name of the field you want to use when you query the index. If you
-set this parameter to ``default``, then this field is queried if no field is specified in
-the query syntax.
-
-2.  Data that you want to index, for example, ``doc.address.country``.
-
-3.  (Optional) The third parameter includes the following fields: ``boost``, ``facet``,
-``index``, and ``store``. These fields are described in more detail later.
+#. Field name - The name of the field you want to use when you query the index.
+   If you set this parameter to ``default``, then this field is queried if no field is
+   specified in the query syntax.
+#. Data that you want to index, for example, ``doc.address.country``.
+#. (Optional) The third parameter includes the following fields: ``boost``, ``facet``,
+   ``index``, and ``store``. These fields are described in more detail later.
 
 By default, a search index response returns 25 rows. The number of rows that is returned
 can be changed by using the ``limit`` parameter. Each response includes a ``bookmark``
@@ -96,64 +94,64 @@ in the ``index`` field to index that data.
 
 The ``index`` function takes three parameters, where the third parameter is optional.
 
-The first parameter is the name of the field you intend to use when querying the index,
-and which is specified in the Lucene syntax portion of subsequent queries.
-An example appears in the following query:
+#. The first parameter is the name of the field you intend to use when querying the index,
+   and which is specified in the Lucene syntax portion of subsequent queries.
+   An example appears in the following query:
 
-.. code-block:: javascript
+   .. code-block:: javascript
 
-    query=color:red
+        query=color:red
 
-The Lucene field name ``color`` is the first parameter of the ``index`` function.
+   The Lucene field name ``color`` is the first parameter of the ``index`` function.
 
-The ``query`` parameter can be abbreviated to ``q``,
-so another way of writing the query is as follows:
+   The ``query`` parameter can be abbreviated to ``q``,
+   so another way of writing the query is as follows:
 
-.. code-block:: javascript
+   .. code-block:: javascript
 
-    q=color:red
+       q=color:red
 
-If the special value ``"default"`` is used when you define the name,
-you do not have to specify a field name at query time.
-The effect is that the query can be simplified:
+   If the special value ``"default"`` is used when you define the name,
+   you do not have to specify a field name at query time.
+   The effect is that the query can be simplified:
 
-.. code-block:: javascript
+   .. code-block:: javascript
 
-    query=red
+       query=red
 
-The second parameter is the data to be indexed. Keep the following information
-in mind when you index your data:
+#. The second parameter is the data to be indexed. Keep the following information
+   in mind when you index your data:
 
-- This data must be only a string, number, or boolean. Other types will cause
-  an error to be thrown by the index function call.
+   - This data must be only a string, number, or boolean. Other types will cause
+     an error to be thrown by the index function call.
 
-- If an error is thrown when running your function, for this reason or others,
-  the document will not be added to that search index.
+   - If an error is thrown when running your function, for this reason or others,
+     the document will not be added to that search index.
 
-The third, optional, parameter is a JavaScript object with the following fields:
+#. The third, optional, parameter is a JavaScript object with the following fields:
 
-*Index function (optional parameter)*
+   *Index function (optional parameter)*
 
-* **boost** - A number that specifies the relevance in search results. Content that is
-  indexed with a boost value greater than 1 is more relevant than content that is
-  indexed without a boost value. Content with a boost value less than one is not so
-  relevant. Value is a positive floating point number. Default is 1 (no boosting).
+   * **boost** - A number that specifies the relevance in search results. Content that is
+     indexed with a boost value greater than 1 is more relevant than content that is
+     indexed without a boost value. Content with a boost value less than one is not so
+     relevant. Value is a positive floating point number. Default is 1 (no boosting).
 
-* **facet** - Creates a faceted index. See :ref:`Faceting <ddoc/search/faceting>`.
-  Values are ``true`` or ``false``. Default is ``false``.
+   * **facet** - Creates a faceted index. See :ref:`Faceting <ddoc/search/faceting>`.
+     Values are ``true`` or ``false``. Default is ``false``.
 
-* **index** - Whether the data is indexed, and if so, how. If set to ``false``, the data
-  cannot be used for searches, but can still be retrieved from the index if ``store`` is
-  set to ``true``. See :ref:`Analyzers <ddoc/search/analyzers>`.
-  Values are ``true`` or ``false``. Default is ``true``
+   * **index** - Whether the data is indexed, and if so, how. If set to ``false``, the
+     data cannot be used for searches, but can still be retrieved from the index if
+     ``store`` is set to ``true``. See :ref:`Analyzers <ddoc/search/analyzers>`.
+     Values are ``true`` or ``false``. Default is ``true``
 
-* **store** - If ``true``, the value is returned in the search result; otherwise,
-  the value is not returned. Values are ``true`` or ``false``. Default is ``false``.
+   * **store** - If ``true``, the value is returned in the search result; otherwise,
+     the value is not returned. Values are ``true`` or ``false``. Default is ``false``.
 
-.. note::
+   .. note::
 
-    If you do not set the ``store`` parameter,
-    the index data results for the document are not returned in response to a query.
+       If you do not set the ``store`` parameter,
+       the index data results for the document are not returned in response to a query.
 
 *Example search index function:*
 


[couchdb] 13/15: Employ `make python-black-update`

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f7a9b35275c8d13d4e1f8994dad9c07c7efdb64f
Author: Gabor Pali <ga...@ibm.com>
AuthorDate: Thu Feb 2 17:31:45 2023 +0100

    Employ `make python-black-update`
---
 build-aux/show-test-results.py                   | 4 ++--
 dev/run                                          | 2 --
 src/docs/ext/configdomain.py                     | 2 --
 src/mango/test/06-text-default-field-test.py     | 3 ---
 src/mango/test/07-text-custom-field-list-test.py | 2 --
 src/mango/test/user_docs.py                      | 2 +-
 6 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/build-aux/show-test-results.py b/build-aux/show-test-results.py
index edd6ca13f..ca28a3ee8 100755
--- a/build-aux/show-test-results.py
+++ b/build-aux/show-test-results.py
@@ -18,7 +18,7 @@ TEST_COLLECTIONS = {
 
 def _attrs(elem):
     ret = {}
-    for (k, v) in elem.attributes.items():
+    for k, v in elem.attributes.items():
         ret[k.lower()] = v
     return ret
 
@@ -381,7 +381,7 @@ def main():
         args.collection = ["eunit", "exunit", "mango", "javascript"]
 
     collections = []
-    for (name, pattern) in TEST_COLLECTIONS.items():
+    for name, pattern in TEST_COLLECTIONS.items():
         if name.lower() not in args.collection:
             continue
         collections.append(TestCollection(name, pattern))
diff --git a/dev/run b/dev/run
index 52db65255..df1a0b105 100755
--- a/dev/run
+++ b/dev/run
@@ -452,7 +452,6 @@ def boot_haproxy(ctx):
 
 
 def hack_default_ini(ctx, node, contents):
-
     contents = re.sub(
         "^\[httpd\]$",
         "[httpd]\nenable = true",
@@ -594,7 +593,6 @@ def check_node_alive(url):
 
 
 def set_boot_env(ctx):
-
     # fudge fauxton path
     if os.path.exists("src/fauxton/dist/release"):
         fauxton_root = "src/fauxton/dist/release"
diff --git a/src/docs/ext/configdomain.py b/src/docs/ext/configdomain.py
index 983ac44b0..671726e22 100644
--- a/src/docs/ext/configdomain.py
+++ b/src/docs/ext/configdomain.py
@@ -56,7 +56,6 @@ class ConfigObject(ObjectDescription):
 
 
 class ConfigIndex(Index):
-
     name = "ref"
     localname = "Configuration Quick Reference"
     shortname = "Config Quick Reference"
@@ -79,7 +78,6 @@ class ConfigIndex(Index):
 
 
 class ConfigDomain(Domain):
-
     name = "config"
     label = "CONFIG"
 
diff --git a/src/mango/test/06-text-default-field-test.py b/src/mango/test/06-text-default-field-test.py
index 7fdbd747d..1e88967f2 100644
--- a/src/mango/test/06-text-default-field-test.py
+++ b/src/mango/test/06-text-default-field-test.py
@@ -16,7 +16,6 @@ import unittest
 
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class NoDefaultFieldTest(mango.UserDocsTextTests):
-
     DEFAULT_FIELD = False
 
     def test_basic(self):
@@ -32,7 +31,6 @@ class NoDefaultFieldTest(mango.UserDocsTextTests):
 
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class NoDefaultFieldWithAnalyzer(mango.UserDocsTextTests):
-
     DEFAULT_FIELD = {"enabled": False, "analyzer": "keyword"}
 
     def test_basic(self):
@@ -47,7 +45,6 @@ class NoDefaultFieldWithAnalyzer(mango.UserDocsTextTests):
 
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class DefaultFieldWithCustomAnalyzer(mango.UserDocsTextTests):
-
     DEFAULT_FIELD = {"enabled": True, "analyzer": "keyword"}
 
     def test_basic(self):
diff --git a/src/mango/test/07-text-custom-field-list-test.py b/src/mango/test/07-text-custom-field-list-test.py
index 8514111c4..36b23a7f6 100644
--- a/src/mango/test/07-text-custom-field-list-test.py
+++ b/src/mango/test/07-text-custom-field-list-test.py
@@ -17,7 +17,6 @@ import user_docs
 
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class CustomFieldsTest(mango.UserDocsTextTests):
-
     FIELDS = [
         {"name": "favorites.[]", "type": "string"},
         {"name": "manager", "type": "boolean"},
@@ -163,7 +162,6 @@ class CustomFieldsTest(mango.UserDocsTextTests):
 
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class CustomFieldsExistsTest(mango.UserDocsTextTests):
-
     FIELDS = [
         {"name": "exists_field", "type": "string"},
         {"name": "exists_array.[]", "type": "string"},
diff --git a/src/mango/test/user_docs.py b/src/mango/test/user_docs.py
index 8f0ed2e04..4f06e0342 100644
--- a/src/mango/test/user_docs.py
+++ b/src/mango/test/user_docs.py
@@ -89,7 +89,7 @@ def add_view_indexes(db, kwargs):
         (["twitter"], "twitter"),
         (["ordered"], "ordered"),
     ]
-    for (idx, name) in indexes:
+    for idx, name in indexes:
         assert db.create_index(idx, name=name, ddoc=name) is True
 
 


[couchdb] 08/15: Avoid re-compiling filter view functions

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 28bdca6e56619eee507461545103fbcce118e1ce
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Mon Mar 13 12:46:57 2023 -0400

    Avoid re-compiling filter view functions
    
    The filter view feature re-uses a view's map function for filtering _changes
    feeds. Instead of accumulating emitted KVs, it uses a custom emit() function
    which just toggles a flag. However, in order to use this optimisation, the
    function was first compiled with the regular emit function, then its source
    was retrieved with the non-portable toSource() method, and the function was
    re-compiled in a new sandbox where emit is overridden.
    
    Instead of re-parsing and re-compiling, pass the sandbox to the compile
    function and compile filter views with the correct sandbox to start with.
    This also removes another non-portable function call.
---
 share/server/filter.js | 5 -----
 share/server/loop.js   | 5 ++++-
 share/server/util.js   | 4 ++--
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/share/server/filter.js b/share/server/filter.js
index 84f5cfc09..e3a62ab26 100644
--- a/share/server/filter.js
+++ b/share/server/filter.js
@@ -28,11 +28,6 @@ var Filter = (function() {
         respond([true, results]);
       },
       filter_view : function(fun, ddoc, args) {
-        // recompile
-        var sandbox = create_filter_sandbox();
-        var source = fun.toSource();
-        fun = evalcx(source, sandbox);
-
         var results = [];
         var docs = args[0];
         for (var i=0; i < docs.length; i++) {
diff --git a/share/server/loop.js b/share/server/loop.js
index 91dd1d6b0..70a143a45 100644
--- a/share/server/loop.js
+++ b/share/server/loop.js
@@ -87,7 +87,10 @@ var DDoc = (function() {
                        " on design doc " + ddocId]);
               }
               if (typeof fun != "function") {
-                fun = Couch.compileFunction(fun, ddoc, funPath.join('.'));
+                // For filter_view we want the emit() function to be overridden
+                // and just toggle a flag instead of accumulating rows
+                var sandbox = (cmd === "views") ? create_filter_sandbox() : create_sandbox();
+                fun = Couch.compileFunction(fun, ddoc, funPath.join('.'), sandbox);
                 // cache the compiled fun on the ddoc
                 point[funPath[i]] = fun;
               };
diff --git a/share/server/util.js b/share/server/util.js
index aba56eaf2..c207d0ab9 100644
--- a/share/server/util.js
+++ b/share/server/util.js
@@ -58,11 +58,11 @@ var resolveModule = function(names, mod, root) {
 
 var Couch = {
   // moving this away from global so we can move to json2.js later
-  compileFunction : function(source, ddoc, name) {
+  compileFunction : function(source, ddoc, name, sandbox) {
     if (!source) throw(["error","not_found","missing function"]);
 
     var functionObject = null;
-    var sandbox = create_sandbox();
+    var sandbox = sandbox || create_sandbox();
 
     var require = function(name, module) {
       module = module || {};


[couchdb] 06/15: allow configurable timeouts for _view and _search

Posted by va...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 3.3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 71f46314a4377b0fafa5ac933e8225e1052af8fa
Author: Robert Newson <rn...@apache.org>
AuthorDate: Wed Apr 5 18:46:58 2023 +0100

    allow configurable timeouts for _view and _search
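
A note on the knobs this introduces: both values are read through the new
fabric_util:timeout/2 with the defaults shown in the diff below ("infinity"
for the overall _search timeout, one hour for the per-message timeouts).
Assuming they are looked up in the [fabric] section of the ini file like
fabric's other timeout settings (an assumption; check fabric_util:timeout/2
for the actual section), an operator could tighten them like so:

    [fabric]
    ; overall _search timeout, in ms or "infinity"
    search = 300000
    ; per-message timeout for _search shard responses, in ms
    search_permsg = 60000
    ; per-message timeout for _view map/reduce shard responses, in ms
    view_permsg = 60000
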
---
 src/dreyfus/src/dreyfus_fabric_search.erl | 4 ++--
 src/fabric/src/fabric_util.erl            | 2 +-
 src/fabric/src/fabric_view_map.erl        | 2 +-
 src/fabric/src/fabric_view_reduce.erl     | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl
index 7e78e5fc3..bb104892b 100644
--- a/src/dreyfus/src/dreyfus_fabric_search.erl
+++ b/src/dreyfus/src/dreyfus_fabric_search.erl
@@ -120,8 +120,8 @@ go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark, RingOpts) ->
             #shard.ref,
             fun handle_message/3,
             State,
-            infinity,
-            1000 * 60 * 60
+            fabric_util:timeout("search", "infinity"),
+            fabric_util:timeout("search_permsg", "3600000")
         )
     of
         {ok, Result} ->
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index fbba5bdf8..9f9e76234 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -25,7 +25,7 @@
     remove_down_workers/2, remove_down_workers/3,
     doc_id_and_rev/1
 ]).
--export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1]).
+-export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1, timeout/2]).
 -export([log_timeout/2, remove_done_workers/2]).
 -export([is_users_db/1, is_replicator_db/1]).
 -export([open_cluster_db/1, open_cluster_db/2]).
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
index 104086d67..7b3b75863 100644
--- a/src/fabric/src/fabric_view_map.erl
+++ b/src/fabric/src/fabric_view_map.erl
@@ -106,7 +106,7 @@ go(DbName, Workers, {map, View, _}, Args, Callback, Acc0) ->
             fun handle_message/3,
             State,
             fabric_util:view_timeout(Args),
-            1000 * 60 * 60
+            fabric_util:timeout("view_permsg", "3600000")
         )
     of
         {ok, NewState} ->
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
index 600c8d01a..90fa523a1 100644
--- a/src/fabric/src/fabric_view_reduce.erl
+++ b/src/fabric/src/fabric_view_reduce.erl
@@ -105,7 +105,7 @@ go2(DbName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
             fun handle_message/3,
             State,
             fabric_util:view_timeout(Args),
-            1000 * 60 * 60
+            fabric_util:timeout("view_permsg", "3600000")
         )
     of
         {ok, NewState} ->