You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by da...@apache.org on 2019/12/25 17:45:34 UTC

[couchdb] branch speedup-test-suite updated (2433ff9 -> 38a85fd)

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a change to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


 discard 2433ff9  Speedup eunit: smoosh_server
 discard 68384e4  Speedup eunit: mem3_sync_event_listener
 discard 940a7e0  Speedup eunit: mem3_shards
 discard b7cb0a1  Speedup eunit: mem3_rep
 discard 59eaef0  Speedup eunit: mango_idx_test
 discard 081c3f3  Speedup eunit: fabric_doc_update
 discard 4555ab5  Speedup eunit: fabric_doc_purge
 discard 6e9c554  Speedup eunit: fabric_doc_open
 discard 0365f8b  Speedup eunit: fabric_db_create
 discard 74e2a95  Speedup eunit: ddoc_cache_no_cache_test
 discard 44a1e06  Speedup eunit: couch_replicator_scheduler
 discard cb455cd  Speedup eunit: couch_replicator_clustering
 discard 6c4e9ae  Speedup eunit: couch_replicator_auth_session
 discard 822f163  Speedup eunit: couch_replicator
 discard 7d11ba7  Speedup eunit: couch_mrview_purge_docs_fabric_tests
 discard 337e848  Speedup eunit: couch_mrview_compactor
 discard c4c9c68  Speedup eunit: couch_index_compaction_tests
 discard 64354d1  Speedup eunit: couch_index
 discard a32861d  Speedup eunit: couchdb_mrview_tests
 discard f6f3fca  Speedup eunit: couch_uuids_tests
 discard 3f467ed  Speedup eunit: couch_flags_config_tests
 discard b026480  Speedup eunit: couch_file_tests
 discard fc8989e  Speedup eunit: couch_server
 discard 1b4e2c1  Speedup eunit: couch_httpd
 discard e6e0a98  Speedup eunit: couch_db
 discard d03cac7  Speedup eunit: chttpd_xframe_test
 discard 9a122a3  Speedup eunit: chttpd_prefer_header_test
 discard 643006b  Speedup eunit: chttpd_view
 discard 26b807c  Speedup eunit: couch_replicator_compact_tests
 discard 593c407  Speedup eunit: chttpd_endpoints_tests
 discard d540999  Speedup eunit: couch_replicator_doc_processor
 discard 79622bf  Speedup eunit: chttpd_db_bulk_get_test
 discard 986cac0  Speedup eunit: chttpd_db_bulk_get_multipart_test
 discard 380489e  Speedup eunit: couchdb_file_compression_tests
 discard 5405d03  Speedup eunit: fabric_doc_open_revs
 discard c70c331  Speedup eunit: couch_multidb_changes
 discard 1a56439  Speedup eunit: couch_peruser_test
 discard 969f5a3  Cleanup eunit: couch_peruser_test
 discard a94dfc4  Disable JavaScript tests ported to Elixir
 discard 8c18fa5  Speedup JavaScript tests
 discard 905800b  Silence already started message for crypto
     add 2336964  Add SpiderMonkey version option to configure
     add c38f2c6  Enable multi-version SpiderMonkey support
     add 15a3c17  Import SpiderMonkey 60 based CouchJS sources
     add 227f1d6  Allow configuring the use of SpiderMonkey 60
     add ec416c3  Add Javascript to support Spidermonkey 60
     add 5c8e882  Merge pull request #2345 from apache/sm60-davisp
     add cda1801  Switch replicator "info" error message to be an object
     new 4d73243  Silence already started message for crypto
     new b4fa539  Speedup JavaScript tests
     new 01a2f8d  Disable JavaScript tests ported to Elixir
     new e6ddd0d  Cleanup eunit: couch_peruser_test
     new 6949f14  Speedup eunit: couch_peruser_test
     new fc3f4a4  Speedup eunit: couch_multidb_changes
     new 0913e51  Speedup eunit: fabric_doc_open_revs
     new f6bd4c4  Speedup eunit: couchdb_file_compression_tests
     new fa8aae5  Speedup eunit: chttpd_db_bulk_get_multipart_test
     new a704456  Speedup eunit: chttpd_db_bulk_get_test
     new 3795271  Speedup eunit: couch_replicator_doc_processor
     new c7b47fd  Speedup eunit: chttpd_endpoints_tests
     new 9f4a7f6  Speedup eunit: couch_replicator_compact_tests
     new 9076475  Speedup eunit: chttpd_view
     new ce2020c  Speedup eunit: chttpd_prefer_header_test
     new be2d14d  Speedup eunit: chttpd_xframe_test
     new 5a1edf3  Speedup eunit: couch_db
     new 03197c1  Speedup eunit: couch_httpd
     new bba82ed  Speedup eunit: couch_server
     new 226a8cd  Speedup eunit: couch_file_tests
     new 5159058  Speedup eunit: couch_flags_config_tests
     new 287d8bf  Speedup eunit: couch_uuids_tests
     new c59e7b8  Speedup eunit: couchdb_mrview_tests
     new 6a569d3  Speedup eunit: couch_index
     new 470f2c8  Speedup eunit: couch_index_compaction_tests
     new a259b93  Speedup eunit: couch_mrview_compactor
     new ad78501  Speedup eunit: couch_mrview_purge_docs_fabric_tests
     new dc37489  Speedup eunit: couch_replicator
     new 774d84e  Speedup eunit: couch_replicator_auth_session
     new 6aed5a8  Speedup eunit: couch_replicator_clustering
     new 3f614ca  Speedup eunit: couch_replicator_scheduler
     new d45b21a  Speedup eunit: ddoc_cache_no_cache_test
     new 3fba3a4  Speedup eunit: fabric_db_create
     new f1d2906  Speedup eunit: fabric_doc_open
     new 23859f5  Speedup eunit: fabric_doc_purge
     new 64bb28c  Speedup eunit: fabric_doc_update
     new 9cba2f1  Speedup eunit: mango_idx_test
     new c99c58a  Speedup eunit: mem3_rep
     new 3b60bd9  Speedup eunit: mem3_shards
     new a944017  Speedup eunit: mem3_sync_event_listener
     new 38a85fd  Speedup eunit: smoosh_server

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (2433ff9)
            \
             N -- N -- N   refs/heads/speedup-test-suite (38a85fd)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 41 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .gitignore                                         |    7 +-
 LICENSE                                            |   55 +
 NOTICE                                             |    8 +
 configure                                          |   23 +
 configure.ps1                                      |    8 +-
 share/server/60/escodegen.js                       |    1 +
 share/server/60/esprima.js                         | 6711 ++++++++++++++++++++
 share/server/60/rewrite_fun.js                     |   56 +
 share/server/dreyfus.js                            |    4 +-
 .../couch_js/utf8.h => share/server/rewrite_fun.js |   15 +-
 share/server/util.js                               |    5 +-
 share/server/views.js                              |    4 +-
 src/couch/priv/couch_js/{ => 1.8.5}/help.h         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/http.c         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/http.h         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/main.c         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/utf8.c         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/utf8.h         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/util.c         |    0
 src/couch/priv/couch_js/{ => 1.8.5}/util.h         |    0
 src/couch/priv/couch_js/{ => 60}/help.h            |    0
 src/couch/priv/couch_js/{http.c => 60/http.cpp}    |  312 +-
 src/couch/priv/couch_js/{ => 60}/http.h            |   12 +-
 src/couch/priv/couch_js/60/main.cpp                |  494 ++
 src/couch/priv/couch_js/{utf8.c => 60/utf8.cpp}    |   94 +-
 src/couch/priv/couch_js/{ => 60}/utf8.h            |    2 +-
 src/couch/priv/couch_js/{util.c => 60/util.cpp}    |  143 +-
 src/couch/priv/couch_js/{ => 60}/util.h            |   12 +-
 src/couch/rebar.config.script                      |   85 +-
 src/couch_replicator/src/couch_replicator.erl      |    5 +-
 .../src/couch_replicator_scheduler.erl             |    6 +-
 .../src/couch_replicator_utils.erl                 |    5 +-
 support/build_js.escript                           |   79 +-
 test/javascript/tests/reader_acl.js                |    2 +-
 test/javascript/tests/security_validation.js       |    2 +-
 test/javascript/tests/view_errors.js               |    2 +-
 36 files changed, 7815 insertions(+), 337 deletions(-)
 create mode 100644 share/server/60/escodegen.js
 create mode 100644 share/server/60/esprima.js
 create mode 100644 share/server/60/rewrite_fun.js
 copy src/couch/priv/couch_js/utf8.h => share/server/rewrite_fun.js (73%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/help.h (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/http.c (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/http.h (100%)
 rename src/couch/priv/couch_js/{ => 1.8.5}/main.c (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/utf8.c (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/utf8.h (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/util.c (100%)
 copy src/couch/priv/couch_js/{ => 1.8.5}/util.h (100%)
 rename src/couch/priv/couch_js/{ => 60}/help.h (100%)
 rename src/couch/priv/couch_js/{http.c => 60/http.cpp} (68%)
 rename src/couch/priv/couch_js/{ => 60}/http.h (63%)
 create mode 100644 src/couch/priv/couch_js/60/main.cpp
 rename src/couch/priv/couch_js/{utf8.c => 60/utf8.cpp} (74%)
 rename src/couch/priv/couch_js/{ => 60}/utf8.h (91%)
 rename src/couch/priv/couch_js/{util.c => 60/util.cpp} (65%)
 rename src/couch/priv/couch_js/{ => 60}/util.h (69%)


[couchdb] 30/41: Speedup eunit: couch_replicator_clustering

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6aed5a873039b13be4ea583fdf2f20aae983a99e
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:39:22 2019 -0600

    Speedup eunit: couch_replicator_clustering
---
 .../src/couch_replicator_clustering.erl            | 39 ++++++++++++++++------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index a7f7573..3ea6934 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -203,13 +203,18 @@ owner_int(ShardName, DocId) ->
 
 replicator_clustering_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_stable_callback(),
-            t_unstable_callback()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_stable_callback(),
+                t_unstable_callback()
+            ]
+        }
     }.
 
 
@@ -230,19 +235,31 @@ t_unstable_callback() ->
     end).
 
 
-setup() ->
+setup_all() ->
     meck:expect(couch_log, notice, 2, ok),
     meck:expect(config, get, fun(_, _, Default) -> Default end),
     meck:expect(config, listen_for_changes, 2, ok),
     meck:expect(couch_stats, update_gauge, 2, ok),
-    meck:expect(couch_replicator_notifier, notify, 1, ok),
+    meck:expect(couch_replicator_notifier, notify, 1, ok).
+
+
+teardown_all(_) ->
+    meck:unload().
+
+
+setup() ->
+    meck:reset([
+        config,
+        couch_log,
+        couch_stats,
+        couch_replicator_notifier
+    ]),
     {ok, Pid} = start_link(),
     Pid.
 
 
 teardown(Pid) ->
     unlink(Pid),
-    exit(Pid, kill),
-    meck:unload().
+    exit(Pid, kill).
 
 -endif.


[couchdb] 14/41: Speedup eunit: chttpd_view

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 907647511187e1d1bbc87c2d8e8a01f15ccd45aa
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:30:25 2019 -0600

    Speedup eunit: chttpd_view
---
 src/chttpd/src/chttpd_view.erl | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 5070468..f73a8b7 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -123,13 +123,18 @@ assert_no_queries_param(_) ->
 
 check_multi_query_reduce_view_overrides_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_check_include_docs_throw_validation_error(),
-            t_check_user_can_override_individual_query_type()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_check_include_docs_throw_validation_error(),
+                t_check_user_can_override_individual_query_type()
+            ]
+        }
     }.
 
 
@@ -153,7 +158,7 @@ t_check_user_can_override_individual_query_type() ->
     end).
 
 
-setup() ->
+setup_all() ->
     Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
     meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
     meck:expect(chttpd, start_delayed_json_response, 4, {ok, resp}),
@@ -162,8 +167,20 @@ setup() ->
     meck:expect(chttpd, end_delayed_json_response, 1, ok).
 
 
-teardown(_) ->
+teardown_all(_) ->
     meck:unload().
 
 
+setup() ->
+    meck:reset([
+        chttpd,
+        couch_mrview_util,
+        fabric
+    ]).
+
+
+teardown(_) ->
+    ok.
+
+
 -endif.


[couchdb] 23/41: Speedup eunit: couchdb_mrview_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c59e7b84819b447668917e23c9d4f0565e888cfa
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:35:59 2019 -0600

    Speedup eunit: couchdb_mrview_tests
---
 src/couch/test/eunit/couchdb_mrview_tests.erl | 31 +++++++++++++++++----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
index f5bad73..ec77b19 100644
--- a/src/couch/test/eunit/couchdb_mrview_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_tests.erl
@@ -40,14 +40,20 @@
 -define(AUTH, {basic_auth, {?USER, ?PASS}}).
 
 
-start() ->
+setup_all() ->
     Ctx = test_util:start_couch([chttpd]),
+    ok = meck:new(mochiweb_socket, [passthrough]),
     Hashed = couch_passwords:hash_admin_password(?PASS),
     ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
     Ctx.
 
+teardown_all(Ctx) ->
+    meck:unload(),
+    ok = config:delete("admins", ?USER, _Persist=false),
+    test_util:stop_couch(Ctx).
+
 setup(PortType) ->
-    ok = meck:new(mochiweb_socket, [passthrough]),
+    meck:reset([mochiweb_socket]),
     ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3),
 
     DbName = ?tempdb(),
@@ -57,12 +63,7 @@ setup(PortType) ->
     upload_ddoc(Host, ?b2l(DbName)),
     {Host, ?b2l(DbName)}.
 
-teardown(Ctx) ->
-    ok = config:delete("admins", ?USER, _Persist=false),
-    test_util:stop_couch(Ctx).
-
 teardown(PortType, {_Host, DbName}) ->
-    (catch meck:unload(mochiweb_socket)),
     delete_db(PortType, ?l2b(DbName)),
     ok.
 
@@ -71,7 +72,8 @@ mrview_show_test_() ->
         "Check show functionality",
         {
             setup,
-            fun start/0, fun teardown/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             [
                 make_test_case(clustered, [fun should_return_invalid_request_body/2]),
                 make_test_case(backdoor, [fun should_return_invalid_request_body/2])
@@ -84,7 +86,8 @@ mrview_query_test_() ->
         "Check view query functionality",
         {
             setup,
-            fun start/0, fun teardown/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             [
                 make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]),
                 make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2])
@@ -97,7 +100,8 @@ mrview_cleanup_index_files_test_() ->
         "Check index files cleanup",
         {
             setup,
-            fun start/0, fun teardown/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             [
                 make_test_case(clustered, [fun should_cleanup_index_files/2])
             ]
@@ -108,7 +112,12 @@ mrview_cleanup_index_files_test_() ->
 make_test_case(Mod, Funs) ->
     {
         lists:flatten(io_lib:format("~s", [Mod])),
-        {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
+        {
+            foreachx,
+            fun setup/1,
+            fun teardown/2,
+            [{Mod, Fun} || Fun <- Funs]
+        }
     }.
 
 should_return_invalid_request_body(PortType, {Host, DbName}) ->


[couchdb] 38/41: Speedup eunit: mem3_rep

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c99c58aea6709dbed434b67ff7111fcb8e9212bc
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:42:19 2019 -0600

    Speedup eunit: mem3_rep
---
 src/mem3/src/mem3_rep.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index fd7c680..4b75846 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -880,7 +880,7 @@ doc_() ->
 
 targets_map_test_() ->
     {
-        foreach,
+        setup,
         fun() -> meck:new(mem3, [passthrough]) end,
         fun(_) -> meck:unload() end,
         [


[couchdb] 03/41: Disable JavaScript tests ported to Elixir

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 01a2f8d9650d33ef20103fa8d951790d566d2983
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 15:36:05 2019 -0600

    Disable JavaScript tests ported to Elixir
---
 test/javascript/cli_runner.js                  |   4 +
 test/javascript/run                            |   4 +-
 test/javascript/tests/all_docs.js              |   3 +-
 test/javascript/tests/attachment_names.js      |   1 +
 test/javascript/tests/attachment_paths.js      |   1 +
 test/javascript/tests/attachment_ranges.js     |   3 +-
 test/javascript/tests/attachment_views.js      |   1 +
 test/javascript/tests/attachments.js           |   3 +-
 test/javascript/tests/attachments_multipart.js | 115 +++++++++++++------------
 test/javascript/tests/basics.js                |   1 +
 test/javascript/tests/batch_save.js            |  11 +--
 test/javascript/tests/bulk_docs.js             |   1 +
 test/javascript/tests/coffee.js                |   1 +
 test/javascript/tests/compact.js               |   1 +
 test/javascript/tests/config.js                |   1 +
 test/javascript/tests/conflicts.js             |   1 +
 test/javascript/tests/copy_doc.js              |   1 +
 test/javascript/tests/invalid_docids.js        |   1 +
 test/javascript/tests/large_docs.js            |   1 +
 test/javascript/tests/lots_of_docs.js          |   1 +
 test/javascript/tests/multiple_rows.js         |   1 +
 test/javascript/tests/reduce.js                |   1 +
 test/javascript/tests/uuids.js                 |   3 +-
 test/javascript/tests/view_collation.js        |   5 +-
 24 files changed, 97 insertions(+), 69 deletions(-)

diff --git a/test/javascript/cli_runner.js b/test/javascript/cli_runner.js
index 5d7a980..7346762 100644
--- a/test/javascript/cli_runner.js
+++ b/test/javascript/cli_runner.js
@@ -26,6 +26,10 @@ function runTest() {
       quit(2);
   }
 
+  if(couchTests.elixir) {
+      quit(3);
+  }
+
   for(var name in couchTests) {
       count++;
   }
diff --git a/test/javascript/run b/test/javascript/run
index ebcdef9..ac49e3a 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -55,6 +55,8 @@ def mkformatter(tests):
             return green + "pass" + clear
         elif rval == 2:
             return orange + "skipped" + clear
+        elif rval == 3:
+            return green + "ported to elixir" + clear
         else:
             return red + "fail" + clear
 
@@ -166,7 +168,7 @@ def main():
         fmt = mkformatter(tests)
         for test in tests:
             result = run_couchjs(test, fmt)
-            if result == 0 or result == 2:
+            if result == 0 or result == 2 or result == 3:
                 passed += 1
             else:
                 failed += 1
diff --git a/test/javascript/tests/all_docs.js b/test/javascript/tests/all_docs.js
index 64524d8..a360fb9 100644
--- a/test/javascript/tests/all_docs.js
+++ b/test/javascript/tests/all_docs.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.all_docs = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}, {w: 3});
@@ -79,7 +80,7 @@ couchTests.all_docs = function(debug) {
   })[0];
   TEquals("1", deleted_doc.id, "deletes");
 
-  // (remember old seq) 
+  // (remember old seq)
   var orig_doc = changes.results.filter(function(row) {
     return row.id == "3"
   })[0];
diff --git a/test/javascript/tests/attachment_names.js b/test/javascript/tests/attachment_names.js
index d208396..4e9217c 100644
--- a/test/javascript/tests/attachment_names.js
+++ b/test/javascript/tests/attachment_names.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.attachment_names = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}, {w: 3});
diff --git a/test/javascript/tests/attachment_paths.js b/test/javascript/tests/attachment_paths.js
index 0599771..048640d 100644
--- a/test/javascript/tests/attachment_paths.js
+++ b/test/javascript/tests/attachment_paths.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.attachment_paths = function(debug) {
   if (debug) debugger;
   var r_db_name = get_random_db_name()
diff --git a/test/javascript/tests/attachment_ranges.js b/test/javascript/tests/attachment_ranges.js
index e052713..37700ec 100644
--- a/test/javascript/tests/attachment_ranges.js
+++ b/test/javascript/tests/attachment_ranges.js
@@ -14,6 +14,7 @@ function cacheBust() {
     return "?anti-cache=" + String(Math.round(Math.random() * 1000000));
 };
 
+couchTests.elixir = true;
 couchTests.attachment_ranges = function(debug) {
     var db_name = get_random_db_name();
     var db = new CouchDB(db_name, {
@@ -132,7 +133,7 @@ couchTests.attachment_ranges = function(debug) {
     TEquals("ext", xhr.responseText);
     TEquals("3", xhr.getResponseHeader("Content-Length"));
     TEquals("bytes 26-28/29", xhr.getResponseHeader("Content-Range"));
-    
+
     // backward range is 416
     var xhr = CouchDB.request("GET", "/" + db_name + "/bin_doc/foo.txt" + cacheBust(), {
        headers: {
diff --git a/test/javascript/tests/attachment_views.js b/test/javascript/tests/attachment_views.js
index a322d7c..7be32a9 100644
--- a/test/javascript/tests/attachment_views.js
+++ b/test/javascript/tests/attachment_views.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.attachment_views= function(debug) {
 
   var db_name = get_random_db_name()
diff --git a/test/javascript/tests/attachments.js b/test/javascript/tests/attachments.js
index 73de018..09c6acd 100644
--- a/test/javascript/tests/attachments.js
+++ b/test/javascript/tests/attachments.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.attachments= function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
@@ -301,7 +302,7 @@ couchTests.attachments= function(debug) {
   T(db.save(bin_doc6).ok == true);
 
   // wrong rev pos specified
-  
+
   // stub out the attachment with the wrong revpos
   bin_doc6._attachments["foo.txt"] = { stub: true, revpos: 10};
   try {
diff --git a/test/javascript/tests/attachments_multipart.js b/test/javascript/tests/attachments_multipart.js
index e15cb57..c36083f 100644
--- a/test/javascript/tests/attachments_multipart.js
+++ b/test/javascript/tests/attachments_multipart.js
@@ -10,14 +10,15 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.attachments_multipart= function(debug) {
   var db_name = get_random_db_name()
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
   db.createDb();
   if (debug) debugger;
-  
+
   // mime multipart
-            
+
   var xhr = CouchDB.request("PUT", "/" + db_name + "/multipart", {
     headers: {"Content-Type": "multipart/related;boundary=\"abc123\""},
     body:
@@ -55,39 +56,39 @@ couchTests.attachments_multipart= function(debug) {
       "this is 19 chars lo" +
       "\r\n--abc123--epilogue"
     });
-    
+
   var result = JSON.parse(xhr.responseText);
-  
+
   T(result.ok);
-  
-  
-    
+
+
+
   TEquals(201, xhr.status, "should send 201 Accepted");
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart/foo.txt");
-  
+
   T(xhr.responseText == "this is 21 chars long");
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart/bar.txt");
-  
+
   T(xhr.responseText == "this is 20 chars lon");
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart/baz.txt");
-  
+
   T(xhr.responseText == "this is 19 chars lo");
-  
+
   // now edit an attachment
-  
+
   var doc = db.open("multipart", {att_encoding_info: true});
   var firstrev = doc._rev;
-  
+
   T(doc._attachments["foo.txt"].stub == true);
   T(doc._attachments["bar.txt"].stub == true);
   T(doc._attachments["baz.txt"].stub == true);
   TEquals("undefined", typeof doc._attachments["foo.txt"].encoding);
   TEquals("undefined", typeof doc._attachments["bar.txt"].encoding);
   TEquals("gzip", doc._attachments["baz.txt"].encoding);
-  
+
   //lets change attachment bar
   delete doc._attachments["bar.txt"].stub; // remove stub member (or could set to false)
   delete doc._attachments["bar.txt"].digest; // remove the digest (it's for the gzip form)
@@ -95,7 +96,7 @@ couchTests.attachments_multipart= function(debug) {
   doc._attachments["bar.txt"].follows = true;
   //lets delete attachment baz:
   delete doc._attachments["baz.txt"];
-  
+
   var xhr = CouchDB.request("PUT", "/" + db_name + "/multipart", {
     headers: {"Content-Type": "multipart/related;boundary=\"abc123\""},
     body:
@@ -109,16 +110,16 @@ couchTests.attachments_multipart= function(debug) {
       "\r\n--abc123--"
     });
   TEquals(201, xhr.status);
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart/bar.txt");
-  
+
   T(xhr.responseText == "this is 18 chars l");
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart/baz.txt");
   T(xhr.status == 404);
-  
+
   // now test receiving multipart docs
-  
+
   function getBoundary(xhr) {
     var ctype = CouchDB.xhrheader(xhr, "Content-Type");
     var ctypeArgs = ctype.split("; ").slice(1);
@@ -127,7 +128,7 @@ couchTests.attachments_multipart= function(debug) {
       if (ctypeArgs[i].indexOf("boundary=") == 0) {
         boundary = ctypeArgs[i].split("=")[1];
         if (boundary.charAt(0) == '"') {
-          // stringified boundary, parse as json 
+          // stringified boundary, parse as json
           // (will maybe not if there are escape quotes)
           boundary = JSON.parse(boundary);
         }
@@ -135,22 +136,22 @@ couchTests.attachments_multipart= function(debug) {
     }
     return boundary;
   }
-  
+
   function parseMultipart(xhr) {
     var boundary = getBoundary(xhr);
     var mimetext = CouchDB.xhrbody(xhr);
     // strip off leading boundary
     var leading = "--" + boundary + "\r\n";
     var last = "\r\n--" + boundary + "--";
-    
+
     // strip off leading and trailing boundary
     var leadingIdx = mimetext.indexOf(leading) + leading.length;
     var trailingIdx = mimetext.indexOf(last);
     mimetext = mimetext.slice(leadingIdx, trailingIdx);
-    
+
     // now split the sections
     var sections = mimetext.split(new RegExp("\\r\\n--" + boundary));
-    
+
     // spilt out the headers for each section
     for(var i=0; i < sections.length; i++) {
       var section = sections[i];
@@ -160,20 +161,20 @@ couchTests.attachments_multipart= function(debug) {
       var headers = {};
       for(var j=0; j<headersraw.length; j++) {
         var tmp = headersraw[j].split(": ");
-        headers[tmp[0]] = tmp[1]; 
+        headers[tmp[0]] = tmp[1];
       }
       sections[i] = {"headers":headers, "body":body};
     }
-    
+
     return sections;
   }
-  
-  
+
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart?attachments=true",
     {headers:{"accept": "multipart/related,*/*;"}});
-  
+
   T(xhr.status == 200);
-  
+
   // parse out the multipart
   var sections = parseMultipart(xhr);
   TEquals("790", xhr.getResponseHeader("Content-Length"),
@@ -199,30 +200,30 @@ couchTests.attachments_multipart= function(debug) {
     "Content-Disposition should be bar.txt section[2]");
 
   var doc = JSON.parse(sections[0].body);
-  
+
   T(doc._attachments['foo.txt'].follows == true);
   T(doc._attachments['bar.txt'].follows == true);
-  
+
   T(sections[1].body == "this is 21 chars long");
   TEquals("this is 18 chars l", sections[2].body, "should be 18 chars long");
-  
+
   // now get attachments incrementally (only the attachments changes since
   // a certain rev).
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart?atts_since=[\"" + firstrev + "\"]",
     {headers:{"accept": "multipart/related, */*"}});
-  
+
   T(xhr.status == 200);
 
   var sections = parseMultipart(xhr);
-  
+
   T(sections.length == 2);
-  
+
   var doc = JSON.parse(sections[0].body);
-  
+
   T(doc._attachments['foo.txt'].stub == true);
   T(doc._attachments['bar.txt'].follows == true);
-  
+
   TEquals("this is 18 chars l", sections[1].body, "should be 18 chars long 2");
 
   // try the atts_since parameter together with the open_revs parameter
@@ -259,39 +260,39 @@ couchTests.attachments_multipart= function(debug) {
   T(innerSections[2].body === "this is 18 chars l");
 
   // try it with a rev that doesn't exist (should get all attachments)
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart?atts_since=[\"1-2897589\"]",
     {headers:{"accept": "multipart/related,*/*;"}});
-  
+
   T(xhr.status == 200);
-  
+
   var sections = parseMultipart(xhr);
-  
+
   T(sections.length == 3);
-  
+
   var doc = JSON.parse(sections[0].body);
-  
+
   T(doc._attachments['foo.txt'].follows == true);
   T(doc._attachments['bar.txt'].follows == true);
-  
+
   T(sections[1].body == "this is 21 chars long");
   TEquals("this is 18 chars l", sections[2].body, "should be 18 chars long 3");
   // try it with a rev that doesn't exist, and one that does
-  
+
   xhr = CouchDB.request("GET", "/" + db_name + "/multipart?atts_since=[\"1-2897589\",\"" + firstrev + "\"]",
     {headers:{"accept": "multipart/related,*/*;"}});
-  
+
   T(xhr.status == 200);
-  
+
   var sections = parseMultipart(xhr);
-  
+
   T(sections.length == 2);
-  
+
   var doc = JSON.parse(sections[0].body);
-  
+
   T(doc._attachments['foo.txt'].stub == true);
   T(doc._attachments['bar.txt'].follows == true);
-  
+
   TEquals("this is 18 chars l", sections[1].body, "should be 18 chars long 4");
 
   // check that with the document multipart/mixed API it's possible to receive
diff --git a/test/javascript/tests/basics.js b/test/javascript/tests/basics.js
index a36b303..edf9692 100644
--- a/test/javascript/tests/basics.js
+++ b/test/javascript/tests/basics.js
@@ -11,6 +11,7 @@
 // the License.
 
 // Do some basic tests.
+couchTests.elixir = true;
 couchTests.basics = function(debug) {
 
   if (debug) debugger;
diff --git a/test/javascript/tests/batch_save.js b/test/javascript/tests/batch_save.js
index b6e40ab..1f85b12 100644
--- a/test/javascript/tests/batch_save.js
+++ b/test/javascript/tests/batch_save.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.batch_save = function(debug) {
   var db_name = get_random_db_name()
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
@@ -19,19 +20,19 @@ couchTests.batch_save = function(debug) {
   var i
   for(i=0; i < 100; i++) {
     T(db.save({_id:i.toString(),a:i,b:i},  {batch : "ok"}).ok);
-    
+
     // test that response is 202 Accepted
     T(db.last_req.status == 202);
   }
-  
+
   for(i=0; i < 100; i++) {
     // attempt to save the same document a bunch of times
     T(db.save({_id:"foo",a:i,b:i},  {batch : "ok"}).ok);
-    
+
     // test that response is 202 Accepted
     T(db.last_req.status == 202);
   }
-  
+
   while(db.allDocs().total_rows != 101){};
 
   // repeat the tests for POST
@@ -42,7 +43,7 @@ couchTests.batch_save = function(debug) {
     });
     T(JSON.parse(resp.responseText).ok);
   }
-  
+
   while(db.allDocs().total_rows != 201){};
 
   // cleanup
diff --git a/test/javascript/tests/bulk_docs.js b/test/javascript/tests/bulk_docs.js
index ae8a087..7e65ae3 100644
--- a/test/javascript/tests/bulk_docs.js
+++ b/test/javascript/tests/bulk_docs.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.bulk_docs = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/coffee.js b/test/javascript/tests/coffee.js
index 13f05b8..747bacf 100644
--- a/test/javascript/tests/coffee.js
+++ b/test/javascript/tests/coffee.js
@@ -11,6 +11,7 @@
 // the License.
 
 // test basic coffeescript functionality
+couchTests.elixir = true;
 couchTests.coffee = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/compact.js b/test/javascript/tests/compact.js
index 8c8beb4..2b9dd21 100644
--- a/test/javascript/tests/compact.js
+++ b/test/javascript/tests/compact.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.compact = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/config.js b/test/javascript/tests/config.js
index a9dce63..889cbd0 100644
--- a/test/javascript/tests/config.js
+++ b/test/javascript/tests/config.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.config = function(debug) {
   if (debug) debugger;
 
diff --git a/test/javascript/tests/conflicts.js b/test/javascript/tests/conflicts.js
index 81b3d8d..7b5e020 100644
--- a/test/javascript/tests/conflicts.js
+++ b/test/javascript/tests/conflicts.js
@@ -11,6 +11,7 @@
 // the License.
 
 // Do some edit conflict detection tests
+couchTests.elixir = true;
 couchTests.conflicts = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/copy_doc.js b/test/javascript/tests/copy_doc.js
index 9d8ed54..708fe53 100644
--- a/test/javascript/tests/copy_doc.js
+++ b/test/javascript/tests/copy_doc.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.copy_doc = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/invalid_docids.js b/test/javascript/tests/invalid_docids.js
index 0e5c70c..74f0e4f 100644
--- a/test/javascript/tests/invalid_docids.js
+++ b/test/javascript/tests/invalid_docids.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.invalid_docids = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/large_docs.js b/test/javascript/tests/large_docs.js
index 7528e9a..bc9d22c 100644
--- a/test/javascript/tests/large_docs.js
+++ b/test/javascript/tests/large_docs.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.large_docs = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/lots_of_docs.js b/test/javascript/tests/lots_of_docs.js
index 024284c..dc1486a 100644
--- a/test/javascript/tests/lots_of_docs.js
+++ b/test/javascript/tests/lots_of_docs.js
@@ -11,6 +11,7 @@
 // the License.
 
 // test saving a semi-large quanitity of documents and do some view queries.
+couchTests.elixir = true;
 couchTests.lots_of_docs = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/multiple_rows.js b/test/javascript/tests/multiple_rows.js
index 0056e59..5bac8ab 100644
--- a/test/javascript/tests/multiple_rows.js
+++ b/test/javascript/tests/multiple_rows.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.multiple_rows = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/reduce.js b/test/javascript/tests/reduce.js
index 9c373e4..6b8ea18 100644
--- a/test/javascript/tests/reduce.js
+++ b/test/javascript/tests/reduce.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.reduce = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
diff --git a/test/javascript/tests/uuids.js b/test/javascript/tests/uuids.js
index d53a80c..cbf5e8e 100644
--- a/test/javascript/tests/uuids.js
+++ b/test/javascript/tests/uuids.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.uuids = function(debug) {
   var etags = [];
   var testHashBustingHeaders = function(xhr) {
@@ -19,7 +20,7 @@ couchTests.uuids = function(debug) {
     var newetag = xhr.getResponseHeader("ETag");
     T(etags.indexOf(newetag) < 0);
     etags[etags.length] = newetag;
-    
+
     // Removing the time based tests as they break easily when
     // running CouchDB on a remote server in regards to the browser
     // running the Futon test suite.
diff --git a/test/javascript/tests/view_collation.js b/test/javascript/tests/view_collation.js
index 51e74ff..7391fc8 100644
--- a/test/javascript/tests/view_collation.js
+++ b/test/javascript/tests/view_collation.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.elixir = true;
 couchTests.view_collation = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
@@ -103,12 +104,12 @@ couchTests.view_collation = function(debug) {
   var rows = db.query(queryFun, null, {endkey : "b",
     descending:true, inclusive_end:false}).rows;
   T(rows[rows.length-1].key == "B");
-  
+
   var rows = db.query(queryFun, null, {
     endkey : "b", endkey_docid: "10",
     inclusive_end:false}).rows;
   T(rows[rows.length-1].key == "aa");
-  
+
   var rows = db.query(queryFun, null, {
     endkey : "b", endkey_docid: "11",
     inclusive_end:false}).rows;


[couchdb] 16/41: Speedup eunit: chttpd_xframe_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit be2d14d46643b9053a3595faabb626cc66ff7983
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:31:03 2019 -0600

    Speedup eunit: chttpd_xframe_test
---
 src/chttpd/test/eunit/chttpd_xframe_test.erl | 29 ++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/src/chttpd/test/eunit/chttpd_xframe_test.erl b/src/chttpd/test/eunit/chttpd_xframe_test.erl
index 1272c19..f3e6165 100644
--- a/src/chttpd/test/eunit/chttpd_xframe_test.erl
+++ b/src/chttpd/test/eunit/chttpd_xframe_test.erl
@@ -4,13 +4,19 @@
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-setup() ->
+setup_all() ->
     ok = meck:new(config),
     ok = meck:expect(config, get, fun(_, _, _) -> "X-Forwarded-Host" end),
     ok.
 
+teardown_all(_) ->
+    meck:unload().
+
+setup() ->
+    meck:reset([config]).
+
 teardown(_) ->
-    meck:unload(config).
+    ok.
 
 mock_request() ->
     Headers = mochiweb_headers:make([{"Host", "examples.com"}]),
@@ -62,12 +68,19 @@ xframe_host_test_() ->
     {
         "xframe host tests",
         {
-            foreach, fun setup/0, fun teardown/1,
-            [
-                fun allow_with_wildcard_host/1,
-                fun allow_with_specific_host/1,
-                fun deny_with_different_host/1
-            ]
+            setup,
+            fun setup_all/0,
+            fun teardown_all/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun allow_with_wildcard_host/1,
+                    fun allow_with_specific_host/1,
+                    fun deny_with_different_host/1
+                ]
+            }
         }
     }.
 


[couchdb] 09/41: Speedup eunit: chttpd_db_bulk_get_multipart_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit fa8aae5cbdc2beec4c3dd811488d592d0b9461ed
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 13:15:25 2019 -0600

    Speedup eunit: chttpd_db_bulk_get_multipart_test
    
    Another module where loading the meck expect values once per suite saves
    us 20s or so.
---
 .../eunit/chttpd_db_bulk_get_multipart_test.erl    | 67 ++++++++++++++--------
 1 file changed, 43 insertions(+), 24 deletions(-)

diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
index 8a95c92..86a8eab 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
@@ -18,39 +18,60 @@
 -define(TIMEOUT, 3000).
 
 
-setup() ->
+setup_all() ->
     mock(config),
     mock(chttpd),
     mock(couch_epi),
     mock(couch_httpd),
     mock(couch_stats),
     mock(fabric),
-    mock(mochireq),
-    Pid = spawn_accumulator(),
-    Pid.
+    mock(mochireq).
 
 
-teardown(Pid) ->
-    ok = stop_accumulator(Pid),
+teardown_all(_) ->
     meck:unload().
 
 
+setup() ->
+    meck:reset([
+        config,
+        chttpd,
+        couch_epi,
+        couch_httpd,
+        couch_stats,
+        fabric,
+        mochireq
+    ]),
+    spawn_accumulator().
+
+
+teardown(Pid) ->
+    ok = stop_accumulator(Pid).
+
+
 bulk_get_test_() ->
     {
         "/db/_bulk_get tests",
         {
-            foreach, fun setup/0, fun teardown/1,
-            [
-                fun should_require_docs_field/1,
-                fun should_not_accept_specific_query_params/1,
-                fun should_return_empty_results_on_no_docs/1,
-                fun should_get_doc_with_all_revs/1,
-                fun should_validate_doc_with_bad_id/1,
-                fun should_validate_doc_with_bad_rev/1,
-                fun should_validate_missing_doc/1,
-                fun should_validate_bad_atts_since/1,
-                fun should_include_attachments_when_atts_since_specified/1
-            ]
+            setup,
+            fun setup_all/0,
+            fun teardown_all/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun should_require_docs_field/1,
+                    fun should_not_accept_specific_query_params/1,
+                    fun should_return_empty_results_on_no_docs/1,
+                    fun should_get_doc_with_all_revs/1,
+                    fun should_validate_doc_with_bad_id/1,
+                    fun should_validate_doc_with_bad_rev/1,
+                    fun should_validate_missing_doc/1,
+                    fun should_validate_bad_atts_since/1,
+                    fun should_include_attachments_when_atts_since_specified/1
+                ]
+            }
         }
     }.
 
@@ -65,12 +86,10 @@ should_not_accept_specific_query_params(_) ->
     Req = fake_request({[{<<"docs">>, []}]}),
     Db  = test_util:fake_db([{name, <<"foo">>}]),
     lists:map(fun (Param) ->
-        {Param, ?_assertThrow({bad_request, _},
-                              begin
-                                  ok = meck:expect(chttpd, qs,
-                                                   fun(_) -> [{Param, ""}] end),
-                                  chttpd_db:db_req(Req, Db)
-                              end)}
+        {Param, ?_assertThrow({bad_request, _}, begin
+            BadReq = Req#httpd{qs = [{Param, ""}]},
+            chttpd_db:db_req(BadReq, Db)
+        end)}
     end, ["rev", "open_revs", "atts_since", "w", "new_edits"]).
 
 


[couchdb] 19/41: Speedup eunit: couch_server

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit bba82ede70c409ac8ba19dd20a835ab466c97517
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:31:49 2019 -0600

    Speedup eunit: couch_server
---
 src/couch/src/couch_server.erl | 63 +++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 34 deletions(-)

diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index e42f103..30f4e69 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -775,19 +775,30 @@ get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) ->
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
-setup() ->
+setup_all() ->
     ok = meck:new(config, [passthrough]),
     ok = meck:expect(config, get, fun config_get/3),
     ok.
 
-teardown(_) ->
-    (catch meck:unload(config)).
+teardown_all(_) ->
+    meck:unload().
 
 config_get("couchdb", "users_db_suffix", _) -> "users_db";
 config_get(_, _, _) -> undefined.
 
 maybe_add_sys_db_callbacks_pass_test_() ->
-    SysDbCases = [
+    {
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        [
+            fun should_add_sys_db_callbacks/0,
+            fun should_not_add_sys_db_callbacks/0
+        ]
+    }.
+
+should_add_sys_db_callbacks() ->
+    Cases = [
         "shards/00000000-3fffffff/foo/users_db.1415960794.couch",
         "shards/00000000-3fffffff/foo/users_db.1415960794",
         "shards/00000000-3fffffff/foo/users_db",
@@ -816,8 +827,13 @@ maybe_add_sys_db_callbacks_pass_test_() ->
         "_replicator.couch",
         "_replicator"
     ],
+    lists:foreach(fun(DbName) ->
+        check_case(DbName, true),
+        check_case(?l2b(DbName), true)
+    end, Cases).
 
-    NonSysDbCases = [
+should_not_add_sys_db_callbacks() ->
+    Cases = [
         "shards/00000000-3fffffff/foo/mydb.1415960794.couch",
         "shards/00000000-3fffffff/foo/mydb.1415960794",
         "shards/00000000-3fffffff/mydb",
@@ -826,34 +842,13 @@ maybe_add_sys_db_callbacks_pass_test_() ->
         "mydb.couch",
         "mydb"
     ],
-    {
-        foreach, fun setup/0, fun teardown/1,
-        [
-            [should_add_sys_db_callbacks(C) || C <- SysDbCases]
-            ++
-            [should_add_sys_db_callbacks(?l2b(C)) || C <- SysDbCases]
-            ++
-            [should_not_add_sys_db_callbacks(C) || C <- NonSysDbCases]
-            ++
-            [should_not_add_sys_db_callbacks(?l2b(C)) || C <- NonSysDbCases]
-        ]
-    }.
-
-should_add_sys_db_callbacks(DbName) ->
-    {test_name(DbName), ?_test(begin
-        Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
-        ?assert(lists:member(sys_db, Options)),
-        ok
-    end)}.
-should_not_add_sys_db_callbacks(DbName) ->
-    {test_name(DbName), ?_test(begin
-        Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
-        ?assertNot(lists:member(sys_db, Options)),
-        ok
-    end)}.
-
-test_name(DbName) ->
-    lists:flatten(io_lib:format("~p", [DbName])).
-
+    lists:foreach(fun(DbName) ->
+        check_case(DbName, false),
+        check_case(?l2b(DbName), false)
+    end, Cases).
+
+check_case(DbName, IsAdded) ->
+    Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
+    ?assertEqual(IsAdded, lists:member(sys_db, Options)).
 
 -endif.


[couchdb] 13/41: Speedup eunit: couch_replicator_compact_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9f4a7f61c8e9d4c042eddfac97acd510d6f9a53e
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 14:41:38 2019 -0600

    Speedup eunit: couch_replicator_compact_tests
    
    Increase the doc write batch count in the background writer process to
    speed up the should_populate_and_compact test.
---
 .../test/eunit/couch_replicator_compact_tests.erl  | 39 ++++++++++++----------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
index eb3fc82..997c848 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
@@ -26,6 +26,7 @@
 -define(TIMEOUT, 360000).
 -define(TIMEOUT_WRITER, 100000).
 -define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 70).
+-define(WRITE_BATCH_SIZE, 25).
 
 setup() ->
     DbName = ?tempdb(),
@@ -408,33 +409,35 @@ writer_loop(Db0, Parent, Counter) ->
     DbName = couch_db:name(Db0),
     {ok, Data} = file:read_file(?ATTFILE),
     maybe_pause(Parent, Counter),
-    Doc = couch_doc:from_json_obj({[
-        {<<"_id">>, ?l2b(integer_to_list(Counter + 1))},
-        {<<"value">>, Counter + 1},
-        {<<"_attachments">>, {[
-            {<<"icon1.png">>, {[
-                {<<"data">>, base64:encode(Data)},
-                {<<"content_type">>, <<"image/png">>}
-            ]}},
-            {<<"icon2.png">>, {[
-                {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
-                {<<"content_type">>, <<"image/png">>}
+    Docs = lists:map(fun(I) ->
+        couch_doc:from_json_obj({[
+            {<<"_id">>, ?l2b(integer_to_list(Counter + I))},
+            {<<"value">>, Counter + I},
+            {<<"_attachments">>, {[
+                {<<"icon1.png">>, {[
+                    {<<"data">>, base64:encode(Data)},
+                    {<<"content_type">>, <<"image/png">>}
+                ]}},
+                {<<"icon2.png">>, {[
+                    {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
+                    {<<"content_type">>, <<"image/png">>}
+                ]}}
             ]}}
-        ]}}
-    ]}),
+        ]})
+    end, lists:seq(1, ?WRITE_BATCH_SIZE)),
     maybe_pause(Parent, Counter),
     {ok, Db} = couch_db:open_int(DbName, []),
-    {ok, _} = couch_db:update_doc(Db, Doc, []),
+    {ok, _} = couch_db:update_docs(Db, Docs, []),
     ok = couch_db:close(Db),
     receive
         {get_count, Ref} ->
-            Parent ! {count, Ref, Counter + 1},
-            writer_loop(Db, Parent, Counter + 1);
+            Parent ! {count, Ref, Counter + ?WRITE_BATCH_SIZE},
+            writer_loop(Db, Parent, Counter + ?WRITE_BATCH_SIZE);
         {stop, Ref} ->
-            Parent ! {stopped, Ref, Counter + 1}
+            Parent ! {stopped, Ref, Counter + ?WRITE_BATCH_SIZE}
     after 0 ->
         timer:sleep(?DELAY),
-        writer_loop(Db, Parent, Counter + 1)
+        writer_loop(Db, Parent, Counter + ?WRITE_BATCH_SIZE)
     end.
 
 maybe_pause(Parent, Counter) ->


[couchdb] 12/41: Speedup eunit: chttpd_endpoints_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c7b47fdcf0995d5194852704b29ff9b07fa4ca21
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 14:00:31 2019 -0600

    Speedup eunit: chttpd_endpoints_tests
    
    There's no need to call through mocked functions when we can just assert
    it's the correct function that is returned.
---
 src/couch/test/eunit/chttpd_endpoints_tests.erl | 191 +++++++-----------------
 1 file changed, 55 insertions(+), 136 deletions(-)

diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 6433d3d..3c8586a 100644
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -15,28 +15,27 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 
-setup("mocked") ->
-    fun setup_mocked/1;
-setup("not_mocked") ->
-    fun setup_not_mocked/1.
-
-setup_mocked({Endpoint, {_Path, Module, Function}}) ->
-    catch meck:unload(Module),
-    meck:new(Module, [passthrough, non_strict]),
-    Expected = mock_handler(Endpoint, Module, Function),
-    Expected.
-
-setup_not_mocked({_Endpoint, {_Path, Module, _Function}}) ->
-    catch meck:unload(Module),
-    meck:new(Module, [non_strict]),
-    ok.
-
-teardown({_Endpoint, {Module, _F, _A}}, _) ->
-    catch meck:unload(Module),
-    ok.
-
-handlers(url_handler) ->
-    [
+
+endpoints_test_() ->
+    {
+        "Checking dynamic endpoints",
+        {
+            setup,
+            fun() ->
+                test_util:start_couch([chttpd])
+            end,
+            fun test_util:stop/1,
+            [
+                fun url_handlers/0,
+                fun db_handlers/0,
+                fun design_handlers/0
+            ]
+        }
+    }.
+
+
+url_handlers() ->
+    Handlers = [
         {<<"">>, chttpd_misc, handle_welcome_req},
         {<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
         {<<"_utils">>, chttpd_misc, handle_utils_dir_req},
@@ -51,11 +50,20 @@ handlers(url_handler) ->
         {<<"_up">>, chttpd_misc, handle_up_req},
         {<<"_membership">>, mem3_httpd, handle_membership_req},
         {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
-        {<<"_cluster_setup">>, setup_httpd, handle_setup_req},
-        {<<"anything">>, chttpd_db, handle_request}
-    ];
-handlers(db_handler) ->
-    [
+        {<<"_cluster_setup">>, setup_httpd, handle_setup_req}
+    ],
+
+    lists:foreach(fun({Path, Mod, Fun}) ->
+        Handler = chttpd_handlers:url_handler(Path, undefined),
+        Expect = fun Mod:Fun/1,
+        ?assertEqual(Expect, Handler)
+    end, Handlers),
+
+    ?assertEqual(undefined, chttpd_handlers:url_handler("foo", undefined)).
+
+
+db_handlers() ->
+    Handlers = [
         {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
         {<<"_compact">>, chttpd_db, handle_compact_req},
         {<<"_design">>, chttpd_db, handle_design_req},
@@ -65,120 +73,31 @@ handlers(db_handler) ->
         {<<"_index">>, mango_httpd, handle_req},
         {<<"_explain">>, mango_httpd, handle_req},
         {<<"_find">>, mango_httpd, handle_req}
-    ];
-handlers(design_handler) ->
-    [
+    ],
+
+    lists:foreach(fun({Path, Mod, Fun}) ->
+        Handler = chttpd_handlers:db_handler(Path, undefined),
+        Expect = fun Mod:Fun/2,
+        ?assertEqual(Expect, Handler)
+    end, Handlers),
+
+    ?assertEqual(undefined, chttpd_handlers:db_handler("bam", undefined)).
+
+
+design_handlers() ->
+    Handlers = [
         {<<"_view">>, chttpd_view, handle_view_req},
         {<<"_show">>, chttpd_show, handle_doc_show_req},
         {<<"_list">>, chttpd_show, handle_view_list_req},
         {<<"_update">>, chttpd_show, handle_doc_update_req},
         {<<"_info">>, chttpd_db, handle_design_info_req},
         {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
-    ].
-
-endpoints_test_() ->
-    {
-        "Checking dynamic endpoints",
-        {
-            setup,
-            fun() -> test_util:start_couch([chttpd, mem3, global_changes, mango, setup]) end,
-            fun test_util:stop/1,
-            [
-                check_dynamic_endpoints(
-                    "mocked", url_handler, fun ensure_called/2),
-                check_dynamic_endpoints(
-                    "mocked", db_handler, fun ensure_called/2),
-                check_dynamic_endpoints(
-                    "mocked", design_handler, fun ensure_called/2),
-                check_dynamic_endpoints(
-                    "not_mocked", url_handler, fun verify_we_fail_if_missing/2),
-                check_dynamic_endpoints(
-                    "not_mocked", db_handler, fun verify_we_fail_if_missing/2),
-                check_dynamic_endpoints(
-                    "not_mocked", design_handler, fun verify_we_fail_if_missing/2)
-            ]
-        }
-    }.
-
-check_dynamic_endpoints(Setup, EndpointType, TestFun) ->
-    {
-        "Checking '"
-            ++ atom_to_list(EndpointType)
-            ++ "' [" ++ Setup ++ "] dynamic endpoints",
-        [
-            make_test_case(Setup, EndpointType, Spec, TestFun)
-               || Spec <- handlers(EndpointType)
-        ]
-    }.
-
-make_test_case(Setup, EndpointType, {Path, Module, Function}, TestFun) ->
-    {
-        lists:flatten(io_lib:format("~s -- \"~s\"", [EndpointType, ?b2l(Path)])),
-        {
-            foreachx, setup(Setup), fun teardown/2,
-            [
-                {{EndpointType, {Path, Module, Function}}, TestFun}
-            ]
-        }
-    }.
+    ],
 
+    lists:foreach(fun({Path, Mod, Fun}) ->
+        Handler = chttpd_handlers:design_handler(Path, undefined),
+        Expect = fun Mod:Fun/3,
+        ?assertEqual(Expect, Handler)
+    end, Handlers),
 
-mock_handler(url_handler = Endpoint, M, F) ->
-    meck:expect(M, F, fun(X) -> {return, Endpoint, X} end),
-    fun M:F/1;
-mock_handler(db_handler = Endpoint, M, F) ->
-    meck:expect(M, F, fun(X, Y) -> {return, Endpoint, X, Y} end),
-    fun M:F/2;
-mock_handler(design_handler = Endpoint, M, F) ->
-    meck:expect(M, F, fun(X, Y, Z) -> {return, Endpoint, X, Y, Z} end),
-    fun M:F/3.
-
-ensure_called({url_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assertEqual(ExpectedFun, HandlerFun),
-        ?assertMatch({return, Endpoint, x}, HandlerFun(x))
-     end);
-ensure_called({db_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assertEqual(ExpectedFun, HandlerFun),
-        ?assertMatch({return, Endpoint, x, y}, HandlerFun(x, y))
-     end);
-ensure_called({design_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assertEqual(ExpectedFun, HandlerFun),
-        ?assertMatch({return, Endpoint, x, y, z}, HandlerFun(x, y, z))
-     end).
-
-%% Test the test: when the final target function is missing,
-%% the Fun call must fail.
-verify_we_fail_if_missing({url_handler = Endpoint, {Path, _M, _Fun}}, _) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assert(is_function(HandlerFun)),
-        ?assertError(undef, HandlerFun(x))
-    end);
-verify_we_fail_if_missing({db_handler = Endpoint, {Path, _M, _Fun}}, _) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assert(is_function(HandlerFun)),
-        ?assertError(undef, HandlerFun(x, y))
-    end);
-verify_we_fail_if_missing({design_handler = Endpoint, {Path, _M, _Fun}}, _) ->
-    HandlerFun = handler(Endpoint, Path),
-    ?_test(begin
-        ?assert(is_function(HandlerFun)),
-        ?assertError(undef, HandlerFun(x, y, z))
-    end).
-
-handler(url_handler, HandlerKey) ->
-    chttpd_handlers:url_handler(HandlerKey, fun chttpd_db:handle_request/1);
-handler(db_handler, HandlerKey) ->
-    chttpd_handlers:db_handler(HandlerKey, fun chttpd_db:db_req/2);
-handler(design_handler, HandlerKey) ->
-    chttpd_handlers:design_handler(HandlerKey, fun dummy/3).
-
-dummy(_, _, _) ->
-    throw(error).
+    ?assertEqual(undefined, chttpd_handlers:design_handler("baz", undefined)).


[couchdb] 07/41: Speedup eunit: fabric_doc_open_revs

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 0913e510dae917b0a22d21c09bed034c38187f02
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 12:18:00 2019 -0600

    Speedup eunit: fabric_doc_open_revs
    
    Moving the meck configuration into setup and resetting in foreach makes
    tests quite a bit faster.
---
 src/fabric/src/fabric_doc_open_revs.erl | 90 ++++++++++++++++++++-------------
 1 file changed, 54 insertions(+), 36 deletions(-)

diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 8ac3f30..3d7b9dc 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -317,7 +317,7 @@ collapse_duplicate_revs_int([Reply | Rest]) ->
 -include_lib("eunit/include/eunit.hrl").
 
 
-setup() ->
+setup_all() ->
     config:start_link([]),
     meck:new([fabric, couch_stats, couch_log]),
     meck:new(fabric_util, [passthrough]),
@@ -328,11 +328,24 @@ setup() ->
 
 
 
-teardown(_) ->
-    (catch meck:unload([fabric, couch_stats, couch_log, fabric_util])),
+teardown_all(_) ->
+    meck:unload(),
     config:stop().
 
 
+setup() ->
+    meck:reset([
+        couch_log,
+        couch_stats,
+        fabric,
+        fabric_util
+    ]).
+
+
+teardown(_) ->
+    ok.
+
+
 state0(Revs, Latest) ->
     #state{
         worker_count = 3,
@@ -361,39 +374,44 @@ baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
 
 open_doc_revs_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            check_empty_response_not_quorum(),
-            check_basic_response(),
-            check_finish_quorum(),
-            check_finish_quorum_newer(),
-            check_no_quorum_on_second(),
-            check_done_on_third(),
-            check_specific_revs_first_msg(),
-            check_revs_done_on_agreement(),
-            check_latest_true(),
-            check_ancestor_counted_in_quorum(),
-            check_not_found_counts_for_descendant(),
-            check_worker_error_skipped(),
-            check_quorum_only_counts_valid_responses(),
-            check_empty_list_when_no_workers_reply(),
-            check_node_rev_stored(),
-            check_node_rev_store_head_only(),
-            check_node_rev_store_multiple(),
-            check_node_rev_dont_store_errors(),
-            check_node_rev_store_non_errors(),
-            check_node_rev_store_concatenate(),
-            check_node_rev_store_concantenate_multiple(),
-            check_node_rev_unmodified_on_down_or_exit(),
-            check_not_found_replies_are_removed_when_doc_found(),
-            check_not_found_returned_when_one_of_docs_not_found(),
-            check_not_found_returned_when_doc_not_found(),
-            check_longer_rev_list_returned(),
-            check_longer_rev_list_not_combined(),
-            check_not_found_removed_and_longer_rev_list()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                check_empty_response_not_quorum(),
+                check_basic_response(),
+                check_finish_quorum(),
+                check_finish_quorum_newer(),
+                check_no_quorum_on_second(),
+                check_done_on_third(),
+                check_specific_revs_first_msg(),
+                check_revs_done_on_agreement(),
+                check_latest_true(),
+                check_ancestor_counted_in_quorum(),
+                check_not_found_counts_for_descendant(),
+                check_worker_error_skipped(),
+                check_quorum_only_counts_valid_responses(),
+                check_empty_list_when_no_workers_reply(),
+                check_node_rev_stored(),
+                check_node_rev_store_head_only(),
+                check_node_rev_store_multiple(),
+                check_node_rev_dont_store_errors(),
+                check_node_rev_store_non_errors(),
+                check_node_rev_store_concatenate(),
+                check_node_rev_store_concantenate_multiple(),
+                check_node_rev_unmodified_on_down_or_exit(),
+                check_not_found_replies_are_removed_when_doc_found(),
+                check_not_found_returned_when_one_of_docs_not_found(),
+                check_not_found_returned_when_doc_not_found(),
+                check_longer_rev_list_returned(),
+                check_longer_rev_list_not_combined(),
+                check_not_found_removed_and_longer_rev_list()
+            ]
+        }
     }.
 
 


[couchdb] 08/41: Speedup eunit: couchdb_file_compression_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f6bd4c4864c00160c3a188115ed19d252a919bf3
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 13:02:47 2019 -0600

    Speedup eunit: couchdb_file_compression_tests
    
    This just populates a single test db and then copies the raw file to a
    new database name for each individual test.
---
 .../test/eunit/couchdb_file_compression_tests.erl  | 124 ++++++++++-----------
 1 file changed, 57 insertions(+), 67 deletions(-)

diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
index 8f0fe5b..7725033 100644
--- a/src/couch/test/eunit/couchdb_file_compression_tests.erl
+++ b/src/couch/test/eunit/couchdb_file_compression_tests.erl
@@ -16,10 +16,11 @@
 -include_lib("couch/include/couch_db.hrl").
 
 -define(DDOC_ID, <<"_design/test">>).
--define(DOCS_COUNT, 5000).
--define(TIMEOUT, 60000).
+-define(DOCS_COUNT, 1000).
+-define(TIMEOUT, 60).
 
-setup() ->
+setup_all() ->
+    Ctx = test_util:start_couch(),
     config:set("couchdb", "file_compression", "none", false),
     DbName = ?tempdb(),
     {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
@@ -35,13 +36,13 @@ setup() ->
         }
     ]}),
     {ok, _} = couch_db:update_doc(Db, DDoc, []),
-    refresh_index(DbName),
     ok = couch_db:close(Db),
-    DbName.
+    {Ctx, DbName}.
 
-teardown(DbName) ->
+
+teardown_all({Ctx, DbName}) ->
     ok = couch_server:delete(DbName, [?ADMIN_CTX]),
-    ok.
+    test_util:stop_couch(Ctx).
 
 
 couch_file_compression_test_() ->
@@ -49,75 +50,51 @@ couch_file_compression_test_() ->
         "CouchDB file compression tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
-            {
-                foreach,
-                fun setup/0, fun teardown/1,
-                [
-                    fun should_use_none/1,
-                    fun should_use_deflate_1/1,
-                    fun should_use_deflate_9/1,
-                    fun should_use_snappy/1,
-                    fun should_compare_compression_methods/1
-                ]
-            }
+            fun setup_all/0,
+            fun teardown_all/1,
+            {with, [
+                fun should_use_none/1,
+                fun should_use_deflate_1/1,
+                fun should_use_deflate_9/1,
+                fun should_use_snappy/1,
+                fun should_compare_compression_methods/1
+            ]}
         }
     }.
 
 
-should_use_none(DbName) ->
-    config:set("couchdb", "file_compression", "none", false),
-    {
-        "Use no compression",
-        [
-            {"compact database",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}},
-            {"compact view",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}}
-        ]
-    }.
+should_use_none({_, DbName}) -> run_test(DbName, "none").
+should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1").
+should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9").
+should_use_snappy({_, DbName}) -> run_test(DbName, "snappy").
 
-should_use_deflate_1(DbName) ->
-    config:set("couchdb", "file_compression", "deflate_1", false),
-    {
-        "Use deflate compression at level 1",
-        [
-            {"compact database",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}},
-            {"compact view",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}}
-        ]
-    }.
 
-should_use_deflate_9(DbName) ->
-    config:set("couchdb", "file_compression", "deflate_9", false),
-    {
-        "Use deflate compression at level 9",
-        [
-            {"compact database",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}},
-            {"compact view",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}}
-        ]
-    }.
+should_compare_compression_methods({_, DbName}) ->
+    TestDb = setup_db(DbName),
+    Name = "none > snappy > deflate_1 > deflate_9",
+    try
+        {Name, {timeout, ?TIMEOUT, ?_test(compare_methods(TestDb))}}
+    after
+        couch_server:delete(TestDb, [?ADMIN_CTX])
+    end.
 
-should_use_snappy(DbName) ->
-    config:set("couchdb", "file_compression", "snappy", false),
-    {
-        "Use snappy compression",
-        [
-            {"compact database",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}},
-            {"compact view",
-             {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}}
-        ]
-    }.
 
-should_compare_compression_methods(DbName) ->
-    {"none > snappy > deflate_1 > deflate_9",
-     {timeout, ?TIMEOUT div 1000, ?_test(compare_compression_methods(DbName))}}.
+run_test(DbName, Comp) ->
+    config:set("couchdb", "file_compression", Comp, false),
+    Timeout = 5 + ?TIMEOUT,
+    TestDb = setup_db(DbName),
+    Tests = [
+        {"compact database", {timeout, Timeout, ?_test(compact_db(DbName))}},
+        {"compact view", {timeout, Timeout, ?_test(compact_view(DbName))}}
+    ],
+    try
+        {"Use compression: " ++ Comp, Tests}
+    after
+        ok = couch_server:delete(TestDb, [?ADMIN_CTX])
+    end.
+
 
-compare_compression_methods(DbName) ->
+compare_methods(DbName) ->
     config:set("couchdb", "file_compression", "none", false),
     ExternalSizePreCompact = db_external_size(DbName),
     compact_db(DbName),
@@ -178,6 +155,19 @@ populate_db(Db, NumDocs) ->
     {ok, _} = couch_db:update_docs(Db, Docs, []),
     populate_db(Db, NumDocs - 500).
 
+
+setup_db(SrcDbName) ->
+    TgtDbName = ?tempdb(),
+    TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch",
+    couch_util:with_db(SrcDbName, fun(Db) ->
+        OldPath = couch_db:get_filepath(Db),
+        NewPath = filename:join(filename:dirname(OldPath), TgtDbFileName),
+        {ok, _} = file:copy(OldPath, NewPath)
+    end),
+    refresh_index(TgtDbName),
+    TgtDbName.
+
+
 refresh_index(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),


[couchdb] 39/41: Speedup eunit: mem3_shards

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3b60bd9ef2b0a110503aaba4efcb1b784ad6c81b
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:42:34 2019 -0600

    Speedup eunit: mem3_shards
---
 src/mem3/src/mem3_shards.erl | 54 +++++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 18 deletions(-)

diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index dfa40c3..110e227 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -525,25 +525,30 @@ filter_shards_by_range(Range, Shards)->
 
 mem3_shards_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_maybe_spawn_shard_writer_already_exists(),
-            t_maybe_spawn_shard_writer_new(),
-            t_flush_writer_exists_normal(),
-            t_flush_writer_times_out(),
-            t_flush_writer_crashes(),
-            t_writer_deletes_itself_when_done(),
-            t_writer_does_not_delete_other_writers_for_same_shard(),
-            t_spawn_writer_in_load_shards_from_db(),
-            t_cache_insert_takes_new_update(),
-            t_cache_insert_ignores_stale_update_and_kills_worker()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_maybe_spawn_shard_writer_already_exists(),
+                t_maybe_spawn_shard_writer_new(),
+                t_flush_writer_exists_normal(),
+                t_flush_writer_times_out(),
+                t_flush_writer_crashes(),
+                t_writer_deletes_itself_when_done(),
+                t_writer_does_not_delete_other_writers_for_same_shard(),
+                t_spawn_writer_in_load_shards_from_db(),
+                t_cache_insert_takes_new_update(),
+                t_cache_insert_ignores_stale_update_and_kills_worker()
+            ]
+        }
     }.
 
 
-setup() ->
+setup_all() ->
     ets:new(?SHARDS, [bag, public, named_table, {keypos, #shard.dbname}]),
     ets:new(?OPENERS, [bag, public, named_table]),
     ets:new(?DBS, [set, public, named_table]),
@@ -552,7 +557,7 @@ setup() ->
     ok.
 
 
-teardown(_) ->
+teardown_all(_) ->
     meck:unload(),
     ets:delete(?ATIMES),
     ets:delete(?DBS),
@@ -560,6 +565,17 @@ teardown(_) ->
     ets:delete(?SHARDS).
 
 
+setup() ->
+    ets:delete_all_objects(?ATIMES),
+    ets:delete_all_objects(?DBS),
+    ets:delete_all_objects(?OPENERS),
+    ets:delete_all_objects(?SHARDS).
+
+
+teardown(_) ->
+    ok.
+
+
 t_maybe_spawn_shard_writer_already_exists() ->
     ?_test(begin
         ets:insert(?OPENERS, {?DB, self()}),
@@ -653,7 +669,9 @@ t_spawn_writer_in_load_shards_from_db() ->
         ?assertMatch({cache_insert, ?DB, Pid, 1} when is_pid(Pid), Cast),
         {cache_insert, _, WPid, _} = Cast,
         exit(WPid, kill),
-        ?assertEqual([{?DB, WPid}], ets:tab2list(?OPENERS))
+        ?assertEqual([{?DB, WPid}], ets:tab2list(?OPENERS)),
+        meck:unload(couch_db),
+        meck:unload(mem3_util)
     end).
 
 


[couchdb] 10/41: Speedup eunit: chttpd_db_bulk_get_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a70445626b0e844e13db99ba924f5b803db9f663
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 13:20:22 2019 -0600

    Speedup eunit: chttpd_db_bulk_get_test
    
    Another example of moving mocks to a setup and using `meck:reset/1` in
    the foreach fixture.
---
 src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl | 64 ++++++++++++-----------
 1 file changed, 34 insertions(+), 30 deletions(-)

diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
index 864e707..1a34112 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
@@ -18,45 +18,51 @@
 -define(TIMEOUT, 3000).
 
 
-setup() ->
+setup_all() ->
     mock(config),
     mock(chttpd),
     mock(couch_epi),
     mock(couch_httpd),
     mock(couch_stats),
     mock(fabric),
-    mock(mochireq),
-    Pid = spawn_accumulator(),
-    Pid.
+    mock(mochireq).
+
+
+teardown_all(_) ->
+    meck:unload().
+
+
+setup() ->
+    spawn_accumulator().
 
 
 teardown(Pid) ->
-    ok = stop_accumulator(Pid),
-    meck:unload(config),
-    meck:unload(chttpd),
-    meck:unload(couch_epi),
-    meck:unload(couch_httpd),
-    meck:unload(couch_stats),
-    meck:unload(fabric),
-    meck:unload(mochireq).
+    ok = stop_accumulator(Pid).
 
 
 bulk_get_test_() ->
     {
         "/db/_bulk_get tests",
         {
-            foreach, fun setup/0, fun teardown/1,
-            [
-                fun should_require_docs_field/1,
-                fun should_not_accept_specific_query_params/1,
-                fun should_return_empty_results_on_no_docs/1,
-                fun should_get_doc_with_all_revs/1,
-                fun should_validate_doc_with_bad_id/1,
-                fun should_validate_doc_with_bad_rev/1,
-                fun should_validate_missing_doc/1,
-                fun should_validate_bad_atts_since/1,
-                fun should_include_attachments_when_atts_since_specified/1
-            ]
+            setup,
+            fun setup_all/0,
+            fun teardown_all/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun should_require_docs_field/1,
+                    fun should_not_accept_specific_query_params/1,
+                    fun should_return_empty_results_on_no_docs/1,
+                    fun should_get_doc_with_all_revs/1,
+                    fun should_validate_doc_with_bad_id/1,
+                    fun should_validate_doc_with_bad_rev/1,
+                    fun should_validate_missing_doc/1,
+                    fun should_validate_bad_atts_since/1,
+                    fun should_include_attachments_when_atts_since_specified/1
+                ]
+            }
         }
     }.
 
@@ -69,12 +75,10 @@ should_require_docs_field(_) ->
 should_not_accept_specific_query_params(_) ->
     Req = fake_request({[{<<"docs">>, []}]}),
     lists:map(fun (Param) ->
-        {Param, ?_assertThrow({bad_request, _},
-                              begin
-                                  ok = meck:expect(chttpd, qs,
-                                                   fun(_) -> [{Param, ""}] end),
-                                  chttpd_db:db_req(Req, nil)
-                              end)}
+        {Param, ?_assertThrow({bad_request, _}, begin
+            BadReq = Req#httpd{qs = [{Param, ""}]},
+            chttpd_db:db_req(BadReq, nil)
+        end)}
     end, ["rev", "open_revs", "atts_since", "w", "new_edits"]).
 
 


[couchdb] 06/41: Speedup eunit: couch_multidb_changes

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit fc3f4a464747518e167f3749b1a4e25369dc0739
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 11:50:05 2019 -0600

    Speedup eunit: couch_multidb_changes
    
    This moves all of the mock work to a single setup function and then just
    resets all of the mocked modules before each test run.
---
 src/couch/src/couch_multidb_changes.erl | 131 ++++++++++++++++++--------------
 1 file changed, 75 insertions(+), 56 deletions(-)

diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
index ccdc0f9..e2bbda3 100644
--- a/src/couch/src/couch_multidb_changes.erl
+++ b/src/couch/src/couch_multidb_changes.erl
@@ -342,41 +342,46 @@ is_design_doc_id(_) ->
 
 couch_multidb_changes_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_handle_call_change(),
-            t_handle_call_change_filter_design_docs(),
-            t_handle_call_checkpoint_new(),
-            t_handle_call_checkpoint_existing(),
-            t_handle_info_created(),
-            t_handle_info_deleted(),
-            t_handle_info_updated(),
-            t_handle_info_other_event(),
-            t_handle_info_created_other_db(),
-            t_handle_info_scanner_exit_normal(),
-            t_handle_info_scanner_crashed(),
-            t_handle_info_event_server_exited(),
-            t_handle_info_unknown_pid_exited(),
-            t_handle_info_change_feed_exited(),
-            t_handle_info_change_feed_exited_and_need_rescan(),
-            t_spawn_changes_reader(),
-            t_changes_reader_cb_change(),
-            t_changes_reader_cb_stop(),
-            t_changes_reader_cb_other(),
-            t_handle_call_resume_scan_no_chfeed_no_ets_entry(),
-            t_handle_call_resume_scan_chfeed_no_ets_entry(),
-            t_handle_call_resume_scan_chfeed_ets_entry(),
-            t_handle_call_resume_scan_no_chfeed_ets_entry(),
-            t_start_link(),
-            t_start_link_no_ddocs(),
-            t_misc_gen_server_callbacks()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_handle_call_change(),
+                t_handle_call_change_filter_design_docs(),
+                t_handle_call_checkpoint_new(),
+                t_handle_call_checkpoint_existing(),
+                t_handle_info_created(),
+                t_handle_info_deleted(),
+                t_handle_info_updated(),
+                t_handle_info_other_event(),
+                t_handle_info_created_other_db(),
+                t_handle_info_scanner_exit_normal(),
+                t_handle_info_scanner_crashed(),
+                t_handle_info_event_server_exited(),
+                t_handle_info_unknown_pid_exited(),
+                t_handle_info_change_feed_exited(),
+                t_handle_info_change_feed_exited_and_need_rescan(),
+                t_spawn_changes_reader(),
+                t_changes_reader_cb_change(),
+                t_changes_reader_cb_stop(),
+                t_changes_reader_cb_other(),
+                t_handle_call_resume_scan_no_chfeed_no_ets_entry(),
+                t_handle_call_resume_scan_chfeed_no_ets_entry(),
+                t_handle_call_resume_scan_chfeed_ets_entry(),
+                t_handle_call_resume_scan_no_chfeed_ets_entry(),
+                t_start_link(),
+                t_start_link_no_ddocs(),
+                t_misc_gen_server_callbacks()
+            ]
+        }
     }.
 
 
-setup() ->
+setup_all() ->
     mock_logs(),
     mock_callback_mod(),
     meck:expect(couch_event, register_all, 1, ok),
@@ -397,12 +402,26 @@ setup() ->
     EvtPid.
 
 
-teardown(EvtPid) ->
+teardown_all(EvtPid) ->
     unlink(EvtPid),
     exit(EvtPid, kill),
     meck:unload().
 
 
+setup() ->
+    meck:reset([
+        ?MOD,
+        couch_changes,
+        couch_db,
+        couch_event,
+        couch_log
+    ]).
+
+
+teardown(_) ->
+    ok.
+
+
 t_handle_call_change() ->
     ?_test(begin
         State = mock_state(),
@@ -728,38 +747,41 @@ t_misc_gen_server_callbacks() ->
 
 scan_dbs_test_() ->
 {
-    foreach,
-    fun() -> test_util:start_couch([mem3, fabric]) end,
-    fun(Ctx) -> test_util:stop_couch(Ctx) end,
-    [
-        t_find_shard(),
-        t_shard_not_found(),
-        t_pass_local(),
-        t_fail_local()
-    ]
+    setup,
+    fun() ->
+        Ctx = test_util:start_couch([mem3, fabric]),
+        GlobalDb = ?tempdb(),
+        ok = fabric:create_db(GlobalDb, [?CTX]),
+        #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)),
+        {Ctx, GlobalDb, LocalDb}
+    end,
+    fun({Ctx, GlobalDb, _LocalDb}) ->
+        fabric:delete_db(GlobalDb, [?CTX]),
+        test_util:stop_couch(Ctx)
+    end,
+    {with, [
+        fun t_find_shard/1,
+        fun t_shard_not_found/1,
+        fun t_pass_local/1,
+        fun t_fail_local/1
+    ]}
 }.
 
 
-t_find_shard() ->
+t_find_shard({_, DbName, _}) ->
     ?_test(begin
-        DbName = ?tempdb(),
-        ok = fabric:create_db(DbName, [?CTX]),
-        ?assertEqual(2, length(local_shards(DbName))),
-        fabric:delete_db(DbName, [?CTX])
+        ?assertEqual(2, length(local_shards(DbName)))
     end).
 
 
-t_shard_not_found() ->
+t_shard_not_found(_) ->
     ?_test(begin
         ?assertEqual([], local_shards(?tempdb()))
     end).
 
 
-t_pass_local() ->
+t_pass_local({_, _, LocalDb}) ->
     ?_test(begin
-        LocalDb = ?tempdb(),
-        {ok, Db} = couch_db:create(LocalDb, [?CTX]),
-        ok = couch_db:close(Db),
         scan_local_db(self(), LocalDb),
         receive
             {'$gen_cast', Msg} ->
@@ -770,11 +792,8 @@ t_pass_local() ->
     end).
 
 
-t_fail_local() ->
+t_fail_local({_, _, LocalDb}) ->
     ?_test(begin
-        LocalDb = ?tempdb(),
-        {ok, Db} = couch_db:create(LocalDb, [?CTX]),
-        ok = couch_db:close(Db),
         scan_local_db(self(), <<"some_other_db">>),
         receive
             {'$gen_cast', Msg} ->


[couchdb] 22/41: Speedup eunit: couch_uuids_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 287d8bfe3573f7ada0c042895b3788470c2e866e
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:34:47 2019 -0600

    Speedup eunit: couch_uuids_tests
    
    Avoid `test_util:start/3` as it wastes time mocking modules for each
    test.
---
 src/couch/test/eunit/couch_uuids_tests.erl | 166 ++++++++++++-----------------
 1 file changed, 68 insertions(+), 98 deletions(-)

diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
index a836ecc..9ca2c8a 100644
--- a/src/couch/test/eunit/couch_uuids_tests.erl
+++ b/src/couch/test/eunit/couch_uuids_tests.erl
@@ -14,113 +14,79 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 
--define(TIMEOUT_S, 20).
+-define(TIMEOUT, 20).
 
 
-setup() ->
-    Ctx = test_util:start(?MODULE, [], [{dont_mock, [config]}]),
-    couch_uuids:start(),
-    Ctx.
+setup_all() ->
+    test_util:start_applications([config]),
+    couch_uuids:start().
 
-setup(Opts) ->
-    Pid = setup(),
-    lists:foreach(
-        fun({Option, Value}) ->
-            config:set("uuids", Option, Value, false)
-        end, Opts),
-    Pid.
 
-teardown(Ctx) ->
+teardown_all(_) ->
     couch_uuids:stop(),
-    test_util:stop(Ctx).
+    test_util:stop_applications([config]).
 
-teardown(_, Ctx) ->
-    teardown(Ctx).
 
-
-default_test_() ->
+uuids_test_() ->
     {
-        "Default UUID algorithm",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            fun should_be_unique/1
-        }
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        [
+            {timeout, ?TIMEOUT, fun default_algorithm/0},
+            {timeout, ?TIMEOUT, fun sequential_algorithm/0},
+            {timeout, ?TIMEOUT, fun utc_algorithm/0},
+            {timeout, ?TIMEOUT, fun utc_id_suffix_algorithm/0}
+        ]
     }.
 
-sequential_test_() ->
-    Opts = [{"algorithm", "sequential"}],
-    Cases = [
-        fun should_be_unique/2,
-        fun should_increment_monotonically/2,
-        fun should_rollover/2
-    ],
-    {
-        "UUID algorithm: sequential",
-        {
-            foreachx,
-            fun setup/1, fun teardown/2,
-            [{Opts, Fun} || Fun <- Cases]
-        }
-    }.
 
-utc_test_() ->
-    Opts = [{"algorithm", "utc_random"}],
-    Cases = [
-        fun should_be_unique/2,
-        fun should_increment_monotonically/2
-    ],
-    {
-        "UUID algorithm: utc_random",
-        {
-            foreachx,
-            fun setup/1, fun teardown/2,
-            [{Opts, Fun} || Fun <- Cases]
-        }
-    }.
+default_algorithm() ->
+    config:delete("uuids", "algorithm", false),
+    check_unique().
+
+
+sequential_algorithm() ->
+    config:set("uuids", "algorithm", "sequential", false),
+    check_unique(),
+    check_increment_monotonically(),
+    check_rollover().
 
-utc_id_suffix_test_() ->
-    Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}],
-    Cases = [
-        fun should_be_unique/2,
-        fun should_increment_monotonically/2,
-        fun should_preserve_suffix/2
-    ],
-    {
-        "UUID algorithm: utc_id",
-        {
-            foreachx,
-            fun setup/1, fun teardown/2,
-            [{Opts, Fun} || Fun <- Cases]
-        }
-    }.
 
+utc_algorithm() ->
+    config:set("uuids", "algorithm", "utc_random", false),
+    check_unique(),
+    check_increment_monotonically().
 
-should_be_unique() ->
+
+utc_id_suffix_algorithm() ->
+    config:set("uuids", "algorithm", "utc_id", false),
+    config:set("uuids", "utc_id_suffix", "bozo", false),
+    check_unique(),
+    check_increment_monotonically(),
+    check_preserve_suffix().
+
+
+check_unique() ->
     %% this one may really runs for too long on slow hosts
-    {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}.
-should_be_unique(_) ->
-    should_be_unique().
-should_be_unique(_, _) ->
-    should_be_unique().
-
-should_increment_monotonically(_, _) ->
-    ?_assert(couch_uuids:new() < couch_uuids:new()).
-
-should_rollover(_, _) ->
-    ?_test(begin
-        UUID = binary_to_list(couch_uuids:new()),
-        Prefix = element(1, lists:split(26, UUID)),
-        N = gen_until_pref_change(Prefix, 0),
-        ?assert(N >= 5000 andalso N =< 11000)
-    end).
-
-should_preserve_suffix(_, _) ->
-    ?_test(begin
-        UUID = binary_to_list(couch_uuids:new()),
-        Suffix = get_suffix(UUID),
-        ?assert(test_same_suffix(10000, Suffix))
-    end).
+    ?assert(test_unique(10000, [couch_uuids:new()])).
+
+
+check_increment_monotonically() ->
+    ?assert(couch_uuids:new() < couch_uuids:new()).
+
+
+check_rollover() ->
+    UUID = binary_to_list(couch_uuids:new()),
+    Prefix = element(1, lists:split(26, UUID)),
+    N = gen_until_pref_change(Prefix, 0),
+    ?assert(N >= 5000 andalso N =< 11000).
+
+
+check_preserve_suffix() ->
+    UUID = binary_to_list(couch_uuids:new()),
+    Suffix = get_suffix(UUID),
+    ?assert(test_same_suffix(10000, Suffix)).
 
 
 test_unique(0, _) ->
@@ -130,8 +96,6 @@ test_unique(N, UUIDs) ->
     ?assertNot(lists:member(UUID, UUIDs)),
     test_unique(N - 1, [UUID| UUIDs]).
 
-get_prefix(UUID) ->
-    element(1, lists:split(26, binary_to_list(UUID))).
 
 gen_until_pref_change(_, Count) when Count > 8251 ->
     Count;
@@ -141,10 +105,6 @@ gen_until_pref_change(Prefix, N) ->
         _ -> N
     end.
 
-get_suffix(UUID) when is_binary(UUID) ->
-    get_suffix(binary_to_list(UUID));
-get_suffix(UUID) ->
-    element(2, lists:split(14, UUID)).
 
 test_same_suffix(0, _) ->
     true;
@@ -153,3 +113,13 @@ test_same_suffix(N, Suffix) ->
         Suffix -> test_same_suffix(N - 1, Suffix);
         _ -> false
     end.
+
+
+get_prefix(UUID) ->
+    element(1, lists:split(26, binary_to_list(UUID))).
+
+
+get_suffix(UUID) when is_binary(UUID) ->
+    get_suffix(binary_to_list(UUID));
+get_suffix(UUID) ->
+    element(2, lists:split(14, UUID)).


[couchdb] 17/41: Speedup eunit: couch_db

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5a1edf362df62cda96b12767b8d4fdf926b83cc2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:31:21 2019 -0600

    Speedup eunit: couch_db
---
 src/couch/src/couch_db.erl | 70 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 52 insertions(+), 18 deletions(-)

diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 1e09b9e..e1d726d 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -1870,13 +1870,19 @@ set_design_doc_end_key(Options, rev) ->
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
-setup() ->
+setup_all() ->
     ok = meck:new(couch_epi, [passthrough]),
     ok = meck:expect(couch_epi, decide, fun(_, _, _, _, _) -> no_decision end),
     ok.
 
+teardown_all(_) ->
+    meck:unload().
+
+setup() ->
+    meck:reset([couch_epi]).
+
 teardown(_) ->
-    (catch meck:unload(couch_epi)).
+    ok.
 
 validate_dbname_success_test_() ->
     Cases =
@@ -1886,8 +1892,15 @@ validate_dbname_success_test_() ->
             [generate_cases_with_shards(?b2l(SystemDb))
                 || SystemDb <- ?SYSTEM_DATABASES]),
     {
-        foreach, fun setup/0, fun teardown/1,
-        [should_pass_validate_dbname(A) || {_, A} <- Cases]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [should_pass_validate_dbname(A) || {_, A} <- Cases]
+        }
     }.
 
 validate_dbname_fail_test_() ->
@@ -1898,8 +1911,15 @@ validate_dbname_fail_test_() ->
        ++ generate_cases("!abcdefg/werwej/_users")
        ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
     {
-        foreach, fun setup/0, fun teardown/1,
-        [should_fail_validate_dbname(A) || {_, A} <- Cases]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [should_fail_validate_dbname(A) || {_, A} <- Cases]
+        }
     }.
 
 normalize_dbname_test_() ->
@@ -1941,19 +1961,24 @@ should_fail_validate_dbname(DbName) ->
 
 calculate_start_seq_test_() ->
     {
-        foreach,
-        fun setup_start_seq/0,
-        fun teardown_start_seq/1,
-        [
-            t_calculate_start_seq_uuid_mismatch(),
-            t_calculate_start_seq_is_owner(),
-            t_calculate_start_seq_not_owner(),
-            t_calculate_start_seq_raw(),
-            t_calculate_start_seq_epoch_mismatch()
-        ]
+        setup,
+        fun setup_start_seq_all/0,
+        fun teardown_start_seq_all/1,
+        {
+            foreach,
+            fun setup_start_seq/0,
+            fun teardown_start_seq/1,
+            [
+                t_calculate_start_seq_uuid_mismatch(),
+                t_calculate_start_seq_is_owner(),
+                t_calculate_start_seq_not_owner(),
+                t_calculate_start_seq_raw(),
+                t_calculate_start_seq_epoch_mismatch()
+            ]
+        }
     }.
 
-setup_start_seq() ->
+setup_start_seq_all() ->
     meck:new(couch_db_engine, [passthrough]),
     meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
     ok = meck:expect(couch_log, warning, 2, ok),
@@ -1963,9 +1988,18 @@ setup_start_seq() ->
     ],
     meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
 
-teardown_start_seq(_) ->
+teardown_start_seq_all(_) ->
     meck:unload().
 
+setup_start_seq() ->
+    meck:reset([
+        couch_db_engine,
+        couch_log
+    ]).
+
+teardown_start_seq(_) ->
+    ok.
+
 t_calculate_start_seq_uuid_mismatch() ->
     ?_test(begin
         Db = test_util:fake_db([]),


[couchdb] 20/41: Speedup eunit: couch_file_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 226a8cd8080e9064eded64f739a3e650d6a94a89
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:32:02 2019 -0600

    Speedup eunit: couch_file_tests
---
 src/couch/test/eunit/couch_file_tests.erl | 166 +++++++++++++++++-------------
 1 file changed, 92 insertions(+), 74 deletions(-)

diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
index e9806c0..606f4bb 100644
--- a/src/couch/test/eunit/couch_file_tests.erl
+++ b/src/couch/test/eunit/couch_file_tests.erl
@@ -327,45 +327,53 @@ delete_test_() ->
     {
         "File delete tests",
         {
-            foreach,
+            setup,
             fun() ->
-                meck:new(config, [passthrough]),
-                File = ?tempfile() ++ ".couch",
-                RootDir = filename:dirname(File),
-                ok = couch_file:init_delete_dir(RootDir),
-                ok = file:write_file(File, <<>>),
-                {RootDir, File}
+                meck:new(config, [passthrough])
             end,
-            fun({_, File}) ->
-                meck:unload(config),
-                file:delete(File)
+            fun(_) ->
+                meck:unload()
             end,
-            [
-                fun(Cfg) ->
-                    {"enable_database_recovery = false, context = delete",
-                    make_enable_recovery_test_case(Cfg, false, delete)}
-                end,
-                fun(Cfg) ->
-                    {"enable_database_recovery = true, context = delete",
-                    make_enable_recovery_test_case(Cfg, true, delete)}
+            {
+                foreach,
+                fun() ->
+                    meck:reset([config]),
+                    File = ?tempfile() ++ ".couch",
+                    RootDir = filename:dirname(File),
+                    ok = couch_file:init_delete_dir(RootDir),
+                    ok = file:write_file(File, <<>>),
+                    {RootDir, File}
                 end,
-                fun(Cfg) ->
-                    {"enable_database_recovery = false, context = compaction",
-                    make_enable_recovery_test_case(Cfg, false, compaction)}
+                fun({_, File}) ->
+                    file:delete(File)
                 end,
-                fun(Cfg) ->
-                    {"enable_database_recovery = true, context = compaction",
-                    make_enable_recovery_test_case(Cfg, true, compaction)}
-                end,
-                fun(Cfg) ->
-                    {"delete_after_rename = true",
-                    make_delete_after_rename_test_case(Cfg, true)}
-                end,
-                fun(Cfg) ->
-                    {"delete_after_rename = false",
-                    make_delete_after_rename_test_case(Cfg, false)}
-                end
-            ]
+                [
+                    fun(Cfg) ->
+                        {"enable_database_recovery = false, context = delete",
+                        make_enable_recovery_test_case(Cfg, false, delete)}
+                    end,
+                    fun(Cfg) ->
+                        {"enable_database_recovery = true, context = delete",
+                        make_enable_recovery_test_case(Cfg, true, delete)}
+                    end,
+                    fun(Cfg) ->
+                        {"enable_database_recovery = false, context = compaction",
+                        make_enable_recovery_test_case(Cfg, false, compaction)}
+                    end,
+                    fun(Cfg) ->
+                        {"enable_database_recovery = true, context = compaction",
+                        make_enable_recovery_test_case(Cfg, true, compaction)}
+                    end,
+                    fun(Cfg) ->
+                        {"delete_after_rename = true",
+                        make_delete_after_rename_test_case(Cfg, true)}
+                    end,
+                    fun(Cfg) ->
+                        {"delete_after_rename = false",
+                        make_delete_after_rename_test_case(Cfg, false)}
+                    end
+                ]
+            }
         }
     }.
 
@@ -412,49 +420,57 @@ nuke_dir_test_() ->
     {
         "Nuke directory tests",
         {
-            foreach,
+            setup,
             fun() ->
-                meck:new(config, [passthrough]),
-                File0 = ?tempfile() ++ ".couch",
-                RootDir = filename:dirname(File0),
-                BaseName = filename:basename(File0),
-                Seed = couch_rand:uniform(8999999999) + 999999999,
-                DDocDir = io_lib:format("db.~b_design", [Seed]),
-                ViewDir = filename:join([RootDir, DDocDir]),
-                file:make_dir(ViewDir),
-                File = filename:join([ViewDir, BaseName]),
-                file:rename(File0, File),
-                ok = couch_file:init_delete_dir(RootDir),
-                ok = file:write_file(File, <<>>),
-                {RootDir, ViewDir}
+                meck:new(config, [passthrough])
             end,
-            fun({RootDir, ViewDir}) ->
-                meck:unload(config),
-                remove_dir(ViewDir),
-                Ext = filename:extension(ViewDir),
-                case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of
-                    [DelDir] -> remove_dir(DelDir);
-                    _ -> ok
-                end
+            fun(_) ->
+                meck:unload()
             end,
-            [
-                fun(Cfg) ->
-                    {"enable_database_recovery = false",
-                    make_rename_dir_test_case(Cfg, false)}
-                end,
-                fun(Cfg) ->
-                    {"enable_database_recovery = true",
-                    make_rename_dir_test_case(Cfg, true)}
+            {
+                foreach,
+                fun() ->
+                    meck:reset([config]),
+                    File0 = ?tempfile() ++ ".couch",
+                    RootDir = filename:dirname(File0),
+                    BaseName = filename:basename(File0),
+                    Seed = couch_rand:uniform(8999999999) + 999999999,
+                    DDocDir = io_lib:format("db.~b_design", [Seed]),
+                    ViewDir = filename:join([RootDir, DDocDir]),
+                    file:make_dir(ViewDir),
+                    File = filename:join([ViewDir, BaseName]),
+                    file:rename(File0, File),
+                    ok = couch_file:init_delete_dir(RootDir),
+                    ok = file:write_file(File, <<>>),
+                    {RootDir, ViewDir}
                 end,
-                fun(Cfg) ->
-                    {"delete_after_rename = true",
-                    make_delete_dir_test_case(Cfg, true)}
+                fun({RootDir, ViewDir}) ->
+                    remove_dir(ViewDir),
+                    Ext = filename:extension(ViewDir),
+                    case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of
+                        [DelDir] -> remove_dir(DelDir);
+                        _ -> ok
+                    end
                 end,
-                fun(Cfg) ->
-                    {"delete_after_rename = false",
-                    make_delete_dir_test_case(Cfg, false)}
-                end
-            ]
+                [
+                    fun(Cfg) ->
+                        {"enable_database_recovery = false",
+                        make_rename_dir_test_case(Cfg, false)}
+                    end,
+                    fun(Cfg) ->
+                        {"enable_database_recovery = true",
+                        make_rename_dir_test_case(Cfg, true)}
+                    end,
+                    fun(Cfg) ->
+                        {"delete_after_rename = true",
+                        make_delete_dir_test_case(Cfg, true)}
+                    end,
+                    fun(Cfg) ->
+                        {"delete_after_rename = false",
+                        make_delete_dir_test_case(Cfg, false)}
+                    end
+                ]
+            }
         }
     }.
 
@@ -462,7 +478,8 @@ nuke_dir_test_() ->
 make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
     meck:expect(config, get_boolean, fun
         ("couchdb", "enable_database_recovery", _) -> EnableRecovery;
-        ("couchdb", "delete_after_rename", _) -> true
+        ("couchdb", "delete_after_rename", _) -> true;
+        (_, _, Default) -> Default
     end),
     DirExistsBefore = filelib:is_dir(ViewDir),
     couch_file:nuke_dir(RootDir, ViewDir),
@@ -479,7 +496,8 @@ make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
 make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
     meck:expect(config, get_boolean, fun
         ("couchdb", "enable_database_recovery", _) -> false;
-        ("couchdb", "delete_after_rename", _) -> DeleteAfterRename
+        ("couchdb", "delete_after_rename", _) -> DeleteAfterRename;
+        (_, _, Default) -> Default
     end),
     DirExistsBefore = filelib:is_dir(ViewDir),
     couch_file:nuke_dir(RootDir, ViewDir),


[couchdb] 31/41: Speedup eunit: couch_replicator_scheduler

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3f614cac4214e784a603d6d4859582aad322bf96
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:39:45 2019 -0600

    Speedup eunit: couch_replicator_scheduler
---
 .../src/couch_replicator_scheduler.erl             | 93 +++++++++++++---------
 1 file changed, 55 insertions(+), 38 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index dde2141..f84860c 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -1037,42 +1037,47 @@ longest_running_test() ->
 
 scheduler_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_pending_jobs_simple(),
-            t_pending_jobs_skip_crashed(),
-            t_one_job_starts(),
-            t_no_jobs_start_if_max_is_0(),
-            t_one_job_starts_if_max_is_1(),
-            t_max_churn_does_not_throttle_initial_start(),
-            t_excess_oneshot_only_jobs(),
-            t_excess_continuous_only_jobs(),
-            t_excess_prefer_continuous_first(),
-            t_stop_oldest_first(),
-            t_start_oldest_first(),
-            t_jobs_churn_even_if_not_all_max_jobs_are_running(),
-            t_jobs_dont_churn_if_there_are_available_running_slots(),
-            t_start_only_pending_jobs_do_not_churn_existing_ones(),
-            t_dont_stop_if_nothing_pending(),
-            t_max_churn_limits_number_of_rotated_jobs(),
-            t_existing_jobs(),
-            t_if_pending_less_than_running_start_all_pending(),
-            t_running_less_than_pending_swap_all_running(),
-            t_oneshot_dont_get_rotated(),
-            t_rotate_continuous_only_if_mixed(),
-            t_oneshot_dont_get_starting_priority(),
-            t_oneshot_will_hog_the_scheduler(),
-            t_if_excess_is_trimmed_rotation_still_happens(),
-            t_if_transient_job_crashes_it_gets_removed(),
-            t_if_permanent_job_crashes_it_stays_in_ets(),
-            t_job_summary_running(),
-            t_job_summary_pending(),
-            t_job_summary_crashing_once(),
-            t_job_summary_crashing_many_times(),
-            t_job_summary_proxy_fields()
-         ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_pending_jobs_simple(),
+                t_pending_jobs_skip_crashed(),
+                t_one_job_starts(),
+                t_no_jobs_start_if_max_is_0(),
+                t_one_job_starts_if_max_is_1(),
+                t_max_churn_does_not_throttle_initial_start(),
+                t_excess_oneshot_only_jobs(),
+                t_excess_continuous_only_jobs(),
+                t_excess_prefer_continuous_first(),
+                t_stop_oldest_first(),
+                t_start_oldest_first(),
+                t_jobs_churn_even_if_not_all_max_jobs_are_running(),
+                t_jobs_dont_churn_if_there_are_available_running_slots(),
+                t_start_only_pending_jobs_do_not_churn_existing_ones(),
+                t_dont_stop_if_nothing_pending(),
+                t_max_churn_limits_number_of_rotated_jobs(),
+                t_existing_jobs(),
+                t_if_pending_less_than_running_start_all_pending(),
+                t_running_less_than_pending_swap_all_running(),
+                t_oneshot_dont_get_rotated(),
+                t_rotate_continuous_only_if_mixed(),
+                t_oneshot_dont_get_starting_priority(),
+                t_oneshot_will_hog_the_scheduler(),
+                t_if_excess_is_trimmed_rotation_still_happens(),
+                t_if_transient_job_crashes_it_gets_removed(),
+                t_if_permanent_job_crashes_it_stays_in_ets(),
+                t_job_summary_running(),
+                t_job_summary_pending(),
+                t_job_summary_crashing_once(),
+                t_job_summary_crashing_many_times(),
+                t_job_summary_proxy_fields()
+            ]
+        }
     }.
 
 
@@ -1521,7 +1526,7 @@ t_job_summary_proxy_fields() ->
 
 % Test helper functions
 
-setup() ->
+setup_all() ->
     catch ets:delete(?MODULE),
     meck:expect(couch_log, notice, 2, ok),
     meck:expect(couch_log, warning, 2, ok),
@@ -1533,11 +1538,23 @@ setup() ->
     meck:expect(couch_replicator_scheduler_sup, start_child, 1, {ok, Pid}).
 
 
-teardown(_) ->
+teardown_all(_) ->
     catch ets:delete(?MODULE),
     meck:unload().
 
 
+setup() ->
+    meck:reset([
+        couch_log,
+        couch_replicator_scheduler_sup,
+        couch_stats
+    ]).
+
+
+teardown(_) ->
+    ok.
+
+
 setup_jobs(Jobs) when is_list(Jobs) ->
     ?MODULE = ets:new(?MODULE, [named_table, {keypos, #job.id}]),
     ets:insert(?MODULE, Jobs).


[couchdb] 28/41: Speedup eunit: couch_replicator

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit dc37489da7ee3976f8f8bb37e886caf5394a2486
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:38:15 2019 -0600

    Speedup eunit: couch_replicator
---
 src/couch_replicator/src/couch_replicator.erl | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 510b878..b38f31b 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -346,11 +346,13 @@ expect_rep_user_ctx(Name, Role) ->
 
 strip_url_creds_test_() ->
      {
-        foreach,
-        fun () -> meck:expect(config, get,
-            fun(_, _, Default) -> Default end)
+        setup,
+        fun() ->
+            meck:expect(config, get, fun(_, _, Default) -> Default end)
+        end,
+        fun(_) ->
+            meck:unload()
         end,
-        fun (_) -> meck:unload() end,
         [
             t_strip_http_basic_creds(),
             t_strip_http_props_creds(),


[couchdb] 11/41: Speedup eunit: couch_replicator_doc_processor

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3795271457294d98f6b2a8293c881767ee5ee9ea
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 13:31:20 2019 -0600

    Speedup eunit: couch_replicator_doc_processor
    
    Another example of moving mocks around.
---
 .../src/couch_replicator_doc_processor.erl         | 68 ++++++++++++++--------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index 29170ed..6778d53 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -614,25 +614,30 @@ cluster_membership_foldl(#rdoc{id = {DbName, DocId} = Id, rid = RepId}, nil) ->
 
 doc_processor_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_bad_change(),
-            t_regular_change(),
-            t_change_with_doc_processor_crash(),
-            t_change_with_existing_job(),
-            t_deleted_change(),
-            t_triggered_change(),
-            t_completed_change(),
-            t_active_replication_completed(),
-            t_error_change(),
-            t_failed_change(),
-            t_change_for_different_node(),
-            t_change_when_cluster_unstable(),
-            t_ejson_docs(),
-            t_cluster_membership_foldl()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_bad_change(),
+                t_regular_change(),
+                t_change_with_doc_processor_crash(),
+                t_change_with_existing_job(),
+                t_deleted_change(),
+                t_triggered_change(),
+                t_completed_change(),
+                t_active_replication_completed(),
+                t_error_change(),
+                t_failed_change(),
+                t_change_for_different_node(),
+                t_change_when_cluster_unstable(),
+                t_ejson_docs(),
+                t_cluster_membership_foldl()
+            ]
+        }
     }.
 
 
@@ -829,7 +834,7 @@ get_worker_ref_test_() ->
 % Test helper functions
 
 
-setup() ->
+setup_all() ->
     meck:expect(couch_log, info, 2, ok),
     meck:expect(couch_log, notice, 2, ok),
     meck:expect(couch_log, warning, 2, ok),
@@ -845,15 +850,32 @@ setup() ->
     end),
     meck:expect(couch_replicator_scheduler, remove_job, 1, ok),
     meck:expect(couch_replicator_docs, remove_state_fields, 2, ok),
-    meck:expect(couch_replicator_docs, update_failed, 3, ok),
+    meck:expect(couch_replicator_docs, update_failed, 3, ok).
+
+
+teardown_all(_) ->
+    meck:unload().
+
+
+setup() ->
+    meck:reset([
+        config,
+        couch_log,
+        couch_replicator_clustering,
+        couch_replicator_doc_processor_worker,
+        couch_replicator_docs,
+        couch_replicator_scheduler
+    ]),
+    % Set this expectation back to the default for
+    % each test since some tests change it
+    meck:expect(couch_replicator_clustering, owner, 2, node()),
     {ok, Pid} = start_link(),
     unlink(Pid),
     Pid.
 
 
 teardown(Pid) ->
-    exit(Pid, kill),
-    meck:unload().
+    exit(Pid, kill).
 
 
 removed_state_fields() ->


[couchdb] 29/41: Speedup eunit: couch_replicator_auth_session

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 774d84e85082dfc5bd5f4bb06265e70a1723e5f4
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:38:36 2019 -0600

    Speedup eunit: couch_replicator_auth_session
---
 .../src/couch_replicator_auth_session.erl          | 57 ++++++++++++++--------
 1 file changed, 37 insertions(+), 20 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index 51efd2a..30f499a 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -631,24 +631,29 @@ extract_creds_success_test_() ->
 
 cookie_update_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_do_refresh_without_max_age(),
-            t_do_refresh_with_max_age(),
-            t_dont_refresh(),
-            t_process_auth_failure(),
-            t_process_auth_failure_stale_epoch(),
-            t_process_auth_failure_too_frequent(),
-            t_process_ok_update_cookie(),
-            t_process_ok_no_cookie(),
-            t_init_state_fails_on_401(),
-            t_init_state_401_with_require_valid_user(),
-            t_init_state_404(),
-            t_init_state_no_creds(),
-            t_init_state_http_error()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_do_refresh_without_max_age(),
+                t_do_refresh_with_max_age(),
+                t_dont_refresh(),
+                t_process_auth_failure(),
+                t_process_auth_failure_stale_epoch(),
+                t_process_auth_failure_too_frequent(),
+                t_process_ok_update_cookie(),
+                t_process_ok_no_cookie(),
+                t_init_state_fails_on_401(),
+                t_init_state_401_with_require_valid_user(),
+                t_init_state_404(),
+                t_init_state_no_creds(),
+                t_init_state_http_error()
+            ]
+        }
     }.
 
 
@@ -774,7 +779,7 @@ t_init_state_http_error() ->
     end).
 
 
-setup() ->
+setup_all() ->
     meck:expect(couch_replicator_httpc_pool, get_worker, 1, {ok, worker}),
     meck:expect(couch_replicator_httpc_pool, release_worker_sync, 2, ok),
     meck:expect(config, get, fun(_, _, Default) -> Default end),
@@ -782,10 +787,22 @@ setup() ->
     ok.
 
 
-teardown(_) ->
+teardown_all(_) ->
     meck:unload().
 
 
+setup() ->
+    meck:reset([
+        config,
+        couch_replicator_httpc_pool,
+        ibrowse
+    ]).
+
+
+teardown(_) ->
+    ok.
+
+
 mock_http_cookie_response(Cookie) ->
     Resp = {ok, "200", [{"Set-Cookie", "AuthSession=" ++ Cookie}], []},
     meck:expect(ibrowse, send_req_direct, 7, Resp).


[couchdb] 33/41: Speedup eunit: fabric_db_create

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3fba3a44521def2dff17360d048192529847a8e2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:40:32 2019 -0600

    Speedup eunit: fabric_db_create
---
 src/fabric/src/fabric_db_create.erl | 39 ++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index 2edc6dc..03fabb4 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -188,30 +188,41 @@ db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
-db_exists_for_existing_db_test() ->
-    start_meck_(),
+db_exists_test_() ->
+    {
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        [
+            fun db_exists_for_existing_db/0,
+            fun db_exists_for_missing_db/0
+        ]
+    }.
+
+
+setup_all() ->
+    meck:new(mem3).
+
+
+teardown_all(_) ->
+    meck:unload().
+
+
+db_exists_for_existing_db() ->
     Mock = fun(DbName) when is_binary(DbName) ->
         [#shard{dbname = DbName, range = [0,100]}]
     end,
     ok = meck:expect(mem3, shards, Mock),
     ?assertEqual(true, db_exists(<<"foobar">>)),
-    ?assertEqual(true, meck:validate(mem3)),
-    stop_meck_().
+    ?assertEqual(true, meck:validate(mem3)).
+
 
-db_exists_for_missing_db_test() ->
-    start_meck_(),
+db_exists_for_missing_db() ->
     Mock = fun(DbName) ->
         erlang:error(database_does_not_exist, DbName)
     end,
     ok = meck:expect(mem3, shards, Mock),
     ?assertEqual(false, db_exists(<<"foobar">>)),
-    ?assertEqual(false, meck:validate(mem3)),
-    stop_meck_().
-
-start_meck_() ->
-    ok = meck:new(mem3).
-
-stop_meck_() ->
-    ok = meck:unload(mem3).
+    ?assertEqual(false, meck:validate(mem3)).
 
 -endif.


[couchdb] 34/41: Speedup eunit: fabric_doc_open

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f1d29063f272deefa365729ded7ef56a54056814
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:40:48 2019 -0600

    Speedup eunit: fabric_doc_open
---
 src/fabric/src/fabric_doc_open.erl | 61 ++++++++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 23 deletions(-)

diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 743ad8c7..8ef604b 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -185,38 +185,53 @@ format_reply(Else, _) ->
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
+-define(MECK_MODS, [
+    couch_log,
+    couch_stats,
+    fabric,
+    fabric_util,
+    mem3,
+    rexi,
+    rexi_monitor
+]).
+
+
+setup_all() ->
+    meck:new(?MECK_MODS, [passthrough]).
+
+
+teardown_all(_) ->
+    meck:unload().
+
 
 setup() ->
-    meck:new([
-        couch_log,
-        couch_stats,
-        fabric,
-        fabric_util,
-        mem3,
-        rexi,
-        rexi_monitor
-    ], [passthrough]).
+    meck:reset(?MECK_MODS).
 
 
 teardown(_) ->
-    meck:unload().
+    ok.
 
 
 open_doc_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_is_r_met(),
-            t_handle_message_down(),
-            t_handle_message_exit(),
-            t_handle_message_reply(),
-            t_store_node_revs(),
-            t_read_repair(),
-            t_handle_response_quorum_met(),
-            t_get_doc_info()
-        ]
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                t_is_r_met(),
+                t_handle_message_down(),
+                t_handle_message_exit(),
+                t_handle_message_reply(),
+                t_store_node_revs(),
+                t_read_repair(),
+                t_handle_response_quorum_met(),
+                t_get_doc_info()
+            ]
+        }
     }.
 
 


[couchdb] 21/41: Speedup eunit: couch_flags_config_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5159058f40fbac344c6f54b881b008e6cd1ee178
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:32:26 2019 -0600

    Speedup eunit: couch_flags_config_tests
    
    This undoes the test-per-combination approach to avoid the time spent
    printing each test name to the console.
---
 src/couch/test/eunit/couch_flags_config_tests.erl | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
index 1a66cdc..ed7df11 100644
--- a/src/couch/test/eunit/couch_flags_config_tests.erl
+++ b/src/couch/test/eunit/couch_flags_config_tests.erl
@@ -16,8 +16,10 @@ couch_flags_config_test_() ->
     {
         "test couch_flags_config",
         {
-            setup, fun setup/0, fun teardown/1,
-            all_combinations_return_same_result()
+            setup,
+            fun setup/0,
+            fun teardown/1,
+                [fun all_combinations_return_same_result/0]
                 ++ latest_overide_wins()
                 ++ [
                     {"rules_are_sorted", fun rules_are_sorted/0}
@@ -41,8 +43,9 @@ all_combinations_return_same_result() ->
         {{<<"*">>},{<<"*">>, 1, [bar, foo]}}
     ],
     Combinations = couch_tests_combinatorics:permutations(Config),
-    [{test_id(Items), ?_assertEqual(Expected, couch_flags_config:data(Items))}
-        || Items <- Combinations].
+    lists:foreach(fun(Items) ->
+        ?assertEqual(Expected, couch_flags_config:data(Items))
+    end, Combinations).
 
 rules_are_sorted() ->
     Expected = [


[couchdb] 40/41: Speedup eunit: mem3_sync_event_listener

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a94401754f68eec09a7291167d1ec9ae1f4ab4a5
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:42:56 2019 -0600

    Speedup eunit: mem3_sync_event_listener
---
 src/mem3/src/mem3_sync_event_listener.erl | 53 ++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 18 deletions(-)

diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index 69a7a60..cd8a650 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -217,7 +217,9 @@ subscribe_for_config() ->
 -ifdef(TEST).
 -include_lib("couch/include/couch_eunit.hrl").
 
-setup() ->
+setup_all() ->
+    application:start(config),
+
     ok = meck:new(couch_event, [passthrough]),
     ok = meck:expect(couch_event, register_all, ['_'], ok),
 
@@ -225,33 +227,39 @@ setup() ->
     ok = meck:expect(config_notifier, handle_event, [
         {[{'_', '_', '_', "error", '_'}, '_'], meck:raise(throw, raised_error)},
         {['_', '_'], meck:passthrough()}
-    ]),
+    ]).
 
-    application:start(config),
+teardown_all(_) ->
+    meck:unload(),
+    application:stop(config).
+
+setup() ->
     {ok, Pid} = ?MODULE:start_link(),
     erlang:unlink(Pid),
     meck:wait(config_notifier, subscribe, '_', 1000),
     Pid.
 
 teardown(Pid) ->
-    exit(Pid, shutdown),
-    application:stop(config),
-    (catch meck:unload(couch_event)),
-    (catch meck:unload(config_notifier)),
-    ok.
+    exit(Pid, shutdown).
 
 subscribe_for_config_test_() ->
     {
-        "Subscrive for configuration changes",
+        "Subscribe for configuration changes",
         {
-            foreach,
-            fun setup/0, fun teardown/1,
-            [
-                fun should_set_sync_delay/1,
-                fun should_set_sync_frequency/1,
-                fun should_restart_listener/1,
-                fun should_terminate/1
-            ]
+            setup,
+            fun setup_all/0,
+            fun teardown_all/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun should_set_sync_delay/1,
+                    fun should_set_sync_frequency/1,
+                    fun should_restart_listener/1,
+                    fun should_terminate/1
+                ]
+            }
         }
     }.
 
@@ -286,11 +294,20 @@ should_terminate(Pid) ->
 
         EventMgr = whereis(config_event),
 
+        Ref = erlang:monitor(process, Pid),
+
         RestartFun = fun() -> exit(EventMgr, kill) end,
         test_util:with_process_restart(config_event, RestartFun),
 
         ?assertNot(is_process_alive(EventMgr)),
-        ?assertNot(is_process_alive(Pid)),
+
+        receive
+            {'DOWN', Ref, _, _, _} ->
+                ok
+        after 1000 ->
+            ?assert(false)
+        end,
+
         ?assert(is_process_alive(whereis(config_event))),
         ok
     end).


[couchdb] 25/41: Speedup eunit: couch_index_compaction_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 470f2c8c0e65488d1086856a8ee2804b32ee44f4
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:36:49 2019 -0600

    Speedup eunit: couch_index_compaction_tests
---
 .../test/eunit/couch_index_compaction_tests.erl       | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/src/couch_index/test/eunit/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
index 53316d9..ab493a9 100644
--- a/src/couch_index/test/eunit/couch_index_compaction_tests.erl
+++ b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
@@ -17,6 +17,16 @@
 
 -define(WAIT_TIMEOUT, 1000).
 
+
+setup_all() ->
+    Ctx = test_util:start_couch(),
+    meck:new([test_index], [non_strict]),
+    Ctx.
+
+teardown_all(Ctx) ->
+    meck:unload(),
+    test_util:stop_couch(Ctx).
+
 setup() ->
     DbName = ?tempdb(),
     {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
@@ -27,7 +37,6 @@ setup() ->
     {Db, IndexerPid}.
 
 fake_index(DbName) ->
-    ok = meck:new([test_index], [non_strict]),
     ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}),
     ok = meck:expect(test_index, open, fun(_Db, State) ->
         {ok, State}
@@ -51,17 +60,19 @@ fake_index(DbName) ->
     end).
 
 teardown(_) ->
-    meck:unload(test_index).
+    ok.
 
 compaction_test_() ->
     {
         "Check compaction",
         {
             setup,
-            fun() -> test_util:start_couch([]) end, fun test_util:stop_couch/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             {
                 foreach,
-                fun setup/0, fun teardown/1,
+                fun setup/0,
+                fun teardown/1,
                 [
                     fun hold_db_for_recompaction/1
                 ]


[couchdb] 18/41: Speedup eunit: couch_httpd

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 03197c10ca364fc5ed583fa2ccc864e48f083cf8
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:31:36 2019 -0600

    Speedup eunit: couch_httpd
---
 src/couch/src/couch_httpd.erl | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 65291e3..872b556 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -1263,8 +1263,8 @@ maybe_add_default_headers_test_() ->
     {"Tests adding default headers", Tests}.
 
 log_request_test_() ->
-    {foreachx,
-        fun(_) ->
+    {setup,
+        fun() ->
             ok = meck:new([couch_log]),
             ok = meck:expect(couch_log, error, fun(Fmt, Args) ->
                 case catch io_lib_format:fwrite(Fmt, Args) of
@@ -1273,13 +1273,16 @@ log_request_test_() ->
                 end
             end)
         end,
-        fun(_, _) ->
-            meck:unload([couch_log])
+        fun(_) ->
+            meck:unload()
         end,
-        [{Flag, fun should_accept_code_and_message/2} || Flag <- [true, false]]
+        [
+            fun() -> should_accept_code_and_message(true) end,
+            fun() -> should_accept_code_and_message(false) end
+        ]
     }.
 
-should_accept_code_and_message(DontLogFlag, _) ->
+should_accept_code_and_message(DontLogFlag) ->
     erlang:put(dont_log_response, DontLogFlag),
     {"with dont_log_response = " ++ atom_to_list(DontLogFlag),
         [


[couchdb] 05/41: Speedup eunit: couch_peruser_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6949f14a00fbe27b277aa6ff795690426e575384
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 11:11:52 2019 -0600

    Speedup eunit: couch_peruser_test
    
    The quiet and start periods of a second were costing quite a bit of
    time. Setting them to zero shaves off about 26 seconds.
---
 src/couch_peruser/test/eunit/couch_peruser_test.erl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
index e128d31..5ddbe7a 100644
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl
@@ -18,7 +18,7 @@
 -define(ADMIN_USERNAME, "admin").
 -define(ADMIN_PASSWORD, "secret").
 
--define(WAIT_FOR_USER_DELETE_TIMEOUT, 3000).
+-define(WAIT_FOR_USER_DELETE_TIMEOUT, 1000).
 
 setup_all() ->
     TestCtx = test_util:start_couch([chttpd]),
@@ -37,8 +37,8 @@ setup() ->
     do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
     do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
     set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)),
-    set_config("couch_peruser", "cluster_quiet_period", "1"),
-    set_config("couch_peruser", "cluster_start_period", "1"),
+    set_config("couch_peruser", "cluster_quiet_period", "0"),
+    set_config("couch_peruser", "cluster_start_period", "0"),
     set_config("couch_peruser", "enable", "true"),
     set_config("cluster", "n", "1"),
     TestAuthDb.


[couchdb] 26/41: Speedup eunit: couch_mrview_compactor

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a259b936d996b7f4dc3aade5100fc8ac6146648e
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:37:19 2019 -0600

    Speedup eunit: couch_mrview_compactor
---
 src/couch_mrview/src/couch_mrview_compactor.erl | 41 +++++++++++++------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
index 17d67f1..d42edc0 100644
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ b/src/couch_mrview/src/couch_mrview_compactor.erl
@@ -248,11 +248,23 @@ remove_compacted(#mrst{sig = Sig, db_name = DbName} = State) ->
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
+setup_all() ->
+    meck:new(couch_index_updater),
+    meck:new(couch_log).
+
+teardown_all(_) ->
+    meck:unload().
+
 recompact_test_() ->
-    [
-        recompact_success_after_progress(),
-        recompact_exceeded_retry_count()
-    ].
+    {
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        [
+            recompact_success_after_progress(),
+            recompact_exceeded_retry_count()
+        ]
+    }.
 
 recompact_success_after_progress() ->
     ?_test(begin
@@ -262,12 +274,8 @@ recompact_success_after_progress() ->
                 timer:sleep(100),
                 exit({updated, self(), State#mrst{update_seq = 2}})
         end),
-        try
-            State = #mrst{fd=self(), update_seq=0},
-            ?assertEqual({ok, State#mrst{update_seq = 2}}, recompact(State))
-        after
-            meck:unload(couch_index_updater)
-        end
+        State = #mrst{fd=self(), update_seq=0},
+        ?assertEqual({ok, State#mrst{update_seq = 2}}, recompact(State))
     end).
 
 recompact_exceeded_retry_count() ->
@@ -277,15 +285,10 @@ recompact_exceeded_retry_count() ->
                 exit(error)
         end),
         ok = meck:expect(couch_log, warning, fun(_, _) -> ok end),
-        try
-            State = #mrst{fd=self(), db_name=foo, idx_name=bar},
-            ExpectedError = {exceeded_recompact_retry_count,
-                [{db_name, foo}, {idx_name, bar}]},
-                ?assertError(ExpectedError, recompact(State))
-        after
-            meck:unload(couch_log),
-            meck:unload(couch_index_updater)
-        end
+        State = #mrst{fd=self(), db_name=foo, idx_name=bar},
+        ExpectedError = {exceeded_recompact_retry_count,
+            [{db_name, foo}, {idx_name, bar}]},
+            ?assertError(ExpectedError, recompact(State))
     end).
 
 -endif.


[couchdb] 15/41: Speedup eunit: chttpd_prefer_header_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ce2020c607e255b166680a8dd7419d4f017ef7e3
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:30:48 2019 -0600

    Speedup eunit: chttpd_prefer_header_test
---
 .../test/eunit/chttpd_prefer_header_test.erl       | 47 ++++++++++++++--------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
index 0f43ba4..1b11954 100644
--- a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
+++ b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
@@ -51,7 +51,7 @@ minimal_options_headers() ->
 
 default_no_exclude_header_test() ->
     Headers = chttpd_prefer_header:maybe_return_minimal(
-        mock_request([]), 
+        mock_request([]),
         default_headers()
         ),
     ?assertEqual(default_headers(), Headers).
@@ -68,30 +68,45 @@ empty_header_test() ->
     Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
     ?assertEqual(default_headers(), Headers).
 
-setup() ->
+setup_all() ->
     ok = meck:new(config),
-    ok = meck:expect(config, get, fun("chttpd", "prefer_minimal",  _) -> 
+    ok = meck:expect(config, get, fun("chttpd", "prefer_minimal",  _) ->
         "Cache-Control, Content-Length, Content-Type, ETag, Server, Vary"
     end),
     ok.
 
 
+teardown_all(_) ->
+    meck:unload().
+
+
+setup() ->
+    meck:reset([config]).
+
+
 teardown(_) ->
-    meck:unload(config).
+    ok.
 
 
 exclude_headers_test_() ->
-     {
-         "Test Prefer headers",
-         {
-             foreach, fun setup/0, fun teardown/1,
-             [
-                 fun minimal_options/1,
-                 fun minimal_options_check_header_case/1,
-                 fun minimal_options_check_header_value_case/1
-             ]
-         }
-     }.
+    {
+        "Test Prefer headers",
+        {
+            setup,
+            fun setup_all/0,
+            fun teardown_all/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun minimal_options/1,
+                    fun minimal_options_check_header_case/1,
+                    fun minimal_options_check_header_value_case/1
+                ]
+            }
+        }
+    }.
 
 
 minimal_options(_) ->
@@ -109,4 +124,4 @@ minimal_options_check_header_case(_) ->
 minimal_options_check_header_value_case(_) ->
     Req = mock_request([{"prefer", "RETURN=MINIMAL"}]),
     Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
-    ?_assertEqual(minimal_options_headers(), Headers).
\ No newline at end of file
+    ?_assertEqual(minimal_options_headers(), Headers).


[couchdb] 04/41: Cleanup eunit: couch_peruser_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e6ddd0d8db8eb8707cf0a6a630ff99b604a9693a
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 11:11:06 2019 -0600

    Cleanup eunit: couch_peruser_test
    
    The bodies of these tests were running during the setup phase, which is not
    correct. Wrapping each body with `?_test()` macros fixes the issue.
---
 .../test/eunit/couch_peruser_test.erl              | 527 +++++++++++----------
 1 file changed, 273 insertions(+), 254 deletions(-)

diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
index 8501cc3..e128d31 100644
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl
@@ -145,297 +145,316 @@ get_cluster_base_url() ->
 
 
 should_create_user_db_with_default(TestAuthDb) ->
-    create_user(TestAuthDb, "foo"),
-    wait_for_db_create(<<"userdb-666f6f">>),
-    {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
-    {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
-    [
-        ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
-        ?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
-    ].
+    ?_test(begin
+        create_user(TestAuthDb, "foo"),
+        wait_for_db_create(<<"userdb-666f6f">>),
+        {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
+        {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+        ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
+        ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
+    end).
 
 should_create_user_db_with_custom_prefix(TestAuthDb) ->
-    set_config("couch_peruser", "database_prefix", "newuserdb-"),
-    create_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"newuserdb-666f6f6f">>),
-    delete_config("couch_peruser", "database_prefix"),
-    ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+    ?_test(begin
+        set_config("couch_peruser", "database_prefix", "newuserdb-"),
+        create_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"newuserdb-666f6f6f">>),
+        delete_config("couch_peruser", "database_prefix"),
+        ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
+    end).
 
 should_create_user_db_with_custom_special_prefix(TestAuthDb) ->
-    set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
-    create_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
-    delete_config("couch_peruser", "database_prefix"),
-    ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+    ?_test(begin
+        set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+        create_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+        delete_config("couch_peruser", "database_prefix"),
+        ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
+    end).
 
 should_create_anon_user_db_with_default(TestAuthDb) ->
-    create_anon_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"userdb-666f6f6f">>),
-    {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
-    {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
-    [
-        ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
-        ?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
-    ].
+    ?_test(begin
+        create_anon_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"userdb-666f6f6f">>),
+        {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
+        {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+        ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
+        ?assertEqual(1, couch_util:get_value(q, ClusterInfo))
+    end).
 
 should_create_anon_user_db_with_custom_prefix(TestAuthDb) ->
-    set_config("couch_peruser", "database_prefix", "newuserdb-"),
-    create_anon_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"newuserdb-666f6f6f">>),
-    delete_config("couch_peruser", "database_prefix"),
-    ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+    ?_test(begin
+        set_config("couch_peruser", "database_prefix", "newuserdb-"),
+        create_anon_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"newuserdb-666f6f6f">>),
+        delete_config("couch_peruser", "database_prefix"),
+        ?assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs()))
+    end).
 
 should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) ->
-    set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
-    create_anon_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
-    delete_config("couch_peruser", "database_prefix"),
-    ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+    ?_test(begin
+        set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+        create_anon_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+        delete_config("couch_peruser", "database_prefix"),
+        ?assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs()))
+    end).
 
 should_create_user_db_with_q4(TestAuthDb) ->
-    set_config("couch_peruser", "q", "4"),
-    create_user(TestAuthDb, "foo"),
-    wait_for_db_create(<<"userdb-666f6f">>),
-    {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
-    {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
-    delete_config("couch_peruser", "q"),
-    [
-        ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
-        ?_assertEqual(4, couch_util:get_value(q, ClusterInfo))
-    ].
+    ?_test(begin
+        set_config("couch_peruser", "q", "4"),
+        create_user(TestAuthDb, "foo"),
+        wait_for_db_create(<<"userdb-666f6f">>),
+        {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
+        {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+        delete_config("couch_peruser", "q"),
+        ?assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
+        ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
+    end).
 
 should_create_anon_user_db_with_q4(TestAuthDb) ->
-    set_config("couch_peruser", "q", "4"),
-    create_anon_user(TestAuthDb, "fooo"),
-    wait_for_db_create(<<"userdb-666f6f6f">>),
-    {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
-    {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
-    delete_config("couch_peruser", "q"),
-    [
-        ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
-        ?_assertEqual(4, couch_util:get_value(q, ClusterInfo))
-    ].
+    ?_test(begin
+        set_config("couch_peruser", "q", "4"),
+        create_anon_user(TestAuthDb, "fooo"),
+        wait_for_db_create(<<"userdb-666f6f6f">>),
+        {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
+        {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
+        delete_config("couch_peruser", "q"),
+        ?assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
+        ?assertEqual(4, couch_util:get_value(q, ClusterInfo))
+    end).
 
 should_not_delete_user_db(TestAuthDb) ->
-    User = "foo",
-    UserDbName = <<"userdb-666f6f">>,
-    create_user(TestAuthDb, User),
-    wait_for_db_create(<<"userdb-666f6f">>),
-    AfterCreate = lists:member(UserDbName, all_dbs()),
-    delete_user(TestAuthDb, User),
-    timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
-    AfterDelete = lists:member(UserDbName, all_dbs()),
-    [?_assert(AfterCreate), ?_assert(AfterDelete)].
+    ?_test(begin
+        User = "foo",
+        UserDbName = <<"userdb-666f6f">>,
+        create_user(TestAuthDb, User),
+        wait_for_db_create(<<"userdb-666f6f">>),
+        AfterCreate = lists:member(UserDbName, all_dbs()),
+        delete_user(TestAuthDb, User),
+        timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+        AfterDelete = lists:member(UserDbName, all_dbs()),
+        ?assert(AfterCreate),
+        ?assert(AfterDelete)
+    end).
 
 should_delete_user_db(TestAuthDb) ->
-    User = "bar",
-    UserDbName = <<"userdb-626172">>,
-    set_config("couch_peruser", "delete_dbs", "true"),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    AfterCreate = lists:member(UserDbName, all_dbs()),
-    delete_user(TestAuthDb, User),
-    wait_for_db_delete(UserDbName),
-    AfterDelete = lists:member(UserDbName, all_dbs()),
-    [?_assert(AfterCreate), ?_assertNot(AfterDelete)].
+    ?_test(begin
+        User = "bar",
+        UserDbName = <<"userdb-626172">>,
+        set_config("couch_peruser", "delete_dbs", "true"),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        AfterCreate = lists:member(UserDbName, all_dbs()),
+        delete_user(TestAuthDb, User),
+        wait_for_db_delete(UserDbName),
+        AfterDelete = lists:member(UserDbName, all_dbs()),
+        ?assert(AfterCreate),
+        ?assertNot(AfterDelete)
+    end).
 
 should_delete_user_db_with_custom_prefix(TestAuthDb) ->
-    User = "bar",
-    UserDbName = <<"newuserdb-626172">>,
-    set_config("couch_peruser", "delete_dbs", "true"),
-    set_config("couch_peruser", "database_prefix", "newuserdb-"),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    AfterCreate = lists:member(UserDbName, all_dbs()),
-    delete_user(TestAuthDb, User),
-    wait_for_db_delete(UserDbName),
-    delete_config("couch_peruser", "database_prefix"),
-    AfterDelete = lists:member(UserDbName, all_dbs()),
-    [
-        ?_assert(AfterCreate),
-        ?_assertNot(AfterDelete)
-    ].
+    ?_test(begin
+        User = "bar",
+        UserDbName = <<"newuserdb-626172">>,
+        set_config("couch_peruser", "delete_dbs", "true"),
+        set_config("couch_peruser", "database_prefix", "newuserdb-"),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        AfterCreate = lists:member(UserDbName, all_dbs()),
+        delete_user(TestAuthDb, User),
+        wait_for_db_delete(UserDbName),
+        delete_config("couch_peruser", "database_prefix"),
+        AfterDelete = lists:member(UserDbName, all_dbs()),
+        ?assert(AfterCreate),
+        ?assertNot(AfterDelete)
+    end).
 
 should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
-    User = "bar",
-    UserDbName = <<"userdb_$()+--/626172">>,
-    set_config("couch_peruser", "delete_dbs", "true"),
-    set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    AfterCreate = lists:member(UserDbName, all_dbs()),
-    delete_user(TestAuthDb, User),
-    wait_for_db_delete(UserDbName),
-    delete_config("couch_peruser", "database_prefix"),
-    AfterDelete = lists:member(UserDbName, all_dbs()),
-    [
-        ?_assert(AfterCreate),
-        ?_assertNot(AfterDelete)
-    ].
+    ?_test(begin
+        User = "bar",
+        UserDbName = <<"userdb_$()+--/626172">>,
+        set_config("couch_peruser", "delete_dbs", "true"),
+        set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        AfterCreate = lists:member(UserDbName, all_dbs()),
+        delete_user(TestAuthDb, User),
+        wait_for_db_delete(UserDbName),
+        delete_config("couch_peruser", "database_prefix"),
+        AfterDelete = lists:member(UserDbName, all_dbs()),
+        ?assert(AfterCreate),
+        ?assertNot(AfterDelete)
+    end).
 
 should_reflect_config_changes(TestAuthDb) ->
-    User = "baz",
-    UserDbName = <<"userdb-62617a">>,
-    set_config("couch_peruser", "delete_dbs", "true"),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    AfterCreate1 = lists:member(UserDbName, all_dbs()),
-    delete_user(TestAuthDb, User),
-    timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
-    wait_for_db_delete(UserDbName),
-    AfterDelete1 = lists:member(UserDbName, all_dbs()),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    AfterCreate2 = lists:member(UserDbName, all_dbs()),
-    set_config("couch_peruser", "delete_dbs", "false"),
-    delete_user(TestAuthDb, User),
-    timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
-    AfterDelete2 = lists:member(UserDbName, all_dbs()),
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    set_config("couch_peruser", "delete_dbs", "true"),
-    delete_user(TestAuthDb, User),
-    wait_for_db_delete(UserDbName),
-    AfterDelete3 = lists:member(UserDbName, all_dbs()),
-    set_config("couch_peruser", "enable", "false"),
-    create_user(TestAuthDb, User),
-    timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
-    AfterCreate3 = lists:member(UserDbName, all_dbs()),
-    [
-        ?_assert(AfterCreate1),
-        ?_assertNot(AfterDelete1),
-        ?_assert(AfterCreate2),
-        ?_assert(AfterDelete2),
-        ?_assertNot(AfterDelete3),
-        ?_assertNot(AfterCreate3)
-    ].
+    {timeout, 10000, ?_test(begin
+        User = "baz",
+        UserDbName = <<"userdb-62617a">>,
+        set_config("couch_peruser", "delete_dbs", "true"),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        AfterCreate1 = lists:member(UserDbName, all_dbs()),
+        delete_user(TestAuthDb, User),
+        timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+        wait_for_db_delete(UserDbName),
+        AfterDelete1 = lists:member(UserDbName, all_dbs()),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        AfterCreate2 = lists:member(UserDbName, all_dbs()),
+        set_config("couch_peruser", "delete_dbs", "false"),
+        delete_user(TestAuthDb, User),
+        timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+        AfterDelete2 = lists:member(UserDbName, all_dbs()),
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        set_config("couch_peruser", "delete_dbs", "true"),
+        delete_user(TestAuthDb, User),
+        wait_for_db_delete(UserDbName),
+        AfterDelete3 = lists:member(UserDbName, all_dbs()),
+        set_config("couch_peruser", "enable", "false"),
+        create_user(TestAuthDb, User),
+        timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+        AfterCreate3 = lists:member(UserDbName, all_dbs()),
+        ?assert(AfterCreate1),
+        ?assertNot(AfterDelete1),
+        ?assert(AfterCreate2),
+        ?assert(AfterDelete2),
+        ?assertNot(AfterDelete3),
+        ?assertNot(AfterCreate3)
+    end)}.
 
 
 should_add_user_to_db_admins(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    ?_assertEqual(
-        {[{<<"names">>,[<<"qux">>]}]},
-        proplists:get_value(<<"admins">>, get_security(UserDbName))).
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        ?assertEqual(
+            {[{<<"names">>,[<<"qux">>]}]},
+            proplists:get_value(<<"admins">>, get_security(UserDbName)))
+    end).
 
 should_add_user_to_db_members(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    create_user(TestAuthDb, User),
-    wait_for_db_create(UserDbName),
-    ?_assertEqual(
-        {[{<<"names">>,[<<"qux">>]}]},
-        proplists:get_value(<<"members">>, get_security(UserDbName))).
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        create_user(TestAuthDb, User),
+        wait_for_db_create(UserDbName),
+        ?assertEqual(
+            {[{<<"names">>,[<<"qux">>]}]},
+            proplists:get_value(<<"members">>, get_security(UserDbName)))
+    end).
 
 should_not_remove_existing_db_admins(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
-    ],
-    create_db(UserDbName),
-    set_security(UserDbName, SecurityProperties),
-    create_user(TestAuthDb, User),
-    wait_for_security_create(<<"admins">>, User, UserDbName),
-    {AdminProperties} = proplists:get_value(<<"admins">>,
-        get_security(UserDbName)),
-    AdminNames = proplists:get_value(<<"names">>, AdminProperties),
-    [
-      ?_assert(lists:member(<<"foo">>, AdminNames)),
-      ?_assert(lists:member(<<"bar">>, AdminNames)),
-      ?_assert(lists:member(<<"qux">>, AdminNames))
-    ].
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        SecurityProperties = [
+            {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
+            {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
+        ],
+        create_db(UserDbName),
+        set_security(UserDbName, SecurityProperties),
+        create_user(TestAuthDb, User),
+        wait_for_security_create(<<"admins">>, User, UserDbName),
+        {AdminProperties} = proplists:get_value(<<"admins">>,
+            get_security(UserDbName)),
+        AdminNames = proplists:get_value(<<"names">>, AdminProperties),
+        ?assert(lists:member(<<"foo">>, AdminNames)),
+        ?assert(lists:member(<<"bar">>, AdminNames)),
+        ?assert(lists:member(<<"qux">>, AdminNames))
+    end).
 
 should_not_remove_existing_db_members(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
-    ],
-    create_db(UserDbName),
-    set_security(UserDbName, SecurityProperties),
-    create_user(TestAuthDb, User),
-    wait_for_security_create(<<"members">>, User, UserDbName),
-    {MemberProperties} = proplists:get_value(<<"members">>,
-        get_security(UserDbName)),
-    MemberNames = proplists:get_value(<<"names">>, MemberProperties),
-    [
-      ?_assert(lists:member(<<"pow">>, MemberNames)),
-      ?_assert(lists:member(<<"wow">>, MemberNames)),
-      ?_assert(lists:member(<<"qux">>, MemberNames))
-    ].
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        SecurityProperties = [
+            {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
+            {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
+        ],
+        create_db(UserDbName),
+        set_security(UserDbName, SecurityProperties),
+        create_user(TestAuthDb, User),
+        wait_for_security_create(<<"members">>, User, UserDbName),
+        {MemberProperties} = proplists:get_value(<<"members">>,
+            get_security(UserDbName)),
+        MemberNames = proplists:get_value(<<"names">>, MemberProperties),
+        ?assert(lists:member(<<"pow">>, MemberNames)),
+        ?assert(lists:member(<<"wow">>, MemberNames)),
+        ?assert(lists:member(<<"qux">>, MemberNames))
+    end).
 
 should_remove_user_from_db_admins(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
-    ],
-    create_db(UserDbName),
-    set_security(UserDbName, SecurityProperties),
-    create_user(TestAuthDb, User),
-    wait_for_security_create(<<"admins">>, User, UserDbName),
-    {AdminProperties} = proplists:get_value(<<"admins">>,
-        get_security(UserDbName)),
-    AdminNames = proplists:get_value(<<"names">>, AdminProperties),
-    FooBefore = lists:member(<<"foo">>, AdminNames),
-    BarBefore = lists:member(<<"bar">>, AdminNames),
-    QuxBefore = lists:member(<<"qux">>, AdminNames),
-    delete_user(TestAuthDb, User),
-    wait_for_security_delete(<<"admins">>, User, UserDbName),
-    {NewAdminProperties} = proplists:get_value(<<"admins">>,
-        get_security(UserDbName)),
-    NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
-    FooAfter = lists:member(<<"foo">>, NewAdminNames),
-    BarAfter = lists:member(<<"bar">>, NewAdminNames),
-    QuxAfter = lists:member(<<"qux">>, NewAdminNames),
-    [
-      ?_assert(FooBefore),
-      ?_assert(BarBefore),
-      ?_assert(QuxBefore),
-      ?_assert(FooAfter),
-      ?_assert(BarAfter),
-      ?_assertNot(QuxAfter)
-    ].
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        SecurityProperties = [
+            {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
+            {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
+        ],
+        create_db(UserDbName),
+        set_security(UserDbName, SecurityProperties),
+        create_user(TestAuthDb, User),
+        wait_for_security_create(<<"admins">>, User, UserDbName),
+        {AdminProperties} = proplists:get_value(<<"admins">>,
+            get_security(UserDbName)),
+        AdminNames = proplists:get_value(<<"names">>, AdminProperties),
+        FooBefore = lists:member(<<"foo">>, AdminNames),
+        BarBefore = lists:member(<<"bar">>, AdminNames),
+        QuxBefore = lists:member(<<"qux">>, AdminNames),
+        delete_user(TestAuthDb, User),
+        wait_for_security_delete(<<"admins">>, User, UserDbName),
+        {NewAdminProperties} = proplists:get_value(<<"admins">>,
+            get_security(UserDbName)),
+        NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
+        FooAfter = lists:member(<<"foo">>, NewAdminNames),
+        BarAfter = lists:member(<<"bar">>, NewAdminNames),
+        QuxAfter = lists:member(<<"qux">>, NewAdminNames),
+        ?assert(FooBefore),
+        ?assert(BarBefore),
+        ?assert(QuxBefore),
+        ?assert(FooAfter),
+        ?assert(BarAfter),
+        ?assertNot(QuxAfter)
+    end).
 
 should_remove_user_from_db_members(TestAuthDb) ->
-    User = "qux",
-    UserDbName = <<"userdb-717578">>,
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
-    ],
-    create_db(UserDbName),
-    set_security(UserDbName, SecurityProperties),
-    create_user(TestAuthDb, User),
-    wait_for_security_create(<<"members">>, User, UserDbName),
-    {MemberProperties} = proplists:get_value(<<"members">>,
-        get_security(UserDbName)),
-    MemberNames = proplists:get_value(<<"names">>, MemberProperties),
-    PowBefore = lists:member(<<"pow">>, MemberNames),
-    WowBefore = lists:member(<<"wow">>, MemberNames),
-    QuxBefore = lists:member(<<"qux">>, MemberNames),
-    delete_user(TestAuthDb, User),
-    wait_for_security_delete(<<"members">>, User, UserDbName),
-    {NewMemberProperties} = proplists:get_value(<<"members">>,
-        get_security(UserDbName)),
-    NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
-    PowAfter = lists:member(<<"pow">>, NewMemberNames),
-    WowAfter = lists:member(<<"wow">>, NewMemberNames),
-    QuxAfter = lists:member(<<"qux">>, NewMemberNames),
-    [
-      ?_assert(PowBefore),
-      ?_assert(WowBefore),
-      ?_assert(QuxBefore),
-      ?_assert(PowAfter),
-      ?_assert(WowAfter),
-      ?_assertNot(QuxAfter)
-    ].
+    ?_test(begin
+        User = "qux",
+        UserDbName = <<"userdb-717578">>,
+        SecurityProperties = [
+            {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
+            {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
+        ],
+        create_db(UserDbName),
+        set_security(UserDbName, SecurityProperties),
+        create_user(TestAuthDb, User),
+        wait_for_security_create(<<"members">>, User, UserDbName),
+        {MemberProperties} = proplists:get_value(<<"members">>,
+            get_security(UserDbName)),
+        MemberNames = proplists:get_value(<<"names">>, MemberProperties),
+        PowBefore = lists:member(<<"pow">>, MemberNames),
+        WowBefore = lists:member(<<"wow">>, MemberNames),
+        QuxBefore = lists:member(<<"qux">>, MemberNames),
+        delete_user(TestAuthDb, User),
+        wait_for_security_delete(<<"members">>, User, UserDbName),
+        {NewMemberProperties} = proplists:get_value(<<"members">>,
+            get_security(UserDbName)),
+        NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
+        PowAfter = lists:member(<<"pow">>, NewMemberNames),
+        WowAfter = lists:member(<<"wow">>, NewMemberNames),
+        QuxAfter = lists:member(<<"qux">>, NewMemberNames),
+        ?assert(PowBefore),
+        ?assert(WowBefore),
+        ?assert(QuxBefore),
+        ?assert(PowAfter),
+        ?assert(WowAfter),
+        ?assertNot(QuxAfter)
+    end).
+
 
 
 wait_for_db_create(UserDbName) ->


[couchdb] 35/41: Speedup eunit: fabric_doc_purge

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 23859f5cb37a0abdfebd2d7084d62cc239d7c573
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:41:00 2019 -0600

    Speedup eunit: fabric_doc_purge
---
 src/fabric/src/fabric_doc_purge.erl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 7e447ff..3492f88 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -225,12 +225,11 @@ has_quorum(Resps, Count, W) ->
 
 
 -ifdef(TEST).
-
 -include_lib("eunit/include/eunit.hrl").
 
 purge_test_() ->
     {
-        foreach,
+        setup,
         fun setup/0,
         fun teardown/1,
         [


[couchdb] 37/41: Speedup eunit: mango_idx_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9cba2f12323cf544f810d66b764ed90ad0f39714
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:42:01 2019 -0600

    Speedup eunit: mango_idx_test
---
 src/mango/src/mango_idx_text.erl | 47 +++++++++++++++++++++++++---------------
 1 file changed, 30 insertions(+), 17 deletions(-)

diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index 0b78e88..50f6cc8 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -381,40 +381,52 @@ forbid_index_all() ->
 -include_lib("eunit/include/eunit.hrl").
 
 
-setup() ->
+setup_all() ->
     Ctx = test_util:start_couch(),
     meck:expect(couch_log, warning, 2,
         fun(_,_) ->
             throw({test_error, logged_warning})
         end),
+    Ctx.
+
+
+teardown_all(Ctx) ->
+    meck:unload(),
+    test_util:stop_couch(Ctx).
+
+
+setup() ->
     %default index all def that generates {fields, all_fields}
     Index = #idx{def={[]}},
     DbName = <<"testdb">>,
     UserCtx = #user_ctx{name = <<"u1">>},
     {ok, Db} = couch_db:clustered_db(DbName, UserCtx),
-    {Index, Db, Ctx}.
+    {Index, Db}.
 
 
-teardown({_, _, Ctx}) ->
-    meck:unload(),
-    test_util:stop_couch(Ctx).
+teardown(_) ->
+    ok.
 
 
 index_all_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            fun forbid_index_all/1,
-            fun default_and_false_index_all/1,
-            fun warn_index_all/1
-        ]
-
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                fun forbid_index_all/1,
+                fun default_and_false_index_all/1,
+                fun warn_index_all/1
+            ]
+        }
     }.
 
 
-forbid_index_all({Idx, Db, _}) ->
+forbid_index_all({Idx, Db}) ->
     ?_test(begin
         ok = config:set("mango", "index_all_disabled", "true", false),
         ?assertThrow({mango_error, ?MODULE, index_all_disabled},
@@ -423,8 +435,9 @@ forbid_index_all({Idx, Db, _}) ->
     end).
 
 
-default_and_false_index_all({Idx, Db, _}) ->
+default_and_false_index_all({Idx, Db}) ->
     ?_test(begin
+        config:delete("mango", "index_all_disabled", false),
         {ok, #idx{def={Def}}} = validate_new(Idx, Db),
         Fields = couch_util:get_value(fields, Def),
         ?assertEqual(all_fields, Fields),
@@ -435,7 +448,7 @@ default_and_false_index_all({Idx, Db, _}) ->
     end).
 
 
-warn_index_all({Idx, Db, _}) ->
+warn_index_all({Idx, Db}) ->
     ?_test(begin
         ok = config:set("mango", "index_all_disabled", "warn", false),
         ?assertThrow({test_error, logged_warning}, validate_new(Idx, Db))


[couchdb] 02/41: Speedup JavaScript tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit b4fa539b603c67936250ffae222e4ac442d277a2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Dec 19 10:23:27 2019 -0600

    Speedup JavaScript tests
    
    We sleep for a bit more than a second for each test. Rather than using
    return to skip a test, we now mark the test as skipped so we don't
    have to waste time. This saves about 25 seconds on the test suite.
---
 test/javascript/cli_runner.js                      |  4 ++
 test/javascript/run                                | 15 +++--
 .../attachments_delete_overridden_quorum.js        |  3 +-
 .../with-quorum/attachments_overridden_quorum.js   |  3 +-
 .../with-quorum/db_creation_overridden_quorum.js   |  3 +-
 .../with-quorum/doc_copy_overridden_quorum.js      |  3 +-
 .../without-quorum/attachments_delete.js           |  3 +-
 .../attachments_delete_overridden_quorum.js        |  3 +-
 .../db_creation_overridden_quorum.js               |  3 +-
 .../without-quorum/doc_copy_overridden_quorum.js   |  3 +-
 test/javascript/tests/etags_views.js               | 27 ++++-----
 .../tests/replicator_db_compact_rep_db.js          |  3 +-
 test/javascript/tests/replicator_db_continuous.js  |  3 +-
 .../tests/replicator_db_credential_delegation.js   |  3 +-
 .../tests/replicator_db_field_validation.js        |  3 +-
 test/javascript/tests/replicator_db_filtered.js    |  3 +-
 test/javascript/tests/replicator_db_identical.js   |  3 +-
 .../tests/replicator_db_identical_continuous.js    |  3 +-
 .../tests/replicator_db_invalid_filter.js          |  3 +-
 test/javascript/tests/replicator_db_security.js    |  5 +-
 test/javascript/tests/replicator_db_simple.js      |  3 +-
 test/javascript/tests/replicator_db_successive.js  |  3 +-
 test/javascript/tests/replicator_db_survives.js    |  3 +-
 test/javascript/tests/replicator_db_swap_rep_db.js |  3 +-
 .../tests/replicator_db_update_security.js         |  3 +-
 test/javascript/tests/replicator_db_user_ctx.js    |  3 +-
 test/javascript/tests/replicator_db_write_auth.js  |  3 +-
 test/javascript/tests/stats.js                     | 65 +++++++++++-----------
 28 files changed, 92 insertions(+), 93 deletions(-)

diff --git a/test/javascript/cli_runner.js b/test/javascript/cli_runner.js
index dbaf1c2..5d7a980 100644
--- a/test/javascript/cli_runner.js
+++ b/test/javascript/cli_runner.js
@@ -22,6 +22,10 @@ function runTest() {
   var count = 0;
   var start = new Date().getTime();
 
+  if(couchTests.skip) {
+      quit(2);
+  }
+
   for(var name in couchTests) {
       count++;
   }
diff --git a/test/javascript/run b/test/javascript/run
index 1fa605d..ebcdef9 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -44,14 +44,17 @@ RUNNER = "test/javascript/cli_runner.js"
 def mkformatter(tests):
     longest = max([len(x) for x in tests])
     green = "\033[32m"
+    orange = "\033[33m"
     red = "\033[31m"
     clear = "\033[0m"
     if not sys.stderr.isatty():
-        green, read, clear = "", "", ""
+        green, orange, red, clear = "", "", "", ""
 
-    def _colorized(passed):
-        if passed:
+    def _colorized(rval):
+        if rval == 0:
             return green + "pass" + clear
+        elif rval == 2:
+            return orange + "skipped" + clear
         else:
             return red + "fail" + clear
 
@@ -60,7 +63,7 @@ def mkformatter(tests):
             padding = (longest - len(test)) * " "
             sys.stderr.write(test + "   " + padding)
             sys.stderr.flush()
-        elif isinstance(test, bool):
+        elif isinstance(test, int):
             if test:
                 sys.stderr.write(_colorized(test) + os.linesep)
             else:
@@ -86,7 +89,7 @@ def run_couchjs(test, fmt):
         line = line.decode()
         sys.stderr.write(line)
     p.wait()
-    fmt(p.returncode == 0)
+    fmt(p.returncode)
     return p.returncode
 
 
@@ -163,7 +166,7 @@ def main():
         fmt = mkformatter(tests)
         for test in tests:
             result = run_couchjs(test, fmt)
-            if result == 0:
+            if result == 0 or result == 2:
                 passed += 1
             else:
                 failed += 1
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
index 1994a0a..79c070e 100644
--- a/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
+++ b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.attachments_delete_overridden_quorum= function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
@@ -25,7 +26,7 @@ couchTests.attachments_delete_overridden_quorum= function(debug) {
   var rev = JSON.parse(xhr.responseText).rev;
 
   xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
   // TODO: Define correct behaviour
   //T(xhr.status == 202,"Should return 202 but returns "+xhr.status);
 
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
index 22c8a4c..f9deb15 100644
--- a/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
+++ b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
@@ -11,6 +11,7 @@
 // the License.
 
 //Test attachments operations with an overridden quorum parameter
+couchTests.skip = true;
 couchTests.attachments_overriden_quorum= function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
@@ -32,7 +33,7 @@ couchTests.attachments_overriden_quorum= function(debug) {
     body:"This is no base64 encoded text-2",
     headers:{"Content-Type": "text/plain;charset=utf-8"}
   });
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
   //TODO: Define correct behaviour
   //T(xhr.status == 202,"Should return 202");
 
diff --git a/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
index 14d319c..1e69cd8 100644
--- a/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
+++ b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
@@ -11,6 +11,7 @@
 // the License.
 
 // Do DB creation under cluster with quorum conditions but overriding write quorum.
+couchTests.skip = true;
 couchTests.db_creation_overridden_quorum = function(debug) {
 
   if (debug) debugger;
@@ -20,7 +21,7 @@ couchTests.db_creation_overridden_quorum = function(debug) {
 
   // DB Creation should return 202 - Accepted
   xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status)
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status)
   //T(xhr.status == 202,"Should return 202");
 
   // cleanup
diff --git a/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
index 23fbc97..1ceef97 100644
--- a/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
+++ b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.doc_copy_overriden_quorum = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
@@ -23,7 +24,7 @@ couchTests.doc_copy_overriden_quorum = function(debug) {
   });
   //TODO: Define correct behaviour
   //T(xhr.status=="202","Should return 202");
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
 
   db.deleteDb();
 
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete.js b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
index d05fcaf..48a33d2 100644
--- a/test/javascript/tests-cluster/without-quorum/attachments_delete.js
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.attachments_delete= function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
@@ -26,7 +27,7 @@ couchTests.attachments_delete= function(debug) {
   var rev = JSON.parse(xhr.responseText).rev;
 
   xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
   //TODO: Define correct behaviour
   //T(xhr.status == 202,"Should return 202 Accepted but returns "+xhr.status);
 
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
index 906391a..c3b95f8 100644
--- a/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.attachments_delete_overridden_quorum= function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
@@ -25,7 +26,7 @@ couchTests.attachments_delete_overridden_quorum= function(debug) {
   var rev = JSON.parse(xhr.responseText).rev;
 
   xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
   //TODO: Define correct behaviour
   //T(xhr.status == 200,"Should return 200 but returns "+xhr.status);
 
diff --git a/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
index 6d5d798..7cee52e 100644
--- a/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
+++ b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
@@ -11,6 +11,7 @@
 // the License.
 
 // Do DB creation under cluster with quorum conditions but overriding write quorum.
+couchTests.skip = true;
 couchTests.db_creation_overridden_quorum = function(debug) {
 
   if (debug) debugger;
@@ -20,7 +21,7 @@ couchTests.db_creation_overridden_quorum = function(debug) {
 
   // DB Creation should return 201 - Created
   xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status)
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status)
   //T(xhr.status == 201,"Should return 201");
 
   //db.deleteDb();
diff --git a/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
index e72425d..bf372ca 100644
--- a/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
+++ b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
@@ -10,6 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.doc_copy_overriden_quorum = function(debug) {
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
@@ -21,7 +22,7 @@ couchTests.doc_copy_overriden_quorum = function(debug) {
   var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
     headers: {"Destination":"dummy2"}
   });
-  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status);
+  console.log("TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status);
   //TODO Defie correct behaviour
   //T(xhr.status=="201","Should return 201");
 
diff --git a/test/javascript/tests/etags_views.js b/test/javascript/tests/etags_views.js
index 6c110f8..555fe66 100644
--- a/test/javascript/tests/etags_views.js
+++ b/test/javascript/tests/etags_views.js
@@ -10,8 +10,9 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+// TODO: https://issues.apache.org/jira/browse/COUCHDB-2859
+couchTests.skip = true;
 couchTests.etags_views = function(debug) {
-  return console.log('TODO: see https://issues.apache.org/jira/browse/COUCHDB-2859');
   var db_name = get_random_db_name();
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"true"});
   db.createDb();
@@ -79,7 +80,7 @@ couchTests.etags_views = function(debug) {
   xhr = CouchDB.request("GET", "/" + db_name + "/_design/etags/_view/basicView?include_docs=true");
   var etag2 = xhr.getResponseHeader("etag");
   T(etag1 != etag2);
- 
+
   // Verify that purges affect etags
   xhr = CouchDB.request("GET", "/" + db_name + "/_design/etags/_view/fooView");
   var foo_etag = xhr.getResponseHeader("etag");
@@ -180,7 +181,7 @@ couchTests.etags_views = function(debug) {
   );
   etag2 = xhr.getResponseHeader("etag");
   T(etag1 != etag2, "POST to reduce view generates key-depdendent ETags");
-  
+
   // all docs
   xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs");
   T(xhr.status == 200);
@@ -201,21 +202,21 @@ couchTests.etags_views = function(debug) {
 
   // list etag
   // in the list test for now
-  
-  // A new database should have unique _all_docs etags. 
-  db.deleteDb(); 
+
+  // A new database should have unique _all_docs etags.
+  db.deleteDb();
   db.createDb(); // TODO: when re-activating try having a new DB name
-  db.save({a: 1}); 
-  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs"); 
-  var etag = xhr.getResponseHeader("etag"); 
-  db.deleteDb(); 
+  db.save({a: 1});
+  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs");
+  var etag = xhr.getResponseHeader("etag");
+  db.deleteDb();
   db.createDb(); // TODO: when re-activating try having a new DB name
-  db.save({a: 2}); 
-  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs"); 
+  db.save({a: 2});
+  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs");
   var new_etag = xhr.getResponseHeader("etag");
   T(etag != new_etag);
   // but still be cacheable
-  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs"); 
+  xhr = CouchDB.request("GET", "/" + db_name + "/_all_docs");
   T(new_etag == xhr.getResponseHeader("etag"));
 
   // cleanup
diff --git a/test/javascript/tests/replicator_db_compact_rep_db.js b/test/javascript/tests/replicator_db_compact_rep_db.js
index 8bd45f9..e8ba326 100644
--- a/test/javascript/tests/replicator_db_compact_rep_db.js
+++ b/test/javascript/tests/replicator_db_compact_rep_db.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_compact_rep_db = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_continuous.js b/test/javascript/tests/replicator_db_continuous.js
index 63174e9..6d37149 100644
--- a/test/javascript/tests/replicator_db_continuous.js
+++ b/test/javascript/tests/replicator_db_continuous.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_continuous = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_credential_delegation.js b/test/javascript/tests/replicator_db_credential_delegation.js
index 6401819..7ec7711 100644
--- a/test/javascript/tests/replicator_db_credential_delegation.js
+++ b/test/javascript/tests/replicator_db_credential_delegation.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_credential_delegation = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_field_validation.js b/test/javascript/tests/replicator_db_field_validation.js
index 9e7bb89..4442c88 100644
--- a/test/javascript/tests/replicator_db_field_validation.js
+++ b/test/javascript/tests/replicator_db_field_validation.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_field_validation = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_filtered.js b/test/javascript/tests/replicator_db_filtered.js
index 7675b41..4c1cfb3 100644
--- a/test/javascript/tests/replicator_db_filtered.js
+++ b/test/javascript/tests/replicator_db_filtered.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_filtered = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_identical.js b/test/javascript/tests/replicator_db_identical.js
index 15bedc6..a51fb67 100644
--- a/test/javascript/tests/replicator_db_identical.js
+++ b/test/javascript/tests/replicator_db_identical.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_identical = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_identical_continuous.js b/test/javascript/tests/replicator_db_identical_continuous.js
index bafa19c..37495ec 100644
--- a/test/javascript/tests/replicator_db_identical_continuous.js
+++ b/test/javascript/tests/replicator_db_identical_continuous.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_identical_continuous = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_invalid_filter.js b/test/javascript/tests/replicator_db_invalid_filter.js
index 38c7469..a974ad2 100644
--- a/test/javascript/tests/replicator_db_invalid_filter.js
+++ b/test/javascript/tests/replicator_db_invalid_filter.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_invalid_filter = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_security.js b/test/javascript/tests/replicator_db_security.js
index ffb5c40..4994958 100644
--- a/test/javascript/tests/replicator_db_security.js
+++ b/test/javascript/tests/replicator_db_security.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_security = function(debug) {
-  return console.log('TODO');
-
   var reset_dbs = function(dbs) {
     dbs.forEach(function(db) {
       db.deleteDb();
@@ -166,7 +165,7 @@ couchTests.replicator_db_security = function(debug) {
         names : ["benoitc"]
       }
     }).ok);
-    
+
     run_on_modified_server([
         {
           section: "admins",
diff --git a/test/javascript/tests/replicator_db_simple.js b/test/javascript/tests/replicator_db_simple.js
index 61fed8d..ad0a692 100644
--- a/test/javascript/tests/replicator_db_simple.js
+++ b/test/javascript/tests/replicator_db_simple.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_simple = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_successive.js b/test/javascript/tests/replicator_db_successive.js
index c556baf..d2ff4df 100644
--- a/test/javascript/tests/replicator_db_successive.js
+++ b/test/javascript/tests/replicator_db_successive.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_successive = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_survives.js b/test/javascript/tests/replicator_db_survives.js
index 2fa69da..e44156d 100644
--- a/test/javascript/tests/replicator_db_survives.js
+++ b/test/javascript/tests/replicator_db_survives.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_survives = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_swap_rep_db.js b/test/javascript/tests/replicator_db_swap_rep_db.js
index a802134..4eac484 100644
--- a/test/javascript/tests/replicator_db_swap_rep_db.js
+++ b/test/javascript/tests/replicator_db_swap_rep_db.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_swap_rep_db = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_update_security.js b/test/javascript/tests/replicator_db_update_security.js
index 78d02af..73c28f9 100644
--- a/test/javascript/tests/replicator_db_update_security.js
+++ b/test/javascript/tests/replicator_db_update_security.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_update_security = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_user_ctx.js b/test/javascript/tests/replicator_db_user_ctx.js
index 353e2ed..06ca781 100644
--- a/test/javascript/tests/replicator_db_user_ctx.js
+++ b/test/javascript/tests/replicator_db_user_ctx.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_user_ctx = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/replicator_db_write_auth.js b/test/javascript/tests/replicator_db_write_auth.js
index 9745395..2ac27c2 100644
--- a/test/javascript/tests/replicator_db_write_auth.js
+++ b/test/javascript/tests/replicator_db_write_auth.js
@@ -10,9 +10,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+couchTests.skip = true;
 couchTests.replicator_db_survives = function(debug) {
-  return console.log('TODO');
-
   if (debug) debugger;
 
   var populate_db = replicator_db.populate_db;
diff --git a/test/javascript/tests/stats.js b/test/javascript/tests/stats.js
index be9d4d2..3a89ddd 100644
--- a/test/javascript/tests/stats.js
+++ b/test/javascript/tests/stats.js
@@ -10,11 +10,10 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+// test has become very flaky - needs complete rewrite
+couchTests.skip = true;
 couchTests.stats = function(debug) {
 
-  // test has become very flaky - needs complete rewrite
-  return console.log('TODO');
-
   function newDb(doSetup) {
     var db_name = get_random_db_name();
     var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
@@ -65,14 +64,14 @@ couchTests.stats = function(debug) {
   (function() {
     var db = newDb(false);
     db.deleteDb();
-  
+
     var before = getStat(["couchdb", "open_databases"]);
     db.createDb();
     var after = getStat(["couchdb", "open_databases"]);
     TEquals(before+8, after, "Creating a db increments open db count.");
     db.deleteDb();
   })();
-  
+
   runTest(["couchdb", "open_databases"], {
     setup: function() {restartServer();},
     run: function(db) {db.open("123");},
@@ -80,7 +79,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Opening a db increases open db count.");
     }
   });
-  
+
   runTest(["couchdb", "open_databases"], {
     setup: function(db) {restartServer(); db.open("123");},
     run: function(db) {db.deleteDb();},
@@ -88,16 +87,16 @@ couchTests.stats = function(debug) {
       T(before>after, "Deleting a db decrements open db count.");
     }
   });
-  
-  /* Improvements in LRU has made this test difficult... 
+
+  /* Improvements in LRU has made this test difficult...
   (function() {
     restartServer();
     var max = 5;
-    
+
     var testFun = function() {
       var pre_dbs = getStat(["couchdb", "open_databases"]) || 0;
       var pre_files = getStat(["couchdb", "open_os_files"]) || 0;
-     
+
       var triggered = false;
       var db = null;
       var dbs = [];
@@ -117,15 +116,15 @@ couchTests.stats = function(debug) {
         db.save({"a": "1"});
       }
       T(triggered, "We managed to force a all_dbs_active error.");
-      
+
       var open_dbs = getStat(["couchdb", "open_databases"]);
       TEquals(open_dbs > 0, true, "We actually opened some dbs.");
       TEquals(max, open_dbs, "We only have max db's open.");
-      
+
       for (var i = 0; i < dbs.length; i++) {
         dbs[i].deleteDb();
       }
-      
+
       var post_dbs = getStat(["couchdb", "open_databases"]);
       var post_files = getStat(["couchdb", "open_os_files"]);
       TEquals(pre_dbs, post_dbs, "We have the same number of open dbs.");
@@ -134,14 +133,14 @@ couchTests.stats = function(debug) {
         dbs[ctr].deleteDb();
       }
     };
-    
+
     run_on_modified_server(
       [{section: "couchdb", key: "max_dbs_open", value: "40"}],
       testFun
     );
   })();
   */
-  
+
   // Just fetching the before value is the extra +1 in test
   runTest(["couchdb", "httpd", "requests"], {
     run: function() {CouchDB.request("GET", "/");},
@@ -149,7 +148,7 @@ couchTests.stats = function(debug) {
       TEquals(before+2, after, "Request counts are incremented properly.");
     }
   });
-  
+
   runTest(["couchdb", "database_reads"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {db.open("test");},
@@ -157,7 +156,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Reading a doc increments docs reads.");
     }
   });
-  
+
   runTest(["couchdb", "database_reads"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {db.request("GET", "/");},
@@ -165,7 +164,7 @@ couchTests.stats = function(debug) {
       TEquals(before, after, "Only doc reads increment doc reads.");
     }
   });
-  
+
   runTest(["couchdb", "database_reads"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {db.open("test", {"open_revs": "all"});},
@@ -173,14 +172,14 @@ couchTests.stats = function(debug) {
       T(before<after, "Reading doc revs increments docs reads.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     run: function(db) {db.save({"a": "1"});},
     test: function(before, after) {
       T(before<after, "Saving docs incrememnts doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     run: function(db) {
       CouchDB.request("POST", "/" + db.name + "", {
@@ -192,7 +191,7 @@ couchTests.stats = function(debug) {
       T(before<after, "POST'ing new docs increments doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {var doc = db.open("test"); db.save(doc);},
@@ -200,7 +199,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Updating docs incrememnts doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {var doc = db.open("test"); db.deleteDoc(doc);},
@@ -208,7 +207,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Deleting docs increments doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {
@@ -220,7 +219,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Copying docs increments doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     run: function(db) {
       CouchDB.request("PUT", "/" + db.name + "/bin_doc2/foo2.txt", {
@@ -232,7 +231,7 @@ couchTests.stats = function(debug) {
       T(before<after, "Create with attachment increments doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "database_writes"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {
@@ -246,21 +245,21 @@ couchTests.stats = function(debug) {
       T(before<after, "Adding attachment increments doc writes.");
     }
   });
-  
+
   runTest(["couchdb", "httpd", "bulk_requests"], {
     run: function(db) {db.bulkSave(makeDocs(5));},
     test: function(before, after) {
       TEquals(before+1, after, "The bulk_requests counter is incremented.");
     }
   });
-  
+
   runTest(["couchdb", "httpd", "view_reads"], {
     run: function(db) {doView(db);},
     test: function(before, after) {
       T(before<after, "Reading a view increments view reads.");
     }
   });
-  
+
   runTest(["couchdb", "httpd", "view_reads"], {
     setup: function(db) {db.save({"_id": "test"});},
     run: function(db) {db.open("test");},
@@ -268,35 +267,35 @@ couchTests.stats = function(debug) {
       TEquals(before, after, "Reading a doc doesn't increment view reads.");
     }
   });
-  
+
   // Relies on getting the stats values being GET requests.
   runTest(["couchdb", "httpd_request_methods", "GET"], {
     test: function(before, after) {
       TEquals(before+1, after, "Get requests are incremented properly.");
     }
   });
-  
+
   runTest(["couchdb", "httpd_request_methods", "GET"], {
     run: function() {CouchDB.request("POST", "/");},
     test: function(before, after) {
       TEquals(before+1, after, "POST requests don't affect GET counter.");
     }
   });
-  
+
   runTest(["couchdb", "httpd_request_methods", "POST"], {
     run: function() {CouchDB.request("POST", "/");},
     test: function(before, after) {
       TEquals(before+1, after, "POST requests are incremented properly.");
     }
   });
-  
+
   runTest(["couchdb", "httpd_status_codes", "404"], {
     run: function() {CouchDB.request("GET", "/nonexistant_db");},
     test: function(before, after) {
       TEquals(before+1, after, "Increments 404 counter on db not found.");
     }
   });
-  
+
   runTest(["couchdb", "httpd_status_codes", "404"], {
     run: function() {CouchDB.request("GET", "/");},
     test: function(before, after) {


[couchdb] 01/41: Silence already started message for crypto

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 4d73243379538c057c37a0a248283d53161d84bf
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Dec 20 13:15:43 2019 -0600

    Silence already started message for crypto
---
 src/couch/src/test_util.erl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 9566e8e..125e764 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -98,6 +98,8 @@ start_applications([App|Apps], Acc) when App == kernel; App == stdlib ->
     start_applications(Apps, Acc);
 start_applications([App|Apps], Acc) ->
     case application:start(App) of
+    {error, {already_started, crypto}} ->
+        start_applications(Apps, [crypto | Acc]);
     {error, {already_started, App}} ->
         io:format(standard_error, "Application ~s was left running!~n", [App]),
         application:stop(App),


[couchdb] 32/41: Speedup eunit: ddoc_cache_no_cache_test

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d45b21a818449a4ef1b58766ec2b4665a7ccd3c2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:40:07 2019 -0600

    Speedup eunit: ddoc_cache_no_cache_test
---
 src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
index a1937a0..9668291 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
@@ -40,10 +40,12 @@ no_cache_test_() ->
         "ddoc_cache no cache test",
         {
             setup,
-            fun ddoc_cache_tutil:start_couch/0, fun ddoc_cache_tutil:stop_couch/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             {
                 foreachx,
-                fun setup/1, fun teardown/2,
+                fun setup/1,
+                fun teardown/2,
                 [
                     {fun ddoc/1, fun no_cache_open_ok_test/2},
                     {fun not_found/1, fun no_cache_open_not_found_test/2},
@@ -53,8 +55,16 @@ no_cache_test_() ->
         }
     }.
 
-setup(Resp) ->
+setup_all() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
     meck:new(fabric),
+    Ctx.
+
+teardown_all(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+setup(Resp) ->
     meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
         Resp(DDocId)
     end).


[couchdb] 27/41: Speedup eunit: couch_mrview_purge_docs_fabric_tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ad78501ad8c6a7fdb40c613e742eb69b3b7a31b5
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:37:39 2019 -0600

    Speedup eunit: couch_mrview_purge_docs_fabric_tests
---
 .../eunit/couch_mrview_purge_docs_fabric_tests.erl     | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
index a593f54..b2969bb 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
@@ -20,10 +20,21 @@
 -define(TIMEOUT, 60). % seconds
 
 
+setup_all() ->
+    Ctx = test_util:start_couch([fabric, mem3]),
+    meck:new(couch_mrview_index, [passthrough]),
+    Ctx.
+
+
+teardown_all(Ctx) ->
+    meck:unload(),
+    test_util:stop_couch(Ctx).
+
+
 setup() ->
     DbName = ?tempdb(),
     ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]),
-    meck:new(couch_mrview_index, [passthrough]),
+    meck:reset([couch_mrview_index]),
     meck:expect(couch_mrview_index, ensure_local_purge_docs, fun(A, B) ->
         meck:passthrough([A, B])
     end),
@@ -31,7 +42,6 @@ setup() ->
 
 
 teardown(DbName) ->
-    meck:unload(),
     ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
 
 
@@ -40,8 +50,8 @@ view_purge_fabric_test_() ->
         "Map views",
         {
             setup,
-            fun() -> test_util:start_couch([fabric, mem3]) end,
-            fun test_util:stop_couch/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             {
                 foreach,
                 fun setup/0,


[couchdb] 36/41: Speedup eunit: fabric_doc_update

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 64bb28c41bddf8af615bde74c7b9bf552c7100ef
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:41:23 2019 -0600

    Speedup eunit: fabric_doc_update
---
 src/fabric/src/fabric_doc_update.erl | 53 +++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 19 deletions(-)

diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index c108c9a..69babc1 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -219,13 +219,36 @@ validate_atomic_update(_DbName, AllDocs, true) ->
     end, AllDocs),
     throw({aborted, PreCommitFailures}).
 
-% eunits
-doc_update1_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+
+setup_all() ->
+    meck:new([couch_log, couch_stats]),
     meck:expect(couch_log, warning, fun(_,_) -> ok end),
+    meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
+
+
+teardown_all(_) ->
+    meck:unload().
+
 
+doc_update_test_() ->
+    {
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        [
+            fun doc_update1/0,
+            fun doc_update2/0,
+            fun doc_update3/0
+        ]
+    }.
+
+
+% eunits
+doc_update1() ->
     Doc1 = #doc{revs = {1,[<<"foo">>]}},
     Doc2 = #doc{revs = {1,[<<"bar">>]}},
     Docs = [Doc1],
@@ -294,17 +317,9 @@ doc_update1_test() ->
     ?assertEqual(
         {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
         ReplyW5
-    ),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
-
-
-doc_update2_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_,_) -> ok end),
+    ).
 
+doc_update2() ->
     Doc1 = #doc{revs = {1,[<<"foo">>]}},
     Doc2 = #doc{revs = {1,[<<"bar">>]}},
     Docs = [Doc2, Doc1],
@@ -326,11 +341,9 @@ doc_update2_test() ->
         handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
 
     ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
-        Reply),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
+        Reply).
 
-doc_update3_test() ->
+doc_update3() ->
     Doc1 = #doc{revs = {1,[<<"foo">>]}},
     Doc2 = #doc{revs = {1,[<<"bar">>]}},
     Docs = [Doc2, Doc1],
@@ -360,3 +373,5 @@ group_docs_by_shard_hack(_DbName, Shards, Docs) ->
             dict:append(Shard, Doc, D1)
         end, D0, Shards)
     end, dict:new(), Docs)).
+
+-endif.


[couchdb] 41/41: Speedup eunit: smoosh_server

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 38a85fd76df223685b2cfb1eb81b2f3dedc899c3
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:43:17 2019 -0600

    Speedup eunit: smoosh_server
---
 src/smoosh/src/smoosh_server.erl | 45 ++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/src/smoosh/src/smoosh_server.erl b/src/smoosh/src/smoosh_server.erl
index 43f4bd8..7af1e4e 100644
--- a/src/smoosh/src/smoosh_server.erl
+++ b/src/smoosh/src/smoosh_server.erl
@@ -446,18 +446,22 @@ needs_upgrade(Props) ->
 -include_lib("eunit/include/eunit.hrl").
 
 
-setup() ->
+setup_all() ->
     meck:new([config, couch_index, couch_index_server], [passthrough]),
     Pid = list_to_pid("<0.0.0>"),
     meck:expect(couch_index_server, get_index, 3, {ok, Pid}),
-    meck:expect(config, get, fun(_, _, Default) -> Default end),
+    meck:expect(config, get, fun(_, _, Default) -> Default end).
+
+teardown_all(_) ->
+    meck:unload().
+
+setup() ->
     Shard = <<"shards/00000000-1fffffff/test.1529510412">>,
     GroupId = <<"_design/ddoc">>,
     {ok, Shard, GroupId}.
 
-
 teardown(_) ->
-    meck:unload().
+    ok.
 
 config_change_test_() ->
     {
@@ -474,20 +478,25 @@ config_change_test_() ->
 
 get_priority_test_() ->
     {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            fun t_ratio_view/1,
-            fun t_slack_view/1,
-            fun t_no_data_view/1,
-            fun t_below_min_priority_view/1,
-            fun t_below_min_size_view/1,
-            fun t_timeout_view/1,
-            fun t_missing_view/1,
-            fun t_invalid_view/1
-        ]
-}.
+        setup,
+        fun setup_all/0,
+        fun teardown_all/1,
+        {
+            foreach,
+            fun setup/0,
+            fun teardown/1,
+            [
+                fun t_ratio_view/1,
+                fun t_slack_view/1,
+                fun t_no_data_view/1,
+                fun t_below_min_priority_view/1,
+                fun t_below_min_size_view/1,
+                fun t_timeout_view/1,
+                fun t_missing_view/1,
+                fun t_invalid_view/1
+            ]
+        }
+    }.
 
 t_restart_config_listener(_) ->
     ?_test(begin


[couchdb] 24/41: Speedup eunit: couch_index

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch speedup-test-suite
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6a569d3d179b9a4b3191585537fd5d8d3b43b8ad
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Dec 25 11:36:22 2019 -0600

    Speedup eunit: couch_index
---
 src/couch_index/src/couch_index.erl | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
index ab6deae..cfe0d9e 100644
--- a/src/couch_index/src/couch_index.erl
+++ b/src/couch_index/src/couch_index.erl
@@ -470,18 +470,25 @@ get(idx_name, _, _) ->
 get(signature, _, _) ->
     <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>.
 
-setup(Settings) ->
+setup_all() ->
+    Ctx = test_util:start_couch(),
     ok = meck:new([config], [passthrough]),
     ok = meck:new([test_index], [non_strict]),
+    ok = meck:expect(test_index, get, fun get/3),
+    Ctx.
+
+teardown_all(Ctx) ->
+    meck:unload(),
+    test_util:stop_couch(Ctx).
+
+setup(Settings) ->
+    meck:reset([config, test_index]),
     ok = meck:expect(config, get, fun(Section, Key) ->
         configure(Section, Key, Settings)
     end),
-    ok = meck:expect(test_index, get, fun get/3),
     {undefined, #st{mod = {test_index}}}.
 
 teardown(_, _) ->
-    (catch meck:unload(config)),
-    (catch meck:unload(test_index)),
     ok.
 
 configure("view_compaction", "enabled_recompaction", [Global, _Db, _Index]) ->
@@ -498,10 +505,12 @@ recompaction_configuration_test_() ->
         "Compaction tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun setup_all/0,
+            fun teardown_all/1,
             {
                 foreachx,
-                fun setup/1, fun teardown/2,
+                fun setup/1,
+                fun teardown/2,
                 recompaction_configuration_tests()
             }
         }