You are viewing a plain text version of this content; the canonical HTML link was omitted in this plain-text rendering and can be found in the Apache mailing list archives.
Posted to commits@couchdb.apache.org by ko...@apache.org on 2019/07/28 13:47:47 UTC

[couchdb] branch jenkins-fix-eunit-timeouts created (now 265b1ca)

This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a change to branch jenkins-fix-eunit-timeouts
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


      at 265b1ca  Modernize the sync_security test setup/teardown

This branch includes the following new commits:

     new f0f35d9  Proactively increase timeout for PBKDF2 test
     new 662aa14  Bump timeouts for slow reshard_api tests
     new 1eccd7a  Bump default timeout for all mem3_reshard tests
     new 265b1ca  Modernize the sync_security test setup/teardown

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[couchdb] 02/04: Bump timeouts for slow reshard_api tests

Posted by ko...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a commit to branch jenkins-fix-eunit-timeouts
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 662aa14390072c85634e64a2afe7646859286c78
Author: Adam Kocoloski <ko...@apache.org>
AuthorDate: Sun Jul 28 09:42:14 2019 -0400

    Bump timeouts for slow reshard_api tests
    
    These two tests do a lot of work and could exceed the default timeout
    under normal circumstances.
---
 src/mem3/test/mem3_reshard_api_test.erl | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/mem3/test/mem3_reshard_api_test.erl b/src/mem3/test/mem3_reshard_api_test.erl
index 982fed1..f394308 100644
--- a/src/mem3/test/mem3_reshard_api_test.erl
+++ b/src/mem3/test/mem3_reshard_api_test.erl
@@ -27,6 +27,7 @@
 -define(STATE, "_reshard/state").
 -define(ID, <<"id">>).
 -define(OK, <<"ok">>).
+-define(TIMEOUT, 60). % seconds
 
 
 setup() ->
@@ -438,7 +439,7 @@ create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
 
 
 create_job_with_db({Top, {Db1, _, _}}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         Jobs = Top ++ ?JOBS,
         Body1 = #{type => split, db => Db1},
 
@@ -465,11 +466,11 @@ create_job_with_db({Top, {Db1, _, _}}) ->
             [16#80000000, 16#bfffffff],
             [16#c0000000, 16#ffffffff]
         ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))])
-    end).
+    end)}.
 
 
 create_job_with_shard_name({Top, {_, _, Db3}}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         Jobs = Top ++ ?JOBS,
         [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
 
@@ -490,7 +491,7 @@ create_job_with_shard_name({Top, {_, _, Db3}}) ->
             [16#80000000, 16#bfffffff],
             [16#c0000000, 16#ffffffff]
         ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))])
-    end).
+    end)}.
 
 
 completed_job_handling({Top, {Db1, _, _}}) ->


[couchdb] 01/04: Proactively increase timeout for PBKDF2 test

Posted by ko...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a commit to branch jenkins-fix-eunit-timeouts
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f0f35d93aabf01d060b3593e1e3b3acadf053b0f
Author: Adam Kocoloski <ko...@apache.org>
AuthorDate: Sun Jul 28 09:15:04 2019 -0400

    Proactively increase timeout for PBKDF2 test
    
    This test was taking 134s in a recent run, which is uncomfortably close
    to the threshold.
---
 src/couch/test/couch_passwords_tests.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl
index dea6d6b..88de853 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/couch_passwords_tests.erl
@@ -46,7 +46,7 @@ pbkdf2_test_()->
                                      <<"sa\0lt">>,
                                      4096, 16))},
 
-         {timeout, 180,  %% this may runs too long on slow hosts
+         {timeout, 600,  %% this may runs too long on slow hosts
           {"Iterations: 16777216 - this may take some time",
            ?_assertEqual(
                {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},


[couchdb] 03/04: Bump default timeout for all mem3_reshard tests

Posted by ko...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a commit to branch jenkins-fix-eunit-timeouts
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 1eccd7ab6f572c9c51d4f3048d6a9c7bed1fd13b
Author: Adam Kocoloski <ko...@apache.org>
AuthorDate: Sun Jul 28 09:43:31 2019 -0400

    Bump default timeout for all mem3_reshard tests
    
    A couple of these tests were exceeding the default timeout under normal
    circumstances, but many of them do a significant amount of work, so for
    simplicity we set a module-wide timeout and apply it consistently
    throughout.
---
 src/mem3/test/mem3_reshard_test.erl | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/mem3_reshard_test.erl
index 8c44796..ab62021 100644
--- a/src/mem3/test/mem3_reshard_test.erl
+++ b/src/mem3/test/mem3_reshard_test.erl
@@ -19,6 +19,7 @@
 -include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
 
 -define(ID, <<"_id">>).
+-define(TIMEOUT, 60).
 
 setup() ->
     HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
@@ -81,7 +82,7 @@ mem3_reshard_db_test_() ->
 % This is a basic test to check that shard splitting preserves documents, and
 % db meta props like revs limits and security.
 split_one_shard(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
         add_test_docs(Db, DocSpec),
 
@@ -135,13 +136,13 @@ split_one_shard(#{db1 := Db}) ->
         % Don't forget about the local but don't include internal checkpoints
         % as some of those are munged and transformed during the split
         ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
-    end).
+    end)}.
 
 
 % This test checks that document added while the shard is being split are not
 % lost. Topoff1 state happens before indices are built
 update_docs_before_topoff1(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         add_test_docs(Db, #{docs => 10}),
 
         intercept_state(topoff1),
@@ -177,12 +178,12 @@ update_docs_before_topoff1(#{db1 := Db}) ->
 
         ?assertEqual(Docs0, Docs1),
         ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
-    end).
+    end)}.
 
 
 % This test that indices are built during shard splitting.
 indices_are_built(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
         HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
 
@@ -206,7 +207,7 @@ indices_are_built(#{db1 := Db}) ->
             % 4 because there are 2 indices and 2 target shards
             ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
         end
-    end).
+    end)}.
 
 
 mock_dreyfus_indices() ->
@@ -238,7 +239,7 @@ mock_hastings_indices() ->
 
 % Split partitioned database
 split_partitioned_db(#{db2 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{
             pdocs => #{
                 <<"PX">> => 5,
@@ -304,14 +305,14 @@ split_partitioned_db(#{db2 := Db}) ->
         % Don't forget about the local but don't include internal checkpoints
         % as some of those are munged and transformed during the split
         ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
-    end).
+    end)}.
 
 
 % Make sure a shard can be split again after it was split once. This checks that
 % too many got added to some range, such that on next split they'd fail to fit
 % in to any of the new target ranges.
 split_twice(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
         add_test_docs(Db, DocSpec),
 
@@ -390,11 +391,11 @@ split_twice(#{db1 := Db}) ->
         ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
         ?assertEqual(Docs1, Docs2),
         ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
-    end).
+    end)}.
 
 
 couch_events_are_emitted(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         couch_event:register_all(self()),
 
         % Split the one shard
@@ -425,11 +426,11 @@ couch_events_are_emitted(#{db1 := Db}) ->
         StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
         ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
         couch_event:unregister(self())
-    end).
+    end)}.
 
 
 retries_work(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         meck:expect(couch_db_split, split, fun(_, _, _) ->
              error(kapow)
         end),
@@ -439,11 +440,11 @@ retries_work(#{db1 := Db}) ->
 
         wait_state(JobId, failed),
         ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
-    end).
+    end)}.
 
 
 target_reset_in_initial_copy(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
         Job = #job{
             source = Src,
@@ -465,17 +466,17 @@ target_reset_in_initial_copy(#{db1 := Db}) ->
         exit(JobPid, kill),
         exit(BogusParent, kill),
         ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
-    end).
+    end)}.
 
 
 split_an_incomplete_shard_map(#{db1 := Db}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
         [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
         meck:expect(mem3_util, calculate_max_n, 1, 0),
         ?assertMatch({error, {not_enough_shard_copies, _}},
             mem3_reshard:start_split_job(Shard))
-    end).
+    end)}.
 
 
 intercept_state(State) ->


[couchdb] 04/04: Modernize the sync_security test setup/teardown

Posted by ko...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a commit to branch jenkins-fix-eunit-timeouts
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 265b1ca8f70493b1a0c682a994dc2be7f70614aa
Author: Adam Kocoloski <ko...@apache.org>
AuthorDate: Sun Jul 28 09:45:27 2019 -0400

    Modernize the sync_security test setup/teardown
    
    This test actually doesn't do much real work, but I think what was
    happening is that the setup and teardown time was being charged to the
    test itself. I've refactored it to use a more modern scaffolding
    following some of our more recent additions to the test suite, but have
    left the timeout at the default to test this hypothesis.
---
 src/mem3/test/mem3_sync_security_test.erl | 48 ++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 13 deletions(-)

diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/mem3_sync_security_test.erl
index 4e06dd8..e67a720 100644
--- a/src/mem3/test/mem3_sync_security_test.erl
+++ b/src/mem3/test/mem3_sync_security_test.erl
@@ -17,16 +17,38 @@
 -include("mem3.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-go_test() ->
-    Ctx = test_util:start_couch([fabric, mem3]),
-    try
-        ok = meck:new(fabric, [passthrough]),
-        meck:expect(fabric, all_dbs, fun() ->
-            {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
-        end),
-        Result = mem3_sync_security:go(),
-        ?assertEqual(ok, Result)
-    after
-        meck:unload(),
-        test_util:stop_couch(Ctx)
-    end.
+-define(TIMEOUT, 5). % seconds
+
+go_test_() ->
+    {
+        "security property sync test",
+        {
+            setup,
+            fun start_couch/0, fun stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun sync_security_ok/1
+                ]
+            }
+        }
+    }.
+
+start_couch() ->
+    test_util:start_couch([fabric, mem3]).
+
+stop_couch(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+setup() ->
+    ok = meck:new(fabric, [passthrough]),
+    meck:expect(fabric, all_dbs, fun() ->
+        {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
+    end).
+
+teardown(_) ->
+    meck:unload().
+
+sync_security_ok(_) ->
+    {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}.