You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by va...@apache.org on 2022/08/19 01:52:38 UTC

[couchdb] 03/18: Update couch_replicator_compact_tests

This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch refactor-replication-tests
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 35da93ac185daf5844ee826fd24478115a8e7373
Author: Nick Vatamaniuc <va...@gmail.com>
AuthorDate: Thu Aug 18 21:10:15 2022 -0400

    Update couch_replicator_compact_tests
    
    Compactor tests are the only tests which continue using the local ports since
    they deal with triggering and managing low-level compaction processes.
    
    However, it was still possible to improve the tests somewhat by using the
    TDEF_FE macros and removing some left-over foreachx cruft.
---
 .../test/eunit/couch_replicator_compact_tests.erl  | 314 +++++++++------------
 1 file changed, 131 insertions(+), 183 deletions(-)

diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
index 1c093d58c..1f241c753 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
@@ -15,11 +15,7 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_replicator/src/couch_replicator.hrl").
-
--import(couch_replicator_test_helper, [
-    db_url/1,
-    get_pid/1
-]).
+-include("couch_replicator_test.hrl").
 
 -define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])).
 -define(DELAY, 500).
@@ -28,92 +24,60 @@
 -define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 70).
 -define(WRITE_BATCH_SIZE, 25).
 
-setup() ->
+setup_db() ->
     DbName = ?tempdb(),
     {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
     ok = couch_db:close(Db),
     DbName.
 
-setup(remote) ->
-    {remote, setup()};
-setup({A, B}) ->
-    Ctx = test_util:start_couch([couch_replicator]),
-    Source = setup(A),
-    Target = setup(B),
-    {Ctx, {Source, Target}}.
-
-teardown({remote, DbName}) ->
-    teardown(DbName);
-teardown(DbName) ->
+teardown_db(DbName) ->
     ok = couch_server:delete(DbName, [?ADMIN_CTX]),
     ok.
 
-teardown(_, {Ctx, {Source, Target}}) ->
-    teardown(Source),
-    teardown(Target),
-    ok = application:stop(couch_replicator),
+test_setup() ->
+    Ctx = test_util:start_couch([couch_replicator]),
+    Source = setup_db(),
+    Target = setup_db(),
+    {Ctx, {Source, Target}}.
+
+test_teardown({Ctx, {Source, Target}}) ->
+    teardown_db(Source),
+    teardown_db(Target),
     ok = test_util:stop_couch(Ctx).
 
 compact_test_() ->
-    Pairs = [{remote, remote}],
     {
         "Compaction during replication tests",
         {
-            foreachx,
-            fun setup/1,
-            fun teardown/2,
+            foreach,
+            fun test_setup/0,
+            fun test_teardown/1,
             [
-                {Pair, fun should_populate_replicate_compact/2}
-             || Pair <- Pairs
+                ?TDEF_FE(populate_replicate_compact, ?TIMEOUT_EUNIT)
             ]
         }
     }.
 
-should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
+populate_replicate_compact({_Ctx, {Source, Target}}) ->
     {ok, RepPid, RepId} = replicate(Source, Target),
-    {
-        lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-        {inorder, [
-            should_run_replication(RepPid, RepId, Source, Target),
-            should_all_processes_be_alive(RepPid, Source, Target),
-            should_populate_and_compact(RepPid, Source, Target, 50, 3),
-            should_wait_target_in_sync(Source, Target),
-            should_ensure_replication_still_running(RepPid, RepId, Source, Target),
-            should_cancel_replication(RepId, RepPid),
-            should_compare_databases(Source, Target)
-        ]}
-    }.
-
-should_all_processes_be_alive(RepPid, Source, Target) ->
-    ?_test(begin
-        {ok, SourceDb} = reopen_db(Source),
-        {ok, TargetDb} = reopen_db(Target),
-        ?assert(is_process_alive(RepPid)),
-        ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
-        ?assert(is_process_alive(couch_db:get_pid(TargetDb)))
-    end).
-
-should_run_replication(RepPid, RepId, Source, Target) ->
-    ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
-
-should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
-    ?_test(check_active_tasks(RepPid, RepId, Source, Target)).
+    check_active_tasks(RepPid, RepId, Source, Target),
+    all_processes_are_alive(RepPid, Source, Target),
+    populate_and_compact(RepPid, Source, Target, 50, 3),
+    wait_target_in_sync(Source, Target),
+    check_active_tasks(RepPid, RepId, Source, Target),
+    cancel_replication(RepId, RepPid),
+    compare_databases(Source, Target).
+
+all_processes_are_alive(RepPid, Source, Target) ->
+    {ok, SourceDb} = reopen_db(Source),
+    {ok, TargetDb} = reopen_db(Target),
+    ?assert(is_process_alive(RepPid)),
+    ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
+    ?assert(is_process_alive(couch_db:get_pid(TargetDb))).
 
 check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
-    Source =
-        case Src of
-            {remote, NameSrc} ->
-                <<(db_url(NameSrc))/binary, $/>>;
-            _ ->
-                Src
-        end,
-    Target =
-        case Tgt of
-            {remote, NameTgt} ->
-                <<(db_url(NameTgt))/binary, $/>>;
-            _ ->
-                Tgt
-        end,
+    Source = <<(db_url(Src))/binary, $/>>,
+    Target = <<(db_url(Tgt))/binary, $/>>,
     FullRepId = ?l2b(BaseId ++ Ext),
     Pid = ?l2b(pid_to_list(RepPid)),
     RepTasks = wait_for_task_status(),
@@ -152,71 +116,59 @@ wait_for_task_status() ->
         end
     end).
 
-should_cancel_replication(RepId, RepPid) ->
-    ?_assertNot(begin
-        ok = couch_replicator_scheduler:remove_job(RepId),
-        is_process_alive(RepPid)
-    end).
+cancel_replication(RepId, RepPid) ->
+    ok = couch_replicator_scheduler:remove_job(RepId),
+    ?assertNot(is_process_alive(RepPid)).
+
+populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
+    {ok, SourceDb0} = reopen_db(Source),
+    Writer = spawn_writer(SourceDb0),
+    lists:foreach(
+        fun(N) ->
+            {ok, SourceDb} = reopen_db(Source),
+            {ok, TargetDb} = reopen_db(Target),
+            pause_writer(Writer),
+
+            compact_db("source", SourceDb),
+            ?assert(is_process_alive(RepPid)),
+            ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
+            wait_for_compaction("source", SourceDb),
+
+            compact_db("target", TargetDb),
+            ?assert(is_process_alive(RepPid)),
+            ?assert(is_process_alive(couch_db:get_pid(TargetDb))),
+            wait_for_compaction("target", TargetDb),
+
+            {ok, SourceDb2} = reopen_db(SourceDb),
+            {ok, TargetDb2} = reopen_db(TargetDb),
+
+            resume_writer(Writer),
+            wait_writer(Writer, BatchSize * N),
+
+            compact_db("source", SourceDb2),
+            ?assert(is_process_alive(RepPid)),
+            ?assert(is_process_alive(couch_db:get_pid(SourceDb2))),
+            pause_writer(Writer),
+            wait_for_compaction("source", SourceDb2),
+            resume_writer(Writer),
+
+            compact_db("target", TargetDb2),
+            ?assert(is_process_alive(RepPid)),
+            ?assert(is_process_alive(couch_db:get_pid(TargetDb2))),
+            pause_writer(Writer),
+            wait_for_compaction("target", TargetDb2),
+            resume_writer(Writer)
+        end,
+        lists:seq(1, Rounds)
+    ),
+    stop_writer(Writer).
 
-should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
-    {timeout, ?TIMEOUT_EUNIT,
-        ?_test(begin
-            {ok, SourceDb0} = reopen_db(Source),
-            Writer = spawn_writer(SourceDb0),
-            lists:foreach(
-                fun(N) ->
-                    {ok, SourceDb} = reopen_db(Source),
-                    {ok, TargetDb} = reopen_db(Target),
-                    pause_writer(Writer),
-
-                    compact_db("source", SourceDb),
-                    ?assert(is_process_alive(RepPid)),
-                    ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
-                    wait_for_compaction("source", SourceDb),
-
-                    compact_db("target", TargetDb),
-                    ?assert(is_process_alive(RepPid)),
-                    ?assert(is_process_alive(couch_db:get_pid(TargetDb))),
-                    wait_for_compaction("target", TargetDb),
-
-                    {ok, SourceDb2} = reopen_db(SourceDb),
-                    {ok, TargetDb2} = reopen_db(TargetDb),
-
-                    resume_writer(Writer),
-                    wait_writer(Writer, BatchSize * N),
-
-                    compact_db("source", SourceDb2),
-                    ?assert(is_process_alive(RepPid)),
-                    ?assert(is_process_alive(couch_db:get_pid(SourceDb2))),
-                    pause_writer(Writer),
-                    wait_for_compaction("source", SourceDb2),
-                    resume_writer(Writer),
-
-                    compact_db("target", TargetDb2),
-                    ?assert(is_process_alive(RepPid)),
-                    ?assert(is_process_alive(couch_db:get_pid(TargetDb2))),
-                    pause_writer(Writer),
-                    wait_for_compaction("target", TargetDb2),
-                    resume_writer(Writer)
-                end,
-                lists:seq(1, Rounds)
-            ),
-            stop_writer(Writer)
-        end)}.
-
-should_wait_target_in_sync({remote, Source}, Target) ->
-    should_wait_target_in_sync(Source, Target);
-should_wait_target_in_sync(Source, {remote, Target}) ->
-    should_wait_target_in_sync(Source, Target);
-should_wait_target_in_sync(Source, Target) ->
-    {timeout, ?TIMEOUT_EUNIT,
-        ?_assert(begin
-            {ok, SourceDb} = couch_db:open_int(Source, []),
-            {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
-            ok = couch_db:close(SourceDb),
-            SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
-            wait_target_in_sync_loop(SourceDocCount, Target, 300)
-        end)}.
+wait_target_in_sync(Source, Target) ->
+    {ok, SourceDb} = couch_db:open_int(Source, []),
+    {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
+    ok = couch_db:close(SourceDb),
+    SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
+    wait_target_in_sync_loop(SourceDocCount, Target, 300).
 
 wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
     erlang:error(
@@ -226,8 +178,6 @@ wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
             {reason, "Could not get source and target databases in sync"}
         ]}
     );
-wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) ->
-    wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft);
 wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
     {ok, Target} = couch_db:open_int(TargetName, []),
     {ok, TargetInfo} = couch_db:get_db_info(Target),
@@ -241,49 +191,40 @@ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
             wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
     end.
 
-should_compare_databases({remote, Source}, Target) ->
-    should_compare_databases(Source, Target);
-should_compare_databases(Source, {remote, Target}) ->
-    should_compare_databases(Source, Target);
-should_compare_databases(Source, Target) ->
-    {timeout, 35,
-        ?_test(begin
-            {ok, SourceDb} = couch_db:open_int(Source, []),
-            {ok, TargetDb} = couch_db:open_int(Target, []),
-            Fun = fun(FullDocInfo, Acc) ->
-                {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
-                {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
-                DocId = couch_util:get_value(<<"_id">>, Props),
-                DocTarget =
-                    case couch_db:open_doc(TargetDb, DocId) of
-                        {ok, DocT} ->
-                            DocT;
-                        Error ->
-                            erlang:error(
-                                {assertion_failed, [
-                                    {module, ?MODULE},
-                                    {line, ?LINE},
-                                    {reason,
-                                        lists:concat([
-                                            "Error opening document '",
-                                            ?b2l(DocId),
-                                            "' from target: ",
-                                            couch_util:to_list(Error)
-                                        ])}
-                                ]}
-                            )
-                    end,
-                DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
-                ?assertEqual(DocJson, DocTargetJson),
-                {ok, Acc}
+compare_databases(Source, Target) ->
+    {ok, SourceDb} = couch_db:open_int(Source, []),
+    {ok, TargetDb} = couch_db:open_int(Target, []),
+    Fun = fun(FullDocInfo, Acc) ->
+        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+        DocId = couch_util:get_value(<<"_id">>, Props),
+        DocTarget =
+            case couch_db:open_doc(TargetDb, DocId) of
+                {ok, DocT} ->
+                    DocT;
+                Error ->
+                    erlang:error(
+                        {assertion_failed, [
+                            {module, ?MODULE},
+                            {line, ?LINE},
+                            {reason,
+                                lists:concat([
+                                    "Error opening document '",
+                                    ?b2l(DocId),
+                                    "' from target: ",
+                                    couch_util:to_list(Error)
+                                ])}
+                        ]}
+                    )
             end,
-            {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
-            ok = couch_db:close(SourceDb),
-            ok = couch_db:close(TargetDb)
-        end)}.
+        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+        ?assertEqual(DocJson, DocTargetJson),
+        {ok, Acc}
+    end,
+    {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
 
-reopen_db({remote, Db}) ->
-    reopen_db(Db);
 reopen_db(DbName) when is_binary(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     ok = couch_db:close(Db),
@@ -357,21 +298,17 @@ wait_for_compaction(Type, Db) ->
             )
     end.
 
-replicate({remote, Db}, Target) ->
-    replicate(db_url(Db), Target);
-replicate(Source, {remote, Db}) ->
-    replicate(Source, db_url(Db));
 replicate(Source, Target) ->
     RepObject =
         {[
-            {<<"source">>, Source},
-            {<<"target">>, Target},
+            {<<"source">>, db_url(Source)},
+            {<<"target">>, db_url(Target)},
             {<<"continuous">>, true}
         ]},
     {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
     ok = couch_replicator_scheduler:add_job(Rep),
     couch_replicator_scheduler:reschedule(),
-    Pid = get_pid(Rep#rep.id),
+    Pid = couch_replicator_test_helper:get_pid(Rep#rep.id),
     {ok, Pid, Rep#rep.id}.
 
 wait_writer(Pid, NumDocs) ->
@@ -521,3 +458,14 @@ maybe_pause(Parent, Counter) ->
     after 0 ->
         ok
     end.
+
+db_url(DbName) ->
+    % Note we're returning the backend (local) URL here
+    iolist_to_binary([
+        "http://",
+        config:get("httpd", "bind_address", "127.0.0.1"),
+        ":",
+        integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/",
+        DbName
+    ]).