You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by va...@apache.org on 2019/03/26 23:24:05 UTC
[couchdb] branch reshard updated: [fixup|mem3_reshard_test] retry
and target reset tests
This is an automated email from the ASF dual-hosted git repository.
vatamane pushed a commit to branch reshard
in repository https://gitbox.apache.org/repos/asf/couchdb.git
The following commit(s) were added to refs/heads/reshard by this push:
new b26b71b [fixup|mem3_reshard_test] retry and target reset tests
b26b71b is described below
commit b26b71bbc357a7dbbd9fb039d8b7cc2f8a3994ca
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Tue Mar 26 19:23:32 2019 -0400
[fixup|mem3_reshard_test] retry and target reset tests
---
src/mem3/test/mem3_reshard_test.erl | 46 ++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)
diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/mem3_reshard_test.erl
index 64000ef..8417363 100644
--- a/src/mem3/test/mem3_reshard_test.erl
+++ b/src/mem3/test/mem3_reshard_test.erl
@@ -25,12 +25,14 @@ setup() ->
create_db(Db1, [{q, 1}, {n, 1}]),
PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]),
+ config:set("reshard", "retry_interval_sec", "0", _Persist=false),
#{db1 => Db1, db2 => Db2}.
teardown(#{} = Dbs) ->
mem3_reshard:reset_state(),
maps:map(fun(_, Db) -> delete_db(Db) end, Dbs),
+ config:delete("reshard", "retry_interval_sec", _Persist=false),
meck:unload().
@@ -57,7 +59,9 @@ mem3_reshard_db_test_() ->
fun indices_are_built/1,
fun split_partitioned_db/1,
fun split_twice/1,
- fun couch_events_are_emitted/1
+ fun couch_events_are_emitted/1,
+ fun retries_work/1,
+ fun target_reset_in_initial_copy/1
]
}
}
@@ -372,6 +376,46 @@ couch_events_are_emitted(#{db1 := Db}) ->
end).
+retries_work(#{db1 := Db}) ->
+ ?_test(begin
+ meck:expect(couch_db_split, split, fun(_, _, _) ->
+ error(kapow)
+ end),
+
+ [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+
+ wait_state(JobId, failed),
+ ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
+ end).
+
+
+target_reset_in_initial_copy(#{db1 := Db}) ->
+ ?_test(begin
+ [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
+ Job = #job{
+ source = Src,
+ target = [#shard{name= <<"t1">>}, #shard{name = <<"t2">>}],
+ job_state = running,
+ split_state = initial_copy
+ },
+ BogusParent = spawn(fun() -> receive {ack, _, _} -> ok end end),
+ put('$ancestors', [BogusParent]), % make proc_lib:ack not blow up
+ meck:expect(mem3_reshard, checkpoint, 2, ok),
+ meck:expect(couch_db_split, cleanup_target, 2, ok),
+ meck:expect(couch_server, exists, fun
+ (<<"t1">>) -> true;
+ (<<"t2">>) -> true;
+ (DbName) -> meck:passthrough([DbName])
+ end),
+ JobPid = spawn(fun() -> mem3_reshard_job:init(Job) end),
+ meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
+ exit(JobPid, kill),
+ exit(BogusParent, kill),
+ ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
+ end).
+
+
intercept_state(State) ->
TestPid = self(),
meck:new(mem3_reshard_job, [passthrough]),