You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by ch...@apache.org on 2014/08/11 22:23:19 UTC
[43/50] [abbrv] Move files out of test/couchdb into top level test/
folder
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_file_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_file_tests.erl b/test/couch_file_tests.erl
new file mode 100644
index 0000000..ad13383
--- /dev/null
+++ b/test/couch_file_tests.erl
@@ -0,0 +1,265 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_file_tests).
+
+-include("couch_eunit.hrl").
+
+-define(BLOCK_SIZE, 4096).
+-define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
+-define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
+
+
+setup() ->
+ {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+ Fd.
+
+teardown(Fd) ->
+ ok = couch_file:close(Fd).
+
+
+open_close_test_() ->
+ {
+ "Test for proper file open and close",
+ [
+ should_return_enoent_if_missed(),
+ should_ignore_invalid_flags_with_open(),
+ ?setup(fun should_return_pid_on_file_open/1),
+ should_close_file_properly(),
+ ?setup(fun should_create_empty_new_files/1)
+ ]
+ }.
+
+should_return_enoent_if_missed() ->
+ ?_assertEqual({error, enoent}, couch_file:open("not a real file")).
+
+should_ignore_invalid_flags_with_open() ->
+ ?_assertMatch({ok, _},
+ couch_file:open(?tempfile(), [create, invalid_option])).
+
+should_return_pid_on_file_open(Fd) ->
+ ?_assert(is_pid(Fd)).
+
+should_close_file_properly() ->
+ {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+ ok = couch_file:close(Fd),
+ ?_assert(true).
+
+should_create_empty_new_files(Fd) ->
+ ?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
+
+
+read_write_test_() ->
+ {
+ "Common file read/write tests",
+ ?foreach([
+ fun should_increase_file_size_on_write/1,
+ fun should_return_current_file_size_on_write/1,
+ fun should_write_and_read_term/1,
+ fun should_write_and_read_binary/1,
+ fun should_write_and_read_large_binary/1,
+ fun should_return_term_as_binary_for_reading_binary/1,
+ fun should_read_term_written_as_binary/1,
+ fun should_read_iolist/1,
+ fun should_fsync/1,
+ fun should_not_read_beyond_eof/1,
+ fun should_truncate/1
+ ])
+ }.
+
+
+should_increase_file_size_on_write(Fd) ->
+ {ok, 0, _} = couch_file:append_term(Fd, foo),
+ {ok, Size} = couch_file:bytes(Fd),
+ ?_assert(Size > 0).
+
+should_return_current_file_size_on_write(Fd) ->
+ {ok, 0, _} = couch_file:append_term(Fd, foo),
+ {ok, Size} = couch_file:bytes(Fd),
+ ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
+
+should_write_and_read_term(Fd) ->
+ {ok, Pos, _} = couch_file:append_term(Fd, foo),
+ ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_binary(Fd) ->
+ {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
+ ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
+
+should_return_term_as_binary_for_reading_binary(Fd) ->
+ {ok, Pos, _} = couch_file:append_term(Fd, foo),
+ Foo = couch_compress:compress(foo, snappy),
+ ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
+
+should_read_term_written_as_binary(Fd) ->
+ {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
+ ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_large_binary(Fd) ->
+ BigBin = list_to_binary(lists:duplicate(100000, 0)),
+ {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
+ ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
+
+should_read_iolist(Fd) ->
+ %% append_binary == append_iolist?
+ %% Possible bug in pread_iolist or iolist() -> append_binary
+ {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
+ {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
+ ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
+
+should_fsync(Fd) ->
+    {"How does one test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
+
+should_not_read_beyond_eof(_) ->
+ {"No idea how to test reading beyond EOF", ?_assert(true)}.
+
+should_truncate(Fd) ->
+ {ok, 0, _} = couch_file:append_term(Fd, foo),
+ {ok, Size} = couch_file:bytes(Fd),
+ BigBin = list_to_binary(lists:duplicate(100000, 0)),
+ {ok, _, _} = couch_file:append_binary(Fd, BigBin),
+ ok = couch_file:truncate(Fd, Size),
+ ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
+
+
+header_test_() ->
+ {
+ "File header read/write tests",
+ [
+ ?foreach([
+ fun should_write_and_read_atom_header/1,
+ fun should_write_and_read_tuple_header/1,
+ fun should_write_and_read_second_header/1,
+ fun should_truncate_second_header/1,
+ fun should_produce_same_file_size_on_rewrite/1,
+ fun should_save_headers_larger_than_block_size/1
+ ]),
+ should_recover_header_marker_corruption(),
+ should_recover_header_size_corruption(),
+ should_recover_header_md5sig_corruption(),
+ should_recover_header_data_corruption()
+ ]
+ }.
+
+
+should_write_and_read_atom_header(Fd) ->
+ ok = couch_file:write_header(Fd, hello),
+ ?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
+
+should_write_and_read_tuple_header(Fd) ->
+ ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+ ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
+
+should_write_and_read_second_header(Fd) ->
+ ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+ ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+ ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)).
+
+should_truncate_second_header(Fd) ->
+ ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+ {ok, Size} = couch_file:bytes(Fd),
+ ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+ ok = couch_file:truncate(Fd, Size),
+ ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)).
+
+should_produce_same_file_size_on_rewrite(Fd) ->
+ ok = couch_file:write_header(Fd, {<<"some_data">>, 32}),
+ {ok, Size1} = couch_file:bytes(Fd),
+ ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+ {ok, Size2} = couch_file:bytes(Fd),
+ ok = couch_file:truncate(Fd, Size1),
+ ok = couch_file:write_header(Fd, [foo, <<"more">>]),
+ ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)).
+
+should_save_headers_larger_than_block_size(Fd) ->
+ Header = erlang:make_tuple(5000, <<"CouchDB">>),
+ couch_file:write_header(Fd, Header),
+ {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
+
+
+should_recover_header_marker_corruption() ->
+ ?_assertMatch(
+ ok,
+ check_header_recovery(
+ fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+ file:pwrite(RawFd, HeaderPos, <<0>>),
+ ?assertMatch(Expect, couch_file:read_header(CouchFd))
+ end)
+ ).
+
+should_recover_header_size_corruption() ->
+ ?_assertMatch(
+ ok,
+ check_header_recovery(
+ fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+ % +1 for 0x1 byte marker
+ file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
+ ?assertMatch(Expect, couch_file:read_header(CouchFd))
+ end)
+ ).
+
+should_recover_header_md5sig_corruption() ->
+ ?_assertMatch(
+ ok,
+ check_header_recovery(
+ fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+ % +5 = +1 for 0x1 byte and +4 for term size.
+ file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
+ ?assertMatch(Expect, couch_file:read_header(CouchFd))
+ end)
+ ).
+
+should_recover_header_data_corruption() ->
+ ?_assertMatch(
+ ok,
+ check_header_recovery(
+ fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ ?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
+ % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
+ file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
+ ?assertMatch(Expect, couch_file:read_header(CouchFd))
+ end)
+ ).
+
+
+check_header_recovery(CheckFun) ->
+ Path = ?tempfile(),
+ {ok, Fd} = couch_file:open(Path, [create, overwrite]),
+ {ok, RawFd} = file:open(Path, [read, write, raw, binary]),
+
+ {ok, _} = write_random_data(Fd),
+ ExpectHeader = {some_atom, <<"a binary">>, 756},
+ ok = couch_file:write_header(Fd, ExpectHeader),
+
+ {ok, HeaderPos} = write_random_data(Fd),
+ ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
+
+ CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
+
+ ok = file:close(RawFd),
+ ok = couch_file:close(Fd),
+ ok.
+
+write_random_data(Fd) ->
+ write_random_data(Fd, 100 + random:uniform(1000)).
+
+write_random_data(Fd, 0) ->
+ {ok, Bytes} = couch_file:bytes(Fd),
+ {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
+write_random_data(Fd, N) ->
+ Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
+ Term = lists:nth(random:uniform(4) + 1, Choices),
+ {ok, _, _} = couch_file:append_term(Fd, Term),
+ write_random_data(Fd, N - 1).
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_key_tree_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_key_tree_tests.erl b/test/couch_key_tree_tests.erl
new file mode 100644
index 0000000..753ecc4
--- /dev/null
+++ b/test/couch_key_tree_tests.erl
@@ -0,0 +1,380 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_key_tree_tests).
+
+-include("couch_eunit.hrl").
+
+-define(DEPTH, 10).
+
+
+key_tree_merge_test_()->
+ {
+ "Key tree merge",
+ [
+ should_merge_with_empty_tree(),
+ should_merge_reflexive(),
+ should_merge_prefix_of_a_tree_with_tree(),
+ should_produce_conflict_on_merge_with_unrelated_branch(),
+ should_merge_reflexive_for_child_nodes(),
+ should_merge_tree_to_itself(),
+ should_merge_tree_of_odd_length(),
+ should_merge_tree_with_stem(),
+ should_merge_with_stem_at_deeper_level(),
+ should_merge_with_stem_at_deeper_level_with_deeper_paths(),
+ should_merge_single_tree_with_deeper_stem(),
+ should_merge_tree_with_large_stem(),
+ should_merge_stems(),
+ should_create_conflicts_on_merge(),
+ should_create_no_conflicts_on_merge(),
+ should_ignore_conflicting_branch()
+ ]
+ }.
+
+key_tree_missing_leaves_test_()->
+ {
+ "Missing tree leaves",
+ [
+ should_not_find_missing_leaves(),
+ should_find_missing_leaves()
+ ]
+ }.
+
+key_tree_remove_leaves_test_()->
+ {
+ "Remove tree leaves",
+ [
+ should_have_no_effect_on_removing_no_leaves(),
+ should_have_no_effect_on_removing_non_existant_branch(),
+ should_remove_leaf(),
+ should_produce_empty_tree_on_removing_all_leaves(),
+ should_have_no_effect_on_removing_non_existant_node(),
+ should_produce_empty_tree_on_removing_last_leaf()
+ ]
+ }.
+
+key_tree_get_leaves_test_()->
+ {
+ "Leaves retrieving",
+ [
+ should_extract_subtree(),
+ should_extract_subsubtree(),
+ should_gather_non_existant_leaf(),
+ should_gather_leaf(),
+ shoul_gather_multiple_leaves(),
+ should_retrieve_full_key_path(),
+ should_retrieve_full_key_path_for_node(),
+ should_retrieve_leaves_with_parent_node(),
+ should_retrieve_all_leaves()
+ ]
+ }.
+
+key_tree_leaf_counting_test_()->
+ {
+ "Leaf counting",
+ [
+ should_have_no_leaves_for_empty_tree(),
+ should_have_single_leaf_for_tree_with_single_node(),
+ should_have_two_leaves_for_tree_with_chindler_siblings(),
+ should_not_affect_on_leaf_counting_for_stemmed_tree()
+ ]
+ }.
+
+key_tree_stemming_test_()->
+ {
+ "Stemming",
+ [
+ should_have_no_effect_for_stemming_more_levels_than_exists(),
+ should_return_one_deepest_node(),
+ should_return_two_deepest_nodes()
+ ]
+ }.
+
+
+should_merge_with_empty_tree()->
+ One = {1, {"1","foo",[]}},
+ ?_assertEqual({[One], no_conflicts},
+ couch_key_tree:merge([], One, ?DEPTH)).
+
+should_merge_reflexive()->
+ One = {1, {"1","foo",[]}},
+ ?_assertEqual({[One], no_conflicts},
+ couch_key_tree:merge([One], One, ?DEPTH)).
+
+should_merge_prefix_of_a_tree_with_tree()->
+ One = {1, {"1","foo",[]}},
+ TwoSibs = [{1, {"1","foo",[]}},
+ {1, {"2","foo",[]}}],
+ ?_assertEqual({TwoSibs, no_conflicts},
+ couch_key_tree:merge(TwoSibs, One, ?DEPTH)).
+
+should_produce_conflict_on_merge_with_unrelated_branch()->
+ TwoSibs = [{1, {"1","foo",[]}},
+ {1, {"2","foo",[]}}],
+ Three = {1, {"3","foo",[]}},
+ ThreeSibs = [{1, {"1","foo",[]}},
+ {1, {"2","foo",[]}},
+ {1, {"3","foo",[]}}],
+ ?_assertEqual({ThreeSibs, conflicts},
+ couch_key_tree:merge(TwoSibs, Three, ?DEPTH)).
+
+should_merge_reflexive_for_child_nodes()->
+ TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual({[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], TwoChild, ?DEPTH)).
+
+should_merge_tree_to_itself()->
+ TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", []}]}},
+ ?_assertEqual({[TwoChildSibs], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs], TwoChildSibs, ?DEPTH)).
+
+should_merge_tree_of_odd_length()->
+ TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", []}]}},
+ TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
+ {"1b", "bar", []}]}},
+
+ ?_assertEqual({[TwoChildPlusSibs], no_conflicts},
+ couch_key_tree:merge([TwoChild], TwoChildSibs, ?DEPTH)).
+
+should_merge_tree_with_stem()->
+ Stemmed = {2, {"1a", "bar", []}},
+ TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", []}]}},
+
+ ?_assertEqual({[TwoChildSibs], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+
+should_merge_with_stem_at_deeper_level()->
+ Stemmed = {3, {"1bb", "boo", []}},
+ TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", [{"1bb", "boo", []}]}]}},
+ ?_assertEqual({[TwoChildSibs], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+
+should_merge_with_stem_at_deeper_level_with_deeper_paths()->
+ Stemmed = {3, {"1bb", "boo", []}},
+ StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
+ {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
+ ?_assertEqual({StemmedTwoChildSibs, no_conflicts},
+ couch_key_tree:merge(StemmedTwoChildSibs, Stemmed, ?DEPTH)).
+
+should_merge_single_tree_with_deeper_stem()->
+ Stemmed = {3, {"1aa", "bar", []}},
+ TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual({[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+
+should_merge_tree_with_large_stem()->
+ Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
+ TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual({[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+
+should_merge_stems()->
+ StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
+ StemmedB = {3, {"1aa", "bar", []}},
+ ?_assertEqual({[StemmedA], no_conflicts},
+ couch_key_tree:merge([StemmedA], StemmedB, ?DEPTH)).
+
+should_create_conflicts_on_merge()->
+ OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+ Stemmed = {3, {"1aa", "bar", []}},
+ ?_assertEqual({[OneChild, Stemmed], conflicts},
+ couch_key_tree:merge([OneChild], Stemmed, ?DEPTH)).
+
+should_create_no_conflicts_on_merge()->
+ OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+ Stemmed = {3, {"1aa", "bar", []}},
+ TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual({[TwoChild], no_conflicts},
+ couch_key_tree:merge([OneChild, Stemmed], TwoChild, ?DEPTH)).
+
+should_ignore_conflicting_branch()->
+ %% this test is based on couch-902-test-case2.py
+ %% foo has conflicts from replication at depth two
+ %% foo3 is the current value
+ Foo = {1, {"foo",
+ "val1",
+ [{"foo2","val2",[]},
+ {"foo3", "val3", []}
+ ]}},
+ %% foo now has an attachment added, which leads to foo4 and val4
+ %% off foo3
+ Bar = {1, {"foo",
+ [],
+ [{"foo3",
+ [],
+ [{"foo4","val4",[]}
+ ]}]}},
+ %% this is what the merge returns
+    %% note that it ignores the conflicting branch as there's no match
+ FooBar = {1, {"foo",
+ "val1",
+ [{"foo2","val2",[]},
+ {"foo3", "val3", [{"foo4","val4",[]}]}
+ ]}},
+ {
+ "COUCHDB-902",
+ ?_assertEqual({[FooBar], no_conflicts},
+ couch_key_tree:merge([Foo], Bar, ?DEPTH))
+ }.
+
+should_not_find_missing_leaves()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual([],
+ couch_key_tree:find_missing(TwoChildSibs,
+ [{0,"1"}, {1,"1a"}])).
+
+should_find_missing_leaves()->
+ Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ Stemmed2 = [{2, {"1aa", "bar", []}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ [
+ ?_assertEqual(
+ [{0, "10"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ TwoChildSibs,
+ [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
+ ?_assertEqual(
+ [{0, "1"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ Stemmed1,
+ [{0,"1"}, {1,"1a"}, {100, "x"}])),
+ ?_assertEqual(
+ [{0, "1"}, {1,"1a"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ Stemmed2,
+ [{0,"1"}, {1,"1a"}, {100, "x"}]))
+ ].
+
+should_have_no_effect_on_removing_no_leaves()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({TwoChildSibs, []},
+ couch_key_tree:remove_leafs(TwoChildSibs,
+ [])).
+
+should_have_no_effect_on_removing_non_existant_branch()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({TwoChildSibs, []},
+ couch_key_tree:remove_leafs(TwoChildSibs,
+ [{0, "1"}])).
+
+should_remove_leaf()->
+ OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({OneChild, [{1, "1b"}]},
+ couch_key_tree:remove_leafs(TwoChildSibs,
+ [{1, "1b"}])).
+
+should_produce_empty_tree_on_removing_all_leaves()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
+ couch_key_tree:remove_leafs(TwoChildSibs,
+ [{1, "1b"}, {1, "1a"}])).
+
+should_have_no_effect_on_removing_non_existant_node()->
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ ?_assertEqual({Stemmed, []},
+ couch_key_tree:remove_leafs(Stemmed,
+ [{1, "1a"}])).
+
+should_produce_empty_tree_on_removing_last_leaf()->
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ ?_assertEqual({[], [{2, "1aa"}]},
+ couch_key_tree:remove_leafs(Stemmed,
+ [{2, "1aa"}])).
+
+should_extract_subtree()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
+ couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
+
+should_extract_subsubtree()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
+ couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
+
+should_gather_non_existant_leaf()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[],[{0, "x"}]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
+
+should_gather_leaf()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])).
+
+shoul_gather_multiple_leaves()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])).
+
+should_retrieve_full_key_path()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{0,[{"1", "foo"}]}],[]},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])).
+
+should_retrieve_full_key_path_for_node()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])).
+
+should_retrieve_leaves_with_parent_node()->
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ [
+ ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}],
+ couch_key_tree:get_all_leafs_full(Stemmed)),
+ ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]},
+ {1, [{"1b", "bar"},{"1", "foo"}]}],
+ couch_key_tree:get_all_leafs_full(TwoChildSibs))
+ ].
+
+should_retrieve_all_leaves()->
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ [
+ ?_assertEqual([{"bar", {2, ["1aa","1a"]}}],
+ couch_key_tree:get_all_leafs(Stemmed)),
+ ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
+ couch_key_tree:get_all_leafs(TwoChildSibs))
+ ].
+
+should_have_no_leaves_for_empty_tree()->
+ ?_assertEqual(0, couch_key_tree:count_leafs([])).
+
+should_have_single_leaf_for_tree_with_single_node()->
+ ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])).
+
+should_have_two_leaves_for_tree_with_chindler_siblings()->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
+
+should_not_affect_on_leaf_counting_for_stemmed_tree()->
+ ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
+
+should_have_no_effect_for_stemming_more_levels_than_exists()->
+ TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+ ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
+
+should_return_one_deepest_node()->
+ TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+ Stemmed = [{2, {"1aa", "bar", []}}],
+ ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
+
+should_return_two_deepest_nodes()->
+ TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_passwords_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_passwords_tests.erl b/test/couch_passwords_tests.erl
new file mode 100644
index 0000000..116265c
--- /dev/null
+++ b/test/couch_passwords_tests.erl
@@ -0,0 +1,54 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_passwords_tests).
+
+-include("couch_eunit.hrl").
+
+
+pbkdf2_test_()->
+ {"PBKDF2",
+ [
+ {"Iterations: 1, length: 20",
+ ?_assertEqual(
+ {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
+
+ {"Iterations: 2, length: 20",
+ ?_assertEqual(
+ {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
+
+ {"Iterations: 4096, length: 20",
+ ?_assertEqual(
+ {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
+
+ {"Iterations: 4096, length: 25",
+ ?_assertEqual(
+ {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
+ couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
+ <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
+ 4096, 25))},
+ {"Null byte",
+ ?_assertEqual(
+ {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
+ couch_passwords:pbkdf2(<<"pass\0word">>,
+ <<"sa\0lt">>,
+ 4096, 16))},
+
+        {timeout, 180, %% this may run too long on slow hosts
+ {"Iterations: 16777216 - this may take some time",
+ ?_assertEqual(
+ {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
+ )}}]}.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_ref_counter_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_ref_counter_tests.erl b/test/couch_ref_counter_tests.erl
new file mode 100644
index 0000000..b7e97b4
--- /dev/null
+++ b/test/couch_ref_counter_tests.erl
@@ -0,0 +1,107 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ref_counter_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+ {ok, RefCtr} = couch_ref_counter:start([]),
+ ChildPid = spawn(fun() -> loop() end),
+ {RefCtr, ChildPid}.
+
+teardown({_, ChildPid}) ->
+ erlang:monitor(process, ChildPid),
+ ChildPid ! close,
+ wait().
+
+
+couch_ref_counter_test_() ->
+ {
+ "CouchDB reference counter tests",
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_initialize_with_calling_process_as_referrer/1,
+ fun should_ignore_unknown_pid/1,
+ fun should_increment_counter_on_pid_add/1,
+ fun should_not_increase_counter_on_readding_same_pid/1,
+ fun should_drop_ref_for_double_added_pid/1,
+ fun should_decrement_counter_on_pid_drop/1,
+ fun should_add_after_drop/1,
+ fun should_decrement_counter_on_process_exit/1
+
+ ]
+ }
+ }.
+
+
+should_initialize_with_calling_process_as_referrer({RefCtr, _}) ->
+ ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
+
+should_ignore_unknown_pid({RefCtr, ChildPid}) ->
+ ?_assertEqual(ok, couch_ref_counter:drop(RefCtr, ChildPid)).
+
+should_increment_counter_on_pid_add({RefCtr, ChildPid}) ->
+ couch_ref_counter:add(RefCtr, ChildPid),
+ ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+should_not_increase_counter_on_readding_same_pid({RefCtr, ChildPid}) ->
+ couch_ref_counter:add(RefCtr, ChildPid),
+ couch_ref_counter:add(RefCtr, ChildPid),
+ ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+should_drop_ref_for_double_added_pid({RefCtr, ChildPid}) ->
+ couch_ref_counter:add(RefCtr, ChildPid),
+ couch_ref_counter:add(RefCtr, ChildPid),
+ couch_ref_counter:drop(RefCtr, ChildPid),
+ ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+should_decrement_counter_on_pid_drop({RefCtr, ChildPid}) ->
+ couch_ref_counter:add(RefCtr, ChildPid),
+ couch_ref_counter:drop(RefCtr, ChildPid),
+ ?_assertEqual(1, couch_ref_counter:count(RefCtr)).
+
+should_add_after_drop({RefCtr, ChildPid}) ->
+ couch_ref_counter:add(RefCtr, ChildPid),
+ couch_ref_counter:drop(RefCtr, ChildPid),
+ couch_ref_counter:add(RefCtr, ChildPid),
+ ?_assertEqual(2, couch_ref_counter:count(RefCtr)).
+
+should_decrement_counter_on_process_exit({RefCtr, ChildPid}) ->
+ ?_assertEqual(1,
+ begin
+ couch_ref_counter:add(RefCtr, ChildPid),
+ erlang:monitor(process, ChildPid),
+ ChildPid ! close,
+ wait(),
+ couch_ref_counter:count(RefCtr)
+ end).
+
+
+loop() ->
+ receive
+ close -> ok
+ end.
+
+wait() ->
+ receive
+ {'DOWN', _, _, _, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw(timeout_error)
+ end.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_stats_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_stats_tests.erl b/test/couch_stats_tests.erl
new file mode 100644
index 0000000..d156449
--- /dev/null
+++ b/test/couch_stats_tests.erl
@@ -0,0 +1,412 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stats_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(STATS_CFG_FIXTURE,
+ filename:join([?FIXTURESDIR, "couch_stats_aggregates.cfg"])).
+-define(STATS_INI_FIXTURE,
+ filename:join([?FIXTURESDIR, "couch_stats_aggregates.ini"])).
+-define(TIMEOUT, 1000).
+-define(TIMEWAIT, 500).
+
+
+% Start a bare stats collector; that is all the collector-only cases need.
+setup_collector() ->
+ couch_stats_collector:start(),
+ ok.
+
+% Aggregator cases additionally need couch_config (loaded from the .ini
+% fixture) plus a collector feeding the aggregator (configured from the
+% .cfg fixture). Returns the config pid so teardown can wait on its exit.
+setup_aggregator(_) ->
+ {ok, Pid} = couch_config:start_link([?STATS_INI_FIXTURE]),
+ {ok, _} = couch_stats_collector:start(),
+ {ok, _} = couch_stats_aggregator:start(?STATS_CFG_FIXTURE),
+ Pid.
+
+teardown_collector(_) ->
+ couch_stats_collector:stop(),
+ ok.
+
+% Stop in reverse start order, then block (via monitor) until couch_config
+% is really down so the next test's setup cannot race a stale config.
+teardown_aggregator(_, Pid) ->
+ couch_stats_aggregator:stop(),
+ couch_stats_collector:stop(),
+ erlang:monitor(process, Pid),
+ couch_config:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw({timeout, config_stop})
+ end,
+ ok.
+
+
+% Collector suite: a plain foreach fixture around every test instantiator,
+% so each case runs against a freshly started collector.
+couch_stats_collector_test_() ->
+ {
+ "CouchDB stats collector tests",
+ {
+ foreach,
+ fun setup_collector/0, fun teardown_collector/1,
+ [
+ should_increment_counter(),
+ should_decrement_counter(),
+ should_increment_and_decrement_counter(),
+ should_record_absolute_values(),
+ should_clear_absolute_values(),
+ should_track_process_count(),
+ should_increment_counter_multiple_times_per_pid(),
+ should_decrement_counter_on_process_exit(),
+ should_decrement_for_each_track_process_count_call_on_exit(),
+ should_return_all_counters_and_absolute_values(),
+ should_return_incremental_counters(),
+ should_return_absolute_values()
+ ]
+ }
+ }.
+
+% Aggregator suite: the same case list is run twice via foreachx, once
+% parameterized with 'absolute' and once with 'counter', so every case
+% function dispatches on the stat flavor under test in its first argument.
+couch_stats_aggregator_test_() ->
+ Funs = [
+ fun should_init_empty_aggregate/2,
+ fun should_get_empty_aggregate/2,
+ fun should_change_stats_on_values_add/2,
+ fun should_change_stats_for_all_times_on_values_add/2,
+ fun should_change_stats_on_values_change/2,
+ fun should_change_stats_for_all_times_on_values_change/2,
+ fun should_not_remove_data_after_some_time_for_0_sample/2,
+ fun should_remove_data_after_some_time_for_other_samples/2
+ ],
+ {
+ "CouchDB stats aggregator tests",
+ [
+ {
+ "Absolute values",
+ {
+ foreachx,
+ fun setup_aggregator/1, fun teardown_aggregator/2,
+ [{absolute, Fun} || Fun <- Funs]
+ }
+ },
+ {
+ "Counters",
+ {
+ foreachx,
+ fun setup_aggregator/1, fun teardown_aggregator/2,
+ [{counter, Fun} || Fun <- Funs]
+ }
+ }
+ ]
+ }.
+
+
+% 100 increments of the same counter key read back as 100.
+should_increment_counter() ->
+ ?_assertEqual(100,
+ begin
+ AddCount = fun() -> couch_stats_collector:increment(foo) end,
+ repeat(AddCount, 100),
+ couch_stats_collector:get(foo)
+ end).
+
+% 100 increments minus 33 decrements nets to 67.
+should_decrement_counter() ->
+ ?_assertEqual(67,
+ begin
+ AddCount = fun() -> couch_stats_collector:increment(foo) end,
+ RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+ repeat(AddCount, 100),
+ repeat(RemCount, 33),
+ couch_stats_collector:get(foo)
+ end).
+
+% Interleaved increments/decrements net to exactly zero (110 up, 110 down).
+should_increment_and_decrement_counter() ->
+ ?_assertEqual(0,
+ begin
+ AddCount = fun() -> couch_stats_collector:increment(foo) end,
+ RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+ repeat(AddCount, 100),
+ repeat(RemCount, 25),
+ repeat(AddCount, 10),
+ repeat(RemCount, 5),
+ repeat(RemCount, 80),
+ couch_stats_collector:get(foo)
+ end).
+
+% Recorded absolute values read back as the full sequence 1..15.
+should_record_absolute_values() ->
+ ?_assertEqual(lists:seq(1, 15),
+ begin
+ lists:map(fun(Val) ->
+ couch_stats_collector:record(bar, Val)
+ end, lists:seq(1, 15)),
+ couch_stats_collector:get(bar)
+ end).
+
+% clear/1 wipes an absolute stat; a cleared key reads back as nil.
+should_clear_absolute_values() ->
+ ?_assertEqual(nil,
+ begin
+ lists:map(fun(Val) ->
+ couch_stats_collector:record(bar, Val)
+ end, lists:seq(1, 15)),
+ couch_stats_collector:clear(bar),
+ couch_stats_collector:get(bar)
+ end).
+
+% One track_process_count/1 call from one process yields a count of 1.
+should_track_process_count() ->
+ ?_assertMatch({_, 1}, spawn_and_count(1)).
+
+% The same pid may contribute several counts, one per track call.
+should_increment_counter_multiple_times_per_pid() ->
+ ?_assertMatch({_, 3}, spawn_and_count(3)).
+
+% Killing a tracked process removes its contribution (3 total - 1 = 2).
+should_decrement_counter_on_process_exit() ->
+ ?_assertEqual(2,
+ begin
+ {Pid, 1} = spawn_and_count(1),
+ spawn_and_count(2),
+ RefMon = erlang:monitor(process, Pid),
+ Pid ! sepuku,
+ receive
+ {'DOWN', RefMon, _, _, _} -> ok
+ after ?TIMEOUT ->
+ throw(timeout)
+ end,
+ % sleep for a while to let the collector handle the updates;
+ % it notices the process death asynchronously, not instantly
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:get(hoopla)
+ end).
+
+% A dying pid gives back one count per track call it made (6 - 4 = 2).
+should_decrement_for_each_track_process_count_call_on_exit() ->
+ ?_assertEqual(2,
+ begin
+ {_, 2} = spawn_and_count(2),
+ {Pid, 6} = spawn_and_count(4),
+ RefMon = erlang:monitor(process, Pid),
+ Pid ! sepuku,
+ receive
+ {'DOWN', RefMon, _, _, _} -> ok
+ after ?TIMEOUT ->
+ throw(timeout)
+ end,
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:get(hoopla)
+ end).
+
+% all/0 mixes counters and absolutes; note the absolute value list comes
+% back most-recent-first ([1.0, 0.0] after recording 0.0 then 1.0).
+should_return_all_counters_and_absolute_values() ->
+ ?_assertEqual([{bar,[1.0,0.0]}, {foo,1}],
+ begin
+ couch_stats_collector:record(bar, 0.0),
+ couch_stats_collector:record(bar, 1.0),
+ couch_stats_collector:increment(foo),
+ lists:sort(couch_stats_collector:all())
+ end).
+
+% all(incremental) filters out absolute stats, leaving only counters.
+should_return_incremental_counters() ->
+ ?_assertEqual([{foo,1}],
+ begin
+ couch_stats_collector:record(bar, 0.0),
+ couch_stats_collector:record(bar, 1.0),
+ couch_stats_collector:increment(foo),
+ lists:sort(couch_stats_collector:all(incremental))
+ end).
+
+% all(absolute) filters out counters. "Z" here is just the list [90],
+% i.e. the single recorded value 90 rendered as a charlist.
+should_return_absolute_values() ->
+ ?_assertEqual([{bar,[1.0,0.0]}, {zing, "Z"}],
+ begin
+ couch_stats_collector:record(bar, 0.0),
+ couch_stats_collector:record(bar, 1.0),
+ couch_stats_collector:record(zing, 90),
+ couch_stats_collector:increment(foo),
+ lists:sort(couch_stats_collector:all(absolute))
+ end).
+
+% Before any sample is collected every aggregate field is null.
+% NOTE(review): the key here is the atom '11' while collection below uses
+% the integer 11 — presumably couch_config parses ini keys into atoms;
+% confirm against the fixture files.
+should_init_empty_aggregate(absolute, _) ->
+ {Aggs} = couch_stats_aggregator:all(),
+ ?_assertEqual({[{'11', make_agg(<<"randomosity">>,
+ null, null, null, null, null)}]},
+ couch_util:get_value(number, Aggs));
+should_init_empty_aggregate(counter, _) ->
+ {Aggs} = couch_stats_aggregator:all(),
+ ?_assertEqual({[{stuff, make_agg(<<"yay description">>,
+ null, null, null, null, null)}]},
+ couch_util:get_value(testing, Aggs)).
+
+% get_json/1 of a stat that was never sampled is the all-null aggregate.
+should_get_empty_aggregate(absolute, _) ->
+ ?_assertEqual(make_agg(<<"randomosity">>, null, null, null, null, null),
+ couch_stats_aggregator:get_json({number, '11'}));
+should_get_empty_aggregate(counter, _) ->
+ ?_assertEqual(make_agg(<<"yay description">>, null, null, null, null, null),
+ couch_stats_aggregator:get_json({testing, stuff})).
+
+% One collected sample: recording 0..10 yields a single sample whose
+% value is the mean 5.0; with only one sample stddev is still null.
+should_change_stats_on_values_add(absolute, _) ->
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
+ couch_stats_aggregator:get_json({number, 11}));
+% For counters the single sample is the whole accumulated count, 100.
+should_change_stats_on_values_add(counter, _) ->
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
+ couch_stats_aggregator:get_json({testing, stuff})).
+
+% Same expectations as above but queried via the time-windowed
+% get_json/2 (window 1) — a fresh sample shows up in every window.
+should_change_stats_for_all_times_on_values_add(absolute, _) ->
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ ?_assertEqual(make_agg(<<"randomosity">>, 5.0, 5.0, null, 5.0, 5.0),
+ couch_stats_aggregator:get_json({number, 11}, 1));
+should_change_stats_for_all_times_on_values_add(counter, _) ->
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ ?_assertEqual(make_agg(<<"yay description">>, 100.0, 100.0, null, 100, 100),
+ couch_stats_aggregator:get_json({testing, stuff}, 1)).
+
+% A second sample (mean 5.0, then 15) updates sum/mean/stddev/min/max:
+% sum 20.0, mean 10.0, stddev ~7.071.
+should_change_stats_on_values_change(absolute, _) ->
+ ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+ begin
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:record({number, 11}, 15),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({number, 11})
+ end);
+% For counters the second sample is 0 (no increments in between):
+% samples 100 and 0 give mean 50.0, stddev ~70.711.
+should_change_stats_on_values_change(counter, _) ->
+ ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
+ begin
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({testing, stuff})
+ end).
+
+% Same two-sample scenario as above, queried through window 1.
+should_change_stats_for_all_times_on_values_change(absolute, _) ->
+ ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+ begin
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:record({number, 11}, 15),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({number, 11}, 1)
+ end);
+should_change_stats_for_all_times_on_values_change(counter, _) ->
+ ?_assertEqual(make_agg(<<"yay description">>, 100.0, 50.0, 70.711, 0, 100),
+ begin
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({testing, stuff}, 1)
+ end).
+
+% The default (0/"all time") window never expires samples: after three
+% collections the first sample still contributes to the aggregate.
+should_not_remove_data_after_some_time_for_0_sample(absolute, _) ->
+ ?_assertEqual(make_agg(<<"randomosity">>, 20.0, 10.0, 7.071, 5.0, 15.0),
+ begin
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:record({number, 11}, 15),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({number, 11})
+ end);
+% Counter flavor: samples 100, 0, 0 -> mean ~33.333, stddev ~57.735.
+should_not_remove_data_after_some_time_for_0_sample(counter, _) ->
+ ?_assertEqual(make_agg(<<"yay description">>, 100.0, 33.333, 57.735, 0, 100),
+ begin
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({testing, stuff})
+ end).
+
+% A finite window (1) expires old samples: only the last recorded value
+% (15) survives, so min = max = mean = 15.0 and stddev is null again.
+should_remove_data_after_some_time_for_other_samples(absolute, _) ->
+ ?_assertEqual(make_agg(<<"randomosity">>, 15.0, 15.0, null, 15.0, 15.0),
+ begin
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_collector:record({number, 11}, 15),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({number, 11}, 1)
+ end);
+% Counter flavor: the original 100 has aged out, everything reads zero.
+should_remove_data_after_some_time_for_other_samples(counter, _) ->
+ ?_assertEqual(make_agg(<<"yay description">>, 0, 0.0, 0.0, 0, 0),
+ begin
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ timer:sleep(?TIMEWAIT),
+ couch_stats_aggregator:collect_sample(),
+ couch_stats_aggregator:get_json({testing, stuff}, 1)
+ end).
+
+
+% Spawn a process that calls track_process_count(hoopla) N times, wait for
+% it to report readiness, then return {Pid, CurrentCount}. The process
+% stays alive (blocked on 'sepuku') so its counts remain until killed.
+spawn_and_count(N) ->
+ Self = self(),
+ Pid = spawn(fun() ->
+ lists:foreach(
+ fun(_) ->
+ couch_stats_collector:track_process_count(hoopla)
+ end, lists:seq(1,N)),
+ Self ! reporting,
+ receive
+ sepuku -> ok
+ end
+ end),
+ receive reporting -> ok end,
+ {Pid, couch_stats_collector:get(hoopla)}.
+
+% Call Fun() Count times, ignoring its result.
+repeat(_, 0) ->
+ ok;
+repeat(Fun, Count) ->
+ Fun(),
+ repeat(Fun, Count-1).
+
+% Build the EJSON aggregate structure the aggregator returns. Note that
+% 'current' deliberately mirrors 'sum' — both take the Sum argument.
+make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
+ {[
+ {description, Desc},
+ {current, Sum},
+ {sum, Sum},
+ {mean, Mean},
+ {stddev, StdDev},
+ {min, Min},
+ {max, Max}
+ ]}.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_stream_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_stream_tests.erl b/test/couch_stream_tests.erl
new file mode 100644
index 0000000..335a2fe
--- /dev/null
+++ b/test/couch_stream_tests.erl
@@ -0,0 +1,100 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stream_tests).
+
+-include("couch_eunit.hrl").
+
+
+% Open a stream over a fresh temp file; each case gets {Fd, Stream}.
+setup() ->
+ {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+ {ok, Stream} = couch_stream:open(Fd),
+ {Fd, Stream}.
+
+% Only the file needs closing; streams are finished by the cases via
+% couch_stream:close/1 where relevant.
+teardown({Fd, _}) ->
+ ok = couch_file:close(Fd).
+
+
+% All stream cases share the same foreach fixture: a fresh temp file and
+% open stream per case.
+stream_test_() ->
+ {
+ "CouchDB stream tests",
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_write/1,
+ fun should_write_consecutive/1,
+ fun should_write_empty_binary/1,
+ fun should_return_file_pointers_on_close/1,
+ fun should_return_stream_size_on_close/1,
+ fun should_return_valid_pointers/1,
+ fun should_recall_last_pointer_position/1,
+ fun should_stream_more_with_4K_chunk_size/1
+ ]
+ }
+ }.
+
+
+% A plain write returns ok.
+should_write({_, Stream}) ->
+ ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
+
+% Consecutive writes on the same stream also return ok.
+should_write_consecutive({_, Stream}) ->
+ couch_stream:write(Stream, <<"food">>),
+ ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)).
+
+% Writing the empty binary is accepted.
+should_write_empty_binary({_, Stream}) ->
+ ?_assertEqual(ok, couch_stream:write(Stream, <<>>)).
+
+% close/1 returns {Pointers, Length, _, _, _}; 8 bytes written from the
+% start of an empty file yield the single pointer {0, 8}.
+should_return_file_pointers_on_close({_, Stream}) ->
+ couch_stream:write(Stream, <<"foodfoob">>),
+ {Ptrs, _, _, _, _} = couch_stream:close(Stream),
+ ?_assertEqual([{0, 8}], Ptrs).
+
+% The second element of the close tuple is the total stream length.
+should_return_stream_size_on_close({_, Stream}) ->
+ couch_stream:write(Stream, <<"foodfoob">>),
+ {_, Length, _, _, _} = couch_stream:close(Stream),
+ ?_assertEqual(8, Length).
+
+% Reading the returned pointers back via read_all/2 reproduces the data.
+should_return_valid_pointers({Fd, Stream}) ->
+ couch_stream:write(Stream, <<"foodfoob">>),
+ {Ptrs, _, _, _, _} = couch_stream:close(Stream),
+ ?_assertEqual(<<"foodfoob">>, read_all(Fd, Ptrs)).
+
+% A second stream on the same Fd must start at the current end of file:
+% its single pointer's offset equals couch_file:bytes/1 taken after the
+% first stream was closed.
+should_recall_last_pointer_position({Fd, Stream}) ->
+ couch_stream:write(Stream, <<"foodfoob">>),
+ {_, _, _, _, _} = couch_stream:close(Stream),
+ {ok, ExpPtr} = couch_file:bytes(Fd),
+ {ok, Stream2} = couch_stream:open(Fd),
+ ZeroBits = <<0:(8 * 10)>>,
+ OneBits = <<1:(8 * 10)>>,
+ ok = couch_stream:write(Stream2, OneBits),
+ ok = couch_stream:write(Stream2, ZeroBits),
+ {Ptrs, 20, _, _, _} = couch_stream:close(Stream2),
+ [{ExpPtr, 20}] = Ptrs,
+ AllBits = iolist_to_binary([OneBits, ZeroBits]),
+ ?_assertEqual(AllBits, read_all(Fd, Ptrs)).
+
+% 1024 writes of 5 bytes = 5120 bytes total; with a 4096-byte buffer the
+% stream flushes in two chunks. The exact pointer values (4100-byte first
+% chunk, second chunk starting at 4106) presumably reflect couch_file's
+% on-disk block/term framing — confirm against couch_file if they change.
+should_stream_more_with_4K_chunk_size({Fd, _}) ->
+ {ok, Stream} = couch_stream:open(Fd, [{buffer_size, 4096}]),
+ lists:foldl(
+ fun(_, Acc) ->
+ Data = <<"a1b2c">>,
+ couch_stream:write(Stream, Data),
+ [Data | Acc]
+ end, [], lists:seq(1, 1024)),
+ ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120, _, _, _},
+ couch_stream:close(Stream)).
+
+
+% Fold over PosList and binarize the result.
+% NOTE(review): [Bin, Acc] nests each new chunk in FRONT of the previous
+% accumulator, which reverses multi-pointer order; every current caller
+% passes a single pointer so it doesn't matter — confirm before reuse.
+read_all(Fd, PosList) ->
+ Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
+ iolist_to_binary(Data).
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_task_status_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_task_status_tests.erl b/test/couch_task_status_tests.erl
new file mode 100644
index 0000000..f71ad2b
--- /dev/null
+++ b/test/couch_task_status_tests.erl
@@ -0,0 +1,225 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_task_status_tests).
+
+-include("couch_eunit.hrl").
+-include_lib("couchdb/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+% Start couch_task_status and spawn a separate "task" process running
+% loop/0; tasks are per-pid, so all status calls are proxied through it.
+setup() ->
+ {ok, TaskStatusPid} = couch_task_status:start_link(),
+ TaskUpdaterPid = spawn(fun() -> loop() end),
+ {TaskStatusPid, TaskUpdaterPid}.
+
+% Stop the status server and wait (via monitor) for it to be fully down
+% before the next case's setup runs.
+teardown({TaskStatusPid, _}) ->
+ erlang:monitor(process, TaskStatusPid),
+ couch_task_status:stop(),
+ receive
+ {'DOWN', _, _, TaskStatusPid, _} ->
+ ok
+ after ?TIMEOUT ->
+ throw(timeout_error)
+ end.
+
+
+% Every case gets a fresh status server plus a fresh updater process.
+couch_task_status_test_() ->
+ {
+ "CouchDB task status updates",
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_register_task/1,
+ fun should_set_task_startup_time/1,
+ fun should_have_update_time_as_startup_before_any_progress/1,
+ fun should_set_task_type/1,
+ fun should_not_register_multiple_tasks_for_same_pid/1,
+ fun should_set_task_progress/1,
+ fun should_update_task_progress/1,
+ fun should_update_time_changes_on_task_progress/1,
+ fun should_control_update_frequency/1,
+ fun should_reset_control_update_frequency/1,
+ fun should_track_multiple_tasks/1,
+ fun should_finish_task/1
+
+ ]
+ }
+ }.
+
+
+% Each case drives the updater process (Pid) via call/3, which forwards
+% add/update/update_frequency/done to couch_task_status from that pid.
+
+% Registering one task makes it visible in couch_task_status:all().
+should_register_task({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?_assertEqual(1, length(couch_task_status:all())).
+
+% A freshly added task carries an integer started_on timestamp.
+should_set_task_startup_time({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?_assert(is_integer(get_task_prop(Pid, started_on))).
+
+% Until the first progress update, updated_on equals started_on.
+should_have_update_time_as_startup_before_any_progress({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ StartTime = get_task_prop(Pid, started_on),
+ ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)).
+
+% The type given at add time is stored verbatim.
+should_set_task_type({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?_assertEqual(replication, get_task_prop(Pid, type)).
+
+% A second add_task from the same pid is rejected.
+should_not_register_multiple_tasks_for_same_pid({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?_assertEqual({add_task_error, already_registered},
+ call(Pid, add, [{type, compaction}, {progress, 0}])).
+
+% Initial progress is the value supplied at add time.
+should_set_task_progress({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?_assertEqual(0, get_task_prop(Pid, progress)).
+
+% update/1 replaces the stored progress.
+should_update_task_progress({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ call(Pid, update, [{progress, 25}]),
+ ?_assertEqual(25, get_task_prop(Pid, progress)).
+
+% After sleeping past the timestamp granularity, an update must move
+% updated_on beyond started_on.
+should_update_time_changes_on_task_progress({_, Pid}) ->
+ ?_assert(
+ begin
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ok = timer:sleep(1000), % sleep a while to get a later update time
+ call(Pid, update, [{progress, 25}]),
+ get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
+ end).
+
+% With a 500 ms min update frequency, the second rapid update (77) is
+% throttled away, so the stored progress stays at 66.
+should_control_update_frequency({_, Pid}) ->
+ ?_assertEqual(66,
+ begin
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ call(Pid, update, [{progress, 50}]),
+ call(Pid, update_frequency, 500),
+ call(Pid, update, [{progress, 66}]),
+ call(Pid, update, [{progress, 77}]),
+ get_task_prop(Pid, progress)
+ end).
+
+% Resetting the frequency to 0 lifts the throttle: the final update (87)
+% goes through even immediately after the throttled one.
+should_reset_control_update_frequency({_, Pid}) ->
+ ?_assertEqual(87,
+ begin
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ call(Pid, update, [{progress, 50}]),
+ call(Pid, update_frequency, 500),
+ call(Pid, update, [{progress, 66}]),
+ call(Pid, update, [{progress, 77}]),
+ call(Pid, update_frequency, 0),
+ call(Pid, update, [{progress, 87}]),
+ get_task_prop(Pid, progress)
+ end).
+
+% Multi-task scenario lives in run_multiple_tasks/0 (eager asserts).
+should_track_multiple_tasks(_) ->
+ ?_assert(run_multiple_tasks()).
+
+% NOTE(review): the first ?assertEqual here is eager — it runs when the
+% test instantiator is evaluated, not inside the returned lazy test.
+% Works under this foreach fixture, but worth knowing before refactoring.
+should_finish_task({_, Pid}) ->
+ ok = call(Pid, add, [{type, replication}, {progress, 0}]),
+ ?assertEqual(1, length(couch_task_status:all())),
+ ok = call(Pid, done),
+ ?_assertEqual(0, length(couch_task_status:all())).
+
+
+% Drive three independent task processes and assert that registration,
+% per-pid progress updates, and done-removal all stay isolated per task.
+% Returns true so the caller can wrap it in ?_assert.
+run_multiple_tasks() ->
+ Pid1 = spawn(fun() -> loop() end),
+ Pid2 = spawn(fun() -> loop() end),
+ Pid3 = spawn(fun() -> loop() end),
+ call(Pid1, add, [{type, replication}, {progress, 0}]),
+ call(Pid2, add, [{type, compaction}, {progress, 0}]),
+ call(Pid3, add, [{type, indexer}, {progress, 0}]),
+
+ ?assertEqual(3, length(couch_task_status:all())),
+ ?assertEqual(replication, get_task_prop(Pid1, type)),
+ ?assertEqual(compaction, get_task_prop(Pid2, type)),
+ ?assertEqual(indexer, get_task_prop(Pid3, type)),
+
+ call(Pid2, update, [{progress, 33}]),
+ call(Pid3, update, [{progress, 42}]),
+ call(Pid1, update, [{progress, 11}]),
+ ?assertEqual(42, get_task_prop(Pid3, progress)),
+ call(Pid1, update, [{progress, 72}]),
+ ?assertEqual(72, get_task_prop(Pid1, progress)),
+ ?assertEqual(33, get_task_prop(Pid2, progress)),
+
+ call(Pid1, done),
+ ?assertEqual(2, length(couch_task_status:all())),
+ call(Pid3, done),
+ ?assertEqual(1, length(couch_task_status:all())),
+ call(Pid2, done),
+ ?assertEqual(0, length(couch_task_status:all())),
+
+ true.
+
+
+% Updater-process body: forwards each command to couch_task_status from
+% its own pid (tasks are keyed by caller pid) and replies {ok, self(),
+% Result}. 'done' ends the loop, letting the process exit normally.
+loop() ->
+ receive
+ {add, Props, From} ->
+ Resp = couch_task_status:add_task(Props),
+ From ! {ok, self(), Resp},
+ loop();
+ {update, Props, From} ->
+ Resp = couch_task_status:update(Props),
+ From ! {ok, self(), Resp},
+ loop();
+ {update_frequency, Msecs, From} ->
+ Resp = couch_task_status:set_update_frequency(Msecs),
+ From ! {ok, self(), Resp},
+ loop();
+ {done, From} ->
+ From ! {ok, self(), ok}
+ end.
+
+% Synchronous command without an argument (currently only 'done').
+call(Pid, Command) ->
+ Pid ! {Command, self()},
+ wait(Pid).
+
+% Synchronous command with an argument; returns the updater's result.
+call(Pid, Command, Arg) ->
+ Pid ! {Command, Arg, self()},
+ wait(Pid).
+
+% Await the tagged reply from a specific updater pid, or fail on timeout.
+wait(Pid) ->
+ receive
+ {ok, Pid, Msg} ->
+ Msg
+ after ?TIMEOUT ->
+ throw(timeout_error)
+ end.
+
+% Look up property Prop of the task registered by Pid. Tasks expose
+% their owner as a binary pid string, hence the pid_to_list conversion.
+% NOTE(review): hd(Element) crashes with badarg-style errors if no task
+% matches; the friendly assertion_failed below only covers a missing
+% property on an existing task.
+get_task_prop(Pid, Prop) ->
+ From = list_to_binary(pid_to_list(Pid)),
+ Element = lists:foldl(
+ fun(PropList, Acc) ->
+ case couch_util:get_value(pid, PropList) of
+ From ->
+ [PropList | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [], couch_task_status:all()
+ ),
+ case couch_util:get_value(Prop, hd(Element), nil) of
+ nil ->
+ erlang:error({assertion_failed,
+ [{module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Could not get property '"
+ ++ couch_util:to_list(Prop)
+ ++ "' for task "
+ ++ pid_to_list(Pid)}]});
+ Value ->
+ Value
+ end.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_util_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_util_tests.erl b/test/couch_util_tests.erl
new file mode 100644
index 0000000..8e24e72
--- /dev/null
+++ b/test/couch_util_tests.erl
@@ -0,0 +1,136 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util_tests).
+
+-include("couch_eunit.hrl").
+
+
+% Collation needs the ICU driver, which is loaded by the full server
+% supervision tree rather than directly here.
+setup() ->
+ %% We cannot start the driver from here since it becomes bound to the
+ %% eunit master process and the next couch_server_sup:start_link call
+ %% will fail because the server couldn't load the driver while it is
+ %% already loaded.
+ %%
+ %% On the other hand, we cannot unload the driver here due to
+ %% {error, not_loaded_by_this_process} while it is. Any ideas are welcome.
+ %%
+ couch_server_sup:start_link(?CONFIG_CHAIN),
+ %% couch_config:start_link(?CONFIG_CHAIN),
+ %% {ok, _} = couch_drv:start_link(),
+ ok.
+
+% Mirror of setup/0: stop the whole tree; the driver stays loaded (see
+% the note above about unloading).
+teardown(_) ->
+ couch_server_sup:stop(),
+ %% couch_config:stop(),
+ %% erl_ddll:unload_driver(couch_icu_driver),
+ ok.
+
+
+% Only the collation cases need the server (ICU driver); the remaining
+% plain *_test functions in this module run without any fixture.
+collation_test_() ->
+ {
+ "Collation tests",
+ [
+ {
+ setup,
+ fun setup/0, fun teardown/1,
+ [
+ should_collate_ascii(),
+ should_collate_non_ascii()
+ ]
+ }
+ ]
+ }.
+
+% ICU collation: "foo" sorts after "bar" -> 1.
+should_collate_ascii() ->
+ ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
+
+% Case-insensitive-style ICU ordering: "A" sorts before "aa" -> -1.
+should_collate_non_ascii() ->
+ ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
+
+% to_existing_atom/1 accepts atoms, binaries and lists alike.
+to_existed_atom_test() ->
+ ?assert(couch_util:to_existing_atom(true)),
+ ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
+ ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
+
+% implode/2 interleaves the separator's characters: "&" is [38], so the
+% result is the flat list [1, 38, 2, 38, 3].
+implode_test() ->
+ ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
+
+% trim/1 strips leading and trailing whitespace (spaces, tabs, newlines).
+trim_test() ->
+ lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
+ [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
+
+% abs_pathname/1 resolves a relative path against the current directory.
+abs_pathname_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
+
+% should_flush/0 checks process memory against a threshold; see the
+% inline note on why both assertions expect 'false'.
+flush_test() ->
+ ?assertNot(couch_util:should_flush()),
+ AcquireMem = fun() ->
+ _IntsToAGazillion = lists:seq(1, 200000),
+ _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
+ lists:seq(1, 500000)),
+ _BigBin = list_to_binary(_LotsOfData),
+
+ %% Allocating 200K tuples puts us above the memory threshold.
+ %% Originally, there should be:
+ %% ?assertNot(should_flush())
+ %% however, unlike for the etap test, GC collects all allocated bits
+ %% making this condition fail. So we have to invert the condition
+ %% since GC works, cleans the memory and everything is fine.
+ ?assertNot(couch_util:should_flush())
+ end,
+ AcquireMem(),
+
+ %% Checking to flush invokes GC
+ ?assertNot(couch_util:should_flush()).
+
+% verify/2 compares credentials (lists or binaries) for equality;
+% mismatched or nil inputs must be rejected.
+verify_test() ->
+ ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
+ ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
+ ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
+ ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
+ ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
+
+% Table-driven cases for couch_util:find_in_binary/2. Expected results:
+% {exact, Pos} — the whole needle occurs at offset Pos;
+% {partial, Pos} — a proper prefix of the needle ends the haystack at Pos;
+% not_found — neither. Each row becomes its own named lazy test.
+find_in_binary_test_() ->
+ Cases = [
+ {<<"foo">>, <<"foobar">>, {exact, 0}},
+ {<<"foo">>, <<"foofoo">>, {exact, 0}},
+ {<<"foo">>, <<"barfoo">>, {exact, 3}},
+ {<<"foo">>, <<"barfo">>, {partial, 3}},
+ {<<"f">>, <<"fobarfff">>, {exact, 0}},
+ {<<"f">>, <<"obarfff">>, {exact, 4}},
+ {<<"f">>, <<"obarggf">>, {exact, 6}},
+ {<<"f">>, <<"f">>, {exact, 0}},
+ {<<"f">>, <<"g">>, not_found},
+ {<<"foo">>, <<"f">>, {partial, 0}},
+ {<<"foo">>, <<"g">>, not_found},
+ {<<"foo">>, <<"">>, not_found},
+ {<<"fofo">>, <<"foofo">>, {partial, 3}},
+ {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
+ {<<"foo">>, <<"gfobarf">>, {partial, 6}},
+ {<<"foo">>, <<"gfobar">>, not_found},
+ {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
+ {<<"ggg">>, <<"ggg">>, {exact, 0}},
+ {<<"ggg">>, <<"ggggg">>, {exact, 0}},
+ {<<"ggg">>, <<"bggg">>, {exact, 1}},
+ {<<"ggg">>, <<"bbgg">>, {partial, 2}},
+ {<<"ggg">>, <<"bbbg">>, {partial, 3}},
+ {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
+ {<<"ggg">>, <<"bgbggb">>, not_found}
+ ],
+ lists:map(
+ fun({Needle, Haystack, Result}) ->
+ Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
+ [Needle, Haystack])),
+ {Msg, ?_assertMatch(Result,
+ couch_util:find_in_binary(Needle, Haystack))}
+ end, Cases).
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_uuids_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_uuids_tests.erl b/test/couch_uuids_tests.erl
new file mode 100644
index 0000000..ea1d034
--- /dev/null
+++ b/test/couch_uuids_tests.erl
@@ -0,0 +1,161 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_uuids_tests).
+
+-include("couch_eunit.hrl").
+
+-define(TIMEOUT_S, 20).
+
+
+% Start couch_config + couch_uuids; the monitor set here is what
+% teardown/1 later waits on to confirm config shutdown.
+setup() ->
+ {ok, Pid} = couch_config:start_link(?CONFIG_CHAIN),
+ erlang:monitor(process, Pid),
+ couch_uuids:start(),
+ Pid.
+
+% Variant used by foreachx: applies the per-algorithm [{Option, Value}]
+% pairs to the "uuids" config section (without persisting them).
+setup(Opts) ->
+ Pid = setup(),
+ lists:foreach(
+ fun({Option, Value}) ->
+ couch_config:set("uuids", Option, Value, false)
+ end, Opts),
+ Pid.
+
+% Stop both services, then wait for the 'DOWN' from the monitor that
+% setup/0 installed on the config process.
+teardown(Pid) ->
+ couch_uuids:stop(),
+ couch_config:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} -> ok
+ after
+ 1000 -> throw({timeout_error, config_stop})
+ end.
+
+% foreachx-arity adapter; the options argument is irrelevant at teardown.
+teardown(_, Pid) ->
+ teardown(Pid).
+
+
+% Default algorithm: only uniqueness can be asserted.
+default_test_() ->
+ {
+ "Default UUID algorithm",
+ {
+ setup,
+ fun setup/0, fun teardown/1,
+ fun should_be_unique/1
+ }
+ }.
+
+% sequential: unique, monotonically increasing, and the prefix rolls
+% over after a bounded number of generations.
+sequential_test_() ->
+ Opts = [{"algorithm", "sequential"}],
+ Cases = [
+ fun should_be_unique/2,
+ fun should_increment_monotonically/2,
+ fun should_rollover/2
+ ],
+ {
+ "UUID algorithm: sequential",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Opts, Fun} || Fun <- Cases]
+ }
+ }.
+
+% utc_random: unique and time-ordered, but no rollover guarantee.
+utc_test_() ->
+ Opts = [{"algorithm", "utc_random"}],
+ Cases = [
+ fun should_be_unique/2,
+ fun should_increment_monotonically/2
+ ],
+ {
+ "UUID algorithm: utc_random",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Opts, Fun} || Fun <- Cases]
+ }
+ }.
+
+% utc_id: additionally preserves the configured "bozo" suffix.
+utc_id_suffix_test_() ->
+ Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}],
+ Cases = [
+ fun should_be_unique/2,
+ fun should_increment_monotonically/2,
+ fun should_preserve_suffix/2
+ ],
+ {
+ "UUID algorithm: utc_id",
+ {
+ foreachx,
+ fun setup/1, fun teardown/2,
+ [{Opts, Fun} || Fun <- Cases]
+ }
+ }.
+
+
+% Generate 10000 UUIDs and assert no duplicates. The arity-1/2 variants
+% adapt the same check to the setup/foreachx fixture shapes.
+should_be_unique() ->
+ %% this one may really run for too long on slow hosts
+ {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}.
+should_be_unique(_) ->
+ should_be_unique().
+should_be_unique(_, _) ->
+ should_be_unique().
+
+% Consecutive UUIDs must sort in generation order.
+should_increment_monotonically(_, _) ->
+ ?_assert(couch_uuids:new() < couch_uuids:new()).
+
+% Sequential UUIDs share a 26-char prefix until the counter rolls over;
+% the roll is expected somewhere between 5000 and 11000 generations.
+should_rollover(_, _) ->
+ ?_test(begin
+ UUID = binary_to_list(couch_uuids:new()),
+ Prefix = element(1, lists:split(26, UUID)),
+ N = gen_until_pref_change(Prefix, 0),
+ ?assert(N >= 5000 andalso N =< 11000)
+ end).
+
+% utc_id UUIDs must keep the configured suffix across 10000 generations.
+should_preserve_suffix(_, _) ->
+ ?_test(begin
+ UUID = binary_to_list(couch_uuids:new()),
+ Suffix = get_suffix(UUID),
+ ?assert(test_same_suffix(10000, Suffix))
+ end).
+
+
+% Generate N more UUIDs, asserting each is absent from those seen so far.
+test_unique(0, _) ->
+ true;
+test_unique(N, UUIDs) ->
+ UUID = couch_uuids:new(),
+ ?assertNot(lists:member(UUID, UUIDs)),
+ test_unique(N - 1, [UUID| UUIDs]).
+
+% First 26 characters of a sequential UUID (its non-counter part).
+get_prefix(UUID) ->
+ element(1, lists:split(26, binary_to_list(UUID))).
+
+% Count generations until the prefix changes; cap at 8251 so a broken
+% rollover cannot loop forever (the cap itself fails the 5000..11000
+% range assertion in should_rollover).
+gen_until_pref_change(_, Count) when Count > 8251 ->
+ Count;
+gen_until_pref_change(Prefix, N) ->
+ case get_prefix(couch_uuids:new()) of
+ Prefix -> gen_until_pref_change(Prefix, N + 1);
+ _ -> N
+ end.
+
+% Everything after the first 14 characters of a utc_id UUID.
+get_suffix(UUID) when is_binary(UUID) ->
+ get_suffix(binary_to_list(UUID));
+get_suffix(UUID) ->
+ element(2, lists:split(14, UUID)).
+
+% True iff N freshly generated UUIDs all carry the same Suffix.
+test_same_suffix(0, _) ->
+ true;
+test_same_suffix(N, Suffix) ->
+ case get_suffix(couch_uuids:new()) of
+ Suffix -> test_same_suffix(N - 1, Suffix);
+ _ -> false
+ end.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661443fb/test/couch_work_queue_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_work_queue_tests.erl b/test/couch_work_queue_tests.erl
new file mode 100644
index 0000000..8a463b5
--- /dev/null
+++ b/test/couch_work_queue_tests.erl
@@ -0,0 +1,393 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_work_queue_tests).
+
+-include("couch_eunit.hrl").
+
+-define(TIMEOUT, 100).
+
+
+%% Creates a work queue with the given options and attaches one producer
+%% and one consumer process to it.
+setup(Opts) ->
+    {ok, Queue} = couch_work_queue:new(Opts),
+    ProducerPid = spawn_producer(Queue),
+    ConsumerPid = spawn_consumer(Queue),
+    {Queue, ProducerPid, ConsumerPid}.
+
+%% Queue bounded by item count only.
+setup_max_items() ->
+    setup([{max_items, 3}]).
+
+%% Queue bounded by total byte size only.
+setup_max_size() ->
+    setup([{max_size, 160}]).
+
+%% Queue bounded by both byte size and item count.
+setup_max_items_and_size() ->
+    setup([{max_size, 160}, {max_items, 3}]).
+
+%% Same bounds as setup_max_items_and_size/0 but opened with
+%% multi_workers, and with three consumers attached instead of one.
+setup_multi_workers() ->
+    Opts = [{max_size, 160}, {max_items, 3}, {multi_workers, true}],
+    {Q, Producer, FirstConsumer} = setup(Opts),
+    ExtraConsumers = [spawn_consumer(Q) || _ <- [2, 3]],
+    {Q, Producer, [FirstConsumer | ExtraConsumers]}.
+
+%% Fixture cleanup: unblocks everything by draining the queue, then
+%% closes the queue and stops the producer and every consumer.
+teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
+    % consume all to unblock and let producer/consumer stop without timeout
+    [consume(Consumer, all) || Consumer <- Consumers],
+
+    ok = close_queue(Q),
+    ok = stop(Producer, "producer"),
+    % every stop/2 must have returned ok; a badmatch here means some
+    % consumer did not acknowledge the stop within ?TIMEOUT
+    R = [stop(Consumer, "consumer") || Consumer <- Consumers],
+    R = [ok || _ <- Consumers],
+    ok;
+% single-consumer fixtures are normalized to the list form above
+teardown({Q, Producer, Consumer}) ->
+    teardown({Q, Producer, [Consumer]}).
+
+
+%% eunit generator: one producer, one consumer, three queue capacity
+%% configurations. Each configuration runs its capacity-specific cases
+%% plus the shared common cases, with a fresh fixture per test.
+single_consumer_test_() ->
+    Configs = [
+        {"Queue with 3 max items",
+         fun setup_max_items/0,
+         single_consumer_max_item_count()},
+        {"Queue with max size of 160 bytes",
+         fun setup_max_size/0,
+         single_consumer_max_size()},
+        {"Queue with max size of 160 bytes and 3 max items",
+         fun setup_max_items_and_size/0,
+         single_consumer_max_items_and_size()}
+    ],
+    {
+        "Single producer and consumer",
+        [{Desc, {foreach, Setup, fun teardown/1, Cases ++ common_cases()}}
+         || {Desc, Setup, Cases} <- Configs]
+    }.
+
+%% eunit generator: one producer feeding three concurrent consumers on a
+%% bounded multi_workers queue; runs the shared cases plus the
+%% multi-consumer specific ones.
+multiple_consumers_test_() ->
+    Fixture = {
+        foreach,
+        fun setup_multi_workers/0, fun teardown/1,
+        common_cases() ++ multiple_consumers()
+    },
+    {
+        "Single producer and multiple consumers",
+        [{"Queue with max size of 160 bytes and 3 max items", Fixture}]
+    }.
+
+%% Cases that must hold for every queue configuration.
+common_cases()->
+    [
+        fun should_block_consumer_on_dequeue_from_empty_queue/1,
+        fun should_consume_right_item/1,
+        fun should_timeout_on_close_non_empty_queue/1,
+        fun should_not_block_producer_for_non_empty_queue_after_close/1,
+        fun should_be_closed/1
+    ].
+
+%% Cases specific to a queue bounded by item count.
+single_consumer_max_item_count()->
+    [
+        fun should_have_no_items_for_new_queue/1,
+        fun should_block_producer_on_full_queue_count/1,
+        fun should_receive_first_queued_item/1,
+        fun should_consume_multiple_items/1,
+        fun should_consume_all/1
+    ].
+
+%% Cases specific to a queue bounded by byte size.
+single_consumer_max_size()->
+    [
+        fun should_have_zero_size_for_new_queue/1,
+        fun should_block_producer_on_full_queue_size/1,
+        fun should_increase_queue_size_on_produce/1,
+        fun should_receive_first_queued_item/1,
+        fun should_consume_multiple_items/1,
+        fun should_consume_all/1
+    ].
+
+%% A doubly-bounded queue must satisfy both case sets.
+single_consumer_max_items_and_size() ->
+    single_consumer_max_item_count() ++ single_consumer_max_size().
+
+%% Extra cases for the multi-consumer fixture.
+multiple_consumers() ->
+    [
+        fun should_have_zero_size_for_new_queue/1,
+        fun should_have_no_items_for_new_queue/1,
+        fun should_increase_queue_size_on_produce/1
+    ].
+
+
+%% A freshly created queue reports zero queued items.
+should_have_no_items_for_new_queue({Q, _, _}) ->
+    ?_assertEqual(0, couch_work_queue:item_count(Q)).
+
+%% A freshly created queue reports zero queued bytes.
+should_have_zero_size_for_new_queue({Q, _, _}) ->
+    ?_assertEqual(0, couch_work_queue:size(Q)).
+
+%% Dequeuing from an empty queue must block: every consumer stops
+%% answering pings because it is stuck in couch_work_queue:dequeue/2.
+should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers})
+        when is_list(Consumers) ->
+    [consume(C, 2) || C <- Consumers],
+    Pongs = [ping(C) || C <- Consumers],
+    %% expect one timeout per consumer, however many the fixture created
+    %% (previously hard-coded to exactly three)
+    ?_assertEqual([timeout || _ <- Consumers], Pongs);
+should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) ->
+    consume(Consumer, 1),
+    Pong = ping(Consumer),
+    ?_assertEqual(timeout, Pong).
+
+%% FIFO delivery. Multi-consumer variant: three blocked consumers and
+%% three produced items; the queue's own count/size stay 0 after each
+%% produce, i.e. items go straight to the waiting consumers. Note the
+%% final assertion effectively only checks that every ping returns ok
+%% (each consumer's dequeue completed) — the Item halves of the pairs
+%% come from the same zip on both sides, so they are equal by
+%% construction. Single-consumer variant: a consumer asking for one item
+%% must get the first item produced, not the second.
+should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
+    [consume(C, 3) || C <- Consumers],
+
+    Item1 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    Item2 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    Item3 = produce(Producer, 10),
+    ok = ping(Producer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+    ?assertEqual(0, couch_work_queue:size(Q)),
+
+    R = [{ping(C), Item}
+         || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])],
+
+    ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
+should_consume_right_item({_, Producer, Consumer}) ->
+    consume(Consumer, 1),
+    Item = produce(Producer, 10),
+    produce(Producer, 20),
+    ok = ping(Producer),
+    ok = ping(Consumer),
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item], Items).
+
+%% Producing items must grow both item_count/1 and size/1. Each produce
+%% is followed by a ping so the producer's queue/2 call is known to have
+%% completed before the counters are read (produce/2 alone returns
+%% before the insert finishes — the second read previously raced).
+should_increase_queue_size_on_produce({Q, Producer, _}) ->
+    produce(Producer, 50),
+    ok = ping(Producer),
+    Count1 = couch_work_queue:item_count(Q),
+    Size1 = couch_work_queue:size(Q),
+
+    produce(Producer, 10),
+    ok = ping(Producer),
+    Count2 = couch_work_queue:item_count(Q),
+    Size2 = couch_work_queue:size(Q),
+
+    %% expected value first, matching the eunit convention used elsewhere
+    %% in this file
+    ?_assertEqual([{1, 50}, {2, 60}], [{Count1, Size1}, {Count2, Size2}]).
+
+%% Filling the queue to max_items (3) must block the producer: after the
+%% third produce the item count is 3 and the producer no longer answers
+%% pings (stuck in couch_work_queue:queue/2).
+%% NOTE(review): the item_count reads happen before the ping sync here,
+%% unlike the size variant below — confirm this cannot race.
+should_block_producer_on_full_queue_count({Q, Producer, _}) ->
+    produce(Producer, 10),
+    ?assertEqual(1, couch_work_queue:item_count(Q)),
+    ok = ping(Producer),
+
+    produce(Producer, 15),
+    ?assertEqual(2, couch_work_queue:item_count(Q)),
+    ok = ping(Producer),
+
+    produce(Producer, 20),
+    ?assertEqual(3, couch_work_queue:item_count(Q)),
+    Pong = ping(Producer),
+
+    ?_assertEqual(timeout, Pong).
+
+%% Exceeding max_size (160 bytes) must block the producer: the second,
+%% 110-byte item is still accepted (total 210 bytes as asserted) but the
+%% producer then stops answering pings, i.e. it is blocked in queue/2.
+should_block_producer_on_full_queue_size({Q, Producer, _}) ->
+    produce(Producer, 100),
+    ok = ping(Producer),
+    ?assertEqual(1, couch_work_queue:item_count(Q)),
+    ?assertEqual(100, couch_work_queue:size(Q)),
+
+    produce(Producer, 110),
+    Pong = ping(Producer),
+    ?assertEqual(2, couch_work_queue:item_count(Q)),
+    ?assertEqual(210, couch_work_queue:size(Q)),
+
+    ?_assertEqual(timeout, Pong).
+
+%% A single dequeue of two items must return both queued items, in the
+%% order they were produced.
+should_consume_multiple_items({_, Producer, Consumer}) ->
+    Item1 = produce(Producer, 10),
+    ok = ping(Producer),
+
+    Item2 = produce(Producer, 15),
+    ok = ping(Producer),
+
+    consume(Consumer, 2),
+
+    %% the {last_item, _} request is queued behind {consume, 2} in the
+    %% consumer's mailbox, so the dequeue completes before we read it
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item1, Item2], Items).
+
+%% A consumer blocked on a large dequeue (100 items) must be woken by
+%% the first item that arrives and receive exactly that item; the queue
+%% is left empty.
+should_receive_first_queued_item({Q, Producer, Consumer}) ->
+    consume(Consumer, 100),
+    %% the consumer must be blocked while the queue is empty
+    timeout = ping(Consumer),
+
+    Item = produce(Producer, 11),
+    ok = ping(Producer),
+
+    %% the consumer answered, so its dequeue returned
+    ok = ping(Consumer),
+    ?assertEqual(0, couch_work_queue:item_count(Q)),
+
+    {ok, Items} = last_consumer_items(Consumer),
+    ?_assertEqual([Item], Items).
+
+%% A dequeue with the atom all must return every queued item in
+%% production order.
+should_consume_all({_, Producer, Consumer}) ->
+    Produced = [produce(Producer, Size) || Size <- [10, 15, 20]],
+
+    consume(Consumer, all),
+
+    {ok, Consumed} = last_consumer_items(Consumer),
+    ?_assertEqual(Produced, Consumed).
+
+%% Closing a queue that still holds an item must not terminate the queue
+%% process, so close_queue/1 reports timeout.
+should_timeout_on_close_non_empty_queue({Q, Producer, _}) ->
+    produce(Producer, 1),
+    %% NOTE(review): unlike most cases there is no ping(Producer) here,
+    %% so the close could in principle race the producer's queue/2 call —
+    %% confirm this cannot make the test flaky.
+    Status = close_queue(Q),
+
+    ?_assertEqual(timeout, Status).
+
+%% After close/1 the producer must not be blocked even though its item
+%% was never consumed; the queue still reports that one remaining item.
+should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) ->
+    produce(Producer, 1),
+    close_queue(Q),
+    Pong = ping(Producer),
+    Size = couch_work_queue:size(Q),
+    Count = couch_work_queue:item_count(Q),
+
+    ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}).
+
+%% Once an empty queue is closed (its process has exited), dequeue,
+%% item_count and size must all report closed.
+should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
+    ok = close_queue(Q),
+
+    [consume(C, 1) || C <- Consumers],
+
+    LastConsumerItems = [last_consumer_items(C) || C <- Consumers],
+    ItemsCount = couch_work_queue:item_count(Q),
+    Size = couch_work_queue:size(Q),
+
+    %% expect one closed per consumer, however many the fixture created
+    %% (previously hard-coded to exactly three)
+    ?_assertEqual({[closed || _ <- Consumers], closed, closed},
+                  {LastConsumerItems, ItemsCount, Size});
+should_be_closed({Q, _, Consumer}) ->
+    ok = close_queue(Q),
+
+    consume(Consumer, 1),
+
+    LastConsumerItems = last_consumer_items(Consumer),
+    ItemsCount = couch_work_queue:item_count(Q),
+    Size = couch_work_queue:size(Q),
+
+    ?_assertEqual({closed, closed, closed},
+                  {LastConsumerItems, ItemsCount, Size}).
+
+
+%% Closes the queue and waits up to ?TIMEOUT for its process to exit.
+%% Returns ok when the process died (the queue was empty) and timeout
+%% otherwise.
+close_queue(Q) ->
+    ok = couch_work_queue:close(Q),
+    MonRef = erlang:monitor(process, Q),
+    receive
+        {'DOWN', MonRef, process, Q, _Reason} -> ok
+    after ?TIMEOUT ->
+        %% [flush] removes any 'DOWN' message that raced the timeout, so
+        %% it cannot linger in the test process mailbox
+        erlang:demonitor(MonRef, [flush]),
+        timeout
+    end.
+
+%% Spawns a consumer process attached to queue Q that reports back to
+%% the calling (test) process.
+spawn_consumer(Q) ->
+    Parent = self(),
+    spawn(fun() -> consumer_loop(Parent, Q, nil) end).
+
+%% Consumer process loop. Answers ping/stop/last_item requests from the
+%% test process and performs a blocking dequeue on {consume, N} (N may
+%% be the atom all; note this request carries no ref and is never
+%% acknowledged). The latest dequeue result — {ok, Items} or closed —
+%% is kept as PrevItem and reported via {last_item, Ref}.
+consumer_loop(Parent, Q, PrevItem) ->
+    receive
+        {stop, Ref} ->
+            Parent ! {ok, Ref};
+        {ping, Ref} ->
+            Parent ! {pong, Ref},
+            consumer_loop(Parent, Q, PrevItem);
+        {last_item, Ref} ->
+            Parent ! {item, Ref, PrevItem},
+            consumer_loop(Parent, Q, PrevItem);
+        {consume, N} ->
+            %% may block until N items are available or the queue closes
+            Result = couch_work_queue:dequeue(Q, N),
+            consumer_loop(Parent, Q, Result)
+    end.
+
+%% Spawns a producer process attached to queue Q that reports back to
+%% the calling (test) process.
+spawn_producer(Q) ->
+    Parent = self(),
+    spawn(fun() -> producer_loop(Parent, Q) end).
+
+%% Producer process loop. On {produce, Ref, Size} it generates a random
+%% binary of Size bytes, reports it to the test process *before* queuing
+%% it — so the subsequent queue/2 call may block on a full queue, which
+%% is exactly what several tests rely on. Also answers ping and stop.
+producer_loop(Parent, Q) ->
+    receive
+        {stop, Ref} ->
+            Parent ! {ok, Ref};
+        {ping, Ref} ->
+            Parent ! {pong, Ref},
+            producer_loop(Parent, Q);
+        {produce, Ref, Size} ->
+            %% crypto:rand_bytes/1 is deprecated and removed in modern
+            %% OTP; strong_rand_bytes/1 is the supported equivalent
+            Item = crypto:strong_rand_bytes(Size),
+            Parent ! {item, Ref, Item},
+            ok = couch_work_queue:queue(Q, Item),
+            producer_loop(Parent, Q)
+    end.
+
+%% Fire-and-forget dequeue request (N items or the atom all); the
+%% consumer sends no ack — use ping/1 or last_consumer_items/1 to
+%% observe the effect.
+consume(Consumer, N) ->
+    Consumer ! {consume, N}.
+
+%% Fetches the consumer's most recent dequeue result; returns timeout if
+%% the consumer does not reply within ?TIMEOUT (e.g. it is still blocked
+%% inside dequeue).
+last_consumer_items(Consumer) ->
+    Ref = make_ref(),
+    Consumer ! {last_item, Ref},
+    receive
+        {item, Ref, Items} ->
+            Items
+    after ?TIMEOUT ->
+        timeout
+    end.
+
+%% Asks the producer to queue a random item of Size bytes and returns
+%% the generated item. The producer reports the item before its queue/2
+%% call, so returning here does NOT mean the insert completed — follow
+%% up with ping/1 to synchronize. Errors out if the producer does not
+%% respond within ?TIMEOUT.
+produce(Producer, Size) ->
+    Ref = make_ref(),
+    Producer ! {produce, Ref, Size},
+    receive
+        {item, Ref, Item} ->
+            Item
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed,
+                      [{module, ?MODULE},
+                       {line, ?LINE},
+                       {reason, "Timeout asking producer to produce an item"}]})
+    end.
+
+%% Round-trips a {ping, Token}/{pong, Token} pair with Pid. Returns ok
+%% on a reply within ?TIMEOUT, timeout when the process is busy or
+%% blocked (the tests use this to detect blocked producers/consumers).
+ping(Pid) ->
+    Token = make_ref(),
+    Pid ! {ping, Token},
+    receive
+        {pong, Token} -> ok
+    after ?TIMEOUT -> timeout
+    end.
+
+%% Orders Pid to exit and waits for the {ok, Ref} acknowledgement.
+%% Logs and returns timeout when the process (identified by Name in the
+%% message) does not stop in time, e.g. because it is blocked on the
+%% queue.
+stop(Pid, Name) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+        {ok, Ref} -> ok
+    after ?TIMEOUT ->
+        ?debugMsg("Timeout stopping " ++ Name),
+        timeout
+    end.