Posted to commits@couchdb.apache.org by rn...@apache.org on 2018/08/29 14:55:39 UTC

[couchdb] 08/13: mem3 tests for partitioned databases

This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 2dff1017fdbf92edc05e562318bddeac29e949d2
Author: Garren Smith <ga...@gmail.com>
AuthorDate: Tue Aug 7 12:15:14 2018 +0200

    mem3 tests for partitioned databases
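
    The tests cover the partition-aware paths in mem3: is_partitioned/1
    for shard lists and for system databases, shard lookup for a document
    id via for_docid/3 and load_shards_from_disk/2, partition-key hashing
    in docid_hash/2, and propagation of the partitioned option through
    build_shards_by_node/2 and build_shards_by_range/2. In a partitioned
    database a document id has the form <<"partition:docid">> (the tests
    use <<"foo:123">>), and only the partition key is hashed, so every
    document in a partition maps to the same shard range.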
---
 src/mem3/src/mem3.erl        | 22 ++++++++++++++
 src/mem3/src/mem3_shards.erl | 68 +++++++++++++++++++++++++++++++++++++++++++-
 src/mem3/src/mem3_util.erl   | 59 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 147 insertions(+), 2 deletions(-)

diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 531f35c..bafb227 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -385,4 +385,26 @@ allowed_nodes_test_() ->
         ]
     }]}.
 
+is_partitioned_false_shards_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_dbs">>), false),
+    meck:unload().
+
+is_partitioned_false_nodes_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_nodes">>), false),
+    meck:unload().
+
+is_partitioned_true_partitioned_db_test() ->
+    Shard = #shard{
+        opts = [{partitioned, true}]
+    },
+    ?assertEqual(true, is_partitioned([Shard])).
+
+is_partitioned_false_partitioned_db_test() ->
+    Shard = #shard{
+        opts = []
+    },
+    ?assertEqual(false, is_partitioned([Shard])).
+
 -endif.
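
The four tests above pin down is_partitioned/1 for both shapes of input
it accepts: a database name and a list of shard records. A minimal
sketch consistent with those tests, assuming the system-database check
goes through the usual mem3 config keys (an illustration, not the
actual mem3 implementation):

    %% Sketch: system databases are never partitioned; for a shard list
    %% the answer comes from the shard options.
    is_partitioned(DbName) when is_binary(DbName) ->
        ShardsDb = list_to_binary(config:get("mem3", "shards_db", "_dbs")),
        NodesDb = list_to_binary(config:get("mem3", "nodes_db", "_nodes")),
        case lists:member(DbName, [ShardsDb, NodesDb]) of
            true -> false;
            false -> is_partitioned(mem3:shards(DbName))
        end;
    is_partitioned([#shard{opts = Opts} | _]) ->
        couch_util:get_value(partitioned, Opts, false) =:= true.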
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 1cd7be6..cfbc32e 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -559,7 +559,9 @@ mem3_shards_test_() ->
             t_writer_does_not_delete_other_writers_for_same_shard(),
             t_spawn_writer_in_load_shards_from_db(),
             t_cache_insert_takes_new_update(),
-            t_cache_insert_ignores_stale_update_and_kills_worker()
+            t_cache_insert_ignores_stale_update_and_kills_worker(),
+            t_load_shards_from_disk_returns_correct_shard_for_partition(),
+            t_for_docid_returns_correct_shard_for_partition()
         ]
     }.
 
@@ -703,6 +705,70 @@ t_cache_insert_ignores_stale_update_and_kills_worker() ->
         ?assertEqual([], ets:tab2list(?OPENERS))
     end).
 
+t_load_shards_from_disk_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648,2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned,true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+        Doc = #doc{body = {[]}},
+        meck:expect(couch_db, open_doc, 3, {ok, Doc}),
+        meck:expect(couch_db, get_update_seq, 1, 1),
+        meck:expect(mem3_util, build_ordered_shards, 2, Shards),
+        meck:expect(mem3_util, ensure_exists, 1, {ok, <<"shard-name">>}),
+        meck:expect(couch_db, close, 1, ok),
+
+        [Shard] = load_shards_from_disk(DbName, DocId),
+
+        ?assert(meck:validate(couch_db)),
+        ?assert(meck:validate(mem3_util)),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(<<"shards/80000000-9fffffff/db1.1533630706">>, ShardName)
+    end).
+
+t_for_docid_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/60000000-7fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [1610612736,2147483647],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned,true}]
+            },
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648,2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned,true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+
+        true = ets:insert(?SHARDS, Shards),
+
+        [Shard] = for_docid(DbName, DocId, [ordered]),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(<<"shards/80000000-9fffffff/db1.1533630706">>, ShardName)
+    end).
+
 
 mock_state(UpdateSeq) ->
     #st{
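
The two partition tests differ in which lookup path they exercise:
t_load_shards_from_disk_returns_correct_shard_for_partition/0 mocks
couch_db and mem3_util to drive the disk-load path, while
t_for_docid_returns_correct_shard_for_partition/0 seeds the ?SHARDS ets
cache directly. Both expect the 80000000-9fffffff copy because, assuming
the partition key is what gets hashed for <<"foo:123">>,
erlang:crc32(<<"foo">>) is 16#8C736521 (2356372769), which falls inside
the range [2147483648, 2684354559] and outside the 60000000-7fffffff
range seeded alongside it.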
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 9620e98..7b8dd16 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -35,7 +35,6 @@ hash(Item) when is_binary(Item) ->
 hash(Item) ->
     erlang:crc32(term_to_binary(Item)).
 
-
 docid_hash(DocId) when is_binary(DocId) ->
     docid_hash(DocId, []).
 
@@ -294,3 +293,61 @@ downcast(#ordered_shard{}=S) ->
       };
 downcast(Shards) when is_list(Shards) ->
     [downcast(Shard) || Shard <- Shards].
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+docid_hash_design_doc_test() ->
+    Id = <<"_design/ddoc">>,
+    Hash = docid_hash(Id),
+    ?assertEqual(erlang:crc32(Id), Hash).
+
+docid_hash_doc_partition_false_test() ->
+    Id = <<"partitionkey:docid">>,
+    IdHash = erlang:crc32(Id),
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, IdHash),
+    Hash = docid_hash(Id, []),
+    ?assertEqual(Hash, IdHash).
+
+docid_hash_doc_partition_true_test() ->
+    Id = <<"partitionkey:doc:id">>,
+    Hash = docid_hash(Id, [{partitioned, true}]),
+    ?assertEqual(Hash, erlang:crc32(<<"partitionkey">>)).
+
+
+add_shards_by_node_adds_partition_prop_test() ->
+    DocProp = [
+        {<<"_id">>, <<"database-name">>},
+        {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+        {<<"shard_suffix">>,".1533550200"},
+        {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+        {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+        {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+        {<<"options">>,{[{partitioned,true}]}}
+    ],
+
+    [ShardRange | _] = build_shards_by_node(<<"database-name">>, DocProp),
+    Opts = ShardRange#shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual({partitioned, true}, Partitioned).
+
+
+add_shards_by_range_adds_partition_prop_test() ->
+    DocProp = [
+        {<<"_id">>, <<"database-name">>},
+        {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+        {<<"shard_suffix">>,".1533550200"},
+        {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+        {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+        {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+        {<<"options">>,{[{partitioned,true}]}}
+    ],
+
+    [ShardRange | _] = build_shards_by_range(<<"database-name">>, DocProp),
+    Opts = ShardRange#ordered_shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual({partitioned, true}, Partitioned).
+
+-endif.
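
Taken together, the docid_hash tests fix the hashing rule: design
documents and unpartitioned databases hash the whole id, while a
partitioned database hashes only the partition key. A minimal
docid_hash/2 consistent with those tests, shown for illustration (the
clause structure is an assumption, not the actual mem3_util code):

    %% Everything before the first colon is the partition key; only
    %% that is hashed when the database is partitioned.
    docid_hash(<<"_design/", _/binary>> = DocId, _Options) ->
        erlang:crc32(DocId);
    docid_hash(DocId, Options) ->
        case lists:keyfind(partitioned, 1, Options) of
            {partitioned, true} ->
                [Partition | _] = binary:split(DocId, <<":">>),
                erlang:crc32(Partition);
            _ ->
                erlang:crc32(DocId)
        end.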