Posted to commits@couchdb.apache.org by rn...@apache.org on 2018/08/20 17:50:37 UTC

[couchdb] branch user-partitioned-dbs-6 updated (6f38cb7 -> e633e59)

This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a change to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


 discard 6f38cb7  optimize _all_docs requests that are bounded within a single partition
 discard 1d6bf96  Implement _all_docs and _find support
 discard d650f30  mem3 tests for partitioned databases
 discard 775098c  implement partitioned views
     new 86abbc3  implement partitioned views
     new efb4202  mem3 tests for partitioned databases
     new 406e518  Implement _all_docs and _find support
     new e633e59  optimize _all_docs requests that are bounded within a single partition

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (6f38cb7)
            \
             N -- N -- N   refs/heads/user-partitioned-dbs-6 (e633e59)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 src/couch_mrview/src/couch_mrview_util.erl | 49 ++++++++++++++----------------
 1 file changed, 22 insertions(+), 27 deletions(-)


[couchdb] 04/04: optimize _all_docs requests that are bounded within a single partition

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e633e59f5b8e50b5c14f09a94bf3f8ba7eff1a5b
Author: Robert Newson <rn...@apache.org>
AuthorDate: Mon Aug 13 22:34:59 2018 +0100

    optimize _all_docs requests that are bounded within a single partition
---
 src/fabric/src/fabric_view_all_docs.erl | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index d515ab8..83c3790 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -21,7 +21,7 @@
 -include_lib("couch_mrview/include/couch_mrview.hrl").
 
 go(DbName, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) ->
-    Shards = mem3:shards(DbName),
+    Shards = shards(DbName, QueryArgs),
     Workers0 = fabric_util:submit_jobs(
             Shards, fabric_rpc, all_docs, [Options, QueryArgs]),
     RexiMon = fabric_util:create_monitors(Workers0),
@@ -136,6 +136,28 @@ go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
         {ok, Resp}
     end.
 
+shards(DbName, Args) ->
+    case couch_mrview_util:get_extra(Args, partitioned) of
+        true ->
+            StartKey = partition(Args#mrargs.start_key),
+            EndKey = partition(Args#mrargs.end_key),
+            case {StartKey, EndKey} of
+                {Same, Same} when Same =/= undefined ->
+                    mem3:shards(DbName, <<Same/binary, ":foo">>);
+                {_, _} ->
+                    mem3:shards(DbName)
+            end;
+        _ ->
+            mem3:shards(DbName)
+    end.
+
+partition(undefined) ->
+    undefined;
+partition(null) ->
+    null;
+partition(Key) when is_binary(Key) ->
+    hd(binary:split(Key, <<":">>)).
+
 handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
     fabric_view:check_down_shards(State, NodeRef);
 

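For context on what this routing change buys: when both start_key and end_key
of an _all_docs request fall inside the same partition (the doc id prefix
before the first ":"), the coordinator asks mem3 only for the shards covering
that partition instead of fanning out to every shard; the ":foo" suffix in
shards/2 merely builds a representative doc id inside the partition so that
mem3:shards/2 can resolve the owning shard range. A minimal, illustrative
sketch of the prefix extraction, using a made-up key:

    %% Erlang shell session, illustrative only (not part of the patch).
    1> hd(binary:split(<<"sensor-readings:2018-08-13">>, <<":">>)).
    <<"sensor-readings">>
    2> hd(binary:split(<<"sensor-readings">>, <<":">>)).   % no ":" present
    <<"sensor-readings">>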

[couchdb] 02/04: mem3 tests for partitioned databases

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit efb4202413679396acb51dc7b6e300f36348d12c
Author: Garren Smith <ga...@gmail.com>
AuthorDate: Tue Aug 7 12:15:14 2018 +0200

    mem3 tests for partitioned databases
---
 src/mem3/src/mem3.erl        | 22 ++++++++++++++
 src/mem3/src/mem3_shards.erl | 68 +++++++++++++++++++++++++++++++++++++++++++-
 src/mem3/src/mem3_util.erl   | 59 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 147 insertions(+), 2 deletions(-)

diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 99c0863..aecca2f 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -389,4 +389,26 @@ allowed_nodes_test_() ->
         ]
     }]}.
 
+is_partitioned_false_shards_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_dbs">>), false),
+    meck:unload().
+
+is_partitioned_false_nodes_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_nodes">>), false),
+    meck:unload().
+
+is_partitioned_true_partitioned_db_test() ->
+    Shard = #shard{
+        opts = [{partitioned, true}]
+    },
+    ?assertEqual(is_partitioned([Shard]), true).
+
+is_partitioned_false_partitioned_db_test() ->
+    Shard = #shard{
+        opts = []
+    },
+    ?assertEqual(is_partitioned([Shard]), false).
+
 -endif.
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 1cd7be6..cfbc32e 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -559,7 +559,9 @@ mem3_shards_test_() ->
             t_writer_does_not_delete_other_writers_for_same_shard(),
             t_spawn_writer_in_load_shards_from_db(),
             t_cache_insert_takes_new_update(),
-            t_cache_insert_ignores_stale_update_and_kills_worker()
+            t_cache_insert_ignores_stale_update_and_kills_worker(),
+            t_load_shards_from_disk_returns_correct_shard_for_partition(),
+            t_for_docid_returns_correct_shard_for_partition()
         ]
     }.
 
@@ -703,6 +705,70 @@ t_cache_insert_ignores_stale_update_and_kills_worker() ->
         ?assertEqual([], ets:tab2list(?OPENERS))
     end).
 
+t_load_shards_from_disk_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648,2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned,true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+        Doc = #doc{body = {[]}},
+        meck:expect(couch_db, open_doc, 3, {ok, Doc}),
+        meck:expect(couch_db, get_update_seq, 1, 1),
+        meck:expect(mem3_util, build_ordered_shards, 2, Shards),
+        meck:expect(mem3_util, ensure_exists, 1, {ok, <<"shard-name">>}),
+        meck:expect(couch_db, close, 1, ok),
+
+        [Shard] = load_shards_from_disk(DbName, DocId),
+
+        meck:validate(couch_db),
+        meck:validate(mem3_util),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+    end).
+
+t_for_docid_returns_correct_shard_for_partition() ->
+        ?_test(begin
+            Shards = [
+                #ordered_shard{
+                    name = <<"shards/60000000-7fffffff/db1.1533630706">>,
+                    node = 'node1@127.0.0.1',
+                    dbname = <<"db1">>,
+                    range = [1610612736,2147483647],
+                    ref = undefined,
+                    order = 1,
+                    opts = [{partitioned,true}]
+                },
+                #ordered_shard{
+                    name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                    node = 'node1@127.0.0.1',
+                    dbname = <<"db1">>,
+                    range = [2147483648,2684354559],
+                    ref = undefined,
+                    order = 1,
+                    opts = [{partitioned,true}]
+                }
+            ],
+            DbName = <<"db1">>,
+            DocId = <<"foo:123">>,
+
+            true = ets:insert(?SHARDS, Shards),
+
+            [Shard] = for_docid(DbName, DocId, [ordered]),
+    
+            ShardName = Shard#ordered_shard.name,
+            ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+        end).
+
 
 mock_state(UpdateSeq) ->
     #st{
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 9620e98..7b8dd16 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -35,7 +35,6 @@ hash(Item) when is_binary(Item) ->
 hash(Item) ->
     erlang:crc32(term_to_binary(Item)).
 
-
 docid_hash(DocId) when is_binary(DocId) ->
     docid_hash(DocId, []).
 
@@ -294,3 +293,61 @@ downcast(#ordered_shard{}=S) ->
       };
 downcast(Shards) when is_list(Shards) ->
     [downcast(Shard) || Shard <- Shards].
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+docid_hash_design_doc_test() ->
+    Id = <<"_design/ddoc">>,
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, erlang:crc32(Id)).
+
+docid_hash_doc_partition_false_test() ->
+    Id = <<"partitionkey:docid">>,
+    IdHash = erlang:crc32(Id),
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, IdHash),
+    Hash = docid_hash(Id, []),
+    ?assertEqual(Hash, IdHash).
+
+docid_hash_doc_partition_true_test() ->
+    Id = <<"partitionkey:doc:id">>,
+    Hash = docid_hash(Id, [{partitioned, true}]),
+    ?assertEqual(Hash, erlang:crc32(<<"partitionkey">>)).
+
+
+add_shards_by_node_adds_partition_prop_test() ->
+    DocProp = [
+    {<<"_id">>, <<"database-name">>},
+    {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+    {<<"shard_suffix">>,".1533550200"},
+    {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+    {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+    {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+    {<<"options">>,{[{partitioned,true}]}}
+   ],
+
+    [ShardRange | _] = build_shards_by_node(<<"database-name">>, DocProp),
+    Opts = ShardRange#shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+    
+add_shards_by_range_adds_partition_prop_test() ->
+    DocProp = [
+    {<<"_id">>, <<"database-name">>},
+    {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+    {<<"shard_suffix">>,".1533550200"},
+    {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+    {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+    {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+    {<<"options">>,{[{partitioned,true}]}}
+   ],
+
+    [ShardRange | _] = build_shards_by_range(<<"database-name">>, DocProp),
+    Opts = ShardRange#ordered_shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+-endif.
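
The hashing behaviour these tests pin down is the heart of partition
placement: with {partitioned, true} only the doc id prefix before the first
":" is hashed, so every document in a partition maps to the same shard range,
while calls without that option (the design-doc and default-options tests)
still hash the full id. A minimal restatement in the style of the tests above,
using a made-up id "foo:123":

    %% Illustrative only; mirrors the mem3_util assertions above.
    ?assertEqual(docid_hash(<<"foo:123">>, [{partitioned, true}]),
                 erlang:crc32(<<"foo">>)),
    ?assertEqual(docid_hash(<<"foo:123">>, []),
                 erlang:crc32(<<"foo:123">>)).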


[couchdb] 03/04: Implement _all_docs and _find support

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 406e5183eb276ef16ad591ddf6302616bcca32ff
Author: Robert Newson <rn...@apache.org>
AuthorDate: Thu Aug 9 13:43:17 2018 +0100

    Implement _all_docs and _find support
    
    Co-authored-by: Garren Smith <ga...@gmail.com>
    Co-authored-by: Robert Newson <rn...@apache.org>
---
 src/chttpd/src/chttpd_db.erl                  |  9 ++--
 src/couch_mrview/src/couch_mrview.erl         |  7 +--
 src/couch_mrview/src/couch_mrview_updater.erl |  4 +-
 src/couch_mrview/src/couch_mrview_util.erl    | 75 ++++++++++++++++++++++-----
 src/fabric/src/fabric.erl                     |  7 ++-
 src/fabric/src/fabric_view.erl                |  2 +-
 src/fabric/src/fabric_view_all_docs.erl       |  2 +-
 src/mango/src/mango_cursor_view.erl           | 23 ++++++--
 src/mango/src/mango_opts.erl                  |  6 +++
 src/mem3/src/mem3.erl                         |  5 +-
 10 files changed, 112 insertions(+), 28 deletions(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 57d85e1..83d0489 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -684,12 +684,15 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
 all_docs_view(Req, Db, Keys, OP) ->
     Args0 = couch_mrview_http:parse_params(Req, Keys),
     Args1 = Args0#mrargs{view_type=map},
-    Args2 = couch_mrview_util:validate_args(Args1),
-    Args3 = set_namespace(OP, Args2),
+    DbPartitioned = mem3:is_partitioned(couch_db:name(Db)),
+    Args2 = couch_mrview_util:set_extra(Args1, partitioned, DbPartitioned),
+    Args3 = couch_mrview_util:set_extra(Args2, style, all_docs),
+    Args4 = couch_mrview_util:validate_args(Args3),
+    Args5 = set_namespace(OP, Args4),
     Options = [{user_ctx, Req#httpd.user_ctx}],
     Max = chttpd:chunked_response_buffer_size(),
     VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    {ok, Resp} = fabric:all_docs(Db, Options, fun couch_mrview_http:view_cb/2, VAcc, Args3),
+    {ok, Resp} = fabric:all_docs(Db, Options, fun couch_mrview_http:view_cb/2, VAcc, Args5),
     {ok, Resp#vacc.resp}.
 
 db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index 09945f5..f5963e7 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -228,12 +228,13 @@ query_all_docs(Db, Args0, Callback, Acc) ->
         couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
     end),
     Args1 = Args0#mrargs{view_type=map},
-    Args2 = couch_mrview_util:validate_args(Args1),
-    {ok, Acc1} = case Args2#mrargs.preflight_fun of
+    Args2 = couch_mrview_util:set_extra(Args1, style, all_docs),
+    Args3 = couch_mrview_util:validate_args(Args2),
+    {ok, Acc1} = case Args3#mrargs.preflight_fun of
         PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
         _ -> {ok, Acc}
     end,
-    all_docs_fold(Db, Args2, Callback, Acc1).
+    all_docs_fold(Db, Args3, Callback, Acc1).
 
 
 query_view(Db, DDoc, VName) ->
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
index 2b69eee..0c1a17c 100644
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ b/src/couch_mrview/src/couch_mrview_updater.erl
@@ -315,7 +315,9 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
         design_opts=DesignOpts
     } = State,
 
-    Partitioned = couch_util:get_value(<<"partitioned">>, DesignOpts, false),
+    DbPartitioned = mem3:is_partitioned(State#mrst.db_name),
+    Partitioned = couch_util:get_value(<<"partitioned">>, DesignOpts, DbPartitioned),
+
     Revs = dict:from_list(dict:fetch_keys(Log0)),
 
     Log = dict:fold(fun({Id, _Rev}, DIKeys, Acc) ->
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 022de97..96b80ca 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -39,8 +39,11 @@
 -define(GET_VIEW_RETRY_COUNT, 1).
 -define(GET_VIEW_RETRY_DELAY, 50).
 -define(LOWEST_KEY, null).
--define(HIGHEST_KEY, {[{<<239, 191, 176>>, null}]}). % is {"\ufff0": null}
-
+-define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
+-define(PARTITION_START(P), <<P/binary, $:>>).
+-define(PARTITION_END(P), <<P/binary, $;>>).
+-define(LOWEST(A, B), (if A < B -> A; true -> B end)).
+-define(HIGHEST(A, B), (if A > B -> A; true -> B end)).
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
@@ -583,20 +586,38 @@ validate_args(Args) ->
         _ -> mrverror(<<"Invalid value for `sorted`.">>)
     end,
 
-    case {get_extra(Args, partitioned, false), get_extra(Args, partition)} of
-        {true, undefined} ->
+    Style = get_extra(Args, style, normal),
+    Partitioned = get_extra(Args, partitioned, false),
+    Partition = get_extra(Args, partition),
+
+    case {Style, Partitioned, Partition} of
+        {all_docs, true, _} ->
+            ok; % _all_docs can be called with or without partition parameter.
+        {all_docs, false, undefined} ->
+            ok;
+        {all_docs, false, _Partition} ->
+            mrverror(<<"`partition` parameter is not supported in this db.">>);
+        {normal, true, undefined} ->
             mrverror(<<"`partition` parameter is mandatory for queries to this view.">>);
-        {true, _Partition} ->
+        {normal, true, _Partition} ->
             ok;
-        {false, undefined} ->
+        {normal, false, undefined} ->
             ok;
-        {false, _Partition} ->
+        {normal, false, _Partition} ->
             mrverror(<<"`partition` parameter is not supported in this view.">>)
     end,
 
-    Args1 = case get_extra(Args, partitioned, false) of
-        true  -> apply_partition(Args);
-        false -> Args
+    Args1 = case {Style, Partitioned, Partition} of
+        {all_docs, true, undefined} ->
+            Args;
+        {all_docs, true, Partition} ->
+            apply_partition(Args, all_docs);
+        {all_docs, false, _} ->
+            Args;
+        {normal, true, _} ->
+            apply_partition(Args, normal);
+        {normal, false, _} ->
+            Args
     end,
 
     Args1#mrargs{
@@ -617,9 +638,12 @@ determine_group_level(#mrargs{group=true, group_level=undefined}) ->
 determine_group_level(#mrargs{group_level=GroupLevel}) ->
     GroupLevel.
 
-apply_partition(#mrargs{} = Args0) ->
+apply_partition(#mrargs{} = Args0, Style) ->
     Partition = get_extra(Args0, partition),
-    apply_partition(Partition, Args0).
+    case Style of
+        normal   -> apply_partition(Partition, Args0);
+        all_docs -> apply_all_docs_partition(Partition, Args0)
+    end;
 
 apply_partition(Partition, #mrargs{keys=Keys} = Args) when Keys /= undefined ->
     Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
@@ -647,6 +671,33 @@ apply_partition(Partition, Args) ->
         end_key = {p, Partition, EK0}
     }.
 
+%% all_docs is special as it's not really a view and is already
+%% effectively partitioned as the partition is a prefix of all keys.
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = ?PARTITION_START(Partition), end_key = ?PARTITION_END(Partition)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = ?PARTITION_END(Partition), end_key = ?PARTITION_START(Partition)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=SK, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = ?HIGHEST(?PARTITION_START(Partition), SK), end_key = ?PARTITION_END(Partition)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=SK, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = ?LOWEST(?PARTITION_END(Partition), SK), end_key = ?PARTITION_START(Partition)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=EK} = Args) ->
+    Args#mrargs{start_key = ?PARTITION_START(Partition), end_key = ?LOWEST(?PARTITION_END(Partition), EK)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=EK} = Args) ->
+    Args#mrargs{start_key = ?PARTITION_END(Partition), end_key = ?HIGHEST(?PARTITION_START(Partition), EK)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=SK, end_key=EK} = Args) ->
+    Args#mrargs{start_key = ?HIGHEST(?PARTITION_START(Partition), SK), end_key = ?LOWEST(?PARTITION_END(Partition), EK)};
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=SK, end_key=EK} = Args) ->
+    Args#mrargs{start_key = ?LOWEST(?PARTITION_END(Partition), SK), end_key = ?HIGHEST(?PARTITION_START(Partition), EK)}.
+
+
 check_range(#mrargs{start_key=undefined}, _Cmp) ->
     ok;
 check_range(#mrargs{end_key=undefined}, _Cmp) ->
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 97e323e..4de365f 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -294,9 +294,12 @@ all_docs(DbName, Callback, Acc, QueryArgs) ->
         #mrargs{} | [option()]) ->
     {ok, any()} | {error, Reason :: term()}.
 
-all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
+
+all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs0) when
         is_function(Callback, 2) ->
-    fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
+    DbPartitioned = mem3:is_partitioned(dbname(DbName)),
+    QueryArgs1 = couch_mrview_util:set_extra(QueryArgs0, partitioned, DbPartitioned),
+    fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs1, Callback, Acc0);
 
 %% @doc convenience function that takes a keylist rather than a record
 %% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 844b44d..c0e2974 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -122,7 +122,7 @@ maybe_send_row(State) ->
         user_acc = AccIn,
         query_args = QueryArgs
     } = State,
-    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned, false),
+    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned),
     case fabric_dict:any(0, Counters) of
     true ->
         {ok, State};
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index ac16dac..d515ab8 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -118,7 +118,7 @@ go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
     #mrargs{limit = Limit, skip = Skip, update_seq = UpdateSeq} = QueryArgs,
     State = #collector{
         db_name = DbName,
-        query_args = QueryArgs,
+        query_args = couch_mrview_util:set_extra(QueryArgs, style, all_docs),
         callback = Callback,
         counters = fabric_dict:init(Workers, 0),
         skip = Skip,
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index dbea36e..63c010f 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -29,6 +29,7 @@
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
 -include("mango_cursor.hrl").
+-include("mango_idx.hrl").
 -include("mango_idx_view.hrl").
 
 create(Db, Indexes, Selector, Opts) ->
@@ -71,7 +72,8 @@ explain(Cursor) ->
         {direction, Args#mrargs.direction},
         {stable, Args#mrargs.stable},
         {update, Args#mrargs.update},
-        {conflicts, Args#mrargs.conflicts}
+        {conflicts, Args#mrargs.conflicts},
+        {extra, {Args#mrargs.extra}}
     ]}}].
 
 
@@ -93,14 +95,27 @@ maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) ->
 maybe_replace_max_json(EndKey) ->
     EndKey.
 
-base_args(#cursor{index = Idx} = Cursor) ->
-    #mrargs{
+base_args(#cursor{index = Idx, opts = Opts} = Cursor) ->
+    Args1 = #mrargs{
         view_type = map,
         reduce = false,
         start_key = mango_idx:start_key(Idx, Cursor#cursor.ranges),
         end_key = mango_idx:end_key(Idx, Cursor#cursor.ranges),
         include_docs = true
-    }.
+    },
+    Args2 = add_partition_opts(Args1, couch_util:get_value(partition, Opts)),
+    add_style(Idx, Args2).
+
+add_partition_opts(#mrargs{} = Args, <<>>) ->
+    Args;
+add_partition_opts(#mrargs{} = Args, Partition) ->
+    Args1 = couch_mrview_util:set_extra(Args, partitioned, true),
+    couch_mrview_util:set_extra(Args1, partition, Partition).
+
+add_style(#idx{def = all_docs}, Args) ->
+    couch_mrview_util:set_extra(Args, style, all_docs);
+add_style(_, Args) ->
+    Args.
 
 
 execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFun, UserAcc) ->
diff --git a/src/mango/src/mango_opts.erl b/src/mango/src/mango_opts.erl
index 7bae9c9..87d876a 100644
--- a/src/mango/src/mango_opts.erl
+++ b/src/mango/src/mango_opts.erl
@@ -81,6 +81,12 @@ validate_find({Props}) ->
             {tag, selector},
             {validator, fun validate_selector/1}
         ]},
+        {<<"partition">>, [
+            {tag, partition},
+            {optional, true},
+            {default, <<>>},
+            {validator, fun is_string/1}
+        ]},
         {<<"use_index">>, [
             {tag, use_index},
             {optional, true},
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index aecca2f..f113357 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -352,7 +352,10 @@ is_partitioned(DbName0) when is_binary(DbName0) ->
             is_partitioned(mem3:shards(DbName))
     end;
 
-is_partitioned(Shards) when is_list(Shards) ->
+is_partitioned([#shard{} | _] = Shards) ->
+    lists:all(fun is_partitioned/1, Shards);
+
+is_partitioned([#ordered_shard{} | _] = Shards) ->
     lists:all(fun is_partitioned/1, Shards);
 
 is_partitioned(#shard{opts=Opts}) ->
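
Two details of this commit are easy to miss. First, _all_docs needs no key
rewriting because the partition is already a prefix of every doc id, so the
partition parameter only clamps the range between ?PARTITION_START and
?PARTITION_END; ";" is the code point immediately after ":", which is why
<<P/binary, $;>> bounds every id of the form "P:...". A minimal sketch with a
made-up partition <<"foo">>, compared here with plain Erlang term ordering for
illustration:

    %% Illustrative only; shows the range the new macros produce.
    Start = <<"foo", $:>>,                      %% <<"foo:">>
    End   = <<"foo", $;>>,                      %% <<"foo;">>
    true  = ($; =:= $: + 1),
    true  = (Start < <<"foo:123">>) andalso (<<"foo:123">> < End).

Second, on the mango side the new "partition" field in validate_find defaults
to <<>>, and add_partition_opts/2 leaves the query untouched for that default,
so only _find requests that explicitly pass a partition are marked as
partitioned.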


[couchdb] 01/04: implement partitioned views

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 86abbc3aa3777d27c3093e6eb555c6fe60dd72ff
Author: Robert Newson <rn...@apache.org>
AuthorDate: Tue Aug 7 15:44:33 2018 +0100

    implement partitioned views
    
    Co-authored-by: Robert Newson <rn...@apache.org>
    Co-authored-by: Paul J. Davis <pa...@gmail.com>
---
 src/couch/src/couch_btree.erl                 | 14 ++++++++
 src/couch/src/couch_ejson_compare.erl         |  4 +++
 src/couch_mrview/src/couch_mrview.erl         |  2 ++
 src/couch_mrview/src/couch_mrview_updater.erl | 14 ++++++--
 src/couch_mrview/src/couch_mrview_util.erl    | 50 ++++++++++++++++++++++++---
 src/fabric/src/fabric_view.erl                | 18 ++++++++--
 6 files changed, 92 insertions(+), 10 deletions(-)

diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
index ea224b1..d11d7e6 100644
--- a/src/couch/src/couch_btree.erl
+++ b/src/couch/src/couch_btree.erl
@@ -133,6 +133,20 @@ make_group_fun(Bt, exact) ->
     end;
 make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
     fun
+        ({{p, _Partition, Key1}, _}, {{p, _Partition, Key2}, _}) ->
+            SL1 = lists:sublist(Key1, GroupLevel),
+            SL2 = lists:sublist(Key2, GroupLevel),
+            case less(Bt, {SL1, nil}, {SL2, nil}) of
+                false ->
+                    case less(Bt, {SL2, nil}, {SL1, nil}) of
+                        false ->
+                            true;
+                        _ ->
+                            false
+                    end;
+                _ ->
+                    false
+            end;
         ({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
             SL1 = lists:sublist(Key1, GroupLevel),
             SL2 = lists:sublist(Key2, GroupLevel),
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
index 81adbb8..ca36c86 100644
--- a/src/couch/src/couch_ejson_compare.erl
+++ b/src/couch/src/couch_ejson_compare.erl
@@ -22,6 +22,10 @@ init() ->
     Dir = code:priv_dir(couch),
     ok = erlang:load_nif(filename:join(Dir, ?MODULE), NumScheds).
 
+% partitioned row comparison
+less({p, PA, A}, {p, PB, B}) ->
+    less([PA, A], [PB, B]);
+
 less(A, B) ->
     try
         less_nif(A, B)
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index db467f0..09945f5 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -614,6 +614,8 @@ red_fold(Db, {NthRed, _Lang, View}=RedView, Args, Callback, UAcc) ->
     end, Acc, OptList),
     finish_fold(Acc2, []).
 
+red_fold({p, _Partition, Key}, Red, Acc) ->
+    red_fold(Key, Red, Acc);
 red_fold(_Key, _Red, #mracc{skip=N}=Acc) when N > 0 ->
     {ok, Acc#mracc{skip=N-1, last_go=ok}};
 red_fold(Key, Red, #mracc{meta_sent=false}=Acc) ->
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
index 214f487..2b69eee 100644
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ b/src/couch_mrview/src/couch_mrview_updater.erl
@@ -311,9 +311,11 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
     #mrst{
         id_btree=IdBtree,
         log_btree=LogBtree,
-        first_build=FirstBuild
+        first_build=FirstBuild,
+        design_opts=DesignOpts
     } = State,
 
+    Partitioned = couch_util:get_value(<<"partitioned">>, DesignOpts, false),
     Revs = dict:from_list(dict:fetch_keys(Log0)),
 
     Log = dict:fold(fun({Id, _Rev}, DIKeys, Acc) ->
@@ -328,8 +330,9 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
         _ -> update_log(LogBtree, Log, Revs, Seqs, FirstBuild)
     end,
 
-    UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, {KVs, SKVs}}) ->
+    UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, {KVs0, SKVs}}) ->
         #mrview{seq_indexed=SIndexed, keyseq_indexed=KSIndexed} = View,
+        KVs = if Partitioned -> inject_partition(KVs0); true -> KVs0 end,
         ToRem = couch_util:dict_find(ViewId, ToRemByView, []),
         {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
         NewUpdateSeq = case VBtree2 =/= View#mrview.btree of
@@ -378,6 +381,13 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
         log_btree=LogBtree2
     }.
 
+inject_partition(KVs) ->
+    [{{{p, partition(DocId), Key}, DocId}, Value} || {{Key, DocId}, Value} <- KVs].
+
+partition(DocId) ->
+    [Partition, _Rest] = binary:split(DocId, <<":">>),
+    Partition.
+
 update_id_btree(Btree, DocIdKeys, true) ->
     ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
     couch_btree:query_modify(Btree, [], ToAdd, []);
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 592bfb5..022de97 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -38,6 +38,9 @@
 -define(MOD, couch_mrview_index).
 -define(GET_VIEW_RETRY_COUNT, 1).
 -define(GET_VIEW_RETRY_DELAY, 50).
+-define(LOWEST_KEY, null).
+-define(HIGHEST_KEY, {[{<<239, 191, 176>>, null}]}). % is {"\ufff0": null}
+
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
@@ -226,11 +229,12 @@ view_sig(_Db, State, View, Args0) ->
     PurgeSeq = View#mrview.purge_seq,
     SeqIndexed = View#mrview.seq_indexed,
     KeySeqIndexed = View#mrview.keyseq_indexed,
+    Partitioned = get_extra(Args0, partitioned, false),
     Args = Args0#mrargs{
         preflight_fun=undefined,
         extra=[]
     },
-    Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args),
+    Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Partitioned, Args),
     couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term))).
 
 view_sig_term(BaseSig, UpdateSeq, PurgeSeq, false, false) ->
@@ -238,10 +242,12 @@ view_sig_term(BaseSig, UpdateSeq, PurgeSeq, false, false) ->
 view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed) ->
     {BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed}.
 
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq, false, false, Args) ->
+view_sig_term(BaseSig, UpdateSeq, PurgeSeq, false, false, false, Args) ->
     {BaseSig, UpdateSeq, PurgeSeq, Args};
-view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args) ->
-    {BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args}.
+view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, false, Args) ->
+    {BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args};
+view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Partitioned, Args) ->
+    {BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Partitioned, Args}.
 
 
 init_state(Db, Fd, #mrst{views=Views}=State, nil) ->
@@ -588,7 +594,12 @@ validate_args(Args) ->
             mrverror(<<"`partition` parameter is not supported in this view.">>)
     end,
 
-    Args#mrargs{
+    Args1 = case get_extra(Args, partitioned, false) of
+        true  -> apply_partition(Args);
+        false -> Args
+    end,
+
+    Args1#mrargs{
         start_key_docid=SKDocId,
         end_key_docid=EKDocId,
         group_level=GroupLevel
@@ -606,6 +617,35 @@ determine_group_level(#mrargs{group=true, group_level=undefined}) ->
 determine_group_level(#mrargs{group_level=GroupLevel}) ->
     GroupLevel.
 
+apply_partition(#mrargs{} = Args0) ->
+    Partition = get_extra(Args0, partition),
+    apply_partition(Partition, Args0).
+
+apply_partition(Partition, #mrargs{keys=Keys} = Args) when Keys /= undefined ->
+    Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
+
+apply_partition(_Partition, #mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args) ->
+    Args; % already applied.
+
+apply_partition(Partition, Args) ->
+    #mrargs{
+        direction = Dir,
+        start_key = StartKey,
+        end_key = EndKey
+    } = Args,
+
+    {DefSK, DefEK} = case Dir of
+        fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
+        rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
+    end,
+
+    SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
+    EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
+
+    Args#mrargs{
+        start_key = {p, Partition, SK0},
+        end_key = {p, Partition, EK0}
+    }.
 
 check_range(#mrargs{start_key=undefined}, _Cmp) ->
     ok;
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index b4b8a8c..844b44d 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -119,8 +119,10 @@ maybe_send_row(State) ->
         counters = Counters,
         skip = Skip,
         limit = Limit,
-        user_acc = AccIn
+        user_acc = AccIn,
+        query_args = QueryArgs
     } = State,
+    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned, false),
     case fabric_dict:any(0, Counters) of
     true ->
         {ok, State};
@@ -128,8 +130,14 @@ maybe_send_row(State) ->
         try get_next_row(State) of
         {_, NewState} when Skip > 0 ->
             maybe_send_row(NewState#collector{skip=Skip-1});
-        {Row, NewState} ->
-            case Callback(transform_row(possibly_embed_doc(NewState,Row)), AccIn) of
+        {Row0, NewState} ->
+            Row1 = possibly_embed_doc(NewState, Row0),
+            Row2 = if
+                Partitioned -> detach_partition(Row1);
+                true -> Row1
+            end,
+            Row3 = transform_row(Row2),
+            case Callback(Row3, AccIn) of
             {stop, Acc} ->
                 {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
             {ok, Acc} ->
@@ -194,6 +202,10 @@ possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
         _ -> Row
     end.
 
+detach_partition(#view_row{key={p, _Partition, Key}} = Row) ->
+    Row#view_row{key = Key};
+detach_partition(#view_row{} = Row) ->
+    Row.
 
 keydict(undefined) ->
     undefined;
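
The shape of a partitioned view row ties this commit together: the updater
wraps each emitted key as {p, Partition, Key}, where Partition is the doc id
prefix before the first ":", couch_ejson_compare and couch_btree then sort and
group those triples partition-first, and fabric's detach_partition/1 strips
the wrapper again before rows reach the callback. A minimal sketch of the
rewrite, mirroring inject_partition/1 above with a made-up row:

    %% Illustrative only; a doc "user1:doc1" emitting key <<"k">>, value 1.
    KVs0 = [{{<<"k">>, <<"user1:doc1">>}, 1}],
    [{{{p, <<"user1">>, <<"k">>}, <<"user1:doc1">>}, 1}] =
        [{{{p, hd(binary:split(DocId, <<":">>)), Key}, DocId}, Value}
            || {{Key, DocId}, Value} <- KVs0].

Grouping also stays inside a partition: the new couch_btree clause repeats the
_Partition variable in both key patterns, so keys from different partitions
never fall into the same group.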