Posted to commits@couchdb.apache.org by rn...@apache.org on 2018/08/13 11:21:31 UTC

[couchdb] branch user-partitioned-dbs-6 created (now c88d71e)

This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a change to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


      at c88d71e  WIP crappy _all_docs hack

This branch includes the following new commits:

     new 13f2368  implement partitioned views
     new 7e15b63  mem3 tests for partitioned databases
     new c88d71e  WIP crappy _all_docs hack

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[couchdb] 02/03: mem3 tests for partitioned databases

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 7e15b6302eb0eeacf09a24996dac95d28b629be9
Author: Garren Smith <ga...@gmail.com>
AuthorDate: Tue Aug 7 12:15:14 2018 +0200

    mem3 tests for partitioned databases
---
 src/mem3/src/mem3.erl        | 22 ++++++++++++++
 src/mem3/src/mem3_shards.erl | 68 +++++++++++++++++++++++++++++++++++++++++++-
 src/mem3/src/mem3_util.erl   | 59 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 147 insertions(+), 2 deletions(-)

diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 99c0863..aecca2f 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -389,4 +389,26 @@ allowed_nodes_test_() ->
         ]
     }]}.
 
+is_partitioned_false_shards_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_dbs">>), false),
+    meck:unload().
+
+is_partitioned_false_nodes_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_nodes">>), false),
+    meck:unload().
+
+is_partitioned_true_partitioned_db_test() ->
+    Shard = #shard{
+        opts = [{partitioned, true}]
+    },
+    ?assertEqual(is_partitioned([Shard]), true).
+
+is_partitioned_false_partitioned_db_test() ->
+    Shard = #shard{
+        opts = []
+    },
+    ?assertEqual(is_partitioned([Shard]), false).
+
 -endif.
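
A note on the meck usage in the first two tests above: config:get/3 is stubbed to fall through to its Default argument, so is_partitioned/1 sees an unconfigured system; recent meck versions create the mock on the first meck:expect/3 call, which is presumably why no meck:new appears. A minimal standalone sketch of the same pattern (module and test names here are illustrative, not from this commit):

    -module(config_mock_sketch).
    -include_lib("eunit/include/eunit.hrl").

    config_default_test() ->
        %% [non_strict] lets meck mock a module that may not be loaded
        %% in a standalone run; the commit relies on expect/3 creating
        %% the mock implicitly instead.
        meck:new(config, [non_strict]),
        meck:expect(config, get, fun(_, _, Default) -> Default end),
        ?assertEqual(false, config:get("mem3", "partitioned", false)),
        ?assert(meck:validate(config)),
        meck:unload(config).
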
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 1cd7be6..cfbc32e 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -559,7 +559,9 @@ mem3_shards_test_() ->
             t_writer_does_not_delete_other_writers_for_same_shard(),
             t_spawn_writer_in_load_shards_from_db(),
             t_cache_insert_takes_new_update(),
-            t_cache_insert_ignores_stale_update_and_kills_worker()
+            t_cache_insert_ignores_stale_update_and_kills_worker(),
+            t_load_shards_from_disk_returns_correct_shard_for_partition(),
+            t_for_docid_returns_correct_shard_for_partition()
         ]
     }.
 
@@ -703,6 +705,70 @@ t_cache_insert_ignores_stale_update_and_kills_worker() ->
         ?assertEqual([], ets:tab2list(?OPENERS))
     end).
 
+t_load_shards_from_disk_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648,2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned,true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+        Doc = #doc{body = {[]}},
+        meck:expect(couch_db, open_doc, 3, {ok, Doc}),
+        meck:expect(couch_db, get_update_seq, 1, 1),
+        meck:expect(mem3_util, build_ordered_shards, 2, Shards),
+        meck:expect(mem3_util, ensure_exists, 1, {ok, <<"shard-name">>}),
+        meck:expect(couch_db, close, 1, ok),
+
+        [Shard] = load_shards_from_disk(DbName, DocId),
+
+        meck:validate(couch_db),
+        meck:validate(mem3_util),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+    end).
+
+t_for_docid_returns_correct_shard_for_partition() ->
+        ?_test(begin
+            Shards = [
+                #ordered_shard{
+                    name = <<"shards/60000000-7fffffff/db1.1533630706">>,
+                    node = 'node1@127.0.0.1',
+                    dbname = <<"db1">>,
+                    range = [1610612736,2147483647],
+                    ref = undefined,
+                    order = 1,
+                    opts = [{partitioned,true}]
+                },
+                #ordered_shard{
+                    name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                    node = 'node1@127.0.0.1',
+                    dbname = <<"db1">>,
+                    range = [2147483648,2684354559],
+                    ref = undefined,
+                    order = 1,
+                    opts = [{partitioned,true}]
+                }
+            ],
+            DbName = <<"db1">>,
+            DocId = <<"foo:123">>,
+
+            true = ets:insert(?SHARDS, Shards),
+
+            [Shard] = for_docid(DbName, DocId, [ordered]),
+    
+            ShardName = Shard#ordered_shard.name,
+            ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+        end).
+
 
 mock_state(UpdateSeq) ->
     #st{
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 9620e98..7b8dd16 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -35,7 +35,6 @@ hash(Item) when is_binary(Item) ->
 hash(Item) ->
     erlang:crc32(term_to_binary(Item)).
 
-
 docid_hash(DocId) when is_binary(DocId) ->
     docid_hash(DocId, []).
 
@@ -294,3 +293,61 @@ downcast(#ordered_shard{}=S) ->
       };
 downcast(Shards) when is_list(Shards) ->
     [downcast(Shard) || Shard <- Shards].
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+docid_hash_design_doc_test() ->
+    Id = <<"_design/ddoc">>,
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, erlang:crc32(Id)).
+
+docid_hash_doc_partition_false_test() ->
+    Id = <<"partitionkey:docid">>,
+    IdHash = erlang:crc32(Id),
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, IdHash),
+    Hash = docid_hash(Id, []),
+    ?assertEqual(Hash, IdHash).
+
+docid_hash_doc_partition_true_test() ->
+    Id = <<"partitionkey:doc:id">>,
+    Hash = docid_hash(Id, [{partitioned, true}]),
+    ?assertEqual(Hash, erlang:crc32(<<"partitionkey">>)).
+
+
+add_shards_by_node_adds_partition_prop_test() ->
+    DocProp = [
+    {<<"_id">>, <<"database-name">>},
+    {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+    {<<"shard_suffix">>,".1533550200"},
+    {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+    {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+    {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+    {<<"options">>,{[{partitioned,true}]}}
+   ],
+
+    [ShardRange | _] = build_shards_by_node(<<"database-name">>, DocProp),
+    Opts = ShardRange#shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+    
+add_shards_by_range_adds_partition_prop_test() ->
+    DocProp = [
+    {<<"_id">>, <<"database-name">>},
+    {<<"_rev">>,<<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+    {<<"shard_suffix">>,".1533550200"},
+    {<<"changelog">>, [[<<"add">>,<<"00000000-1fffffff">>,<<"node1@127.0.0.1">>]]},
+    {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>]}]}},
+    {<<"by_range">>, {[{<<"00000000-1fffffff">>,[<<"node1@127.0.0.1">>]}]}},
+    {<<"options">>,{[{partitioned,true}]}}
+   ],
+
+    [ShardRange | _] = build_shards_by_range(<<"database-name">>, DocProp),
+    Opts = ShardRange#ordered_shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+-endif.
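
Taken together, the mem3_util tests above pin down the hashing contract: with {partitioned, true} in the options, only the partition key (the part of the doc id before the first ":") feeds the hash, so every document in a partition lands on the same shard. A back-of-envelope mirror of that contract, consistent with the assertions above (illustrative, not the committed code):

    %% Same behaviour the docid_hash tests assert.
    hash_for(DocId, Opts) ->
        case lists:keyfind(partitioned, 1, Opts) of
            {partitioned, true} ->
                [Partition | _] = binary:split(DocId, <<":">>),
                erlang:crc32(Partition);
            _ ->
                erlang:crc32(DocId)
        end.

    %% hash_for(<<"user42:order-1">>, [{partitioned, true}])
    %%   =:= hash_for(<<"user42:order-2">>, [{partitioned, true}]).
    %% => true: both ids hash to the partition key "user42".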


[couchdb] 01/03: implement partitioned views

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 13f2368a651b3c0abb3964c991d2a2f57c00b2fa
Author: Robert Newson <rn...@apache.org>
AuthorDate: Tue Aug 7 15:44:33 2018 +0100

    implement partitioned views
---
 src/couch_mrview/src/couch_mrview_updater.erl | 14 +++++++--
 src/couch_mrview/src/couch_mrview_util.erl    | 41 ++++++++++++++++++++++++++-
 src/fabric/src/fabric_view.erl                | 19 +++++++++++--
 3 files changed, 68 insertions(+), 6 deletions(-)

diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
index 214f487..bfaf136 100644
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ b/src/couch_mrview/src/couch_mrview_updater.erl
@@ -311,9 +311,11 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
     #mrst{
         id_btree=IdBtree,
         log_btree=LogBtree,
-        first_build=FirstBuild
+        first_build=FirstBuild,
+        design_opts=DesignOpts
     } = State,
 
+    Partitioned = couch_util:get_value(<<"partitioned">>, DesignOpts, false),
     Revs = dict:from_list(dict:fetch_keys(Log0)),
 
     Log = dict:fold(fun({Id, _Rev}, DIKeys, Acc) ->
@@ -328,8 +330,9 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
         _ -> update_log(LogBtree, Log, Revs, Seqs, FirstBuild)
     end,
 
-    UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, {KVs, SKVs}}) ->
+    UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, {KVs0, SKVs}}) ->
         #mrview{seq_indexed=SIndexed, keyseq_indexed=KSIndexed} = View,
+        KVs = if Partitioned -> inject_partition(KVs0); true -> KVs0 end,
         ToRem = couch_util:dict_find(ViewId, ToRemByView, []),
         {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
         NewUpdateSeq = case VBtree2 =/= View#mrview.btree of
@@ -378,6 +381,13 @@ write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys, Seqs, Log0) ->
         log_btree=LogBtree2
     }.
 
+inject_partition(KVs) ->
+    [{{[partition(DocId), Key], DocId}, Value} || {{Key, DocId}, Value} <- KVs].
+
+partition(DocId) ->
+    [Partition, _Rest] = binary:split(DocId, <<":">>),
+    Partition.
+
 update_id_btree(Btree, DocIdKeys, true) ->
     ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
     couch_btree:query_modify(Btree, [], ToAdd, []);
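
To make the inject_partition/1 hunk concrete: each emitted row key is rewritten from {Key, DocId} to {[Partition, Key], DocId}, so rows collate first by partition in the view b+tree. Note that partition/1 match-asserts the id contains a ":"; an id without one would fail with badmatch. An equivalent shell evaluation for this input (made-up values):

    1> Partition = fun(Id) -> hd(binary:split(Id, <<":">>)) end.
    2> [{{[Partition(Id), K], Id}, V} || {{K, Id}, V} <-
           [{{<<"2018-08-09">>, <<"user42:order-1">>}, 100}]].
    [{{[<<"user42">>,<<"2018-08-09">>],<<"user42:order-1">>},100}]
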
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 592bfb5..e4f06ff 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -38,6 +38,9 @@
 -define(MOD, couch_mrview_index).
 -define(GET_VIEW_RETRY_COUNT, 1).
 -define(GET_VIEW_RETRY_DELAY, 50).
+-define(LOWEST_KEY, null).
+-define(HIGHEST_KEY, {[{<<239, 191, 176>>, null}]}). % UTF-8 bytes of U+FFF0, i.e. {"\ufff0": null}
+
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
@@ -588,7 +591,12 @@ validate_args(Args) ->
             mrverror(<<"`partition` parameter is not supported in this view.">>)
     end,
 
-    Args#mrargs{
+    Args1 = case get_extra(Args, partitioned, false) of
+        true  -> apply_partition(Args);
+        false -> Args
+    end,
+
+    Args1#mrargs{
         start_key_docid=SKDocId,
         end_key_docid=EKDocId,
         group_level=GroupLevel
@@ -606,6 +614,37 @@ determine_group_level(#mrargs{group=true, group_level=undefined}) ->
 determine_group_level(#mrargs{group_level=GroupLevel}) ->
     GroupLevel.
 
+apply_partition(#mrargs{} = Args0) ->
+    case get_extra(Args0, partition_applied, false) of
+        true ->
+            Args0;
+        false ->
+            Partition = get_extra(Args0, partition),
+            Args1 = apply_partition(Partition, Args0),
+            set_extra(Args1, partition_applied, true)
+    end.
+
+apply_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key=[Partition, ?LOWEST_KEY], end_key=[Partition, ?HIGHEST_KEY]};
+
+apply_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key=[Partition, ?HIGHEST_KEY], end_key=[Partition, ?LOWEST_KEY]};
+
+apply_partition(Partition, #mrargs{direction=fwd, start_key=SK0, end_key=undefined} = Args) ->
+    Args#mrargs{start_key=[Partition, SK0], end_key=[Partition, ?HIGHEST_KEY]};
+
+apply_partition(Partition, #mrargs{direction=rev, start_key=SK0, end_key=undefined} = Args) ->
+    Args#mrargs{start_key=[Partition, SK0], end_key=[Partition, ?LOWEST_KEY]};
+
+apply_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=EK0} = Args) ->
+    Args#mrargs{start_key=[Partition, ?LOWEST_KEY], end_key=[Partition, EK0]};
+
+apply_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=EK0} = Args) ->
+    Args#mrargs{start_key=[Partition, ?HIGHEST_KEY], end_key=[Partition, EK0]};
+
+apply_partition(Partition, #mrargs{start_key=SK0, end_key=EK0} = Args) ->
+    Args#mrargs{start_key=[Partition, SK0], end_key=[Partition, EK0]}.
+
 
 check_range(#mrargs{start_key=undefined}, _Cmp) ->
     ok;
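
The clause ordering in apply_partition/2 above matters: whichever of start_key/end_key the request omits is filled with the ?LOWEST_KEY / ?HIGHEST_KEY sentinel on the appropriate end for the scan direction, keeping the traversal inside the [Partition, ...] keyspace. A standalone mirror of the no-keys cases (sentinel values copied from the defines above; illustrative only):

    -define(LOWEST_KEY, null).
    -define(HIGHEST_KEY, {[{<<239, 191, 176>>, null}]}). % {"\ufff0": null}

    %% The {StartKey, EndKey} pair apply_partition/2 would produce
    %% when the request supplies neither bound.
    partition_range(Partition, fwd) ->
        {[Partition, ?LOWEST_KEY], [Partition, ?HIGHEST_KEY]};
    partition_range(Partition, rev) ->
        {[Partition, ?HIGHEST_KEY], [Partition, ?LOWEST_KEY]}.

    %% partition_range(<<"user42">>, fwd) =>
    %%   {[<<"user42">>, null], [<<"user42">>, {[{<<239,191,176>>, null}]}]}
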
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index eae4cd6..994c739 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -119,8 +119,10 @@ maybe_send_row(State) ->
         counters = Counters,
         skip = Skip,
         limit = Limit,
-        user_acc = AccIn
+        user_acc = AccIn,
+        query_args = QueryArgs
     } = State,
+    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned, false),
     case fabric_dict:any(0, Counters) of
     true ->
         {ok, State};
@@ -128,8 +130,14 @@ maybe_send_row(State) ->
         try get_next_row(State) of
         {_, NewState} when Skip > 0 ->
             maybe_send_row(NewState#collector{skip=Skip-1});
-        {Row, NewState} ->
-            case Callback(transform_row(possibly_embed_doc(NewState,Row)), AccIn) of
+        {Row0, NewState} ->
+            Row1 = possibly_embed_doc(NewState, Row0),
+            Row2 = if
+                Partitioned -> detach_partition(Row1);
+                true -> Row1
+            end,
+            Row3 = transform_row(Row2),
+            case Callback(Row3, AccIn) of
             {stop, Acc} ->
                 {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
             {ok, Acc} ->
@@ -194,6 +202,11 @@ possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
         _ -> Row
     end.
 
+detach_partition(#view_row{key=[_Partition, Key]} = Row) ->
+    Row#view_row{key = Key};
+detach_partition(#view_row{key=null} = Row) ->
+    Row#view_row{key = null}.
+
 
 keydict(undefined) ->
     undefined;
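
detach_partition/1 is the inverse of the updater's inject_partition/1: it strips the partition element off the composite key before the row reaches the client callback, so a partitioned view responds with the user's original keys. An illustrative round trip (the record here is a cut-down stand-in for the real #view_row in fabric.hrl):

    -record(view_row, {key, id, value, doc}).

    detach_partition(#view_row{key = [_Partition, Key]} = Row) ->
        Row#view_row{key = Key};
    detach_partition(#view_row{key = null} = Row) ->
        Row.

    %% detach_partition(#view_row{key = [<<"user42">>, <<"2018-08-09">>]})
    %% yields a row whose key is just <<"2018-08-09">>.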


[couchdb] 03/03: WIP crappy _all_docs hack

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch user-partitioned-dbs-6
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c88d71e7751177f443807bac7fbe7d61e6305757
Author: Robert Newson <rn...@apache.org>
AuthorDate: Thu Aug 9 13:43:17 2018 +0100

    WIP crappy _all_docs hack
---
 src/chttpd/src/chttpd_db.erl               |   9 ++-
 src/couch_mrview/src/couch_mrview.erl      |   7 +-
 src/couch_mrview/src/couch_mrview_util.erl | 104 ++++++++++++++++++++++++++---
 src/fabric/src/fabric.erl                  |   7 +-
 src/fabric/src/fabric_view.erl             |   7 +-
 src/fabric/src/fabric_view_all_docs.erl    |   2 +-
 src/mem3/src/mem3.erl                      |   5 +-
 7 files changed, 118 insertions(+), 23 deletions(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 57d85e1..83d0489 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -684,12 +684,15 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
 all_docs_view(Req, Db, Keys, OP) ->
     Args0 = couch_mrview_http:parse_params(Req, Keys),
     Args1 = Args0#mrargs{view_type=map},
-    Args2 = couch_mrview_util:validate_args(Args1),
-    Args3 = set_namespace(OP, Args2),
+    DbPartitioned = mem3:is_partitioned(couch_db:name(Db)),
+    Args2 = couch_mrview_util:set_extra(Args1, partitioned, DbPartitioned),
+    Args3 = couch_mrview_util:set_extra(Args2, style, all_docs),
+    Args4 = couch_mrview_util:validate_args(Args3),
+    Args5 = set_namespace(OP, Args4),
     Options = [{user_ctx, Req#httpd.user_ctx}],
     Max = chttpd:chunked_response_buffer_size(),
     VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    {ok, Resp} = fabric:all_docs(Db, Options, fun couch_mrview_http:view_cb/2, VAcc, Args3),
+    {ok, Resp} = fabric:all_docs(Db, Options, fun couch_mrview_http:view_cb/2, VAcc, Args5),
     {ok, Resp#vacc.resp}.
 
 db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
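
The handler now threads two flags through the #mrargs extras before validation: whether the database is partitioned, and that this is an _all_docs-style request. Assuming set_extra/3 and get_extra/3 are plain proplist helpers over an extra field on #mrargs (their bodies are not shown in this diff), the flow is equivalent to:

    %% Hypothetical mirror of the helpers used above.
    set_extra(#mrargs{extra = Extra} = Args, Key, Value) ->
        Args#mrargs{extra = lists:keystore(Key, 1, Extra, {Key, Value})}.

    get_extra(#mrargs{extra = Extra}, Key, Default) ->
        couch_util:get_value(Key, Extra, Default).
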
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index db467f0..e1db906 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -228,12 +228,13 @@ query_all_docs(Db, Args0, Callback, Acc) ->
         couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
     end),
     Args1 = Args0#mrargs{view_type=map},
-    Args2 = couch_mrview_util:validate_args(Args1),
-    {ok, Acc1} = case Args2#mrargs.preflight_fun of
+    Args2 = couch_mrview_util:set_extra(Args1, style, all_docs),
+    Args3 = couch_mrview_util:validate_args(Args2),
+    {ok, Acc1} = case Args3#mrargs.preflight_fun of
         PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
         _ -> {ok, Acc}
     end,
-    all_docs_fold(Db, Args2, Callback, Acc1).
+    all_docs_fold(Db, Args3, Callback, Acc1).
 
 
 query_view(Db, DDoc, VName) ->
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index e4f06ff..2505148 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -580,20 +580,38 @@ validate_args(Args) ->
         _ -> mrverror(<<"Invalid value for `sorted`.">>)
     end,
 
-    case {get_extra(Args, partitioned, false), get_extra(Args, partition)} of
-        {true, undefined} ->
+    Style = get_extra(Args, style, normal),
+    Partitioned = get_extra(Args, partitioned, false),
+    Partition = get_extra(Args, partition),
+
+    case {Style, Partitioned, Partition} of
+        {all_docs, true, _} ->
+            ok; % _all_docs can be called with or without the partition parameter.
+        {all_docs, false, undefined} ->
+            ok;
+        {all_docs, false, _Partition} ->
+            mrverror(<<"`partition` parameter is not supported in this db.">>);
+        {normal, true, undefined} ->
             mrverror(<<"`partition` parameter is mandatory for queries to this view.">>);
-        {true, _Partition} ->
+        {normal, true, _Partition} ->
             ok;
-        {false, undefined} ->
+        {normal, false, undefined} ->
             ok;
-        {false, _Partition} ->
+        {normal, false, _Partition} ->
             mrverror(<<"`partition` parameter is not supported in this view.">>)
     end,
 
-    Args1 = case get_extra(Args, partitioned, false) of
-        true  -> apply_partition(Args);
-        false -> Args
+    Args1 = case {Style, Partitioned, Partition} of
+        {all_docs, true, undefined} ->
+            Args;
+        {all_docs, true, Partition} ->
+            apply_partition(Args, all_docs);
+        {all_docs, false, _} ->
+            Args;
+        {normal, true, _} ->
+            apply_partition(Args, normal);
+        {normal, false, _} ->
+            Args
     end,
 
     Args1#mrargs{
@@ -614,15 +632,18 @@ determine_group_level(#mrargs{group=true, group_level=undefined}) ->
 determine_group_level(#mrargs{group_level=GroupLevel}) ->
     GroupLevel.
 
-apply_partition(#mrargs{} = Args0) ->
+apply_partition(#mrargs{} = Args0, Style) ->
     case get_extra(Args0, partition_applied, false) of
         true ->
             Args0;
         false ->
             Partition = get_extra(Args0, partition),
-            Args1 = apply_partition(Partition, Args0),
+            Args1 = case Style of
+                normal   -> apply_partition(Partition, Args0);
+                all_docs -> apply_all_docs_partition(Partition, Args0)
+            end,
             set_extra(Args1, partition_applied, true)
-    end.
+    end;
 
 apply_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=undefined} = Args) ->
     Args#mrargs{start_key=[Partition, ?LOWEST_KEY], end_key=[Partition, ?HIGHEST_KEY]};
@@ -646,6 +667,67 @@ apply_partition(Partition, #mrargs{start_key=SK0, end_key=EK0} = Args) ->
     Args#mrargs{start_key=[Partition, SK0], end_key=[Partition, EK0]}.
 
 
+%% _all_docs is special: it is not really a view, and it is already
+%% effectively partitioned, since the partition is a prefix of the key
+%% in the _all_docs b+tree.
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = <<Partition/binary, $:>>, end_key = <<Partition/binary, $;>>};
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=undefined} = Args) ->
+    Args#mrargs{start_key = <<Partition/binary, $;>>, end_key = <<Partition/binary, $:>>};
+
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=SK0, end_key=undefined} = Args) ->
+    case prefix(SK0, <<Partition/binary, $:>>) of
+        true ->
+            Args#mrargs{start_key = SK0, end_key = <<Partition/binary, $;>>};
+        false ->
+            mrverror(<<"`start_key` must be prefixed with selected partition.">>)
+    end;
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=SK0, end_key=undefined} = Args) ->
+    case prefix(SK0, <<Partition/binary, $:>>) of
+        true ->
+            Args#mrargs{start_key = SK0, end_key = <<Partition/binary, $:>>};
+        false ->
+            mrverror(<<"`start_key` must be prefixed with selected partition.">>)
+    end;
+
+apply_all_docs_partition(Partition, #mrargs{direction=fwd, start_key=undefined, end_key=EK0} = Args) ->
+    case prefix(EK0, <<Partition/binary, $:>>) of
+        true ->
+            Args#mrargs{start_key = <<Partition/binary, $:>>, end_key = EK0};
+        false ->
+            mrverror(<<"`end_key` must be prefixed with selected partition.">>)
+    end;
+
+apply_all_docs_partition(Partition, #mrargs{direction=rev, start_key=undefined, end_key=EK0} = Args) ->
+    case prefix(EK0, <<Partition/binary, $:>>) of
+        true ->
+            Args#mrargs{start_key = EK0, end_key = <<Partition/binary, $:>>};
+        false ->
+            mrverror(<<"`end_key` must be prefixed with selected partition.">>)
+    end;
+
+apply_all_docs_partition(Partition, #mrargs{start_key=SK0, end_key=EK0} = Args) ->
+    case {prefix(SK0, <<Partition/binary, $:>>), prefix(EK0, <<Partition/binary, $:>>)} of
+        {false, false} ->
+            mrverror(<<"`start_key` and `end_key` must be prefixed with selected partition.">>);
+        {false, true} ->
+            mrverror(<<"`start_key` must be prefixed with selected partition.">>);
+        {true, false} ->
+            mrverror(<<"`end_key` must be prefixed with selected partition.">>);
+        {true, true} ->
+            Args
+    end.
+
+prefix(Subject, Prefix) ->
+    case binary:match(Subject, Prefix) of
+        {0, _} ->
+            true;
+        _ ->
+            false
+    end.
+
 check_range(#mrargs{start_key=undefined}, _Cmp) ->
     ok;
 check_range(#mrargs{end_key=undefined}, _Cmp) ->
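
The <<Partition/binary, $:>> .. <<Partition/binary, $;>> bounds in apply_all_docs_partition/2 work because $; is the code point immediately after $: (59 vs 58), and _all_docs ids compare bytewise, so that half-open range covers exactly the ids beginning with "Partition:". A quick standalone check (illustrative):

    %% $; = $: + 1, so Lo =< Id < Hi holds exactly for ids in Partition.
    in_partition(Id, Partition) ->
        Lo = <<Partition/binary, $:>>,
        Hi = <<Partition/binary, $;>>,
        Id >= Lo andalso Id < Hi.

    %% in_partition(<<"user42:order-1">>, <<"user42">>) => true
    %% in_partition(<<"user43:order-1">>, <<"user42">>) => false

As an aside, prefix/2 above is a correct prefix test (a match at offset 0 is exactly a prefix), though binary:match/2 can scan the whole subject on a miss; a size-bounded bit-syntax match would return sooner on long ids.
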
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 98f9280..359c4af 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -294,9 +294,12 @@ all_docs(DbName, Callback, Acc, QueryArgs) ->
         #mrargs{} | [option()]) ->
     {ok, any()} | {error, Reason :: term()}.
 
-all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
+
+all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs0) when
         is_function(Callback, 2) ->
-    fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
+    DbPartitioned = mem3:is_partitioned(dbname(DbName)),
+    QueryArgs1 = couch_mrview_util:set_extra(QueryArgs0, partitioned, DbPartitioned),
+    fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs1, Callback, Acc0);
 
 %% @doc convenience function that takes a keylist rather than a record
 %% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 994c739..c38c209 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -122,7 +122,7 @@ maybe_send_row(State) ->
         user_acc = AccIn,
         query_args = QueryArgs
     } = State,
-    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned, false),
+    Partitioned = couch_mrview_util:get_extra(QueryArgs, partitioned),
     case fabric_dict:any(0, Counters) of
     true ->
         {ok, State};
@@ -205,7 +205,10 @@ possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
 detach_partition(#view_row{key=[_Partition, Key]} = Row) ->
     Row#view_row{key = Key};
 detach_partition(#view_row{key=null} = Row) ->
-    Row#view_row{key = null}.
+    Row#view_row{key = null};
+detach_partition(#view_row{} = Row) ->
+    Row.
+
 
 
 keydict(undefined) ->
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index ac16dac..d515ab8 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -118,7 +118,7 @@ go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
     #mrargs{limit = Limit, skip = Skip, update_seq = UpdateSeq} = QueryArgs,
     State = #collector{
         db_name = DbName,
-        query_args = QueryArgs,
+        query_args = couch_mrview_util:set_extra(QueryArgs, style, all_docs),
         callback = Callback,
         counters = fabric_dict:init(Workers, 0),
         skip = Skip,
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index aecca2f..f113357 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -352,7 +352,10 @@ is_partitioned(DbName0) when is_binary(DbName0) ->
             is_partitioned(mem3:shards(DbName))
     end;
 
-is_partitioned(Shards) when is_list(Shards) ->
+is_partitioned([#shard{} | _] = Shards) ->
+    lists:all(fun is_partitioned/1, Shards);
+
+is_partitioned([#ordered_shard{} | _] = Shards) ->
     lists:all(fun is_partitioned/1, Shards);
 
 is_partitioned(#shard{opts=Opts}) ->
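
The two new list clauses dispatch on the record type of the head element, so both plain and ordered shard lists reduce to the per-shard check over opts (the per-shard clause body is truncated in this diff). A hypothetical standalone mirror, consistent with the mem3.erl tests in commit 02/03:

    %% Cut-down record; the real one lives in mem3.hrl. The per-shard
    %% clause body is a guess consistent with the tests above, not the
    %% committed code.
    -record(shard, {name, node, dbname, range, ref, opts = []}).

    is_partitioned([#shard{} | _] = Shards) ->
        lists:all(fun is_partitioned/1, Shards);
    is_partitioned(#shard{opts = Opts}) ->
        lists:keyfind(partitioned, 1, Opts) =:= {partitioned, true}.

    %% is_partitioned([#shard{opts = [{partitioned, true}]}]) => true
    %% is_partitioned([#shard{opts = []}])                    => false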