You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by ja...@apache.org on 2017/10/07 15:28:34 UTC

[couchdb] branch 749-fix-couch_peruser-app-structure updated (2c12ecd -> 6470ee3)

This is an automated email from the ASF dual-hosted git repository.

jan pushed a change to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    from 2c12ecd  feat: mango test runner: do not rely on timeout for CouchDB start alone
     new 777af02  Whitelist system DB names as valid _dbs docids
     new 66915a5  Correct result count in Mango execution stats (#867)
     new c548da4  Handle deprecated random module
     new 2eb61df  Bump khash, b64, ioq deps
     new 6470ee3  Ensure a user creation is handled on one node only

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 rebar.config.script                                |   6 +-
 src/chttpd/src/chttpd_db.erl                       |   8 +-
 src/couch/rebar.config.script                      |  11 +-
 src/couch/src/couch_debug.erl                      |   2 +-
 src/couch/src/couch_doc.erl                        |  96 +++++++------
 src/couch/src/couch_httpd_db.erl                   |  36 +++--
 src/couch/src/couch_multidb_changes.erl            |   2 +-
 .../src/couch_rand.erl}                            |  53 ++++---
 src/couch/src/couch_util.erl                       |   3 +-
 src/couch/test/couch_btree_tests.erl               |   8 +-
 src/couch/test/couch_doc_json_tests.erl            |  71 +++++++++
 src/couch/test/couch_doc_tests.erl                 |  16 ++-
 src/couch/test/couch_file_tests.erl                |   4 +-
 src/couch_log/test/couch_log_test.erl              |   3 +-
 src/couch_peruser/src/couch_peruser.app.src        |   2 +-
 src/couch_peruser/src/couch_peruser.erl            | 158 ++++++++++++++-------
 .../src/couch_replicator_doc_processor.erl         |  14 +-
 src/couch_replicator/src/couch_replicator_docs.erl |   2 +-
 .../src/couch_replicator_scheduler_job.erl         |   3 +-
 src/ddoc_cache/test/ddoc_cache_lru_test.erl        |   2 +-
 src/mango/src/mango_cursor_view.erl                |  10 +-
 src/mango/test/15-execution-stats-test.py          |   4 +
 22 files changed, 347 insertions(+), 167 deletions(-)
 copy src/{ddoc_cache/src/ddoc_cache_entry_validation_funs.erl => couch/src/couch_rand.erl} (51%)

-- 
To stop receiving notification emails like this one, please contact
['"commits@couchdb.apache.org" <co...@couchdb.apache.org>'].

[couchdb] 01/05: Whitelist system DB names as valid _dbs docids

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 777af02a9304028a6f29230c01e58a40911dc34c
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Mon Oct 2 20:50:30 2017 -0400

    Whitelist system DB names as valid _dbs docids
    
    Currently, it is impossible to PUT/POST modified shard maps to any
    `_dbs/_*` document because the document _ids are reserved. This change
    permits these specific db/docid combinations as valid, so PUT/POST
    operations can succeed. The specific list comes from SYSTEM_DATABASES.
    
    Unit tests have been added.
---
 src/chttpd/src/chttpd_db.erl            |  8 +--
 src/couch/src/couch_doc.erl             | 96 +++++++++++++++++++--------------
 src/couch/src/couch_httpd_db.erl        | 36 ++++++++-----
 src/couch/test/couch_doc_json_tests.erl | 71 ++++++++++++++++++++++++
 src/couch/test/couch_doc_tests.erl      | 16 ++++--
 5 files changed, 165 insertions(+), 62 deletions(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index c8826d5..7e46750 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -711,7 +711,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
 
 db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
+    couch_doc:validate_docid(DocId, couch_db:name(Db)),
     chttpd:validate_ctype(Req, "multipart/form-data"),
 
     W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
@@ -766,9 +766,9 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
     #doc_query_args{
         update_type = UpdateType
     } = parse_doc_query(Req),
-    couch_doc:validate_docid(DocId),
-
     DbName = couch_db:name(Db),
+    couch_doc:validate_docid(DocId, DbName),
+
     W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
     Options = [{user_ctx,Ctx}, {w,W}],
 
@@ -1243,7 +1243,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
                 % check for the existence of the doc to handle the 404 case.
                 couch_doc_open(Db, DocId, nil, [])
             end,
-            couch_doc:validate_docid(DocId),
+            couch_doc:validate_docid(DocId, couch_db:name(Db)),
             #doc{id=DocId};
         Rev ->
             case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index eb96d44..f960ec5 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -13,8 +13,10 @@
 -module(couch_doc).
 
 -export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
--export([from_json_obj/1, from_json_obj_validate/1, to_json_obj/2,has_stubs/1, merge_stubs/2]).
--export([validate_docid/1, get_validate_doc_fun/1]).
+-export([from_json_obj/1, from_json_obj_validate/1]).
+-export([from_json_obj/2, from_json_obj_validate/2]).
+-export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
+-export([validate_docid/1, validate_docid/2, get_validate_doc_fun/1]).
 -export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
 -export([doc_from_multi_part_stream/4]).
 -export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
@@ -126,8 +128,11 @@ doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
     }.
 
 from_json_obj_validate(EJson) ->
+    from_json_obj_validate(EJson, undefined).
+
+from_json_obj_validate(EJson, DbName) ->
     MaxSize = config:get_integer("couchdb", "max_document_size", 4294967296),
-    Doc = from_json_obj(EJson),
+    Doc = from_json_obj(EJson, DbName),
     case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
         true ->
              validate_attachment_sizes(Doc#doc.atts),
@@ -149,9 +154,11 @@ validate_attachment_sizes(Atts) ->
 
 
 from_json_obj({Props}) ->
-    transfer_fields(Props, #doc{body=[]});
+    from_json_obj({Props}, undefined).
 
-from_json_obj(_Other) ->
+from_json_obj({Props}, DbName) ->
+    transfer_fields(Props, #doc{body=[]}, DbName);
+from_json_obj(_Other, _) ->
     throw({bad_request, "Document must be a JSON object"}).
 
 parse_revid(RevId) when size(RevId) =:= 32 ->
@@ -191,6 +198,15 @@ parse_revs(_) ->
     throw({bad_request, "Invalid list of revisions"}).
 
 
+validate_docid(DocId, DbName) ->
+    case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
+        lists:member(DocId, ?SYSTEM_DATABASES) of
+        true ->
+            ok;
+        false ->
+            validate_docid(DocId)
+    end.
+
 validate_docid(<<"">>) ->
     throw({illegal_docid, <<"Document id must not be empty">>});
 validate_docid(<<"_design/">>) ->
@@ -228,28 +244,28 @@ validate_docid(Id) ->
     couch_log:debug("Document id is not a string: ~p", [Id]),
     throw({illegal_docid, <<"Document id must be a string">>}).
 
-transfer_fields([], #doc{body=Fields}=Doc) ->
+transfer_fields([], #doc{body=Fields}=Doc, _) ->
     % convert fields back to json object
     Doc#doc{body={lists:reverse(Fields)}};
 
-transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
-    validate_docid(Id),
-    transfer_fields(Rest, Doc#doc{id=Id});
+transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
+    validate_docid(Id, DbName),
+    transfer_fields(Rest, Doc#doc{id=Id}, DbName);
 
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
     {Pos, RevId} = parse_rev(Rev),
     transfer_fields(Rest,
-            Doc#doc{revs={Pos, [RevId]}});
+            Doc#doc{revs={Pos, [RevId]}}, DbName);
 
-transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
+transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) ->
     % we already got the rev from the _revisions
-    transfer_fields(Rest,Doc);
+    transfer_fields(Rest, Doc, DbName);
 
-transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
+transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) ->
     Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins],
-    transfer_fields(Rest, Doc#doc{atts=Atts});
+    transfer_fields(Rest, Doc#doc{atts=Atts}, DbName);
 
-transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
+transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) ->
     RevIds = couch_util:get_value(<<"ids">>, Props),
     Start = couch_util:get_value(<<"start">>, Props),
     if not is_integer(Start) ->
@@ -262,45 +278,45 @@ transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
     [throw({doc_validation, "RevId isn't a string"}) ||
             RevId <- RevIds, not is_binary(RevId)],
     RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
-    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
+    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName);
 
-transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
-    transfer_fields(Rest, Doc#doc{deleted=B});
+transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) ->
+    transfer_fields(Rest, Doc#doc{deleted=B}, DbName);
 
 % ignored fields
-transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
+transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) ->
+    transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_local_seq">>, _} | Rest], Doc, DbName) ->
+    transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) ->
+    transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) ->
+    transfer_fields(Rest, Doc, DbName);
 
 % special fields for replication documents
 transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+    #doc{body=Fields} = Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
 transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+    #doc{body=Fields} = Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
 transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+    #doc{body=Fields} = Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
 transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+    #doc{body=Fields} = Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
 transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+    #doc{body=Fields} = Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
 
 % unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
+transfer_fields([{<<"_",Name/binary>>, _} | _], _, _) ->
     throw({doc_validation,
             ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
 
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
+transfer_fields([Field | Rest], #doc{body=Fields}=Doc, DbName) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName).
 
 to_doc_info(FullDocInfo) ->
     {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
index 34a1539..05e63ba 100644
--- a/src/couch/src/couch_httpd_db.erl
+++ b/src/couch/src/couch_httpd_db.erl
@@ -257,7 +257,8 @@ db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
 
 db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
     couch_httpd:validate_ctype(Req, "application/json"),
-    Doc = couch_doc:from_json_obj_validate(couch_httpd:json_body(Req)),
+    DbName = couch_db:name(Db),
+    Doc = couch_doc:from_json_obj_validate(couch_httpd:json_body(Req), DbName),
     validate_attachment_names(Doc),
     Doc2 = case Doc#doc.id of
         <<"">> ->
@@ -303,6 +304,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
     couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
     couch_httpd:validate_ctype(Req, "application/json"),
     {JsonProps} = couch_httpd:json_body_obj(Req),
+    DbName = couch_db:name(Db),
     case couch_util:get_value(<<"docs">>, JsonProps) of
     undefined ->
         send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
@@ -320,7 +322,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
         true ->
             Docs = lists:map(
                 fun({ObjProps} = JsonObj) ->
-                    Doc = couch_doc:from_json_obj_validate(JsonObj),
+                    Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
                     validate_attachment_names(Doc),
                     Id = case Doc#doc.id of
                         <<>> -> couch_uuids:new();
@@ -354,7 +356,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
             end;
         false ->
             Docs = lists:map(fun(JsonObj) ->
-                    Doc = couch_doc:from_json_obj_validate(JsonObj),
+                    Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
                     validate_attachment_names(Doc),
                     Doc
                 end, DocsArray),
@@ -486,14 +488,17 @@ db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
 db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
     % check for the existence of the doc to handle the 404 case.
     couch_doc_open(Db, DocId, nil, []),
+    DbName = couch_db:name(Db),
     case couch_httpd:qs_value(Req, "rev") of
     undefined ->
         update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
+                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]},
+                    DbName));
     Rev ->
         update_doc(Req, Db, DocId,
                 couch_doc_from_req(Req, DocId,
-                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
+                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]},
+                    DbName))
     end;
 
 db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
@@ -546,7 +551,8 @@ db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
 
 db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
     couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
+    DbName = couch_db:name(Db),
+    couch_doc:validate_docid(DocId, DbName),
     couch_httpd:validate_ctype(Req, "multipart/form-data"),
     Form = couch_httpd:parse_form(Req),
     case couch_util:get_value("_doc", Form) of
@@ -554,7 +560,7 @@ db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
         Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
         {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
     Json ->
-        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
+        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json), DbName)
     end,
     UpdatedAtts = [
         couch_att:new([
@@ -580,14 +586,15 @@ db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
     update_doc(Req, Db, DocId, NewDoc);
 
 db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
-    couch_doc:validate_docid(DocId),
+    DbName = couch_db:name(Db),
+    couch_doc:validate_docid(DocId, DbName),
 
     case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
     ("multipart/related;" ++ _) = ContentType ->
         couch_httpd:check_max_request_length(Req),
         {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
             ContentType, fun() -> receive_request_data(Req) end),
-        Doc = couch_doc_from_req(Req, DocId, Doc0),
+        Doc = couch_doc_from_req(Req, DocId, Doc0, DbName),
         try
             Result = update_doc(Req, Db, DocId, Doc),
             WaitFun(),
@@ -599,7 +606,7 @@ db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
         end;
     _Else ->
         Body = couch_httpd:json_body(Req),
-        Doc = couch_doc_from_req(Req, DocId, Body),
+        Doc = couch_doc_from_req(Req, DocId, Body, DbName),
         update_doc(Req, Db, DocId, Doc)
     end;
 
@@ -783,7 +790,7 @@ update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
                 {rev, NewRevStr}]})
     end.
 
-couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
+couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc, _) ->
     validate_attachment_names(Doc),
     Rev = case couch_httpd:qs_value(Req, "rev") of
     undefined ->
@@ -810,8 +817,9 @@ couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
         end
     end,
     Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
-    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj_validate(Json)).
+couch_doc_from_req(Req, DocId, Json, DbName) ->
+    couch_doc_from_req(Req, DocId,
+        couch_doc:from_json_obj_validate(Json, DbName), DbName).
 
 % Useful for debugging
 % couch_doc_open(Db, DocId) ->
@@ -1019,7 +1027,7 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN
                 % check for the existence of the doc to handle the 404 case.
                 couch_doc_open(Db, DocId, nil, [])
             end,
-            couch_doc:validate_docid(DocId),
+            couch_doc:validate_docid(DocId, couch_db:name(Db)),
             #doc{id=DocId};
         Rev ->
             case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl
index ce099d1..bcff064 100644
--- a/src/couch/test/couch_doc_json_tests.erl
+++ b/src/couch/test/couch_doc_json_tests.erl
@@ -171,6 +171,45 @@ from_json_success_cases() ->
         end,
         Cases).
 
+from_json_with_db_name_success_cases() ->
+    Cases = [
+        {
+            {[]},
+            <<"_dbs">>,
+            #doc{},
+            "DbName _dbs is acceptable with no docid"
+        },
+        {
+            {[{<<"_id">>, <<"zing!">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"zing!">>},
+            "DbName _dbs is acceptable with a normal docid"
+        },
+        {
+            {[{<<"_id">>, <<"_users">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_users">>},
+            "_dbs/_users is acceptable"
+        },
+        {
+            {[{<<"_id">>, <<"_replicator">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_replicator">>},
+            "_dbs/_replicator is acceptable"
+        },
+        {
+            {[{<<"_id">>, <<"_global_changes">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_global_changes">>},
+            "_dbs/_global_changes is acceptable"
+        }
+    ],
+    lists:map(
+        fun({EJson, DbName, Expect, Msg}) ->
+            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
+        end,
+        Cases).
+
 from_json_error_cases() ->
     Cases = [
         {
@@ -261,6 +300,38 @@ from_json_error_cases() ->
             end
     end, Cases).
 
+from_json_with_dbname_error_cases() ->
+    Cases = [
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            <<"_dbs">>,
+            {illegal_docid,
+             <<"Only reserved document ids may start with underscore.">>},
+            "Disallow non-system-DB underscore prefixed docids in _dbs database."
+        },
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            <<"foobar">>,
+            {illegal_docid,
+             <<"Only reserved document ids may start with underscore.">>},
+            "Disallow arbitrary underscore prefixed docids in regular database."
+        },
+        {
+            {[{<<"_id">>, <<"_users">>}]},
+            <<"foobar">>,
+            {illegal_docid,
+             <<"Only reserved document ids may start with underscore.">>},
+            "Disallow system-DB docid _users in regular database."
+        }
+    ],
+
+    lists:map(
+        fun({EJson, DbName, Expect, Msg}) ->
+            Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
+            {Msg, ?_assertMatch(Expect, Error)}
+        end,
+        Cases).
+
 to_json_success_cases() ->
     Cases = [
         {
diff --git a/src/couch/test/couch_doc_tests.erl b/src/couch/test/couch_doc_tests.erl
index 5d0448a..cf41df6 100644
--- a/src/couch/test/couch_doc_tests.erl
+++ b/src/couch/test/couch_doc_tests.erl
@@ -29,7 +29,7 @@ doc_from_multi_part_stream_test() ->
     ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~",
     DataFun = fun() -> request(start) end,
 
-    mock_config_max_document_id_length(),
+    mock_config(),
     {ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} =
         couch_doc:doc_from_multi_part_stream(ContentType, DataFun),
     meck:unload(config),
@@ -77,7 +77,7 @@ len_doc_to_multi_part_stream_test() ->
 validate_docid_test_() ->
     {setup,
         fun() ->
-            mock_config_max_document_id_length(),
+            mock_config(),
             ok = meck:new(couch_db_plugin, [passthrough]),
             meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
         end,
@@ -90,6 +90,9 @@ validate_docid_test_() ->
             ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
             ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
             ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
+            ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
+            ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
+            ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
             ?_assertThrow({illegal_docid, _},
                 couch_doc:validate_docid(<<>>)),
             ?_assertThrow({illegal_docid, _},
@@ -103,7 +106,11 @@ validate_docid_test_() ->
             ?_assertThrow({illegal_docid, _},
                 couch_doc:validate_docid(<<"_local/">>)),
             ?_assertThrow({illegal_docid, _},
-                couch_doc:validate_docid(large_id(1025)))
+                couch_doc:validate_docid(large_id(1025))),
+            ?_assertThrow({illegal_docid, _},
+                couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
+            ?_assertThrow({illegal_docid, _},
+                couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
         ]
     }.
 
@@ -127,11 +134,12 @@ collected() ->
     B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]),
     binary:split(B, [<<0>>], [global]).
 
-mock_config_max_document_id_length() ->
+mock_config() ->
     ok = meck:new(config, [passthrough]),
     meck:expect(config, get,
         fun("couchdb", "max_document_id_length", "infinity") -> "1024";
            ("couchdb", "max_attachment_size", "infinity") -> "infinity";
+           ("mem3", "shards_db", "_dbs") -> "_dbs";
             (Key, Val, Default) -> meck:passthrough([Key, Val, Default])
         end
     ).

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 04/05: Bump khash, b64, ioq deps

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 2eb61df2470725ce9dfcd8cf2eee750e158f6c34
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Thu Oct 5 13:34:53 2017 -0400

    Bump khash, b64, ioq deps
    
    To fix a `random` module compatibility issue
---
 rebar.config.script | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/rebar.config.script b/rebar.config.script
index 61c34fd..39f0157 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -47,12 +47,12 @@ SubDirs = [
 DepDescs = [
 %% Independent Apps
 {config,           "config",           {tag, "1.0.1"}},
-{b64url,           "b64url",           {tag, "1.0.0"}},
+{b64url,           "b64url",           {tag, "1.0.1"}},
 {ets_lru,          "ets-lru",          {tag, "1.0.0"}},
-{khash,            "khash",            {tag, "1.0.0"}},
+{khash,            "khash",            {tag, "1.0.1"}},
 {snappy,           "snappy",           {tag, "CouchDB-1.0.0"}},
 {setup,            "setup",            {tag, "1.0.1"}},
-{ioq,              "ioq",              {tag, "1.0.0"}},
+{ioq,              "ioq",              {tag, "1.0.1"}},
 
 %% Non-Erlang deps
 {docs,             {url, "https://github.com/apache/couchdb-documentation"},

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 03/05: Handle deprecated random module

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c548da4cdc4d854bd6347f0d20892234eafd0bd7
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Tue Oct 3 02:14:22 2017 -0400

    Handle deprecated random module
    
    Use erlang release version to decide if the newer `rand` module is present or
    not.
    
    `erlang:function_exported(rand, uniform, 0)` could not be used here, as it
    returns false when the function isn't loaded, even if the module and the
    function are both available.
---
 src/couch/rebar.config.script                      | 11 +++--
 src/couch/src/couch_debug.erl                      |  2 +-
 src/couch/src/couch_multidb_changes.erl            |  2 +-
 src/couch/src/couch_rand.erl                       | 57 ++++++++++++++++++++++
 src/couch/src/couch_util.erl                       |  3 +-
 src/couch/test/couch_btree_tests.erl               |  8 +--
 src/couch/test/couch_file_tests.erl                |  4 +-
 src/couch_log/test/couch_log_test.erl              |  3 +-
 .../src/couch_replicator_doc_processor.erl         | 14 +++---
 src/couch_replicator/src/couch_replicator_docs.erl |  2 +-
 .../src/couch_replicator_scheduler_job.erl         |  3 +-
 src/ddoc_cache/test/ddoc_cache_lru_test.erl        |  2 +-
 12 files changed, 85 insertions(+), 26 deletions(-)

diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 5586032..bd35e34 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -131,15 +131,18 @@ PortSpecs = case os:type() of
         os:cmd("chmod +x priv/couchspawnkillable"),
         BaseSpecs
 end,
-
+PlatformDefines = [
+   {platform_define, "^R16", 'NORANDMODULE'},
+   {platform_define, "^17", 'NORANDMODULE'},
+   {platform_define, "win32", 'WINDOWS'}
+],
 AddConfig = [
     {port_specs, PortSpecs},
-    {erl_opts, [
-        {platform_define, "win32", 'WINDOWS'},
+    {erl_opts, PlatformDefines ++ [
         {d, 'COUCHDB_VERSION', Version},
         {i, "../"}
     ]},
-    {eunit_compile_opts, [{platform_define, "win32", 'WINDOWS'}]}
+    {eunit_compile_opts, PlatformDefines}
 ].
 
 lists:foldl(fun({K, V}, CfgAcc) ->
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 858a4fb..96c7a50 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -508,7 +508,7 @@ random_processes(Acc, Depth) ->
     end.
 
 oneof(Options) ->
-    lists:nth(random:uniform(length(Options)), Options).
+    lists:nth(couch_rand:uniform(length(Options)), Options).
 
 
 tree() ->
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
index 5efccca..b6a7873 100644
--- a/src/couch/src/couch_multidb_changes.erl
+++ b/src/couch/src/couch_multidb_changes.erl
@@ -302,7 +302,7 @@ notify_fold(DbName, {Server, DbSuffix, Count}) ->
 % number of shards back to back during startup.
 jitter(N) ->
     Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC),
-    random:uniform(Range).
+    couch_rand:uniform(Range).
 
 
 scan_local_db(Server, DbSuffix) when is_pid(Server) ->
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
new file mode 100644
index 0000000..f5a8fc6
--- /dev/null
+++ b/src/couch/src/couch_rand.erl
@@ -0,0 +1,57 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rand).
+
+
+-export([
+    uniform/0,
+    uniform/1
+]).
+
+
+-ifdef(NORANDMODULE).
+
+
+uniform() ->
+    maybe_set_random_seed(),
+    random:uniform().
+
+
+uniform(N) ->
+    maybe_set_random_seed(),
+    random:uniform(N).
+
+
+maybe_set_random_seed() ->
+    case get(random_seed) of
+        undefined ->
+            {_, Sec, USec} = os:timestamp(),
+            Seed = {erlang:phash2(self()), Sec, USec},
+            random:seed(Seed);
+        _ ->
+            ok
+    end.
+
+
+-else.
+
+
+uniform() ->
+    rand:uniform().
+
+
+uniform(N) ->
+    rand:uniform(N).
+
+
+-endif.
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 42d10ec..631f00b 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -209,7 +209,8 @@ json_user_ctx(Db) ->
 
 % returns a random integer
 rand32() ->
-    crypto:rand_uniform(0, 16#100000000).
+    <<I:32>> = crypto:strong_rand_bytes(4),
+    I.
 
 % given a pathname "../foo/bar/" it gives back the fully qualified
 % absolute pathname.
diff --git a/src/couch/test/couch_btree_tests.erl b/src/couch/test/couch_btree_tests.erl
index 35cf416..3c8840a 100644
--- a/src/couch/test/couch_btree_tests.erl
+++ b/src/couch/test/couch_btree_tests.erl
@@ -82,7 +82,7 @@ btree_open_test_() ->
 
 sorted_kvs_test_() ->
     Funs = kvs_test_funs(),
-    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
     {
         "BTree with sorted keys",
         {
@@ -97,7 +97,7 @@ sorted_kvs_test_() ->
     }.
 
 rsorted_kvs_test_() ->
-    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
     Funs = kvs_test_funs(),
     Reversed = Sorted,
     {
@@ -115,7 +115,7 @@ rsorted_kvs_test_() ->
 
 shuffled_kvs_test_() ->
     Funs = kvs_test_funs(),
-    Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
     Shuffled = shuffle(Sorted),
     {
         "BTree with shuffled keys",
@@ -479,7 +479,7 @@ randomize(T, List) ->
         end, randomize(List), lists:seq(1, (T - 1))).
 
 randomize(List) ->
-    D = lists:map(fun(A) -> {random:uniform(), A} end, List),
+    D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
     {_, D1} = lists:unzip(lists:keysort(1, D)),
     D1.
 
diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/couch_file_tests.erl
index c16be16..a387615 100644
--- a/src/couch/test/couch_file_tests.erl
+++ b/src/couch/test/couch_file_tests.erl
@@ -311,14 +311,14 @@ check_header_recovery(CheckFun) ->
     ok.
 
 write_random_data(Fd) ->
-    write_random_data(Fd, 100 + random:uniform(1000)).
+    write_random_data(Fd, 100 + couch_rand:uniform(1000)).
 
 write_random_data(Fd, 0) ->
     {ok, Bytes} = couch_file:bytes(Fd),
     {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
 write_random_data(Fd, N) ->
     Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
-    Term = lists:nth(random:uniform(4) + 1, Choices),
+    Term = lists:nth(couch_rand:uniform(4) + 1, Choices),
     {ok, _, _} = couch_file:append_term(Fd, Term),
     write_random_data(Fd, N - 1).
 
diff --git a/src/couch_log/test/couch_log_test.erl b/src/couch_log/test/couch_log_test.erl
index 1777730..c7195f6 100644
--- a/src/couch_log/test/couch_log_test.erl
+++ b/src/couch_log/test/couch_log_test.erl
@@ -80,6 +80,5 @@ check_levels(TestLevel, [CfgLevel | RestLevels]) ->
 
 
 new_msg() ->
-    random:seed(os:timestamp()),
-    Bin = list_to_binary([random:uniform(255) || _ <- lists:seq(1, 16)]),
+    Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]),
     couch_util:to_hex(Bin).
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index 28eb17c..d3c001f 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -423,20 +423,20 @@ error_backoff(ErrCnt) ->
     % ErrCnt is the exponent here. The reason 64 is used is to start at
     % 64 (about a minute) max range. Then first backoff would be 30 sec
     % on average. Then 1 minute and so on.
-    random:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
+    couch_rand:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
 
 
 -spec filter_backoff() -> seconds().
 filter_backoff() ->
     Total = ets:info(?MODULE, size),
-    % This value scaled by the number of replications. If the are a lot of
-    % them wait is longer, but not more than a day (?TS_DAY_SEC). If there
-    % are just few, wait is shorter, starting at about 30 seconds. `2 *` is
-    % used since the expected wait would then be 0.5 * Range so it is easier
-    % to see the average wait. `1 +` is used because random:uniform only
+    % This value is scaled by the number of replications. If there are a lot of them
+    % wait is longer, but not more than a day (?TS_DAY_SEC). If there are just
+    % few, wait is shorter, starting at about 30 seconds. `2 *` is used since
+    % the expected wait would then be 0.5 * Range so it is easier to see the
+    % average wait. `1 +` is used because couch_rand:uniform only
     % accepts >= 1 values and crashes otherwise.
     Range = 1 + min(2 * (Total / 10), ?TS_DAY_SEC),
-    ?MIN_FILTER_DELAY_SEC + random:uniform(round(Range)).
+    ?MIN_FILTER_DELAY_SEC + couch_rand:uniform(round(Range)).
 
 
 % Document removed from db -- clear ets table and remove all scheduled jobs
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 9d844b9..d22b85f 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -316,7 +316,7 @@ update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
         throw:conflict ->
             Msg = "Conflict when updating replication doc `~s`. Retrying.",
             couch_log:error(Msg, [RepDocId]),
-            ok = timer:sleep(random:uniform(erlang:min(128, Wait)) * 100),
+            ok = timer:sleep(couch_rand:uniform(erlang:min(128, Wait)) * 100),
             update_rep_doc(RepDbName, RepDocId, KVs, Wait * 2)
     end;
 
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index e7ce576..e2d8fb6 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -110,7 +110,6 @@ init(InitArgs) ->
 do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
     process_flag(trap_exit, true),
 
-    random:seed(os:timestamp()),
     timer:sleep(startup_jitter()),
 
     #rep_state{
@@ -468,7 +467,7 @@ format_status(_Opt, [_PDict, State]) ->
 startup_jitter() ->
     Jitter = config:get_integer("replicator", "startup_jitter",
         ?STARTUP_JITTER_DEFAULT),
-    random:uniform(erlang:max(1, Jitter)).
+    couch_rand:uniform(erlang:max(1, Jitter)).
 
 
 headers_strip_creds([], Acc) ->
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index 2455115..60605b9 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -28,7 +28,7 @@ recover(<<"pause", _/binary>>) ->
     {ok, paused};
 
 recover(<<"big", _/binary>>) ->
-    {ok, [random:uniform() || _ <- lists:seq(1, 8192)]};
+    {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]};
 
 recover(DbName) ->
     {ok, DbName}.

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 02/05: Correct result count in Mango execution stats (#867)

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 66915a5e12f406aea33d1d51c33bc60793c83734
Author: Will Holley <wi...@gmail.com>
AuthorDate: Thu Oct 5 15:27:02 2017 +0100

    Correct result count in Mango execution stats (#867)
    
    Mango execution stats previously incremented the result count
    at a point where the final result might be discarded. Instead,
    increment the count when we know the result is being included
    in the response.
---
 src/mango/src/mango_cursor_view.erl       | 10 ++++------
 src/mango/test/15-execution-stats-test.py |  4 ++++
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 31e198f..59dd522 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -202,10 +202,7 @@ handle_message({row, Props}, Cursor) ->
                 true ->
                     Cursor2 = update_bookmark_keys(Cursor1, Props),
                     FinalDoc = mango_fields:extract(Doc, Cursor2#cursor.fields),
-                    Cursor3 = Cursor2#cursor {
-                        execution_stats = mango_execution_stats:incr_results_returned(Cursor2#cursor.execution_stats)
-                    },
-                    handle_doc(Cursor3, FinalDoc);
+                    handle_doc(Cursor2, FinalDoc);
                 false ->
                     {ok, Cursor1}
             end;
@@ -230,13 +227,14 @@ handle_all_docs_message(Message, Cursor) ->
 
 handle_doc(#cursor{skip = S} = C, _) when S > 0 ->
     {ok, C#cursor{skip = S - 1}};
-handle_doc(#cursor{limit = L} = C, Doc) when L > 0 ->
+handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
     UserFun = C#cursor.user_fun,
     UserAcc = C#cursor.user_acc,
     {Go, NewAcc} = UserFun({row, Doc}, UserAcc),
     {Go, C#cursor{
         user_acc = NewAcc,
-        limit = L - 1
+        limit = L - 1,
+        execution_stats = mango_execution_stats:incr_results_returned(Stats)
     }};
 handle_doc(C, _Doc) ->
     {stop, C}.
diff --git a/src/mango/test/15-execution-stats-test.py b/src/mango/test/15-execution-stats-test.py
index 67c9e64..6b7408b 100644
--- a/src/mango/test/15-execution-stats-test.py
+++ b/src/mango/test/15-execution-stats-test.py
@@ -38,6 +38,10 @@ class ExecutionStatsTests(mango.UserDocsTests):
         self.assertEqual(resp["execution_stats"]["results_returned"], 3)
         self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
 
+    def test_results_returned_limit(self):
+        resp = self.db.find({"age": {"$lt": 35}}, limit=2, return_raw=True, executionStats=True)
+        self.assertEqual(resp["execution_stats"]["results_returned"], len(resp["docs"]))
+
 @unittest.skipUnless(mango.has_text_service(), "requires text service")
 class ExecutionStatsTests_Text(mango.UserDocsTextTests):
 

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 05/05: Ensure a user creation is handled on one node only

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6470ee35f5283369fe0722ebb8e8de0768fc01dc
Author: Jan Lehnardt <ja...@apache.org>
AuthorDate: Sat Oct 7 17:04:54 2017 +0200

    Ensure a user creation is handled on one node only
    
    This patch makes use of the mechanism that ensures that replications
    are only run on one node.
    
    When the cluster has nodes added/removed all changes listeners are
    restarted.
---
 src/couch_peruser/src/couch_peruser.app.src |   2 +-
 src/couch_peruser/src/couch_peruser.erl     | 158 +++++++++++++++++++---------
 2 files changed, 110 insertions(+), 50 deletions(-)

diff --git a/src/couch_peruser/src/couch_peruser.app.src b/src/couch_peruser/src/couch_peruser.app.src
index 777446d..42b7b25 100644
--- a/src/couch_peruser/src/couch_peruser.app.src
+++ b/src/couch_peruser/src/couch_peruser.app.src
@@ -14,7 +14,7 @@
     {description, "couch_peruser - maintains per-user databases in CouchDB"},
     {vsn, git},
     {registered, []},
-    {applications, [kernel, stdlib, config, couch, fabric]},
+    {applications, [kernel, stdlib, config, couch, fabric, couch_replicator, mem3]},
     {mod, {couch_peruser_app, []}},
     {env, []},
     {modules, [couch_peruser, couch_peruser_app, couch_peruser_sup]}
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 63ef084..9161f56 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -22,6 +22,9 @@
 -export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).
 
+% cluster state notification callback
+-export([notify_cluster_event/2]).
+
 -export([init_changes_handler/1, changes_handler/3]).
 
 -record(state, {parent, db_name, delete_dbs, changes_pid, changes_ref}).
@@ -34,10 +37,13 @@ start_link() ->
     gen_server:start_link(?MODULE, [], []).
 
 init() ->
+    couch_log:debug("peruser: starting on node ~p", [node()]),
     case config:get_boolean("couch_peruser", "enable", false) of
     false ->
+        couch_log:debug("peruser: disabled on node ~p", [node()]),
         #clusterState{};
     true ->
+        couch_log:debug("peruser: enabled on node ~p", [node()]),
         DbName = ?l2b(config:get(
                          "couch_httpd_auth", "authentication_db", "_users")),
         DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
@@ -47,21 +53,37 @@ init() ->
             db_name = DbName,
             delete_dbs = DeleteDbs
         },
-        try
-            States = lists:map(fun (A) ->
-                S = #state{parent = ClusterState#clusterState.parent,
-                           db_name = A#shard.name,
-                           delete_dbs = DeleteDbs},
-                {Pid, Ref} = spawn_opt(
-                    ?MODULE, init_changes_handler, [S], [link, monitor]),
-                S#state{changes_pid=Pid, changes_ref=Ref}
-            end, mem3:local_shards(DbName)),
-
-            ClusterState#clusterState{states = States}
-        catch error:database_does_not_exist ->
-            couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
-            config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
-        end
+
+        % set up cluster-stable listener
+        couch_replicator_clustering:link_cluster_event_listener(?MODULE,
+            notify_cluster_event, [self()]),
+
+        couch_log:debug("peruser: registered for cluster event on node ~p", [node()]),
+        ClusterState
+    end.
+
+% Cluster membership change notification callback
+-spec notify_cluster_event(pid(), {cluster, any()}) -> ok.
+notify_cluster_event(Server, {cluster, _} = Event) ->
+    couch_log:debug("peruser: received cluster event ~p on node ~p", [Event, node()]),
+    gen_server:cast(Server, Event).
+
+start_listening(#clusterState{db_name=DbName, delete_dbs=DeleteDbs} = ClusterState) ->
+    couch_log:debug("peruser: start_listening() on node ~p", [node()]),
+    try
+        States = lists:map(fun (A) ->
+            S = #state{parent = ClusterState#clusterState.parent,
+                       db_name = A#shard.name,
+                       delete_dbs = DeleteDbs},
+            {Pid, Ref} = spawn_opt(
+                ?MODULE, init_changes_handler, [S], [link, monitor]),
+            S#state{changes_pid=Pid, changes_ref=Ref}
+        end, mem3:local_shards(DbName)),
+
+        ClusterState#clusterState{states = States}
+    catch error:database_does_not_exist ->
+        couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disabling itself.", [DbName]),
+        config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
     end.
 
 init_changes_handler(#state{db_name=DbName} = State) ->
@@ -76,24 +98,30 @@ init_changes_handler(#state{db_name=DbName} = State) ->
         ok
     end.
 
-changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{}) ->
+
+changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{db_name=DbName}) ->
     case couch_util:get_value(<<"id">>, Doc) of
-    <<"org.couchdb.user:",User/binary>> ->
-        case couch_util:get_value(<<"deleted">>, Doc, false) of
-        false ->
-            UserDb = ensure_user_db(User),
-            ok = ensure_security(User, UserDb, fun add_user/3),
-            State;
+    <<"org.couchdb.user:",User/binary>>=DocId ->
+        case should_handle_doc(DbName, DocId) of
         true ->
-            case State#state.delete_dbs of
-            true ->
-                _UserDb = delete_user_db(User),
-                State;
+            case couch_util:get_value(<<"deleted">>, Doc, false) of
             false ->
-                UserDb = user_db_name(User),
-                ok = ensure_security(User, UserDb, fun remove_user/3),
-                State
-            end
+                UserDb = ensure_user_db(User),
+                ok = ensure_security(User, UserDb, fun add_user/3),
+                State;
+            true ->
+                case State#state.delete_dbs of
+                true ->
+                    _UserDb = delete_user_db(User),
+                    State;
+                false ->
+                    UserDb = user_db_name(User),
+                    ok = ensure_security(User, UserDb, fun remove_user/3),
+                    State
+                end
+            end;
+        false ->
+            State
         end;
     _ ->
         State
@@ -101,6 +129,25 @@ changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{}) ->
 changes_handler(_Event, _ResType, State) ->
     State.
 
+should_handle_doc(DbName, DocId) ->
+  case couch_replicator_clustering:owner(DbName, DocId) of
+      unstable ->
+          % todo: when we do proper resume[1], we can return false here
+          % and rely on a module restart when the cluster is stable again
+          % in the meantime, we risk conflicts when the cluster gets unstable
+          % and users are being created.
+          % [1] https://github.com/apache/couchdb/issues/872
+          true;
+      ThisNode when ThisNode =:= node() ->
+          couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
+          % do the deed
+          true;
+      _OtherNode ->
+          couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
+          false
+  end.
+
+
 delete_user_db(User) ->
     UserDb = user_db_name(User),
     try
@@ -158,20 +205,25 @@ remove_user(User, Prop, {Modified, SecProps}) ->
     end.
 
 ensure_security(User, UserDb, TransformFun) ->
-    {ok, Shards} = fabric:get_all_security(UserDb, [?ADMIN_CTX]),
-    {_ShardInfo, {SecProps}} = hd(Shards),
-    % assert that shards have the same security object
-    true = lists:all(fun ({_, {SecProps1}}) ->
-        SecProps =:= SecProps1
-    end, Shards),
-    case lists:foldl(
-           fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
-           {false, SecProps},
-           [<<"admins">>, <<"members">>]) of
-    {false, _} ->
-        ok;
-    {true, SecProps1} ->
-        ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
+    case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
+    {error, no_majority} ->
+      % single node, ignore
+       ok;
+    {ok, Shards} ->
+        {_ShardInfo, {SecProps}} = hd(Shards),
+        % assert that shards have the same security object
+        true = lists:all(fun ({_, {SecProps1}}) ->
+            SecProps =:= SecProps1
+        end, Shards),
+        case lists:foldl(
+               fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
+               {false, SecProps},
+               [<<"admins">>, <<"members">>]) of
+        {false, _} ->
+            ok;
+        {true, SecProps1} ->
+            ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
+        end
     end.
 
 user_db_name(User) ->
@@ -179,6 +231,11 @@ user_db_name(User) ->
         [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
     <<?USERDB_PREFIX,HexUser/binary>>.
 
+exit_changes(ClusterState) ->
+    lists:foreach(fun (State) ->
+        demonitor(State#state.changes_ref, [flush]),
+        exit(State#state.changes_pid, kill)
+    end, ClusterState#clusterState.states).
 
 %% gen_server callbacks
 
@@ -191,16 +248,19 @@ handle_call(_Msg, _From, State) ->
 
 
 handle_cast(update_config, ClusterState) when ClusterState#clusterState.states =/= undefined ->
-    lists:foreach(fun (State) ->
-        demonitor(State#state.changes_ref, [flush]),
-        exit(State#state.changes_pid, kill)
-    end, ClusterState#clusterState.states),
-
+    exit_changes(ClusterState),
     {noreply, init()};
 handle_cast(update_config, _) ->
     {noreply, init()};
 handle_cast(stop, State) ->
     {stop, normal, State};
+handle_cast({cluster, unstable}, ClusterState) when ClusterState#clusterState.states =/= undefined ->
+    exit_changes(ClusterState),
+    {noreply, init()};
+handle_cast({cluster, unstable}, _) ->
+    {noreply, init()};
+handle_cast({cluster, stable}, State) ->
+    {noreply, start_listening(State)};
 handle_cast(_Msg, State) ->
     {noreply, State}.
 

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.