Posted to commits@couchdb.apache.org by da...@apache.org on 2012/01/25 08:20:54 UTC

[4/4] git commit: Implement _security as a _local doc

Implement _security as a _local doc

As per the mailing list discussion on enabling the syncing of _security
objects, this builds upon the previous commits to implement the
agreed-upon strategy of giving the _security object a revision tree.
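
For reference, a hedged sketch of what the body of the new
_local/_security document could look like in EJSON form; the
admins/members layout follows the existing security object format,
while the specific names and roles below are invented for illustration:

    %% Hypothetical EJSON body of _local/_security after this change.
    %% Only the admins/members structure comes from the existing
    %% security object format; the values are made up.
    {[
        {<<"admins">>,  {[{<<"names">>, [<<"alice">>]},
                          {<<"roles">>, [<<"ops">>]}]}},
        {<<"members">>, {[{<<"names">>, []},
                          {<<"roles">>, [<<"staff">>]}]}}
    ]}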

The old API is maintained with identical behavior in the normal single
node case. It is also now possible to update the _local/_security doc
directly using the normal HTTP document and _bulk_docs APIs.
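
As a rough sketch of what this looks like at the Erlang level (not part
of the patch; the Db handle and NewSecBody are hypothetical, while
couch_db:set_security/2 and couch_db:update_doc/3 are the functions
touched by this commit, with the HTTP document API built on the same
module):

    %% Hedged illustration only; assumes couch_db.hrl is included for #doc{}.
    %% 1) Old API, unchanged behavior in the single node case:
    ok = couch_db:set_security(Db,
        {[{<<"admins">>, {[{<<"roles">>, [<<"ops">>]}]}}]}),
    %% 2) Now also possible: edit the security object as a normal document.
    {ok, SecDoc} = couch_db:open_doc(Db, <<"_local/_security">>, [ejson_body]),
    {ok, _Rev} = couch_db:update_doc(Db, SecDoc#doc{body = NewSecBody}, []).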

This document is special-cased like _design docs to disallow non-admins
from editing it.


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/76fff259
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/76fff259
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/76fff259

Branch: refs/heads/new-security-object
Commit: 76fff259a62e1c2f443d625756d3f8d6c88ee6a8
Parents: 9d0db1b
Author: Paul Joseph Davis <da...@apache.org>
Authored: Mon Jan 23 17:59:57 2012 -0600
Committer: Paul Joseph Davis <da...@apache.org>
Committed: Wed Jan 25 01:14:07 2012 -0600

----------------------------------------------------------------------
 src/couchdb/couch_db.erl         |   22 ++++++----
 src/couchdb/couch_db.hrl         |    1 +
 src/couchdb/couch_db_updater.erl |   76 ++++++++++++++++++++++-----------
 3 files changed, 65 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/76fff259/src/couchdb/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
index be65f53..987e8f7 100644
--- a/src/couchdb/couch_db.erl
+++ b/src/couchdb/couch_db.erl
@@ -359,21 +359,23 @@ check_is_member(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
         end
     end.
 
-get_admins(#db{security=SecProps}) ->
+get_admins(#db{security=Doc}) ->
+    #doc{body={SecProps}} = Doc,
     couch_util:get_value(<<"admins">>, SecProps, {[]}).
 
-get_members(#db{security=SecProps}) ->
+get_members(#db{security=Doc}) ->
+    #doc{body={SecProps}} = Doc,
     % we fallback to readers here for backwards compatibility
     couch_util:get_value(<<"members">>, SecProps,
         couch_util:get_value(<<"readers">>, SecProps, {[]})).
 
-get_security(#db{security=SecProps}) ->
-    {SecProps}.
+get_security(#db{security=SecDoc}) ->
+    SecDoc#doc.body.
 
-set_security(#db{update_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
+set_security(#db{security=Doc}=Db, {NewSecProps}) when is_list(NewSecProps) ->
     check_is_admin(Db),
     ok = validate_security_object(NewSecProps),
-    ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
+    {ok, _} = update_doc(Db, Doc#doc{body={NewSecProps}}, []),
     {ok, _} = ensure_full_commit(Db),
     ok;
 set_security(_, _) ->
@@ -459,10 +461,12 @@ group_alike_docs([{Doc,Ref}|Rest], [Bucket|RestBuckets]) ->
 
 validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
     catch check_is_admin(Db);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
+validate_doc_update(Db, #doc{id= ?SECURITY_ID}, _GetDiskDocFun) ->
+    catch check_is_admin(Db);
 validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
     ok;
+validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
+    ok;
 validate_doc_update(Db, Doc, GetDiskDocFun) ->
     DiskDoc = GetDiskDocFun(),
     JsonCtx = couch_util:json_user_ctx(Db),
@@ -731,7 +735,7 @@ update_docs(Db, Docs, Options, interactive_edit) ->
     % request. This relies on couch_db_updater not collecting
     % more than one update that contains _local docs but this
     % is still triggerable with a _bulk_docs request.
-    UniqNRIds = lists:usort([Id || #doc{id=Id} <- NonRepDocs0]),
+    UniqNRIds = lists:usort([Id || [{#doc{id=Id}, _}] <- NonRepDocs0]),
     case length(UniqNRIds) == length(NonRepDocs0) of
         true -> ok;
         false -> throw({update_error, repeated_local_docs})

http://git-wip-us.apache.org/repos/asf/couchdb/blob/76fff259/src/couchdb/couch_db.hrl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
index 65eb7f0..26ffe0d 100644
--- a/src/couchdb/couch_db.hrl
+++ b/src/couchdb/couch_db.hrl
@@ -11,6 +11,7 @@
 % the License.
 
 -define(LOCAL_DOC_PREFIX, "_local/").
+-define(SECURITY_ID, <<"_local/_security">>).
 -define(DESIGN_DOC_PREFIX0, "_design").
 -define(DESIGN_DOC_PREFIX, "_design/").
 -define(DEFAULT_COMPRESSION, snappy).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/76fff259/src/couchdb/couch_db_updater.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl
index 862c48a..42b6460 100644
--- a/src/couchdb/couch_db_updater.erl
+++ b/src/couchdb/couch_db_updater.erl
@@ -67,14 +67,6 @@ handle_call(increment_update_seq, _From, Db) ->
     couch_db_update_notifier:notify({updated, Db#db.name}),
     {reply, {ok, Db2#db.update_seq}, Db2};
 
-handle_call({set_security, NewSec}, _From, #db{compression = Comp} = Db) ->
-    {ok, Ptr, _} = couch_file:append_term(
-        Db#db.updater_fd, NewSec, [{compression, Comp}]),
-    Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
-            update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-    {reply, ok, Db2};
-
 handle_call({set_revs_limit, Limit}, _From, Db) ->
     Db2 = commit_data(Db#db{revs_limit=Limit,
             update_seq=Db#db.update_seq+1}),
@@ -183,7 +175,7 @@ handle_call({compact_done, CompactFilepath}, _From, #db{filepath=Filepath}=Db) -
     case Db#db.update_seq == NewSeq of
     true ->
         % suck up all the local docs into memory and write them to the new db
-        NewDb1 = copy_local_docs(Db, NewDb),
+        {ok, NewDb1} = update_security(copy_local_docs(Db, NewDb)),
         NewDb2 = commit_data(NewDb1#db{
             main_pid = Db#db.main_pid,
             filepath = Filepath,
@@ -231,16 +223,17 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
     try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
                 FullCommit2) of
     {ok, Db2, UpdatedDDocIds} ->
-        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
-        if Db2#db.update_seq /= Db#db.update_seq ->
-            couch_db_update_notifier:notify({updated, Db2#db.name});
+        {ok, Db3} = maybe_update_security(Db2, NonRepDocs),
+        ok = gen_server:call(Db#db.main_pid, {db_updated, Db3}),
+        if Db3#db.update_seq /= Db#db.update_seq ->
+            couch_db_update_notifier:notify({updated, Db3#db.name});
         true -> ok
         end,
         [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
         lists:foreach(fun(DDocId) ->
-            couch_db_update_notifier:notify({ddoc_updated, {Db#db.name, DDocId}})
+            couch_db_update_notifier:notify({ddoc_updated, {Db3#db.name, DDocId}})
         end, UpdatedDDocIds),
-        {noreply, Db2}
+        {noreply, Db3}
     catch
         throw: retry ->
             [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
@@ -450,19 +443,12 @@ init_db(DbName, Filepath, Fd, ReaderFd, Header0, Options) ->
             {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
             {compression, Compression}
         ]),
-    case Header#db_header.security_ptr of
-    nil ->
-        Security = [],
-        SecurityPtr = nil;
-    SecurityPtr ->
-        {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
-    end,
     % convert start time tuple to microsecs and store as a binary string
     {MegaSecs, Secs, MicroSecs} = now(),
     StartTime = ?l2b(io_lib:format("~p",
             [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
     {ok, RefCntr} = couch_ref_counter:start([Fd, ReaderFd]),
-    #db{
+    Db = #db{
         update_pid=self(),
         fd = ReaderFd,
         updater_fd = Fd,
@@ -475,8 +461,6 @@ init_db(DbName, Filepath, Fd, ReaderFd, Header0, Options) ->
         update_seq = Header#db_header.update_seq,
         name = DbName,
         filepath = Filepath,
-        security = Security,
-        security_ptr = SecurityPtr,
         instance_start_time = StartTime,
         revs_limit = Header#db_header.revs_limit,
         fsync_options = FsyncOptions,
@@ -484,7 +468,21 @@ init_db(DbName, Filepath, Fd, ReaderFd, Header0, Options) ->
         compression = Compression,
         before_doc_update = couch_util:get_value(before_doc_update, Options, nil),
         after_doc_read = couch_util:get_value(after_doc_read, Options, nil)
-        }.
+        },
+    case Header#db_header.security_ptr of
+    local ->
+        {ok, Db1} = update_security(Db),
+        Db1#db{security_ptr=local};
+    nil ->
+        {ok, Db1} = init_security(Db, {[]}),
+        {ok, Db2} = update_security(Db1),
+        Db2#db{security_ptr=local};
+    SecurityPtr ->
+        {ok, Security} = couch_file:pread_term(Fd, SecurityPtr),
+        {ok, Db1} = init_security(Db, {Security}),
+        {ok, Db2} = update_security(Db1),
+        Db2#db{security_ptr=local}
+    end.
 
 open_reader_fd(Filepath, Options) ->
     {ok, Fd} = case lists:member(sys_db, Options) of
@@ -514,6 +512,31 @@ refresh_validate_doc_funs(Db0) ->
         end, DesignDocs),
     Db0#db{validate_doc_funs=ProcessDocFuns}.
 
+update_security(Db0) ->
+    Db = Db0#db{
+        user_ctx = #user_ctx{roles=[<<"_admin">>]},
+        after_doc_read=nil
+    },
+    {ok, Doc} = couch_db:open_doc_int(Db, ?SECURITY_ID, [ejson_body]),
+    {ok, Db0#db{security=Doc}}.
+
+maybe_update_security(Db, Docs) ->
+    Ids = [Doc#doc.id || [{Doc, _}] <- Docs],
+    case lists:member(?SECURITY_ID, Ids) of
+        true -> update_security(Db);
+        false -> {ok, Db}
+    end.
+
+init_security(Db0, Body) ->
+    Db1 = Db0#db{user_ctx=#user_ctx{roles=[<<"_admin">>]}},
+    Doc0 = #doc{id=?SECURITY_ID, body=Body},
+    RevId = couch_db:new_revid(Doc0),
+    Doc1 = Doc0#doc{revs={1, [RevId]}},
+    SummaryChunk = make_doc_summary(Db1, {Body, []}),
+    Doc2 = Doc1#doc{body={summary, SummaryChunk, nil}},
+    Req = [{nil, [{Doc2, nil}]}],
+    update_local_docs(Db1, Req).
+
 % rev tree functions
 
 flush_trees(_Db, [], AccFlushedTrees) ->
@@ -568,6 +591,9 @@ flush_trees(#db{updater_fd = Fd} = Db,
     flush_trees(Db, RestUnflushed, [InfoFlushed | AccFlushed]).
 
 
+send_result(nil, _, _) ->
+    % For doc writes from init_db
+    ok;
 send_result(Client, Ref, NewResult) ->
     % used to send a result to the client
     catch(Client ! {result, self(), {Ref, NewResult}}).