Posted to commits@couchdb.apache.org by ja...@apache.org on 2017/10/08 09:14:35 UTC

[couchdb] branch 749-fix-couch_peruser-app-structure updated (5c39bee -> baab400)

This is an automated email from the ASF dual-hosted git repository.

jan pushed a change to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    from 5c39bee  Ensure user creation is handled on one node only
     new 4023125  track cluster state in gen_server state and get notified from mem3 directly
     new e749b7a  move couch_replicator_clustering:owner/3 to mem3.erl
     new baab400  remove reliance on couch_replicator_clustering, handle cluster state internally

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 src/couch_peruser/src/couch_peruser.erl            | 119 +++++++++++++++------
 src/couch_replicator/src/couch_replicator.erl      |   2 +-
 .../src/couch_replicator_clustering.erl            |  10 +-
 src/mem3/src/mem3.erl                              |   8 +-
 4 files changed, 93 insertions(+), 46 deletions(-)

-- 
To stop receiving notification emails like this one, please contact
['"commits@couchdb.apache.org" <co...@couchdb.apache.org>'].

[couchdb] 03/03: remove reliance on couch_replicator_clustering, handle cluster state internally

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit baab4004d38cddecdc9c63828202a7b3dffe61c4
Author: Jan Lehnardt <ja...@apache.org>
AuthorDate: Sun Oct 8 11:14:28 2017 +0200

    remove reliance on couch_replicator_clustering, handle cluster state internally
---
 src/couch_peruser/src/couch_peruser.erl | 56 ++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index a31ff60..791431c 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -33,7 +33,8 @@
 ]).
 
 -record(state, {parent, db_name, delete_dbs, changes_pid, changes_ref}).
--record(clusterState, {parent,
+-record(clusterState, {
+    parent,
     db_name,
     delete_dbs,
     states,
@@ -48,10 +49,10 @@
 
 
 start_link() ->
-    gen_server:start_link(?MODULE, [], []).
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
 
 init() ->
-    couch_log:debug("peruser: starting on node ~p", [node()]),
+    couch_log:debug("peruser: starting on node ~p in pid ~p", [node(), self()]),
     case config:get_boolean("couch_peruser", "enable", false) of
     false ->
         couch_log:debug("peruser: disabled on node ~p", [node()]),
@@ -107,6 +108,8 @@ start_listening(#clusterState{db_name=DbName, delete_dbs=DeleteDbs} = ClusterSta
     end.
 
 init_changes_handler(#state{db_name=DbName} = State) ->
+    % leave for debugging
+    % couch_log:debug("peruser: init_changes_handler() on DbName ~p", [DbName]),
     try
         {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX, sys_db]),
         FunAcc = {fun ?MODULE:changes_handler/3, State},
@@ -120,6 +123,9 @@ init_changes_handler(#state{db_name=DbName} = State) ->
 
 
 changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{db_name=DbName}) ->
+    % leave for debugging
+    % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
+
     case couch_util:get_value(<<"id">>, Doc) of
     <<"org.couchdb.user:",User/binary>>=DocId ->
         case should_handle_doc(DbName, DocId) of
@@ -149,22 +155,28 @@ changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{db_name=DbName
 changes_handler(_Event, _ResType, State) ->
     State.
 
-should_handle_doc(DbName, DocId) ->
-  case couch_replicator_clustering:owner(DbName, DocId) of
-      unstable ->
-          % todo: when we do proper resume[1], we can return false here
-          % and rely on a module restart when the cluster is stable again
-          % in the meantime, we risk conflicts when the cluster gets unstable
-          % and users are being created.
-          % [1] https://github.com/apache/couchdb/issues/872
-          true;
-      ThisNode when ThisNode =:= node() ->
-          couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
-          % do the deed
-          true;
-      _OtherNode ->
-          couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
-          false
+should_handle_doc(ShardName, DocId) ->
+    should_handle_doc_int(ShardName, DocId, is_stable()).
+
+should_handle_doc_int(ShardName, DocId, false) ->
+    % when the cluster is unstable, we have already stopped all Listeners
+    % the next stable event will restart all listeners and pick up this
+    % doc change
+    couch_log:debug("peruser: skipping, cluster unstable ~s/~s", [ShardName, DocId]),
+    false;
+should_handle_doc_int(ShardName, DocId, true) ->
+    DbName = mem3:dbname(ShardName),
+    Live = [erlang:node() | erlang:nodes()],
+    Shards = mem3:shards(DbName, DocId),
+    Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
+    case mem3:owner(DbName, DocId, Nodes) of
+        ThisNode when ThisNode =:= node() ->
+            couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
+            % do the deed
+            true;
+        _OtherNode ->
+            couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
+            false
   end.
 
 
@@ -258,6 +270,10 @@ exit_changes(ClusterState) ->
         exit(State#state.changes_pid, kill)
     end, ClusterState#clusterState.states).
 
+-spec is_stable() -> true | false.
+is_stable() ->
+    gen_server:call(?MODULE, is_stable).
+
 % Mem3 cluster callbacks
 
 cluster_unstable(Server) ->
@@ -274,6 +290,8 @@ init([]) ->
     ok = subscribe_for_changes(),
     {ok, init()}.
 
+handle_call(is_stable, _From, #clusterState{cluster_stable = IsStable} = State) ->
+    {reply, IsStable, State};
 handle_call(_Msg, _From, State) ->
     {reply, error, State}.
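
With couch_peruser now registered locally via start_link/0 (see the first hunk
above), the new is_stable/0 call can also be exercised directly from a remote
shell on a running node. A minimal sketch, assuming an attached remsh session
(the io:format wrapper is illustrative only, not part of the commit):

    %% ask the locally registered couch_peruser server for its cluster view;
    %% per should_handle_doc_int/3 above, _users doc changes are only handled
    %% while this returns true.
    IsStable = gen_server:call(couch_peruser, is_stable),
    io:format("peruser considers cluster stable: ~p~n", [IsStable]).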
 

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 01/03: track cluster state in gen_server state and get notified from mem3 directly

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 4023125598924f54fd422640f82d9df379f3c713
Author: Jan Lehnardt <ja...@apache.org>
AuthorDate: Sat Oct 7 23:17:30 2017 +0200

    track cluster state in gen_server state and get notified from mem3 directly
---
 src/couch_peruser/src/couch_peruser.erl | 65 ++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 17 deletions(-)

diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 9161f56..a31ff60 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -12,12 +12,11 @@
 
 -module(couch_peruser).
 -behaviour(gen_server).
+-behaviour(mem3_cluster).
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("mem3/include/mem3.hrl").
 
--define(USERDB_PREFIX, "userdb-").
-
 % gen_server callbacks
 -export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).
@@ -27,10 +26,25 @@
 
 -export([init_changes_handler/1, changes_handler/3]).
 
+% mem3_cluster callbacks
+-export([
+    cluster_stable/1,
+    cluster_unstable/1
+]).
+
 -record(state, {parent, db_name, delete_dbs, changes_pid, changes_ref}).
--record(clusterState, {parent, db_name, delete_dbs, states}).
+-record(clusterState, {parent,
+    db_name,
+    delete_dbs,
+    states,
+    mem3_cluster_pid,
+    cluster_stable
+}).
 
+-define(USERDB_PREFIX, "userdb-").
 -define(RELISTEN_DELAY, 5000).
+-define(DEFAULT_QUIET_PERIOD, 60). % seconds
+-define(DEFAULT_START_PERIOD, 5). % seconds
 
 
 start_link() ->
@@ -48,18 +62,24 @@ init() ->
                          "couch_httpd_auth", "authentication_db", "_users")),
         DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
 
-        ClusterState = #clusterState{
-            parent = self(),
-            db_name = DbName,
-            delete_dbs = DeleteDbs
-        },
-
         % set up cluster-stable listener
-        couch_replicator_clustering:link_cluster_event_listener(?MODULE,
-            notify_cluster_event, [self()]),
+        Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period",
+            ?DEFAULT_QUIET_PERIOD)),
+        StartPeriod = abs(config:get_integer("couch_peruser", "cluster_start_period",
+            ?DEFAULT_START_PERIOD)),
+
+        {ok, Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), StartPeriod,
+            Period),
 
         couch_log:debug("peruser: registered for cluster event on node ~p", [node()]),
-        ClusterState
+
+        #clusterState{
+            parent = self(),
+            db_name = DbName,
+            delete_dbs = DeleteDbs,
+            mem3_cluster_pid = Mem3Cluster,
+            cluster_stable = false
+        }
     end.
 
 % Cluster membership change notification callback
@@ -80,7 +100,7 @@ start_listening(#clusterState{db_name=DbName, delete_dbs=DeleteDbs} = ClusterSta
             S#state{changes_pid=Pid, changes_ref=Ref}
         end, mem3:local_shards(DbName)),
 
-        ClusterState#clusterState{states = States}
+        ClusterState#clusterState{states = States, cluster_stable = true}
     catch error:database_does_not_exist ->
         couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
         config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
@@ -166,6 +186,7 @@ ensure_user_db(User) ->
         {ok, _DbInfo} = fabric:get_db_info(UserDb)
     catch error:database_does_not_exist ->
         case fabric:create_db(UserDb, [?ADMIN_CTX]) of
+        {error, file_exists} -> ok;
         ok -> ok;
         accepted -> ok
         end
@@ -207,7 +228,7 @@ remove_user(User, Prop, {Modified, SecProps}) ->
 ensure_security(User, UserDb, TransformFun) ->
     case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
     {error, no_majority} ->
-      % single node, ignore
+       % single node, ignore
        ok;
     {ok, Shards} ->
         {_ShardInfo, {SecProps}} = hd(Shards),
@@ -237,6 +258,16 @@ exit_changes(ClusterState) ->
         exit(State#state.changes_pid, kill)
     end, ClusterState#clusterState.states).
 
+% Mem3 cluster callbacks
+
+cluster_unstable(Server) ->
+    gen_server:cast(Server, cluster_unstable),
+    Server.
+
+cluster_stable(Server) ->
+    gen_server:cast(Server, cluster_stable),
+    Server.
+
 %% gen_server callbacks
 
 init([]) ->
@@ -254,12 +285,12 @@ handle_cast(update_config, _) ->
     {noreply, init()};
 handle_cast(stop, State) ->
     {stop, normal, State};
-handle_cast({cluster, unstable}, ClusterState) when ClusterState#clusterState.states =/= undefined ->
+handle_cast(cluster_unstable, ClusterState) when ClusterState#clusterState.states =/= undefined ->
     exit_changes(ClusterState),
     {noreply, init()};
-handle_cast({cluster, unstable}, _) ->
+handle_cast(cluster_unstable, _) ->
     {noreply, init()};
-handle_cast({cluster, stable}, State) ->
+handle_cast(cluster_stable, State) ->
     {noreply, start_listening(State)};
 handle_cast(_Msg, State) ->
     {noreply, State}.
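
The structural change here is that couch_peruser now owns its cluster-stability
tracking: it starts a mem3_cluster monitor with the module itself as the
callback module, and the two callbacks simply cast back into the owning
gen_server. The following is an illustrative, self-contained sketch of that
wiring, not code from the commit: the module name and map-based state are
hypothetical, and the start/quiet periods are the defaults from the diff
(5 and 60 seconds).

    -module(peruser_cluster_sketch).
    -behaviour(gen_server).
    -behaviour(mem3_cluster).

    -export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).
    -export([cluster_stable/1, cluster_unstable/1]).

    start_link() ->
        gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

    init([]) ->
        %% StartPeriod = 5s, QuietPeriod = 60s, mirroring the new config defaults
        {ok, _Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), 5, 60),
        {ok, #{cluster_stable => false}}.

    %% mem3_cluster callbacks: forward stability transitions to the owning server
    cluster_unstable(Server) -> gen_server:cast(Server, cluster_unstable), Server.
    cluster_stable(Server)   -> gen_server:cast(Server, cluster_stable), Server.

    handle_cast(cluster_unstable, State) -> {noreply, State#{cluster_stable => false}};
    handle_cast(cluster_stable, State)   -> {noreply, State#{cluster_stable => true}};
    handle_cast(_Msg, State)             -> {noreply, State}.

    handle_call(is_stable, _From, #{cluster_stable := S} = State) -> {reply, S, State};
    handle_call(_Msg, _From, State) -> {reply, error, State}.

    handle_info(_Msg, State) -> {noreply, State}.
    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

In couch_peruser itself, the stable/unstable casts additionally restart or tear
down the per-shard changes listeners, as the handle_cast/2 hunk above shows.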

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 02/03: move couch_replicator_clustering:owner/3 to mem3.erl

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 749-fix-couch_peruser-app-structure
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e749b7a48f53725bf9b22244aac38ef0714839c4
Author: Jan Lehnardt <ja...@apache.org>
AuthorDate: Sat Oct 7 23:24:14 2017 +0200

    move couch_replicator_clustering:owner/3 to mem3.erl
---
 src/couch_replicator/src/couch_replicator.erl            |  2 +-
 src/couch_replicator/src/couch_replicator_clustering.erl | 10 +---------
 src/mem3/src/mem3.erl                                    |  8 +++++++-
 3 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index c67b37d..8b7cd5c 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -184,7 +184,7 @@ active_doc(DbName, DocId) ->
         Live = [node() | nodes()],
         Nodes = lists:usort([N || #shard{node=N} <- Shards,
             lists:member(N, Live)]),
-        Owner = couch_replicator_clustering:owner(DbName, DocId, Nodes),
+        Owner = mem3:owner(DbName, DocId, Nodes),
         case active_doc_rpc(DbName, DocId, [Owner]) of
             {ok, DocInfo} ->
                 {ok, DocInfo};
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index ed01465..3d5229b 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -45,7 +45,6 @@
 
 -export([
     owner/2,
-    owner/3,
     is_stable/0,
     link_cluster_event_listener/3
 ]).
@@ -96,13 +95,6 @@ owner(_DbName, _DocId) ->
     node().
 
 
-% Direct calculation of node membership. This is the algorithm part. It
-% doesn't read the shard map, just picks owner based on a hash.
--spec owner(binary(), binary(), [node()]) -> node().
-owner(DbName, DocId, Nodes) ->
-    hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
-
-
 -spec is_stable() -> true | false.
 is_stable() ->
     gen_server:call(?MODULE, is_stable).
@@ -200,4 +192,4 @@ owner_int(ShardName, DocId) ->
     Live = [node() | nodes()],
     Shards = mem3:shards(DbName, DocId),
     Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
-    owner(DbName, DocId, Nodes).
+    mem3:owner(DbName, DocId, Nodes).
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index e2cbb2e..047154a 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -19,7 +19,7 @@
 -export([compare_nodelists/0, compare_shards/1]).
 -export([quorum/1, group_by_proximity/1]).
 -export([live_shards/2]).
--export([belongs/2]).
+-export([belongs/2, owner/3]).
 -export([get_placement/1]).
 
 %% For mem3 use only.
@@ -311,6 +311,12 @@ name(#shard{name=Name}) ->
 name(#ordered_shard{name=Name}) ->
     Name.
 
+% Direct calculation of node membership. This is the algorithm part. It
+% doesn't read the shard map, just picks owner based on a hash.
+-spec owner(binary(), binary(), [node()]) -> node().
+owner(DbName, DocId, Nodes) ->
+    hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
+
 
 -ifdef(TEST).
 

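Both call sites touched by this series build the candidate node list the same
way before asking the relocated mem3:owner/3 for the deterministic owner: take
the live nodes that actually host shards for the document, then let owner/3
rotate the sorted node list by a hash of {DbName, DocId}. A minimal caller-side
sketch, mirroring active_doc/2 and should_handle_doc_int/3 above (the module
and function names are illustrative, and it assumes a running node with mem3
and the shard map for DbName available):

    -module(owner_sketch).
    -include_lib("mem3/include/mem3.hrl").
    -export([owner_for/2]).

    %% return the node that should handle DocId in DbName
    owner_for(DbName, DocId) ->
        Live = [node() | nodes()],
        Shards = mem3:shards(DbName, DocId),
        Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
        %% owner/3 itself never reads the shard map; it only picks from Nodes
        mem3:owner(DbName, DocId, Nodes).
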
-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.