You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by da...@apache.org on 2017/06/16 21:40:27 UTC
[couchdb] 01/03: Remove LRU from ddoc_cache
This is an automated email from the ASF dual-hosted git repository.
davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git
commit 834844601b131716beb343fc504e1d166e8348c2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 16 13:29:47 2017 -0500
Remove LRU from ddoc_cache
This is only a part of the necessary work to optimize the ddoc cache.
This will not work in production because the ets_lru max_age is not
implemented so once a ddoc is in the cache it won't be removed unless
everything stops using it (which for a busy database will be never
because of the match_newest code).
---
src/ddoc_cache/src/ddoc_cache.app.src | 7 +-
src/ddoc_cache/src/ddoc_cache.erl | 2 +-
src/ddoc_cache/src/ddoc_cache.hrl | 32 +++++
src/ddoc_cache/src/ddoc_cache_lru.erl | 221 +++++++++++++++++++++++++++++++
src/ddoc_cache/src/ddoc_cache_opener.erl | 123 ++++++-----------
src/ddoc_cache/src/ddoc_cache_sup.erl | 35 ++---
src/ddoc_cache/src/ddoc_cache_tables.erl | 64 +++++++++
7 files changed, 370 insertions(+), 114 deletions(-)
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
index a64b2f5..084895e 100644
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ b/src/ddoc_cache/src/ddoc_cache.app.src
@@ -21,6 +21,7 @@
ddoc_cache_util
]},
{registered, [
+ ddoc_cache_tables,
ddoc_cache_lru,
ddoc_cache_opener
]},
@@ -29,16 +30,10 @@
stdlib,
crypto,
couch_event,
- ets_lru,
mem3,
fabric,
couch_log,
couch_stats
]},
{mod, {ddoc_cache_app, []}},
- {env, [
- {max_objects, unlimited},
- {max_size, 104857600}, % 100M
- {max_lifetime, 60000} % 1m
- ]}
]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
index ed93309..07d89ac 100644
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ b/src/ddoc_cache/src/ddoc_cache.erl
@@ -91,7 +91,7 @@ open_custom(DbName, Mod) ->
evict(ShardDbName, DDocIds) ->
DbName = mem3:dbname(ShardDbName),
- ddoc_cache_opener:evict_docs(DbName, DDocIds).
+ ddoc_cache_lru:evict(DbName, DDocIds).
open(DbName, validation_funs) ->
open_validation_funs(DbName);
diff --git a/src/ddoc_cache/src/ddoc_cache.hrl b/src/ddoc_cache/src/ddoc_cache.hrl
new file mode 100644
index 0000000..8545914
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache.hrl
@@ -0,0 +1,32 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-type dbname() :: iodata().
+-type docid() :: iodata().
+-type doc_hash() :: <<_:128>>.
+-type revision() :: {pos_integer(), doc_hash()}.
+
+-define(CACHE, ddoc_cache_entries).
+-define(ATIMES, ddoc_cache_atimes).
+-define(OPENERS, ddoc_cache_openers).
+
+
+% A single cached value. Keys take one of three shapes, matching the
+% patterns used in ddoc_cache_lru and ddoc_cache_opener:
+% {DbName, DDocId, Rev} | {DbName, Mod} | {DbName, validation_funs}.
+-record(entry, {
+ key,
+ val
+}).
+
+% Tracks one in-flight fetch so that concurrent requests for the same
+% key share a single reader process instead of each hitting fabric.
+-record(opener, {
+ key,
+ pid, % the spawned fetch_doc_data/1 worker
+ clients % gen_server From tuples waiting on this fetch
+}).
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
new file mode 100644
index 0000000..9d8c397
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -0,0 +1,221 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+ start_link/0,
+
+ insert/2,
+ accessed/1,
+ evict/1,
+ evict/2
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+-export([
+ handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+ keys, % khash: cache Key -> last access time (?ATIMES is the reverse)
+ time, % monotonically increasing logical clock for LRU ordering
+ max_size, % entry-count limit enforced by trim/1
+ evictor % couch_event listener pid for db created/deleted events
+}).
+
+
+% Start the LRU bookkeeping server, registered locally as ?MODULE.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+% Store a newly fetched value in the cache. Synchronous so the caller
+% knows the entry is visible (and the LRU trimmed) before returning.
+insert(Key, Val) ->
+ gen_server:call(?MODULE, {insert, Key, Val}).
+
+
+% Asynchronously bump Key's access time after a cache hit.
+accessed(Key) ->
+ gen_server:cast(?MODULE, {accessed, Key}).
+
+
+% Evict every cached entry for DbName on all nodes. evict/1 is in the
+% export list above but was previously undefined, which is a compile
+% error; it backs the db created/deleted event handler.
+-spec evict(dbname()) -> ok.
+evict(DbName) ->
+ gen_server:cast(?MODULE, {evict, DbName}).
+
+
+% Evict the given design doc ids in DbName on all nodes.
+-spec evict(dbname(), [docid()]) -> ok.
+evict(DbName, DDocIds) ->
+ gen_server:cast(?MODULE, {evict, DbName, DDocIds}).
+
+
+init(_) ->
+ % Key -> access-time index; the ?ATIMES ets table (owned by
+ % ddoc_cache_tables) holds the reverse, time-ordered direction.
+ {ok, Keys} = khash:new(),
+ % Subscribe to db lifecycle events so created/deleted databases
+ % get their cached ddocs evicted.
+ {ok, Evictor} = couch_event:link_listener(
+ ?MODULE, handle_db_event, nil, [all_dbs]
+ ),
+ MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+ {ok, #st{
+ keys = Keys,
+ time = 0,
+ max_size = MaxSize,
+ evictor = Evictor
+ }}.
+
+
+% Tear down the couch_event listener along with this server.
+terminate(_Reason, St) ->
+ case is_pid(St#st.evictor) of
+ true -> exit(St#st.evictor, kill);
+ false -> ok
+ end,
+ ok.
+
+
+% Insert a new entry and record its access time. The khash maps
+% Key -> ATime (khash:put/3 takes hash, key, value — the Key argument
+% was previously missing) while ?ATIMES maps ATime -> Key so trim/1
+% can evict oldest-first.
+handle_call({insert, Key, Val}, _From, St) ->
+ #st{
+ keys = Keys,
+ time = Time
+ } = St,
+ NewTime = Time + 1,
+ true = ets:insert(?CACHE, #entry{key = Key, val = Val}),
+ true = ets:insert(?ATIMES, {NewTime, Key}),
+ ok = khash:put(Keys, Key, NewTime),
+ {reply, ok, trim(St#st{time = NewTime})};
+
+handle_call(Msg, _From, St) ->
+ {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+% Bump the access time for Key after a cache hit. Fixes: the head
+% bound _St while the body read St (unbound variable); khash:put/2
+% dropped the Key argument; and the advanced clock was never stored
+% back into the state, so distinct accesses could reuse the same
+% ?ATIMES key.
+handle_cast({accessed, Key}, St) ->
+ #st{
+ keys = Keys,
+ time = Time
+ } = St,
+ NewTime = Time + 1,
+ case khash:lookup(Keys, Key) of
+ {value, OldTime} ->
+ true = ets:delete(?ATIMES, OldTime),
+ true = ets:insert(?ATIMES, {NewTime, Key}),
+ ok = khash:put(Keys, Key, NewTime);
+ not_found ->
+ % Likely a client read from the cache while an
+ % eviction message was in our mailbox
+ ok
+ end,
+ {noreply, St#st{time = NewTime}};
+
+% evict requests arrive from the local node; translate them to
+% do_evict before broadcasting. Re-broadcasting the evict form
+% unchanged (as before) would make every node broadcast it again,
+% looping forever across the cluster.
+handle_cast({evict, DbName}, St) ->
+ gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+ {noreply, St};
+
+handle_cast({evict, DbName, DDocIds}, St) ->
+ gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName, DDocIds}),
+ {noreply, St};
+
+% Evict everything for DbName: collect the cached ddoc ids and
+% delegate to the do_evict/2 clause below (the St argument was
+% previously missing from the recursive call).
+handle_cast({do_evict, DbName}, St) ->
+ Pattern = #entry{
+ key = {DbName, '$1', '_'},
+ val = '_',
+ _ = '_'
+ },
+ DDocIds = lists:flatten(ets:match(?CACHE, Pattern)),
+ handle_cast({do_evict, DbName, DDocIds}, St);
+
+% Drop the custom (module) keys for DbName and every cached revision
+% of each listed ddoc id.
+% NOTE(review): this leaves the matching ?ATIMES rows and khash
+% entries in place; trim/1 discards them lazily via remove/2, but
+% they count against max_size until then — consider purging here.
+handle_cast({do_evict, DbName, DDocIds}, St) ->
+ Pattern = #entry{
+ key = {DbName, '$1'},
+ val = '_',
+ _ = '_'
+ },
+ CustomKeys = lists:flatten(ets:match(?CACHE, Pattern)),
+ lists:foreach(fun(Mod) ->
+ ets:delete(?CACHE, {DbName, Mod})
+ end, CustomKeys),
+ lists:foreach(fun(DDocId) ->
+ RevPattern = #entry{
+ key = {DbName, DDocId, '$1'},
+ val = '_',
+ _ = '_'
+ },
+ Revs = lists:flatten(ets:match(?CACHE, RevPattern)),
+ lists:foreach(fun(Rev) ->
+ ets:delete(?CACHE, {DbName, DDocId, Rev})
+ end, Revs)
+ end, DDocIds),
+ {noreply, St};
+
+handle_cast(Msg, St) ->
+ {stop, {invalid_cast, Msg}, St}.
+
+
+% The couch_event listener died; log it and start a replacement so
+% db create/delete evictions keep flowing. The log text previously
+% said "ddoc_cache_opener" — stale copy from the old module.
+handle_info({'EXIT', Pid, Reason}, #st{evictor=Pid}=St) ->
+ couch_log:error("ddoc_cache_lru evictor died ~w", [Reason]),
+ {ok, Evictor} = couch_event:link_listener(
+ ?MODULE, handle_db_event, nil, [all_dbs]
+ ),
+ {noreply, St#st{evictor=Evictor}};
+
+handle_info(Msg, St) ->
+ {stop, {invalid_info, Msg}, St}.
+
+
+% Standard no-op upgrade hook.
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+
+% couch_event callback (runs in the listener process): when a
+% database is created or deleted, ask the server to evict every
+% cached entry for its clustered name.
+handle_db_event(ShardDbName, created, St) ->
+ gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+ {ok, St};
+
+handle_db_event(ShardDbName, deleted, St) ->
+ gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+ {ok, St};
+
+handle_db_event(_DbName, _Event, St) ->
+ {ok, St}.
+
+
+% Evict least-recently-used entries until the entry count is within
+% max_size. ?ATIMES is a sorted_set keyed on access time, so
+% ets:first/1 yields the oldest entry.
+trim(St) ->
+ #st{
+ keys = Keys,
+ max_size = MaxSize
+ } = St,
+ case khash:size(Keys) > MaxSize of
+ true ->
+ case ets:first(?ATIMES) of
+ '$end_of_table' ->
+ St;
+ ATime ->
+ trim(remove(St, ATime))
+ end;
+ false ->
+ St
+ end.
+
+
+% Remove the cache entry whose access time is ATime. The khash maps
+% Key -> ATime (see accessed/1), so looking up the khash by ATime —
+% as this previously did — can never succeed; the ATime -> Key
+% direction lives in the ?ATIMES table.
+remove(St, ATime) ->
+ #st{
+ keys = Keys
+ } = St,
+ [{ATime, Key}] = ets:lookup(?ATIMES, ATime),
+ true = ets:delete(?CACHE, Key),
+ true = ets:delete(?ATIMES, ATime),
+ ok = khash:del(Keys, Key),
+ St.
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
index b76a228..a4adffc 100644
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ b/src/ddoc_cache/src/ddoc_cache_opener.erl
@@ -35,7 +35,6 @@
open_doc/2,
open_doc/3,
open_validation_funs/1,
- evict_docs/2,
lookup/1,
match_newest/1,
recover_doc/2,
@@ -43,29 +42,17 @@
recover_validation_funs/1
]).
-export([
- handle_db_event/3
-]).
--export([
fetch_doc_data/1
]).
--define(CACHE, ddoc_cache_lru).
--define(OPENING, ddoc_cache_opening).
+-include("ddoc_cache.hrl").
--type dbname() :: iodata().
--type docid() :: iodata().
--type doc_hash() :: <<_:128>>.
--type revision() :: {pos_integer(), doc_hash()}.
--record(opener, {
- key,
- pid,
- clients
-}).
+-define(LRU, ddoc_cache_lru).
+
-record(st, {
- db_ddocs,
- evictor
+ db_ddocs
}).
start_link() ->
@@ -86,14 +73,12 @@ open_validation_funs(DbName) ->
Resp = gen_server:call(?MODULE, {open, {DbName, validation_funs}}, infinity),
handle_open_response(Resp).
--spec evict_docs(dbname(), [docid()]) -> ok.
-evict_docs(DbName, DocIds) ->
- gen_server:cast(?MODULE, {evict, DbName, DocIds}).
lookup(Key) ->
- try ets_lru:lookup_d(?CACHE, Key) of
- {ok, _} = Resp ->
- Resp;
+ try ets:lookup(?CACHE, Key) of
+ [#entry{key = Key, val = Val}] ->
+ ddoc_cache_lru:accessed(Key),
+ {ok, Val};
_ ->
missing
catch
@@ -102,10 +87,19 @@ lookup(Key) ->
end.
match_newest(Key) ->
- try ets_lru:match_object(?CACHE, Key, '_') of
+ Pattern = #entry{
+ key = Key,
+ val = '_',
+ _ = '_'
+ },
+ try ets:match_object(?CACHE, Pattern) of
[] ->
missing;
- Docs ->
+ Entries ->
+ Docs = lists:map(fun(#entry{key = K, val = V}) ->
+ ddoc_cache_lru:accessed(K),
+ V
+ end, Entries),
Sorted = lists:sort(
fun (#doc{deleted=DelL, revs=L}, #doc{deleted=DelR, revs=R}) ->
{not DelL, L} > {not DelR, R}
@@ -133,40 +127,22 @@ recover_validation_funs(DbName) ->
end, DDocs),
{ok, Funs}.
-handle_db_event(ShardDbName, created, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-handle_db_event(ShardDbName, deleted, St) ->
- gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
- {ok, St};
-handle_db_event(_DbName, _Event, St) ->
- {ok, St}.
init(_) ->
process_flag(trap_exit, true),
- _ = ets:new(?OPENING, [set, protected, named_table, {keypos, #opener.key}]),
- {ok, Evictor} = couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [all_dbs]
- ),
- {ok, #st{
- evictor = Evictor
- }}.
-
-terminate(_Reason, St) ->
- case is_pid(St#st.evictor) of
- true -> exit(St#st.evictor, kill);
- false -> ok
- end,
+ {ok, #st{}}.
+
+terminate(_Reason, _St) ->
ok.
handle_call({open, OpenerKey}, From, St) ->
- case ets:lookup(?OPENING, OpenerKey) of
+ case ets:lookup(?OPENERS, OpenerKey) of
[#opener{clients=Clients}=O] ->
- ets:insert(?OPENING, O#opener{clients=[From | Clients]}),
+ ets:insert(?OPENERS, O#opener{clients=[From | Clients]}),
{noreply, St};
[] ->
Pid = spawn_link(?MODULE, fetch_doc_data, [OpenerKey]),
- ets:insert(?OPENING, #opener{key=OpenerKey, pid=Pid, clients=[From]}),
+ ets:insert(?OPENERS, #opener{key=OpenerKey, pid=Pid, clients=[From]}),
{noreply, St}
end;
@@ -174,38 +150,19 @@ handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-handle_cast({evict, DbName}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
- {noreply, St};
-
-handle_cast({evict, DbName, DDocIds}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName, DDocIds}),
+% The do_evict clauses are upgrades while we're
+% in a rolling reboot.
+handle_cast({do_evict, _} = Msg, St) ->
+ gen_server:cast(?LRU, Msg),
{noreply, St};
-handle_cast({do_evict, DbName}, St) ->
- DDocIds = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1', '_'}, '_')),
- handle_cast({do_evict, DbName, DDocIds}, St);
-
-handle_cast({do_evict, DbName, DDocIds}, St) ->
- CustomKeys = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1'}, '_')),
- lists:foreach(fun(Mod) ->
- ets_lru:remove(?CACHE, {DbName, Mod})
- end, CustomKeys),
- lists:foreach(fun(DDocId) ->
- Revs = ets_lru:match(?CACHE, {DbName, DDocId, '$1'}, '_'),
- lists:foreach(fun([Rev]) ->
- ets_lru:remove(?CACHE, {DbName, DDocId, Rev})
- end, Revs)
- end, DDocIds),
+% Forward three-tuple do_evict upgrades to the LRU as well. The
+% pattern must be bound as Msg (it wasn't, leaving Msg unbound) and
+% the cast needs a trailing comma before {noreply, St}.
+handle_cast({do_evict, _, _} = Msg, St) ->
+ gen_server:cast(?LRU, Msg),
{noreply, St};
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-handle_info({'EXIT', Pid, Reason}, #st{evictor=Pid}=St) ->
- couch_log:error("ddoc_cache_opener evictor died ~w", [Reason]),
- {ok, Evictor} = couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
- {noreply, St#st{evictor=Evictor}};
handle_info({'EXIT', _Pid, {open_ok, OpenerKey, Resp}}, St) ->
respond(OpenerKey, {open_ok, Resp}),
@@ -217,10 +174,10 @@ handle_info({'EXIT', _Pid, {open_error, OpenerKey, Type, Error}}, St) ->
handle_info({'EXIT', Pid, Reason}, St) ->
Pattern = #opener{pid=Pid, _='_'},
- case ets:match_object(?OPENING, Pattern) of
+ case ets:match_object(?OPENERS, Pattern) of
[#opener{key=OpenerKey, clients=Clients}] ->
- _ = [gen_server:reply(C, {error, Reason}) || C <- Clients],
- ets:delete(?OPENING, OpenerKey),
+ [gen_server:reply(C, {error, Reason}) || C <- Clients],
+ ets:delete(?OPENERS, OpenerKey),
{noreply, St};
[] ->
{stop, {unknown_pid_died, {Pid, Reason}}, St}
@@ -238,14 +195,14 @@ code_change(_OldVsn, State, _Extra) ->
({dbname(), docid(), revision()}) -> no_return().
fetch_doc_data({DbName, validation_funs}=OpenerKey) ->
{ok, Funs} = recover_validation_funs(DbName),
- ok = ets_lru:insert(?CACHE, OpenerKey, Funs),
+ ok = ddoc_cache_lru:insert(OpenerKey, Funs),
exit({open_ok, OpenerKey, {ok, Funs}});
fetch_doc_data({DbName, Mod}=OpenerKey) when is_atom(Mod) ->
% This is not actually a docid but rather a custom cache key.
% Treat the argument as a code module and invoke its recover function.
try Mod:recover(DbName) of
{ok, Result} ->
- ok = ets_lru:insert(?CACHE, OpenerKey, Result),
+ ok = ddoc_cache_lru:insert(OpenerKey, Result),
exit({open_ok, OpenerKey, {ok, Result}});
Else ->
exit({open_ok, OpenerKey, Else})
@@ -258,7 +215,7 @@ fetch_doc_data({DbName, DocId}=OpenerKey) ->
{ok, Doc} ->
{RevDepth, [RevHash| _]} = Doc#doc.revs,
Rev = {RevDepth, RevHash},
- ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
+ ok = ddoc_cache_lru:insert({DbName, DocId, Rev}, Doc),
exit({open_ok, OpenerKey, {ok, Doc}});
Else ->
exit({open_ok, OpenerKey, Else})
@@ -269,7 +226,7 @@ fetch_doc_data({DbName, DocId}=OpenerKey) ->
fetch_doc_data({DbName, DocId, Rev}=OpenerKey) ->
try recover_doc(DbName, DocId, Rev) of
{ok, Doc} ->
- ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
+ ok = ddoc_cache_lru:insert({DbName, DocId, Rev}, Doc),
exit({open_ok, OpenerKey, {ok, Doc}});
Else ->
exit({open_ok, OpenerKey, Else})
@@ -287,6 +244,6 @@ handle_open_response(Resp) ->
end.
respond(OpenerKey, Resp) ->
- [#opener{clients=Clients}] = ets:lookup(?OPENING, OpenerKey),
+ [#opener{clients=Clients}] = ets:lookup(?OPENERS, OpenerKey),
_ = [gen_server:reply(C, Resp) || C <- Clients],
- ets:delete(?OPENING, OpenerKey).
+ ets:delete(?OPENERS, OpenerKey).
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
index 85e90b3..ddb1232 100644
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ b/src/ddoc_cache/src/ddoc_cache_sup.erl
@@ -27,12 +27,20 @@ start_link() ->
init([]) ->
Children = [
{
+ ddoc_cache_tables,
+ {ddoc_cache_tables, start_link, []},
+ permanent,
+ 5000,
+ worker,
+ [ddoc_cache_tables]
+ },
+ {
ddoc_cache_lru,
- {ets_lru, start_link, [ddoc_cache_lru, lru_opts()]},
+ {ddoc_cache_lru, start_link, []},
permanent,
5000,
worker,
- [ets_lru]
+ [ddoc_cache_lru]
},
{
ddoc_cache_opener,
@@ -43,25 +51,4 @@ init([]) ->
[ddoc_cache_opener]
}
],
- {ok, {{one_for_one, 5, 10}, Children}}.
-
-
-lru_opts() ->
- case application:get_env(ddoc_cache, max_objects) of
- {ok, MxObjs} when is_integer(MxObjs), MxObjs >= 0 ->
- [{max_objects, MxObjs}];
- _ ->
- []
- end ++
- case application:get_env(ddoc_cache, max_size) of
- {ok, MxSize} when is_integer(MxSize), MxSize >= 0 ->
- [{max_size, MxSize}];
- _ ->
- []
- end ++
- case application:get_env(ddoc_cache, max_lifetime) of
- {ok, MxLT} when is_integer(MxLT), MxLT >= 0 ->
- [{max_lifetime, MxLT}];
- _ ->
- []
- end.
+ {ok, {{one_for_all, 5, 10}, Children}}.
diff --git a/src/ddoc_cache/src/ddoc_cache_tables.erl b/src/ddoc_cache/src/ddoc_cache_tables.erl
new file mode 100644
index 0000000..9b35943
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_tables.erl
@@ -0,0 +1,64 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_tables).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+ start_link/0
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+% Start the table-owning server, registered locally as ?MODULE.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+ % This process owns the ets tables so they survive restarts of
+ % the LRU and opener servers; public so clients read directly.
+ % The openers table macro is ?OPENERS (defined in ddoc_cache.hrl);
+ % ?OPENING was undefined and would not compile.
+ BaseOpts = [public, named_table],
+ ets:new(?CACHE, [set, {read_concurrency, true}] ++ BaseOpts),
+ ets:new(?ATIMES, [sorted_set] ++ BaseOpts),
+ ets:new(?OPENERS, [set, {keypos, #opener.key}] ++ BaseOpts),
+ {ok, nil}.
+
+
+% Nothing to clean up; named ets tables die with this process.
+terminate(_Reason, _St) ->
+ ok.
+
+
+% This server exists only to own tables; any call is a bug.
+handle_call(Msg, _From, St) ->
+ {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+% No casts are expected; crash loudly on any.
+handle_cast(Msg, St) ->
+ {stop, {invalid_cast, Msg}, St}.
+
+
+% No info messages are expected; crash loudly on any.
+handle_info(Msg, St) ->
+ {stop, {invalid_info, Msg}, St}.
+
+
+% Standard no-op upgrade hook.
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
--
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.