Posted to commits@couchdb.apache.org by da...@apache.org on 2017/07/10 18:29:23 UTC

[couchdb] branch optimize-ddoc-cache updated (66dd4f0 -> 62bfbf4)

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a change to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    omit 66dd4f0  FIXUP: Don't manually track cache size
    omit 5238348  FIXUP: Ignore unused variable
    omit 7c376bd  FIXUP: Remember to clear out the waiters list
    omit 9151fb5  FIXUP: Make the refresh timeout configurable
    omit 85b5385  FIXUP: Reuse fabric lookup results
    omit 0e9aadc  FIXUP: Comment on use of no_ddocid
    omit 32cd254  FIXUP: Evict unused entries
    omit 8e5b15f  Remove debug logging
    omit 413322b  FIXUP: Automatically detect coverage runs
    omit 70e9c6e  FIXUP: Add tests for no VDU behavior
    omit e9714e8  FIXUP: Re-enable code purging
    omit 5417c56  FIXUP: Add gen_server behavior
    omit 1fa3851  TMP: Simple benchmark script
    omit 409e71c  Rewrite ddoc_cache to improve performance
    omit d7d1f8b  Remove duplicated eviction messages
     add 06772ad  Update CONTRIBUTING.md to reflect monorepo
     add a5b8795  Merge pull request #625 from apache/contributing-md-update
     add 409ea97  Also enable node decom using string "true"
     add 7589340  bump docs dep
     add 736e732  Fix mismatch between MAX_DBS_OPEN and default.ini
     add dbe6cf7  Fix eunit timeout option for compression tests
     add 7f54516  Remove yet another invalid test case (ddoc_cache related)
     add 858088e  Make map functions distinct with each invocation
     add cc42357  feat: enable compaction daemon by default
     add 8b127f4  test: expect compaction daemon to be off
     add ff32a15  Fix couch_replicator_changes_reader:process_change
     add 5b329ac  Merge pull request #648 from cloudant/fixup-for-fix-replicator-progress-reporting-2
     add 3b23d21  Remove some sleeps from change feed test.
     add add912a  Bump jiffy dep
     add a461c44  really disable compaction daemon for JS tests
     add 7885d80  wipe all databases on test exit
     add a1b5e13  disable unstable stats.js test
     add 3195578  Pass db open options to fabric_view_map
     add 3e4da60  Merge pull request #645 from cloudant/pass-user_ctx-in-fabric_view_map
     add 83f085d  Fix Windows build
     add da7aa54  Disable compaction daemon on eunit run couch startups
     add 13a6e1f  Merge branch 'master' of https://github.com/apache/couchdb
     add 017d76f  Fix Windows release builds
     add 34b803a  Remove deprecated OAuth 1.0 implementation
     new 8e13441  Remove duplicated eviction messages
     new b879a4b  Rewrite ddoc_cache to improve performance
     new 123d875  TMP: Simple benchmark script
     new 738610a  FIXUP: Add gen_server behavior
     new 4c5bc14  FIXUP: Re-enable code purging
     new bb6a651  FIXUP: Add tests for no VDU behavior
     new 5d871fb  FIXUP: Automatically detect coverage runs
     new 6fd34e3  Remove debug logging
     new f1fa94e  FIXUP: Evict unused entries
     new a54baa2  FIXUP: Comment on use of no_ddocid
     new a10cff2  FIXUP: Reuse fabric lookup results
     new cd3687a  FIXUP: Make the refresh timeout configurable
     new 9fbb086  FIXUP: Remember to clear out the waiters list
     new 0743e3f  FIXUP: Ignore unused variable
     new 66f03d2  FIXUP: Don't manually track cache size
     new 69dcaa4  FIXUP: Crash the LRU if its evictor dies
     new 62bfbf4  FIXUP: Remove unnecessary catch

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (66dd4f0)
            \
             N -- N -- N   refs/heads/optimize-ddoc-cache (62bfbf4)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 17 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CONTRIBUTING.md                                    |   8 +-
 LICENSE                                            |  25 -
 Makefile.win                                       |   8 +-
 NOTICE                                             |   4 -
 build-aux/print-committerlist.sh                   |   2 +-
 configure.ps1                                      |   2 +-
 dev/run                                            |   4 +-
 license.skip                                       |   2 -
 rebar.config.script                                |   8 +-
 rel/overlay/etc/default.ini                        |  31 +-
 rel/reltool.config                                 |   2 -
 src/chttpd/src/chttpd_show.erl                     |   3 +-
 src/chttpd/src/chttpd_view.erl                     |  10 +-
 src/couch/include/couch_js_functions.hrl           |  18 -
 src/couch/src/couch.app.src                        |   1 -
 src/couch/src/couch.erl                            |   1 -
 src/couch/src/couch_httpd_handlers.erl             |   1 -
 src/couch/src/couch_httpd_oauth.erl                | 391 ----------------
 src/couch/src/couch_server.erl                     |   2 +-
 src/couch/src/test_util.erl                        |   2 +-
 src/couch/test/chttpd_endpoints_tests.erl          |   1 -
 src/couch/test/couch_changes_tests.erl             |  37 +-
 src/couch/test/couchdb_auth_tests.erl              |   2 +-
 src/couch/test/couchdb_compaction_daemon_tests.erl |   5 +-
 src/couch/test/couchdb_file_compression_tests.erl  |  26 +-
 src/couch/test/couchdb_vhosts_tests.erl            | 139 ------
 src/couch/test/fixtures/os_daemon_configer.escript |   1 -
 .../src/couch_replicator_changes_reader.erl        |   2 +-
 src/ddoc_cache/src/ddoc_cache_lru.erl              |  11 +-
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl   |  18 +-
 src/fabric/src/fabric.erl                          |  23 +-
 src/fabric/src/fabric_view_map.erl                 |  11 +-
 src/mem3/src/mem3.erl                              |  35 +-
 test/javascript/couch.js                           |   3 +
 test/javascript/oauth.js                           | 511 ---------------------
 test/javascript/run                                |   1 -
 test/javascript/tests/config.js                    |   3 -
 test/javascript/tests/delayed_commits.js           |   6 +
 test/javascript/tests/design_docs.js               |  16 -
 test/javascript/tests/oauth_users_db.js            | 168 -------
 test/javascript/tests/proxyauth.js                 |   2 +
 test/javascript/tests/reader_acl.js                |   7 +-
 test/javascript/tests/replication.js               |   3 -
 test/javascript/tests/replicator_db_security.js    |  28 --
 test/javascript/tests/rev_stemming.js              |   2 +
 test/javascript/tests/rewrite.js                   |   5 +-
 test/javascript/tests/rewrite_js.js                |   3 +
 test/javascript/tests/security_validation.js       |   5 +
 test/javascript/tests/stats.js                     |  25 +
 49 files changed, 207 insertions(+), 1417 deletions(-)
 delete mode 100644 src/couch/src/couch_httpd_oauth.erl
 delete mode 100644 test/javascript/oauth.js
 delete mode 100644 test/javascript/tests/oauth_users_db.js

-- 
To stop receiving notification emails like this one, please contact
['"commits@couchdb.apache.org" <co...@couchdb.apache.org>'].

[couchdb] 07/17: FIXUP: Automatically detect coverage runs

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5d871fb39c38f1d88af8a6df86c6bc9949ef4a9c
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 11:08:34 2017 -0500

    FIXUP: Automatically detect coverage runs
---
 src/ddoc_cache/test/ddoc_cache_tutil.erl | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
index acf3db1..6463b38 100644
--- a/src/ddoc_cache/test/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -83,8 +83,13 @@ purge_modules() ->
     case application:get_key(ddoc_cache, modules) of
         {ok, Mods} ->
             lists:foreach(fun(Mod) ->
-                code:delete(Mod),
-                code:purge(Mod)
+                case code:which(Mod) of
+                    cover_compiled ->
+                        ok;
+                    _ ->
+                        code:delete(Mod),
+                        code:purge(Mod)
+                end
             end, Mods);
         undefined ->
             ok

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 11/17: FIXUP: Reuse fabric lookup results

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a10cff2cd8ddb3380fa78adf2f50e0c67c7ce919
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 13:48:15 2017 -0500

    FIXUP: Reuse fabric lookup results
    
    At @chewbranca's suggestion I've gone ahead and made the optimization to
    insert the revid or non-revid specific version of the requested design
    document.
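
In other words, whichever form of the key was requested, a successful fabric
lookup now also seeds the companion entry so the next open of the other form
is a cache hit. A reduced sketch of the ddocid -> ddocid_rev direction, using
the {Module, Arg} key convention from this branch (the module and function
names in the sketch are illustrative, not part of the patch):

    -module(ddoc_cache_fanout_sketch).
    -include_lib("couch/include/couch_db.hrl").
    -export([seed_rev_entry/3]).

    %% After an open by ddocid succeeds, derive the rev-specific key from the
    %% doc's current revision and insert it out of band, mirroring insert/2
    %% in ddoc_cache_entry_ddocid.erl below.
    seed_rev_entry(DbName, DDocId, {ok, #doc{revs = {Depth, [RevId | _]}} = DDoc}) ->
        Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, {Depth, RevId}}},
        spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end),
        ok;
    seed_rev_entry(_DbName, _DDocId, _NotFound) ->
        ok.
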
---
 src/ddoc_cache/src/ddoc_cache_entry.erl            | 33 +++++++++++++++---
 src/ddoc_cache/src/ddoc_cache_entry_custom.erl     |  7 +++-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     | 13 +++++++-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl | 12 ++++++-
 .../src/ddoc_cache_entry_validation_funs.erl       |  7 +++-
 src/ddoc_cache/src/ddoc_cache_lru.erl              | 30 ++++++++++++-----
 src/ddoc_cache/test/ddoc_cache_basic_test.erl      | 39 +++++++++++++++++-----
 src/ddoc_cache/test/ddoc_cache_entry_test.erl      |  6 ++--
 src/ddoc_cache/test/ddoc_cache_eviction_test.erl   |  6 ++--
 src/ddoc_cache/test/ddoc_cache_no_cache_test.erl   |  1 +
 src/ddoc_cache/test/ddoc_cache_open_error_test.erl |  2 +-
 src/ddoc_cache/test/ddoc_cache_refresh_test.erl    | 17 +++++++---
 src/ddoc_cache/test/ddoc_cache_remove_test.erl     | 30 +++++++++++++----
 13 files changed, 161 insertions(+), 42 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 914e32e..79c3dcf 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -18,8 +18,9 @@
     dbname/1,
     ddocid/1,
     recover/1,
+    insert/2,
 
-    start_link/1,
+    start_link/2,
     shutdown/1,
     open/2,
     accessed/1,
@@ -65,8 +66,12 @@ recover({Mod, Arg}) ->
     Mod:recover(Arg).
 
 
-start_link(Key) ->
-    Pid = proc_lib:spawn_link(?MODULE, init, [Key]),
+insert({Mod, Arg}, Value) ->
+    Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+    Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
     {ok, Pid}.
 
 
@@ -99,7 +104,7 @@ refresh(Pid) ->
     gen_server:cast(Pid, force_refresh).
 
 
-init(Key) ->
+init({Key, undefined}) ->
     true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
     St = #st{
         key = Key,
@@ -108,6 +113,26 @@ init(Key) ->
         accessed = 1
     },
     ?EVENT(started, Key),
+    gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Default}) ->
+    Updates = [
+        {#entry.val, Default},
+        {#entry.pid, self()}
+    ],
+    NewTs = os:timestamp(),
+    true = ets:update_element(?CACHE, Key, Updates),
+    true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+    Msg = {'$gen_cast', refresh},
+    St = #st{
+        key = Key,
+        val = {open_ok, {ok, Default}},
+        opener = erlang:send_after(?REFRESH_TIMEOUT, self(), Msg),
+        waiters = undefined,
+        ts = NewTs,
+        accessed = 1
+    },
+    ?EVENT(default_started, Key),
     gen_server:enter_loop(?MODULE, [], St).
 
 
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
index d858ad6..9eaf16f 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
@@ -16,7 +16,8 @@
 -export([
     dbname/1,
     ddocid/1,
-    recover/1
+    recover/1,
+    insert/2
 ]).
 
 
@@ -30,3 +31,7 @@ ddocid(_) ->
 
 recover({DbName, Mod}) ->
     Mod:recover(DbName).
+
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index cac9abc..5248469 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -16,7 +16,8 @@
 -export([
     dbname/1,
     ddocid/1,
-    recover/1
+    recover/1,
+    insert/2
 ]).
 
 
@@ -33,3 +34,13 @@ ddocid({_, DDocId}) ->
 
 recover({DbName, DDocId}) ->
     fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
+
+
+insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
+    {Depth, [RevId | _]} = Revs,
+    Rev = {Depth, RevId},
+    Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, Rev}},
+    spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 012abab..868fa77 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -16,7 +16,8 @@
 -export([
     dbname/1,
     ddocid/1,
-    recover/1
+    recover/1,
+    insert/2
 ]).
 
 
@@ -35,3 +36,12 @@ recover({DbName, DDocId, Rev}) ->
     Opts = [ejson_body, ?ADMIN_CTX],
     {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
     Resp.
+
+
+insert({DbName, DDocId, _Rev}, {ok, #doc{} = DDoc}) ->
+    Key = {ddoc_cache_entry_ddocid, {DbName, DDocId}},
+    spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
+
+insert(_, _) ->
+    ok.
+
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
index 3d43f7a..2182dea 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
@@ -16,7 +16,8 @@
 -export([
     dbname/1,
     ddocid/1,
-    recover/1
+    recover/1,
+    insert/2
 ]).
 
 
@@ -37,3 +38,7 @@ recover(DbName) ->
         end
     end, DDocs),
     {ok, Funs}.
+
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index cbe481e..6ae4de4 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -18,6 +18,7 @@
 -export([
     start_link/0,
     open/1,
+    insert/2,
     refresh/2
 ]).
 
@@ -53,9 +54,9 @@ start_link() ->
 open(Key) ->
     try ets:lookup(?CACHE, Key) of
         [] ->
-            lru_start(Key);
+            lru_start(Key, true);
         [#entry{pid = undefined}] ->
-            lru_start(Key);
+            lru_start(Key, false);
         [#entry{val = undefined, pid = Pid}] ->
             couch_stats:increment_counter([ddoc_cache, miss]),
             ddoc_cache_entry:open(Pid, Key);
@@ -69,6 +70,15 @@ open(Key) ->
     end.
 
 
+insert(Key, Value) ->
+    case ets:lookup(?CACHE, Key) of
+        [] ->
+            gen_server:call(?MODULE, {start, Key, Value}, infinity);
+        [#entry{}] ->
+            ok
+    end.
+
+
 refresh(DbName, DDocIds) ->
     gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
 
@@ -96,7 +106,7 @@ terminate(_Reason, St) ->
     ok.
 
 
-handle_call({start, Key}, _From, St) ->
+handle_call({start, Key, Default}, _From, St) ->
     #st{
         pids = Pids,
         dbs = Dbs,
@@ -108,7 +118,7 @@ handle_call({start, Key}, _From, St) ->
             case trim(St, CurSize, max(0, MaxSize)) of
                 {ok, N} ->
                     true = ets:insert_new(?CACHE, #entry{key = Key}),
-                    {ok, Pid} = ddoc_cache_entry:start_link(Key),
+                    {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
                     true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
                     ok = khash:put(Pids, Pid, Key),
                     store_key(Dbs, Key, Pid),
@@ -167,7 +177,7 @@ handle_cast({do_refresh, DbName, DDocIdList}, St) ->
             lists:foreach(fun(DDocId) ->
                 case khash:lookup(DDocIds, DDocId) of
                     {value, Keys} ->
-                        khash:fold(Keys, fun(_, Pid, _) ->
+                        khash:fold(Keys, fun(Key, Pid, _) ->
                             ddoc_cache_entry:refresh(Pid)
                         end, nil);
                     not_found ->
@@ -222,11 +232,15 @@ handle_db_event(_DbName, _Event, St) ->
     {ok, St}.
 
 
-lru_start(Key) ->
-    case gen_server:call(?MODULE, {start, Key}, infinity) of
+lru_start(Key, DoInsert) ->
+    case gen_server:call(?MODULE, {start, Key, undefined}, infinity) of
         {ok, Pid} ->
             couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_entry:open(Pid, Key);
+            Resp = ddoc_cache_entry:open(Pid, Key),
+            if not DoInsert -> ok; true ->
+                ddoc_cache_entry:insert(Key, Resp)
+            end,
+            Resp;
         full ->
             couch_stats:increment_counter([ddoc_cache, recovery]),
             ddoc_cache_entry:recover(Key)
diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
index f908c78..7f6dbc9 100644
--- a/src/ddoc_cache/test/ddoc_cache_basic_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
@@ -27,11 +27,22 @@ recover(DbName) ->
     {ok, {DbName, totes_custom}}.
 
 
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
 check_basic_test_() ->
     {
         setup,
-        fun ddoc_cache_tutil:start_couch/0,
-        fun ddoc_cache_tutil:stop_couch/1,
+        fun start_couch/0,
+        fun stop_couch/1,
         {with, [
             fun cache_ddoc/1,
             fun cache_ddoc_rev/1,
@@ -58,25 +69,31 @@ check_no_vdu_test_() ->
 
 cache_ddoc({DbName, _}) ->
     ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
     ?assertEqual(0, ets:info(?CACHE, size)),
     Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR),
     ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
-    ?assertEqual(1, ets:info(?CACHE, size)),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
     Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR),
     ?assertEqual(Resp1, Resp2),
-    ?assertEqual(1, ets:info(?CACHE, size)).
+    ?assertEqual(2, ets:info(?CACHE, size)).
 
 
 cache_ddoc_rev({DbName, _}) ->
     ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
     Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
     ?assertEqual(0, ets:info(?CACHE, size)),
     Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
     ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
-    ?assertEqual(1, ets:info(?CACHE, size)),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
     Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
     ?assertEqual(Resp1, Resp2),
-    ?assertEqual(1, ets:info(?CACHE, size)),
+    ?assertEqual(2, ets:info(?CACHE, size)),
 
     % Assert that the non-rev cache entry is separate
     Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR),
@@ -108,12 +125,16 @@ cache_custom({DbName, _}) ->
 
 cache_ddoc_refresher_unchanged({DbName, _}) ->
     ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
     ?assertEqual(0, ets:info(?CACHE, size)),
     ddoc_cache:open_doc(DbName, ?FOOBAR),
-    [Entry1] = ets:lookup(?CACHE, ets:first(?CACHE)),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)),
     ddoc_cache:open_doc(DbName, ?FOOBAR),
-    [Entry2] = ets:lookup(?CACHE, ets:first(?CACHE)),
-    ?assertEqual(Entry1, Entry2).
+    meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000),
+    Tab2 = lists:sort(ets:tab2list(?CACHE)),
+    ?assertEqual(Tab2, Tab1).
 
 
 dont_cache_not_found({DbName, _}) ->
diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
index e593bf7..381185c 100644
--- a/src/ddoc_cache/test/ddoc_cache_entry_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
@@ -61,7 +61,7 @@ check_entry_test_() ->
 cancel_and_replace_opener(_) ->
     Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
     true = ets:insert_new(?CACHE, #entry{key = Key}),
-    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
     Opener1 = element(4, sys:get_state(Entry)),
     Ref1 = erlang:monitor(process, Opener1),
     gen_server:cast(Entry, force_refresh),
@@ -78,7 +78,7 @@ condenses_access_messages({DbName, _}) ->
     meck:reset(ddoc_cache_ev),
     Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
     true = ets:insert(?CACHE, #entry{key = Key}),
-    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
     erlang:suspend_process(Entry),
     lists:foreach(fun(_) ->
         gen_server:cast(Entry, accessed)
@@ -105,7 +105,7 @@ evict_when_not_accessed(_) ->
     meck:reset(ddoc_cache_ev),
     Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
     true = ets:insert_new(?CACHE, #entry{key = Key}),
-    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
     Ref = erlang:monitor(process, Entry),
     ?assertEqual(1, element(7, sys:get_state(Entry))),
     ok = gen_server:cast(Entry, refresh),
diff --git a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
index 0b9f57b..30b4fb7 100644
--- a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
@@ -86,8 +86,10 @@ check_upgrade_clause({DbName, _}) ->
     ddoc_cache_tutil:clear(),
     meck:reset(ddoc_cache_ev),
     {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-    ?assertEqual(1, ets:info(?CACHE, size)),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
     gen_server:cast(ddoc_cache_opener, {do_evict, DbName}),
     meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
-    meck:wait(ddoc_cache_ev, event, [removed, '_'], 1000),
+    meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000),
     ?assertEqual(0, ets:info(?CACHE, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
index a5a5751..637a6e8 100644
--- a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
@@ -20,6 +20,7 @@
 ddoc(DDocId) ->
     {ok, #doc{
         id = DDocId,
+        revs = {1, [<<"deadbeefdeadbeef">>]},
         body = {[
             {<<"ohai">>, null}
         ]}
diff --git a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
index 0ac2390..f3a9b10 100644
--- a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
@@ -31,7 +31,7 @@ stop_couch(Ctx) ->
     ddoc_cache_tutil:stop_couch(Ctx).
 
 
-check_basic_test_() ->
+check_open_error_test_() ->
     {
         setup,
         fun start_couch/0,
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
index 7bc1704..f145987 100644
--- a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
@@ -58,8 +58,11 @@ refresh_ddoc({DbName, _}) ->
     ddoc_cache_tutil:clear(),
     meck:reset(ddoc_cache_ev),
     {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-    ?assertEqual(1, ets:info(?CACHE, size)),
-    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)),
     NewDDoc = DDoc#doc{
         body = {[{<<"foo">>, <<"baz">>}]}
     },
@@ -69,7 +72,7 @@ refresh_ddoc({DbName, _}) ->
     },
     meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000),
     ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
-    ?assertEqual(1, ets:info(?CACHE, size)).
+    ?assertEqual(2, ets:info(?CACHE, size)).
 
 
 refresh_ddoc_rev({DbName, _}) ->
@@ -77,7 +80,11 @@ refresh_ddoc_rev({DbName, _}) ->
     meck:reset(ddoc_cache_ev),
     Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
     {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
-    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)),
     NewDDoc = DDoc#doc{
         body = {[{<<"foo">>, <<"kazam">>}]}
     },
@@ -86,7 +93,7 @@ refresh_ddoc_rev({DbName, _}) ->
     % getting the same original response from the cache
     meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
     ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
-    ?assertEqual(1, ets:info(?CACHE, size)).
+    ?assertEqual(2, ets:info(?CACHE, size)).
 
 
 refresh_vdu({DbName, _}) ->
diff --git a/src/ddoc_cache/test/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
index 7596b99..8787482 100644
--- a/src/ddoc_cache/test/ddoc_cache_remove_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
@@ -67,16 +67,26 @@ remove_ddoc({DbName, _}) ->
     meck:reset(ddoc_cache_ev),
     ?assertEqual(0, ets:info(?CACHE, size)),
     {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-    ?assertEqual(1, ets:info(?CACHE, size)),
-    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE),
+    {Depth, [RevId | _]} = DDoc#doc.revs,
     NewDDoc = DDoc#doc{
         deleted = true,
         body = {[]}
     },
     {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
-    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+
+    DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}},
+    Rev = {Depth, RevId},
+    DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}},
+    meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000),
+
     ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
-    ?assertEqual(0, ets:info(?CACHE, size)).
+    ?assertEqual(1, ets:info(?CACHE, size)).
 
 
 remove_ddoc_rev({DbName, _}) ->
@@ -84,7 +94,15 @@ remove_ddoc_rev({DbName, _}) ->
     meck:reset(ddoc_cache_ev),
     Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
     {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
-    [#entry{key = Key, val = DDoc, pid = Pid}] = ets:tab2list(?CACHE),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    % Notice the sort so that we know we're getting the
+    % revid version second.
+    [_, #entry{key = Key, val = DDoc, pid = Pid}]
+            = lists:sort(ets:tab2list(?CACHE)),
+
     NewDDoc = DDoc#doc{
         body = {[{<<"an">>, <<"update">>}]}
     },
@@ -101,7 +119,7 @@ remove_ddoc_rev({DbName, _}) ->
             {{not_found, missing}, _},
             ddoc_cache:open_doc(DbName, ?VDU, Rev)
         ),
-    ?assertEqual(0, ets:info(?CACHE, size)).
+    ?assertEqual(1, ets:info(?CACHE, size)).
 
 
 remove_ddoc_rev_only({DbName, _}) ->

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 03/17: TMP: Simple benchmark script

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 123d875e9c21445af255463004d3d65204d9971c
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 28 10:58:34 2017 -0500

    TMP: Simple benchmark script
---
 src/ddoc_cache/src/ddoc_cache_speed.erl | 61 +++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/src/ddoc_cache/src/ddoc_cache_speed.erl b/src/ddoc_cache/src/ddoc_cache_speed.erl
new file mode 100644
index 0000000..38cd0b9
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_speed.erl
@@ -0,0 +1,61 @@
+-module(ddoc_cache_speed).
+
+-export([
+    go/1,
+    recover/1
+]).
+
+
+-define(RANGE, 1000).
+
+
+go(WorkerCount) when is_integer(WorkerCount), WorkerCount > 0 ->
+    spawn_workers(WorkerCount),
+    report().
+
+
+recover(DbName) ->
+    {ok, {stuff, DbName}}.
+
+
+spawn_workers(0) ->
+    ok;
+
+spawn_workers(WorkerCount) ->
+    Self = self(),
+    WorkerDb = list_to_binary(integer_to_list(WorkerCount)),
+    spawn(fun() ->
+        do_work(Self, WorkerDb, 0)
+    end),
+    spawn_workers(WorkerCount - 1).
+
+
+do_work(Parent, WorkerDb, Count) when Count >= 25 ->
+    Parent ! {done, Count},
+    do_work(Parent, WorkerDb, 0);
+
+do_work(Parent, WorkerDb, Count) ->
+    {ok, _} = ddoc_cache:open_custom(WorkerDb, ?MODULE),
+    do_work(Parent, WorkerDb, Count + 1).
+
+
+report() ->
+    report(os:timestamp(), 0).
+
+
+report(Start, Count) ->
+    Now = os:timestamp(),
+    case timer:now_diff(Now, Start) of
+        N when N > 1000000 ->
+            {_, MQL} = process_info(whereis(ddoc_cache_lru), message_queue_len),
+            io:format("~p ~p~n", [Count, MQL]),
+            report(Now, 0);
+        _ ->
+            receive
+                {done, Done} ->
+                    report(Start, Count + Done)
+            after 100 ->
+                report(Start, Count)
+            end
+    end.
+
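
For reference, a rough way to drive the script from a shell on a dev node;
the loop runs until you kill it, printing the number of cache opens completed
in the last second next to the ddoc_cache_lru message queue length (the
worker count below is arbitrary):

    %% In a remsh/dev node shell:
    ddoc_cache_speed:go(100).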

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 01/17: Remove duplicated eviction messages

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 8e134414f5998d4a29214d7e4e1d8320ae1e1173
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jun 20 10:20:01 2017 -0500

    Remove duplicated eviction messages
    
    This removes an old merge artifact that was sending the event
    notifications twice per design document update.
---
 src/couch/src/couch_db_updater.erl | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 49061b2..2b448fd 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -923,16 +923,7 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
         (_) -> []
     end, Ids),
 
-    Db4 = case length(UpdatedDDocIds) > 0 of
-        true ->
-            couch_event:notify(Db3#db.name, ddoc_updated),
-            ddoc_cache:evict(Db3#db.name, UpdatedDDocIds),
-            refresh_validate_doc_funs(Db3);
-        false ->
-            Db3
-    end,
-
-    {ok, commit_data(Db4, not FullCommit), UpdatedDDocIds}.
+    {ok, commit_data(Db3, not FullCommit), UpdatedDDocIds}.
 
 update_local_docs(Db, []) ->
     {ok, Db};

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 10/17: FIXUP: Comment on use of no_ddocid

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a54baa2c9968d192c3d91d21575e279a6cea865b
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 13:07:55 2017 -0500

    FIXUP: Comment on use of no_ddocid
---
 src/ddoc_cache/src/ddoc_cache_lru.erl | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 5d47639..cbe481e 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -158,6 +158,10 @@ handle_cast({do_refresh, DbName, DDocIdList}, St) ->
     #st{
         dbs = Dbs
     } = St,
+    % We prepend no_ddocid to the DDocIdList below
+    % so that we refresh all custom and validation
+    % function entries which load data from all
+    % design documents.
     case khash:lookup(Dbs, DbName) of
         {value, DDocIds} ->
             lists:foreach(fun(DDocId) ->
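
Put differently, the do_refresh loop walks [no_ddocid | DDocIdList] rather
than DDocIdList alone, so the entries keyed on no_ddocid (custom and
validation_funs) are refreshed on any design document change. A standalone
sketch of that inner loop (the real version lives in the
handle_cast({do_refresh, ...}) clause above; names here are illustrative):

    -module(ddoc_cache_refresh_sketch).
    -export([refresh_db/2]).

    %% DDocIds maps a design doc id (or the no_ddocid bucket) to a khash of
    %% cache Key -> entry Pid, as stored by ddoc_cache_lru.
    refresh_db(DDocIds, DDocIdList) ->
        lists:foreach(fun(DDocId) ->
            case khash:lookup(DDocIds, DDocId) of
                {value, Keys} ->
                    khash:fold(Keys, fun(_Key, Pid, _Acc) ->
                        ddoc_cache_entry:refresh(Pid)
                    end, nil);
                not_found ->
                    ok
            end
        end, [no_ddocid | DDocIdList]).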

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 04/17: FIXUP: Add gen_server behavior

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 738610aeca17c570e8506ea6986d253a45adb7c6
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 30 11:05:35 2017 -0500

    FIXUP: Add gen_server behavior
---
 src/ddoc_cache/src/ddoc_cache_entry.erl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 4dee2a1..6213746 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -11,6 +11,7 @@
 % the License.
 
 -module(ddoc_cache_entry).
+-behaviour(gen_server).
 
 
 -export([

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 08/17: Remove debug logging

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6fd34e31a344f21e85e799a19aaed544cc8a30bb
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 11:09:46 2017 -0500

    Remove debug logging
---
 src/ddoc_cache/test/ddoc_cache_lru_test.erl | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index f5cef16..77b39cd 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -90,13 +90,7 @@ check_multi_start(_) ->
     receive {'DOWN', OpenerRef, _, _, _} -> ok end,
     lists:foreach(fun({CPid, Ref}) ->
         receive
-            {'DOWN', Ref, _, _, normal} -> ok;
-            {'DOWN', Ref, _, _, Other} ->
-                io:format(standard_error, "OTHER: ~p~n", [Other]);
-            Other when not is_tuple(Other) orelse element(1, Other) /= 'DOWN' ->
-                io:format(standard_error, "MSG: ~p~n", [Other])
-        after 2000 ->
-            io:format(standard_error, "BLAH?!: ~p ~p", [CPid, process_info(CPid, current_stacktrace)])
+            {'DOWN', Ref, _, _, normal} -> ok
         end
     end, Clients).
 

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 05/17: FIXUP: Re-enable code purging

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 4c5bc1478c872c119e4178271a1f707db980c5f2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 30 11:38:16 2017 -0500

    FIXUP: Re-enable code purging
    
    I have to disable this locally while doing coverage reports. However, in
    CI it'll fail when something loads ddoc_cache code before we get to
    ddoc_cache's test suite. The pre-loaded version comes from the
    non-TEST-compiled module, which breaks test assertions.
---
 src/ddoc_cache/test/ddoc_cache_tutil.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
index cdd372b..6782b9d 100644
--- a/src/ddoc_cache/test/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -21,7 +21,7 @@
 
 
 start_couch() ->
-    %purge_modules(),
+    purge_modules(),
     Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
     TmpDb = ?tempdb(),
     ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 17/17: FIXUP: Remove unnecessary catch

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 62bfbf467915fe27b4fac4cb4a4a6413c159c990
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Mon Jul 10 13:26:25 2017 -0500

    FIXUP: Remove unnecessary catch
---
 src/ddoc_cache/src/ddoc_cache_lru.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index ff70342..49aa62d 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -99,7 +99,7 @@ init(_) ->
 
 terminate(_Reason, St) ->
     case is_pid(St#st.evictor) of
-        true -> catch exit(St#st.evictor, kill);
+        true -> exit(St#st.evictor, kill);
         false -> ok
     end,
     ok.

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 06/17: FIXUP: Add tests for no VDU behavior

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit bb6a651932b801c959c16c907f96f0f27ec4c37f
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 10:18:36 2017 -0500

    FIXUP: Add tests for no VDU behavior
---
 src/ddoc_cache/src/ddoc_cache_entry.erl       | 11 ++++-----
 src/ddoc_cache/test/ddoc_cache_basic_test.erl | 32 +++++++++++++++++++++++++++
 src/ddoc_cache/test/ddoc_cache_tutil.erl      |  9 +++++++-
 3 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 6213746..2a90077 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -182,17 +182,18 @@ handle_cast(Msg, St) ->
 handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
     case Resp of
         {open_ok, Key, {ok, Val}} ->
-            if not is_list(St#st.waiters) -> ok; true ->
-                respond(St#st.waiters, {open_ok, {ok, Val}})
-            end,
             update_cache(St, Val),
             Msg = {'$gen_cast', refresh},
             Timer = erlang:send_after(?REFRESH_TIMEOUT, self(), Msg),
-            NewSt = St#st{
+            NewSt1 = St#st{
                 val = {open_ok, {ok, Val}},
                 opener = Timer
             },
-            {noreply, update_lru(NewSt)};
+            NewSt2 = update_lru(NewSt1),
+            if not is_list(St#st.waiters) -> ok; true ->
+                respond(St#st.waiters, {open_ok, {ok, Val}})
+            end,
+            {noreply, NewSt2};
         {Status, Key, Other} ->
             NewSt = St#st{
                 val = {Status, Other},
diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
index 227ac54..f908c78 100644
--- a/src/ddoc_cache/test/ddoc_cache_basic_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
@@ -44,6 +44,18 @@ check_basic_test_() ->
     }.
 
 
+check_no_vdu_test_() ->
+    {
+        setup,
+        fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end,
+        fun ddoc_cache_tutil:stop_couch/1,
+        {with, [
+            fun cache_no_vdu_no_ddoc/1,
+            fun cache_no_vdu_empty_ddoc/1
+        ]}
+    }.
+
+
 cache_ddoc({DbName, _}) ->
     ddoc_cache_tutil:clear(),
     ?assertEqual(0, ets:info(?CACHE, size)),
@@ -120,3 +132,23 @@ deprecated_api_works({DbName, _}) ->
     {ok, _} = ddoc_cache:open(DbName, ?MODULE),
     {ok, _} = ddoc_cache:open(DbName, validation_funs).
 
+
+cache_no_vdu_no_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual({ok, []}, Resp),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    ?assertEqual(1, ets:info(?LRU, size)).
+
+
+cache_no_vdu_empty_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    DDoc = #doc{
+        id = <<"_design/no_vdu">>,
+        body = {[]}
+    },
+    {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]),
+    Resp = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual({ok, []}, Resp),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    ?assertEqual(1, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
index 6782b9d..acf3db1 100644
--- a/src/ddoc_cache/test/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -21,11 +21,18 @@
 
 
 start_couch() ->
+    start_couch([{write_ddocs, true}]).
+
+
+start_couch(Options) ->
+    WriteDDocs = couch_util:get_value(write_ddocs, Options, true),
     purge_modules(),
     Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
     TmpDb = ?tempdb(),
     ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
-    {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX]),
+    if not WriteDDocs -> ok; true ->
+        {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
+    end,
     {TmpDb, Ctx}.
 
 

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 09/17: FIXUP: Evict unused entries

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f1fa94eecd3128ac0b65cb22f87546f609164d59
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 11:09:58 2017 -0500

    FIXUP: Evict unused entries
    
    This addresses @chewbranca's comments about the rev-specific cache
    entries sticking around until they're evicted from the cache for size
    reasons. Now they'll disappear after the first ?REFRESH_TIMEOUT interval
    in which they aren't accessed.
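
In effect each entry now carries an access counter: accessed casts bump it, a
refresh resets it to zero once the opener is spawned, and a refresh that
arrives while it is still zero stops the entry, which drops it from the
cache. A compressed sketch of just that decision, with the state record
trimmed to the one relevant field (the real #st{} also carries key, val,
opener, waiters and the LRU timestamp):

    -module(ddoc_cache_evict_sketch).
    -export([handle_cast/2]).

    -record(st, {accessed = 1}).

    handle_cast(accessed, #st{accessed = N} = St) ->
        {noreply, St#st{accessed = N + 1}};
    handle_cast(refresh, #st{accessed = 0} = St) ->
        %% Never touched since the last refresh: stop normally to evict.
        {stop, normal, St};
    handle_cast(refresh, St) ->
        %% Was accessed; a real entry would spawn a new opener here.
        {noreply, St#st{accessed = 0}}.
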
---
 src/ddoc_cache/src/ddoc_cache_entry.erl       | 34 ++++++++++++++++++++++-----
 src/ddoc_cache/test/ddoc_cache_entry_test.erl | 23 ++++++++++++++++--
 2 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 2a90077..914e32e 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -48,7 +48,8 @@
     val,
     opener,
     waiters,
-    ts
+    ts,
+    accessed
 }).
 
 
@@ -95,7 +96,7 @@ accessed(Pid) ->
 
 
 refresh(Pid) ->
-    gen_server:cast(Pid, refresh).
+    gen_server:cast(Pid, force_refresh).
 
 
 init(Key) ->
@@ -103,7 +104,8 @@ init(Key) ->
     St = #st{
         key = Key,
         opener = spawn_opener(Key),
-        waiters = []
+        waiters = [],
+        accessed = 1
     },
     ?EVENT(started, Key),
     gen_server:enter_loop(?MODULE, [], St).
@@ -153,7 +155,25 @@ handle_call(Msg, _From, St) ->
 handle_cast(accessed, St) ->
     ?EVENT(accessed, St#st.key),
     drain_accessed(),
-    {noreply, update_lru(St)};
+    NewSt = St#st{
+        accessed = St#st.accessed + 1
+    },
+    {noreply, update_lru(NewSt)};
+
+handle_cast(force_refresh, St) ->
+    % If we had frequent design document updates
+    % they could end up racing accessed events and
+    % end up prematurely evicting this entry from
+    % cache. To prevent this we just make sure that
+    % accessed is set to at least 1 before we
+    % execute a refresh.
+    NewSt = if St#st.accessed > 0 -> St; true ->
+        St#st{accessed = 1}
+    end,
+    handle_cast(refresh, NewSt);
+
+handle_cast(refresh, #st{accessed = 0} = St) ->
+    {stop, normal, St};
 
 handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
     #st{
@@ -161,7 +181,8 @@ handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
     } = St,
     erlang:cancel_timer(Ref),
     NewSt = St#st{
-        opener = spawn_opener(Key)
+        opener = spawn_opener(Key),
+        accessed = 0
     },
     {noreply, NewSt};
 
@@ -171,7 +192,8 @@ handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
         {'DOWN', _, _, Pid, _} -> ok
     end,
     NewSt = St#st{
-        opener = spawn_opener(St#st.key)
+        opener = spawn_opener(St#st.key),
+        accessed = 0
     },
     {noreply, NewSt};
 
diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
index 62afc72..e593bf7 100644
--- a/src/ddoc_cache/test/ddoc_cache_entry_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
@@ -50,6 +50,7 @@ check_entry_test_() ->
             fun cancel_and_replace_opener/1,
             fun condenses_access_messages/1,
             fun kill_opener_on_terminate/1,
+            fun evict_when_not_accessed/1,
             fun open_dead_entry/1,
             fun handles_bad_messages/1,
             fun handles_code_change/1
@@ -63,7 +64,7 @@ cancel_and_replace_opener(_) ->
     {ok, Entry} = ddoc_cache_entry:start_link(Key),
     Opener1 = element(4, sys:get_state(Entry)),
     Ref1 = erlang:monitor(process, Opener1),
-    gen_server:cast(Entry, refresh),
+    gen_server:cast(Entry, force_refresh),
     receive {'DOWN', Ref1, _, _, _} -> ok end,
     Opener2 = element(4, sys:get_state(Entry)),
     ?assert(Opener2 /= Opener1),
@@ -95,11 +96,29 @@ condenses_access_messages({DbName, _}) ->
 kill_opener_on_terminate(_) ->
     Pid = spawn(fun() -> receive _ -> ok end end),
     ?assert(is_process_alive(Pid)),
-    St = {st, key, val, Pid, waiters, ts},
+    St = {st, key, val, Pid, waiters, ts, accessed},
     ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
     ?assert(not is_process_alive(Pid)).
 
 
+evict_when_not_accessed(_) ->
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
+    true = ets:insert_new(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    Ref = erlang:monitor(process, Entry),
+    ?assertEqual(1, element(7, sys:get_state(Entry))),
+    ok = gen_server:cast(Entry, refresh),
+
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+
+    ?assertEqual(0, element(7, sys:get_state(Entry))),
+    ok = gen_server:cast(Entry, refresh),
+    receive {'DOWN', Ref, _, _, Reason} -> Reason end,
+    ?assertEqual(normal, Reason),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
 open_dead_entry({DbName, _}) ->
     Pid = spawn(fun() -> ok end),
     Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 02/17: Rewrite ddoc_cache to improve performance

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit b879a4bd28fffb337edaa5fce4dd97bbceae5fae
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 16 13:29:47 2017 -0500

    Rewrite ddoc_cache to improve performance
    
    There were a couple issues with the previous ddoc_cache implementation
    that made it possible to tip over the ddoc_cache_opener process. First,
    there were a lot of messages flowing through a single gen_server. And
    second, the cache relied on periodically evicting entries to ensure
    proper behavior in not caching an entry forever after it had changed on
    disk.
    
    The new version makes two important changes. First, entries now have an
    associated process that manages the cache entry. This process will
    periodically refresh the entry and if the entry has changed or no longer
    exists the process will remove its entry from cache.
    
    The second major change is that the cache entry process directly mutates
    the related ets table entries so that our performance is not dependent
    on the speed of ets table mutations. Using a custom entry that does no
    work, the cache can now sustain roughly one million operations a second
    with twenty thousand clients fighting over a cache limited to one
    thousand items. In production this means that cache performance will
    likely be rate limited by other factors like loading design documents
    from disk.
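
In shape, each entry process is roughly the following sketch (the table
layout, names, and plain sleep loop are simplifications for illustration; the
real ddoc_cache_entry added below is a proc_lib/gen_server process that also
tracks openers, waiters, LRU timestamps and stats):

    -module(ddoc_cache_entry_sketch).
    -export([new_table/0, start/3]).

    -define(REFRESH_TIMEOUT, 67000).

    %% Public ets table shared by all entry processes; the real code keeps
    %% an #entry{} record in ddoc_cache_entries, this is just a
    %% {Key, Val, Pid} tuple for illustration.
    new_table() ->
        ets:new(cache_sketch, [public, named_table, set]).

    %% One process per cache entry: it owns its row and periodically re-runs
    %% RecoverFun, so no central gen_server serializes every cache update.
    start(Tab, Key, RecoverFun) ->
        spawn(fun() ->
            {ok, Val} = RecoverFun(),
            true = ets:insert(Tab, {Key, Val, self()}),
            loop(Tab, Key, Val, RecoverFun)
        end).

    loop(Tab, Key, Val, RecoverFun) ->
        timer:sleep(?REFRESH_TIMEOUT),
        case RecoverFun() of
            {ok, Val} ->
                %% Unchanged: keep serving the cached value.
                loop(Tab, Key, Val, RecoverFun);
            {ok, NewVal} ->
                %% Changed on disk: swap the cached value in place.
                true = ets:update_element(Tab, Key, {2, NewVal}),
                loop(Tab, Key, NewVal, RecoverFun);
            _Missing ->
                %% Deleted or no longer loadable: drop the entry and exit.
                ets:delete(Tab, Key)
        end.
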
---
 src/couch/src/couch_db_updater.erl                 |   2 +-
 src/ddoc_cache/src/ddoc_cache.app.src              |  22 +-
 src/ddoc_cache/src/ddoc_cache.erl                  |  79 ++----
 .../src/{ddoc_cache_util.erl => ddoc_cache.hrl}    |  50 ++--
 src/ddoc_cache/src/ddoc_cache_entry.erl            | 279 ++++++++++++++++++++
 ..._cache_util.erl => ddoc_cache_entry_custom.erl} |  22 +-
 ..._cache_util.erl => ddoc_cache_entry_ddocid.erl} |  25 +-
 ...he_util.erl => ddoc_cache_entry_ddocid_rev.erl} |  27 +-
 ...il.erl => ddoc_cache_entry_validation_funs.erl} |  29 +-
 src/ddoc_cache/src/ddoc_cache_lru.erl              | 293 +++++++++++++++++++++
 src/ddoc_cache/src/ddoc_cache_opener.erl           | 245 +----------------
 src/ddoc_cache/src/ddoc_cache_sup.erl              |  35 +--
 src/ddoc_cache/src/ddoc_cache_tables.erl           |  68 +++++
 src/ddoc_cache/test/ddoc_cache_basic_test.erl      | 122 +++++++++
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl   |  86 ++++++
 src/ddoc_cache/test/ddoc_cache_disabled_test.erl   |  52 ++++
 src/ddoc_cache/test/ddoc_cache_entry_test.erl      | 120 +++++++++
 .../ddoc_cache_util.erl => test/ddoc_cache_ev.erl} |  21 +-
 src/ddoc_cache/test/ddoc_cache_eviction_test.erl   |  93 +++++++
 src/ddoc_cache/test/ddoc_cache_lru_test.erl        | 167 ++++++++++++
 src/ddoc_cache/test/ddoc_cache_no_cache_test.erl   |  78 ++++++
 src/ddoc_cache/test/ddoc_cache_open_error_test.erl |  46 ++++
 src/ddoc_cache/test/ddoc_cache_opener_test.erl     |  33 +++
 src/ddoc_cache/test/ddoc_cache_refresh_test.erl    | 167 ++++++++++++
 src/ddoc_cache/test/ddoc_cache_remove_test.erl     | 206 +++++++++++++++
 .../ddoc_cache_test.hrl}                           |  30 +--
 src/ddoc_cache/test/ddoc_cache_tutil.erl           |  84 ++++++
 27 files changed, 2036 insertions(+), 445 deletions(-)

diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 2b448fd..1c4b561 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -319,7 +319,7 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
                     couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
                 end, UpdatedDDocIds),
                 couch_event:notify(Db2#db.name, ddoc_updated),
-                ddoc_cache:evict(Db2#db.name, UpdatedDDocIds),
+                ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
                 refresh_validate_doc_funs(Db2);
             false ->
                 Db2
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
index a64b2f5..38fceda 100644
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ b/src/ddoc_cache/src/ddoc_cache.app.src
@@ -13,14 +13,8 @@
 {application, ddoc_cache, [
     {description, "Design Document Cache"},
     {vsn, git},
-    {modules, [
-        ddoc_cache,
-        ddoc_cache_app,
-        ddoc_cache_opener,
-        ddoc_cache_sup,
-        ddoc_cache_util
-    ]},
     {registered, [
+        ddoc_cache_tables,
         ddoc_cache_lru,
         ddoc_cache_opener
     ]},
@@ -28,17 +22,11 @@
         kernel,
         stdlib,
         crypto,
+        config,
         couch_event,
-        ets_lru,
-        mem3,
-        fabric,
         couch_log,
-        couch_stats
+        couch_stats,
+        fabric
     ]},
-    {mod, {ddoc_cache_app, []}},
-    {env, [
-        {max_objects, unlimited},
-        {max_size, 104857600}, % 100M
-        {max_lifetime, 60000} % 1m
-    ]}
+    {mod, {ddoc_cache_app, []}}
 ]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
index ed93309..50cac30 100644
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ b/src/ddoc_cache/src/ddoc_cache.erl
@@ -12,86 +12,43 @@
 
 -module(ddoc_cache).
 
--export([
-    start/0,
-    stop/0
-]).
 
 -export([
     open_doc/2,
     open_doc/3,
     open_validation_funs/1,
-    evict/2,
+    open_custom/2,
+    refresh/2,
 
     %% deprecated
     open/2
 ]).
 
-start() ->
-    application:start(ddoc_cache).
-
-stop() ->
-    application:stop(ddoc_cache).
 
 open_doc(DbName, DocId) ->
-    Key = {DbName, DocId, '_'},
-    case ddoc_cache_opener:match_newest(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, DocId);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_doc(DbName, DocId)
-    end.
+    Key = {ddoc_cache_entry_ddocid, {DbName, DocId}},
+    ddoc_cache_lru:open(Key).
+
 
 open_doc(DbName, DocId, RevId) ->
-    Key = {DbName, DocId, RevId},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, DocId, RevId);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_doc(DbName, DocId, RevId)
-    end.
+    Key = {ddoc_cache_entry_ddocid_rev, {DbName, DocId, RevId}},
+    ddoc_cache_lru:open(Key).
+
 
 open_validation_funs(DbName) ->
-    Key = {DbName, validation_funs},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_validation_funs(DbName);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_validation_funs(DbName)
-    end.
+    Key = {ddoc_cache_entry_validation_funs, DbName},
+    ddoc_cache_lru:open(Key).
+
 
 open_custom(DbName, Mod) ->
-    Key = {DbName, Mod},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, Mod);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            Mod:recover(DbName)
-    end.
-
-evict(ShardDbName, DDocIds) ->
+    Key = {ddoc_cache_entry_custom, {DbName, Mod}},
+    ddoc_cache_lru:open(Key).
+
+
+refresh(ShardDbName, DDocIds) when is_list(DDocIds) ->
     DbName = mem3:dbname(ShardDbName),
-    ddoc_cache_opener:evict_docs(DbName, DDocIds).
+    ddoc_cache_lru:refresh(DbName, DDocIds).
+
 
 open(DbName, validation_funs) ->
     open_validation_funs(DbName);
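
Every public function above now just builds a {CallbackModule, Arg} key and hands it to ddoc_cache_lru:open/1; the hit/miss/recovery counters that used to live in this module moved into the LRU. A minimal sketch of the resulting keys (the db name, design doc id and my_module are illustrative; my_module would need to export recover/1 returning {ok, _}):

    %% ddoc_cache:open_doc(<<"mydb">>, <<"_design/foo">>) boils down to:
    {ok, _Doc} = ddoc_cache_lru:open(
        {ddoc_cache_entry_ddocid, {<<"mydb">>, <<"_design/foo">>}}).
    %% ddoc_cache:open_custom(<<"mydb">>, my_module) boils down to:
    {ok, _Val} = ddoc_cache_lru:open(
        {ddoc_cache_entry_custom, {<<"mydb">>, my_module}}).
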
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache.hrl
similarity index 52%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache.hrl
index fb3c0b9..dba0d37 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache.hrl
@@ -10,25 +10,31 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
-
-
--export([
-    new_uuid/0
-]).
-
-
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+-type dbname() :: iodata().
+-type docid() :: iodata().
+-type doc_hash() :: <<_:128>>.
+-type revision() :: {pos_integer(), doc_hash()}.
+
+-define(CACHE, ddoc_cache_entries).
+-define(LRU, ddoc_cache_lru).
+-define(REFRESH_TIMEOUT, 67000).
+-define(SHUTDOWN_TIMEOUT, 1000).
+
+-record(entry, {
+    key,
+    val,
+    pid
+}).
+
+-record(opener, {
+    key,
+    pid,
+    clients
+}).
+
+
+-ifdef(TEST).
+-define(EVENT(Name, Arg), ddoc_cache_ev:event(Name, Arg)).
+-else.
+-define(EVENT(Name, Arg), ignore).
+-endif.
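
The two table names defined here back the whole cache: ?CACHE (ddoc_cache_entries) holds one #entry{} row per key, while ?LRU (ddoc_cache_lru) holds {{Timestamp, Key, Pid}} rows used for oldest-first trimming. A shell sketch of what a raw lookup looks like, assuming the application is running (the key shown is illustrative):

    rr("src/ddoc_cache/src/ddoc_cache.hrl").
    Key = {ddoc_cache_entry_ddocid, {<<"mydb">>, <<"_design/foo">>}}.
    case ets:lookup(ddoc_cache_entries, Key) of
        [#entry{val = Val}] when Val /= undefined -> {ok, Val}; % cache hit
        _ -> miss                                               % miss or still opening
    end.
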
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
new file mode 100644
index 0000000..4dee2a1
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -0,0 +1,279 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+
+
+-export([
+    dbname/1,
+    ddocid/1,
+    recover/1,
+
+    start_link/1,
+    shutdown/1,
+    open/2,
+    accessed/1,
+    refresh/1
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+-export([
+    do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+    key,
+    val,
+    opener,
+    waiters,
+    ts
+}).
+
+
+dbname({Mod, Arg}) ->
+    Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+    Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+    Mod:recover(Arg).
+
+
+start_link(Key) ->
+    Pid = proc_lib:spawn_link(?MODULE, init, [Key]),
+    {ok, Pid}.
+
+
+shutdown(Pid) ->
+    ok = gen_server:call(Pid, shutdown).
+
+
+open(Pid, Key) ->
+    try
+        Resp = gen_server:call(Pid, open),
+        case Resp of
+            {open_ok, Val} ->
+                Val;
+            {open_error, {T, R, S}} ->
+                erlang:raise(T, R, S)
+        end
+    catch exit:_ ->
+        % It's possible that this process was evicted just
+        % before we tried talking to it. Just fall back
+        % to a standard recovery
+        recover(Key)
+    end.
+
+
+accessed(Pid) ->
+    gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+    gen_server:cast(Pid, refresh).
+
+
+init(Key) ->
+    true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+    St = #st{
+        key = Key,
+        opener = spawn_opener(Key),
+        waiters = []
+    },
+    ?EVENT(started, Key),
+    gen_server:enter_loop(?MODULE, [], St).
+
+
+terminate(_Reason, St) ->
+    #st{
+        key = Key,
+        opener = Pid,
+        ts = Ts
+    } = St,
+    % We may have already deleted our cache entry
+    % during shutdown
+    Pattern = #entry{key = Key, pid = self(), _ = '_'},
+    CacheMSpec = [{Pattern, [], [true]}],
+    true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+    % We may have already deleted our LRU entry
+    % during shutdown
+    if Ts == undefined -> ok; true ->
+        LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+        true = ets:select_delete(?LRU, LruMSpec) < 2
+    end,
+    % Blow away any current opener if it exists
+    if not is_pid(Pid) -> ok; true ->
+        catch exit(Pid, kill)
+    end,
+    ok.
+
+
+handle_call(open, From, #st{val = undefined} = St) ->
+    NewSt = St#st{
+        waiters = [From | St#st.waiters]
+    },
+    {noreply, NewSt};
+
+handle_call(open, _From, St) ->
+    {reply, St#st.val, St};
+
+handle_call(shutdown, _From, St) ->
+    remove_from_cache(St),
+    {stop, normal, ok, St};
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
+    ?EVENT(accessed, St#st.key),
+    drain_accessed(),
+    {noreply, update_lru(St)};
+
+handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
+    #st{
+        key = Key
+    } = St,
+    erlang:cancel_timer(Ref),
+    NewSt = St#st{
+        opener = spawn_opener(Key)
+    },
+    {noreply, NewSt};
+
+handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
+    catch exit(Pid, kill),
+    receive
+        {'DOWN', _, _, Pid, _} -> ok
+    end,
+    NewSt = St#st{
+        opener = spawn_opener(St#st.key)
+    },
+    {noreply, NewSt};
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
+    case Resp of
+        {open_ok, Key, {ok, Val}} ->
+            if not is_list(St#st.waiters) -> ok; true ->
+                respond(St#st.waiters, {open_ok, {ok, Val}})
+            end,
+            update_cache(St, Val),
+            Msg = {'$gen_cast', refresh},
+            Timer = erlang:send_after(?REFRESH_TIMEOUT, self(), Msg),
+            NewSt = St#st{
+                val = {open_ok, {ok, Val}},
+                opener = Timer
+            },
+            {noreply, update_lru(NewSt)};
+        {Status, Key, Other} ->
+            NewSt = St#st{
+                val = {Status, Other},
+                opener = undefined,
+                waiters = undefined
+            },
+            remove_from_cache(NewSt),
+            if not is_list(St#st.waiters) -> ok; true ->
+                respond(St#st.waiters, {Status, Other})
+            end,
+            {stop, normal, NewSt}
+    end;
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_, St, _) ->
+    {ok, St}.
+
+
+spawn_opener(Key) ->
+    {Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
+    Pid.
+
+
+do_open(Key) ->
+    try recover(Key) of
+        Resp ->
+            erlang:exit({open_ok, Key, Resp})
+    catch T:R ->
+        S = erlang:get_stacktrace(),
+        erlang:exit({open_error, Key, {T, R, S}})
+    end.
+
+
+update_lru(#st{key = Key, ts = Ts} = St) ->
+    if Ts == undefined -> ok; true ->
+        MSpec = [{{{Ts, Key, self()}}, [], [true]}],
+        1 = ets:select_delete(?LRU, MSpec)
+    end,
+    NewTs = os:timestamp(),
+    true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+    St#st{ts = NewTs}.
+
+
+update_cache(#st{val = undefined} = St, Val) ->
+    true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
+    ?EVENT(inserted, St#st.key);
+
+update_cache(#st{val = V1} = _St, V2) when {open_ok, {ok, V2}} == V1 ->
+    ?EVENT(update_noop, _St#st.key);
+
+update_cache(St, Val) ->
+    true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
+    ?EVENT(updated, {St#st.key, Val}).
+
+
+remove_from_cache(St) ->
+    #st{
+        key = Key,
+        ts = Ts
+    } = St,
+    Pattern = #entry{key = Key, pid = self(), _ = '_'},
+    CacheMSpec = [{Pattern, [], [true]}],
+    1 = ets:select_delete(?CACHE, CacheMSpec),
+    if Ts == undefined -> ok; true ->
+        LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+        1 = ets:select_delete(?LRU, LruMSpec)
+    end,
+    ?EVENT(removed, St#st.key),
+    ok.
+
+
+drain_accessed() ->
+    receive
+        {'$gen_cast', accessed} ->
+            drain_accessed()
+    after 0 ->
+        ok
+    end.
+
+
+respond(Waiters, Resp) ->
+    [gen_server:reply(W, Resp) || W <- Waiters].
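
Each cache entry is a small gen_server of its own: the LRU inserts a bare #entry{} row, starts this process, and clients then call open/2 on it; concurrent callers queue in the waiters list until the spawned opener exits with its result, after which a timer re-runs recover/1 every ?REFRESH_TIMEOUT. A rough sketch of driving one by hand, close to what ddoc_cache_lru and the tests do (the key is illustrative and my_module:recover/1 is assumed to exist):

    rr("src/ddoc_cache/src/ddoc_cache.hrl").
    Key = {ddoc_cache_entry_custom, {<<"mydb">>, my_module}}.
    true = ets:insert_new(ddoc_cache_entries, #entry{key = Key}).
    {ok, Pid} = ddoc_cache_entry:start_link(Key).
    Resp = ddoc_cache_entry:open(Pid, Key).  % blocks until the opener finishes
    ok = ddoc_cache_entry:accessed(Pid).     % async; marks it most recently used
    ok = ddoc_cache_entry:shutdown(Pid).     % removes its rows and stops
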
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
similarity index 62%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_custom.erl
index fb3c0b9..d858ad6 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
@@ -10,25 +10,23 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_custom).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
+dbname({DbName, _}) ->
+    DbName.
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+ddocid(_) ->
+    no_ddocid.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+recover({DbName, Mod}) ->
+    Mod:recover(DbName).
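
Any module that exports recover/1 can be cached this way; the shim above just delegates to it and reports no_ddocid so that a db-wide refresh also touches custom entries. A minimal sketch of such a module (the module name and cached value are made up):

    -module(my_cached_thing).
    -export([recover/1]).

    %% Called on a cache miss via ddoc_cache:open_custom(DbName, my_cached_thing);
    %% the {ok, ...} result is cached and refreshed by the entry process.
    recover(DbName) ->
        {ok, {DbName, computed_at, os:timestamp()}}.

Callers would then use {ok, _} = ddoc_cache:open_custom(<<"mydb">>, my_cached_thing).
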
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
similarity index 62%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index fb3c0b9..cac9abc 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -10,25 +10,26 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_ddocid).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
+-include_lib("couch/include/couch_db.hrl").
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+dbname({DbName, _}) ->
+    DbName.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+ddocid({_, DDocId}) ->
+    DDocId.
+
+
+recover({DbName, DDocId}) ->
+    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
similarity index 61%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index fb3c0b9..012abab 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -10,25 +10,28 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_ddocid_rev).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
+-include_lib("couch/include/couch_db.hrl").
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+dbname({DbName, _, _}) ->
+    DbName.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+ddocid({_, DDocId, _}) ->
+    DDocId.
+
+
+recover({DbName, DDocId, Rev}) ->
+    Opts = [ejson_body, ?ADMIN_CTX],
+    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
+    Resp.
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
similarity index 58%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
index fb3c0b9..3d43f7a 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
@@ -10,25 +10,30 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_validation_funs).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
+dbname(DbName) ->
+    DbName.
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+ddocid(_) ->
+    no_ddocid.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+recover(DbName) ->
+    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
+    Funs = lists:flatmap(fun(DDoc) ->
+        case couch_doc:get_validate_doc_fun(DDoc) of
+            nil -> [];
+            Fun -> [Fun]
+        end
+    end, DDocs),
+    {ok, Funs}.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
new file mode 100644
index 0000000..5d47639
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -0,0 +1,293 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    open/1,
+    refresh/2
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+-export([
+    handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+    pids, % pid -> key
+    dbs, % dbname -> docid -> key -> pid
+    size,
+    evictor
+}).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+open(Key) ->
+    try ets:lookup(?CACHE, Key) of
+        [] ->
+            lru_start(Key);
+        [#entry{pid = undefined}] ->
+            lru_start(Key);
+        [#entry{val = undefined, pid = Pid}] ->
+            couch_stats:increment_counter([ddoc_cache, miss]),
+            ddoc_cache_entry:open(Pid, Key);
+        [#entry{val = Val, pid = Pid}] ->
+            couch_stats:increment_counter([ddoc_cache, hit]),
+            ddoc_cache_entry:accessed(Pid),
+            {ok, Val}
+    catch _:_ ->
+        couch_stats:increment_counter([ddoc_cache, recovery]),
+        ddoc_cache_entry:recover(Key)
+    end.
+
+
+refresh(DbName, DDocIds) ->
+    gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
+
+
+init(_) ->
+    process_flag(trap_exit, true),
+    {ok, Pids} = khash:new(),
+    {ok, Dbs} = khash:new(),
+    {ok, Evictor} = couch_event:link_listener(
+            ?MODULE, handle_db_event, nil, [all_dbs]
+        ),
+    {ok, #st{
+        pids = Pids,
+        dbs = Dbs,
+        size = 0,
+        evictor = Evictor
+    }}.
+
+
+terminate(_Reason, St) ->
+    case is_pid(St#st.evictor) of
+        true -> catch exit(St#st.evictor, kill);
+        false -> ok
+    end,
+    ok.
+
+
+handle_call({start, Key}, _From, St) ->
+    #st{
+        pids = Pids,
+        dbs = Dbs,
+        size = CurSize
+    } = St,
+    case ets:lookup(?CACHE, Key) of
+        [] ->
+            MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+            case trim(St, CurSize, max(0, MaxSize)) of
+                {ok, N} ->
+                    true = ets:insert_new(?CACHE, #entry{key = Key}),
+                    {ok, Pid} = ddoc_cache_entry:start_link(Key),
+                    true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
+                    ok = khash:put(Pids, Pid, Key),
+                    store_key(Dbs, Key, Pid),
+                    {reply, {ok, Pid}, St#st{size = CurSize - N + 1}};
+                full ->
+                    ?EVENT(full, Key),
+                    {reply, full, St}
+            end;
+        [#entry{pid = Pid}] ->
+            {reply, {ok, Pid}, St}
+    end;
+
+handle_call(Msg, _From, St) ->
+    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_cast({evict, DbName}, St) ->
+    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+    {noreply, St};
+
+handle_cast({refresh, DbName, DDocIds}, St) ->
+    gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
+    {noreply, St};
+
+handle_cast({do_evict, DbName}, St) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    ToRem = case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
+                khash:to_list(Keys) ++ Acc1
+            end, []),
+            ?EVENT(evicted, DbName),
+            AccOut;
+        not_found ->
+            ?EVENT(evict_noop, DbName),
+            []
+    end,
+    lists:foreach(fun({Key, Pid}) ->
+        remove_entry(St, Key, Pid)
+    end, ToRem),
+    khash:del(Dbs, DbName),
+    {noreply, St};
+
+handle_cast({do_refresh, DbName, DDocIdList}, St) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            lists:foreach(fun(DDocId) ->
+                case khash:lookup(DDocIds, DDocId) of
+                    {value, Keys} ->
+                        khash:fold(Keys, fun(_, Pid, _) ->
+                            ddoc_cache_entry:refresh(Pid)
+                        end, nil);
+                    not_found ->
+                        ok
+                end
+            end, [no_ddocid | DDocIdList]);
+        not_found ->
+            ok
+    end,
+    {noreply, St};
+
+handle_cast(Msg, St) ->
+    {stop, {invalid_cast, Msg}, St}.
+
+
+handle_info({'EXIT', Pid, _Reason}, #st{evictor = Pid} = St) ->
+    ?EVENT(evictor_died, Pid),
+    {ok, Evictor} = couch_event:link_listener(
+            ?MODULE, handle_db_event, nil, [all_dbs]
+        ),
+    {noreply, St#st{evictor=Evictor}};
+
+handle_info({'EXIT', Pid, normal}, St) ->
+    % This clause handles when an entry starts
+    % up but encounters an error or uncacheable
+    % response from its recover call.
+    #st{
+        pids = Pids
+    } = St,
+    {value, Key} = khash:lookup(Pids, Pid),
+    khash:del(Pids, Pid),
+    remove_key(St, Key),
+    {noreply, St};
+
+handle_info(Msg, St) ->
+    {stop, {invalid_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+handle_db_event(ShardDbName, created, St) ->
+    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+    {ok, St};
+
+handle_db_event(ShardDbName, deleted, St) ->
+    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+    {ok, St};
+
+handle_db_event(_DbName, _Event, St) ->
+    {ok, St}.
+
+
+lru_start(Key) ->
+    case gen_server:call(?MODULE, {start, Key}, infinity) of
+        {ok, Pid} ->
+            couch_stats:increment_counter([ddoc_cache, miss]),
+            ddoc_cache_entry:open(Pid, Key);
+        full ->
+            couch_stats:increment_counter([ddoc_cache, recovery]),
+            ddoc_cache_entry:recover(Key)
+    end.
+
+
+trim(_, _, 0) ->
+    full;
+
+trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
+    {ok, 0};
+
+trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
+    case ets:first(?LRU) of
+        '$end_of_table' ->
+            full;
+        {_Ts, Key, Pid} ->
+            remove_entry(St, Key, Pid),
+            {ok, 1}
+    end.
+
+
+remove_entry(St, Key, Pid) ->
+    #st{
+        pids = Pids
+    } = St,
+    unlink(Pid),
+    ddoc_cache_entry:shutdown(Pid),
+    khash:del(Pids, Pid),
+    remove_key(St, Key).
+
+
+store_key(Dbs, Key, Pid) ->
+    DbName = ddoc_cache_entry:dbname(Key),
+    DDocId = ddoc_cache_entry:ddocid(Key),
+    case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            case khash:lookup(DDocIds, DDocId) of
+                {value, Keys} ->
+                    khash:put(Keys, Key, Pid);
+                not_found ->
+                    {ok, Keys} = khash:from_list([{Key, Pid}]),
+                    khash:put(DDocIds, DDocId, Keys)
+            end;
+        not_found ->
+            {ok, Keys} = khash:from_list([{Key, Pid}]),
+            {ok, DDocIds} = khash:from_list([{DDocId, Keys}]),
+            khash:put(Dbs, DbName, DDocIds)
+    end.
+
+
+remove_key(St, Key) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    DbName = ddoc_cache_entry:dbname(Key),
+    DDocId = ddoc_cache_entry:ddocid(Key),
+    {value, DDocIds} = khash:lookup(Dbs, DbName),
+    {value, Keys} = khash:lookup(DDocIds, DDocId),
+    khash:del(Keys, Key),
+    case khash:size(Keys) of
+        0 -> khash:del(DDocIds, DDocId);
+        _ -> ok
+    end,
+    case khash:size(DDocIds) of
+        0 -> khash:del(Dbs, DbName);
+        _ -> ok
+    end.
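
The entry count is now capped by the ddoc_cache/max_size config key (default 1000), checked on every insert; when the cache is full and nothing can be trimmed, lru_start/1 falls back to an uncached recover. A sketch of adjusting the cap at runtime, as the test suites below do (the value is arbitrary; persist = false keeps it out of the ini files):

    %% Cap the cache at 500 entries; "0" disables caching entirely and every
    %% open falls through to recover/1.
    config:set("ddoc_cache", "max_size", "500", false).
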
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
index b76a228..7839bcb 100644
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ b/src/ddoc_cache/src/ddoc_cache_opener.erl
@@ -14,279 +14,52 @@
 -behaviour(gen_server).
 -vsn(1).
 
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
-
 -export([
     start_link/0
 ]).
+
 -export([
     init/1,
     terminate/2,
-
     handle_call/3,
     handle_cast/2,
     handle_info/2,
-
     code_change/3
 ]).
 
--export([
-    open_doc/2,
-    open_doc/3,
-    open_validation_funs/1,
-    evict_docs/2,
-    lookup/1,
-    match_newest/1,
-    recover_doc/2,
-    recover_doc/3,
-    recover_validation_funs/1
-]).
--export([
-    handle_db_event/3
-]).
--export([
-    fetch_doc_data/1
-]).
-
--define(CACHE, ddoc_cache_lru).
--define(OPENING, ddoc_cache_opening).
-
--type dbname() :: iodata().
--type docid() :: iodata().
--type doc_hash() :: <<_:128>>.
--type revision() :: {pos_integer(), doc_hash()}.
-
--record(opener, {
-    key,
-    pid,
-    clients
-}).
-
--record(st, {
-    db_ddocs,
-    evictor
-}).
 
 start_link() ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
 
--spec open_doc(dbname(), docid()) -> {ok, #doc{}}.
-open_doc(DbName, DocId) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, DocId}}, infinity),
-    handle_open_response(Resp).
-
--spec open_doc(dbname(), docid(), revision()) -> {ok, #doc{}}.
-open_doc(DbName, DocId, Rev) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, DocId, Rev}}, infinity),
-    handle_open_response(Resp).
-
--spec open_validation_funs(dbname()) -> {ok, [fun()]}.
-open_validation_funs(DbName) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, validation_funs}}, infinity),
-    handle_open_response(Resp).
-
--spec evict_docs(dbname(), [docid()]) -> ok.
-evict_docs(DbName, DocIds) ->
-    gen_server:cast(?MODULE, {evict, DbName, DocIds}).
-
-lookup(Key) ->
-    try ets_lru:lookup_d(?CACHE, Key) of
-        {ok, _} = Resp ->
-            Resp;
-        _ ->
-            missing
-    catch
-        error:badarg ->
-            recover
-    end.
-
-match_newest(Key) ->
-    try ets_lru:match_object(?CACHE, Key, '_') of
-        [] ->
-            missing;
-        Docs ->
-            Sorted = lists:sort(
-                fun (#doc{deleted=DelL, revs=L}, #doc{deleted=DelR, revs=R}) ->
-                    {not DelL, L} > {not DelR, R}
-                end, Docs),
-            {ok, hd(Sorted)}
-    catch
-        error:badarg ->
-            recover
-    end.
-
-recover_doc(DbName, DDocId) ->
-    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
-
-recover_doc(DbName, DDocId, Rev) ->
-    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], [ejson_body, ?ADMIN_CTX]),
-    Resp.
-
-recover_validation_funs(DbName) ->
-    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    {ok, Funs}.
-
-handle_db_event(ShardDbName, created, St) ->
-    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
-    {ok, St};
-handle_db_event(ShardDbName, deleted, St) ->
-    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
-    {ok, St};
-handle_db_event(_DbName, _Event, St) ->
-    {ok, St}.
 
 init(_) ->
-    process_flag(trap_exit, true),
-    _ = ets:new(?OPENING, [set, protected, named_table, {keypos, #opener.key}]),
-    {ok, Evictor} = couch_event:link_listener(
-            ?MODULE, handle_db_event, nil, [all_dbs]
-        ),
-    {ok, #st{
-        evictor = Evictor
-    }}.
+    {ok, nil}.
 
-terminate(_Reason, St) ->
-    case is_pid(St#st.evictor) of
-        true -> exit(St#st.evictor, kill);
-        false -> ok
-    end,
+terminate(_Reason, _St) ->
     ok.
 
-handle_call({open, OpenerKey}, From, St) ->
-    case ets:lookup(?OPENING, OpenerKey) of
-        [#opener{clients=Clients}=O] ->
-            ets:insert(?OPENING, O#opener{clients=[From | Clients]}),
-            {noreply, St};
-        [] ->
-            Pid = spawn_link(?MODULE, fetch_doc_data, [OpenerKey]),
-            ets:insert(?OPENING, #opener{key=OpenerKey, pid=Pid, clients=[From]}),
-            {noreply, St}
-    end;
 
 handle_call(Msg, _From, St) ->
     {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
 
 
-handle_cast({evict, DbName}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+% The do_evict clauses exist for upgrade compatibility while
+% we're in the middle of a rolling reboot.
+handle_cast({do_evict, _} = Msg, St) ->
+    gen_server:cast(ddoc_cache_lru, Msg),
     {noreply, St};
 
-handle_cast({evict, DbName, DDocIds}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName, DDocIds}),
-    {noreply, St};
-
-handle_cast({do_evict, DbName}, St) ->
-    DDocIds = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1', '_'}, '_')),
-    handle_cast({do_evict, DbName, DDocIds}, St);
-
 handle_cast({do_evict, DbName, DDocIds}, St) ->
-    CustomKeys = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1'}, '_')),
-    lists:foreach(fun(Mod) ->
-        ets_lru:remove(?CACHE, {DbName, Mod})
-    end, CustomKeys),
-    lists:foreach(fun(DDocId) ->
-        Revs = ets_lru:match(?CACHE, {DbName, DDocId, '$1'}, '_'),
-        lists:foreach(fun([Rev]) ->
-            ets_lru:remove(?CACHE, {DbName, DDocId, Rev})
-        end, Revs)
-    end, DDocIds),
+    gen_server:cast(ddoc_cache_lru, {do_refresh, DbName, DDocIds}),
     {noreply, St};
 
 handle_cast(Msg, St) ->
     {stop, {invalid_cast, Msg}, St}.
 
-handle_info({'EXIT', Pid, Reason}, #st{evictor=Pid}=St) ->
-    couch_log:error("ddoc_cache_opener evictor died ~w", [Reason]),
-    {ok, Evictor} = couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
-    {noreply, St#st{evictor=Evictor}};
-
-handle_info({'EXIT', _Pid, {open_ok, OpenerKey, Resp}}, St) ->
-    respond(OpenerKey, {open_ok, Resp}),
-    {noreply, St};
-
-handle_info({'EXIT', _Pid, {open_error, OpenerKey, Type, Error}}, St) ->
-    respond(OpenerKey, {open_error, Type, Error}),
-    {noreply, St};
-
-handle_info({'EXIT', Pid, Reason}, St) ->
-    Pattern = #opener{pid=Pid, _='_'},
-    case ets:match_object(?OPENING, Pattern) of
-        [#opener{key=OpenerKey, clients=Clients}] ->
-            _ = [gen_server:reply(C, {error, Reason}) || C <- Clients],
-            ets:delete(?OPENING, OpenerKey),
-            {noreply, St};
-        [] ->
-            {stop, {unknown_pid_died, {Pid, Reason}}, St}
-    end;
 
 handle_info(Msg, St) ->
     {stop, {invalid_info, Msg}, St}.
 
+
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
-
--spec fetch_doc_data({dbname(), validation_funs}) -> no_return();
-                    ({dbname(), atom()}) -> no_return();
-                    ({dbname(), docid()}) -> no_return();
-                    ({dbname(), docid(), revision()}) -> no_return().
-fetch_doc_data({DbName, validation_funs}=OpenerKey) ->
-    {ok, Funs} = recover_validation_funs(DbName),
-    ok = ets_lru:insert(?CACHE, OpenerKey, Funs),
-    exit({open_ok, OpenerKey, {ok, Funs}});
-fetch_doc_data({DbName, Mod}=OpenerKey) when is_atom(Mod) ->
-    % This is not actually a docid but rather a custom cache key.
-    % Treat the argument as a code module and invoke its recover function.
-    try Mod:recover(DbName) of
-        {ok, Result} ->
-            ok = ets_lru:insert(?CACHE, OpenerKey, Result),
-            exit({open_ok, OpenerKey, {ok, Result}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end;
-fetch_doc_data({DbName, DocId}=OpenerKey) ->
-    try recover_doc(DbName, DocId) of
-        {ok, Doc} ->
-            {RevDepth, [RevHash| _]} = Doc#doc.revs,
-            Rev = {RevDepth, RevHash},
-            ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
-            exit({open_ok, OpenerKey, {ok, Doc}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end;
-fetch_doc_data({DbName, DocId, Rev}=OpenerKey) ->
-    try recover_doc(DbName, DocId, Rev) of
-        {ok, Doc} ->
-            ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
-            exit({open_ok, OpenerKey, {ok, Doc}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end.
-
-handle_open_response(Resp) ->
-    case Resp of
-        {open_ok, Value} -> Value;
-        {open_error, throw, Error} -> throw(Error);
-        {open_error, error, Error} -> erlang:error(Error);
-        {open_error, exit, Error} -> exit(Error)
-    end.
-
-respond(OpenerKey, Resp) ->
-    [#opener{clients=Clients}] = ets:lookup(?OPENING, OpenerKey),
-    _ = [gen_server:reply(C, Resp) || C <- Clients],
-    ets:delete(?OPENING, OpenerKey).
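
The opener is now kept only as a named process for mixed-version clusters: casts that older nodes still send to ddoc_cache_opener are forwarded to the new LRU. A sketch of the equivalence (the db name is illustrative):

    %% An old node's eviction broadcast addressed to the opener...
    gen_server:cast(ddoc_cache_opener, {do_evict, <<"mydb">>}).
    %% ...is forwarded and has the same effect as:
    gen_server:cast(ddoc_cache_lru, {do_evict, <<"mydb">>}).
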
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
index 85e90b3..35393e1 100644
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ b/src/ddoc_cache/src/ddoc_cache_sup.erl
@@ -27,12 +27,20 @@ start_link() ->
 init([]) ->
     Children = [
         {
+            ddoc_cache_tables,
+            {ddoc_cache_tables, start_link, []},
+            permanent,
+            5000,
+            worker,
+            [ddoc_cache_tables]
+        },
+        {
             ddoc_cache_lru,
-            {ets_lru, start_link, [ddoc_cache_lru, lru_opts()]},
+            {ddoc_cache_lru, start_link, []},
             permanent,
             5000,
             worker,
-            [ets_lru]
+            [ddoc_cache_lru]
         },
         {
             ddoc_cache_opener,
@@ -43,25 +51,4 @@ init([]) ->
             [ddoc_cache_opener]
         }
     ],
-    {ok, {{one_for_one, 5, 10}, Children}}.
-
-
-lru_opts() ->
-    case application:get_env(ddoc_cache, max_objects) of
-        {ok, MxObjs} when is_integer(MxObjs), MxObjs >= 0 ->
-            [{max_objects, MxObjs}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_size) of
-        {ok, MxSize} when is_integer(MxSize), MxSize >= 0 ->
-            [{max_size, MxSize}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_lifetime) of
-        {ok, MxLT} when is_integer(MxLT), MxLT >= 0 ->
-            [{max_lifetime, MxLT}];
-        _ ->
-            []
-    end.
+    {ok, {{one_for_all, 25, 1}, Children}}.
diff --git a/src/ddoc_cache/src/ddoc_cache_tables.erl b/src/ddoc_cache/src/ddoc_cache_tables.erl
new file mode 100644
index 0000000..5856776
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_tables.erl
@@ -0,0 +1,68 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_tables).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+    BaseOpts = [public, named_table],
+    CacheOpts = [
+        set,
+        {read_concurrency, true},
+        {keypos, #entry.key}
+    ] ++ BaseOpts,
+    ets:new(?CACHE, CacheOpts),
+    ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
+    {ok, nil}.
+
+
+terminate(_Reason, _St) ->
+    ok.
+
+
+handle_call(Msg, _From, St) ->
+    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {invalid_cast, Msg}, St}.
+
+
+handle_info(Msg, St) ->
+    {stop, {invalid_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
new file mode 100644
index 0000000..227ac54
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
@@ -0,0 +1,122 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_basic_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, totes_custom}}.
+
+
+check_basic_test_() ->
+    {
+        setup,
+        fun ddoc_cache_tutil:start_couch/0,
+        fun ddoc_cache_tutil:stop_couch/1,
+        {with, [
+            fun cache_ddoc/1,
+            fun cache_ddoc_rev/1,
+            fun cache_vdu/1,
+            fun cache_custom/1,
+            fun cache_ddoc_refresher_unchanged/1,
+            fun dont_cache_not_found/1,
+            fun deprecated_api_works/1
+        ]}
+    }.
+
+
+cache_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+cache_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+
+    % Assert that the non-rev cache entry is separate
+    Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+cache_vdu({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_validation_funs(DbName),
+    ?assertMatch({ok, [_]}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+cache_custom({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertMatch({ok, {DbName, totes_custom}}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+cache_ddoc_refresher_unchanged({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ddoc_cache:open_doc(DbName, ?FOOBAR),
+    [Entry1] = ets:lookup(?CACHE, ets:first(?CACHE)),
+    ddoc_cache:open_doc(DbName, ?FOOBAR),
+    [Entry2] = ets:lookup(?CACHE, ets:first(?CACHE)),
+    ?assertEqual(Entry1, Entry2).
+
+
+dont_cache_not_found({DbName, _}) ->
+    DDocId = <<"_design/not_found">>,
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, DDocId),
+    ?assertEqual({not_found, missing}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
+
+
+deprecated_api_works({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    {ok, _} = ddoc_cache:open(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open(DbName, <<"foobar">>),
+    {ok, _} = ddoc_cache:open(DbName, ?MODULE),
+    {ok, _} = ddoc_cache:open(DbName, validation_funs).
+
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
new file mode 100644
index 0000000..395f560
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -0,0 +1,86 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_coverage_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+coverage_test_() ->
+    {
+        setup,
+        fun ddoc_cache_tutil:start_couch/0,
+        fun ddoc_cache_tutil:stop_couch/1,
+        [
+            fun restart_lru/0,
+            fun restart_tables/0,
+            fun restart_evictor/0
+        ]
+    }.
+
+
+restart_lru() ->
+    send_bad_messages(ddoc_cache_lru),
+    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c, d})),
+    ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
+
+
+restart_tables() ->
+    send_bad_messages(ddoc_cache_tables),
+    ?assertEqual(ok, ddoc_cache_tables:terminate(bang, baz)),
+    ?assertEqual({ok, foo}, ddoc_cache_tables:code_change(1, foo, [])).
+
+
+restart_evictor() ->
+    meck:new(ddoc_cache_ev, [passthrough]),
+    try
+        State = sys:get_state(ddoc_cache_lru),
+        Evictor = element(5, State),
+        Ref = erlang:monitor(process, Evictor),
+        exit(Evictor, shutdown),
+        receive
+            {'DOWN', Ref, _, _, Reason} ->
+                couch_log:error("MONITOR: ~p", [Reason]),
+                ok
+        end,
+        meck:wait(ddoc_cache_ev, event, [evictor_died, '_'], 1000),
+        NewState = sys:get_state(ddoc_cache_lru),
+        NewEvictor = element(5, NewState),
+        ?assertNotEqual(Evictor, NewEvictor)
+    after
+        meck:unload()
+    end.
+
+
+send_bad_messages(Name) ->
+    wait_for_restart(Name, fun() ->
+        ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo))
+    end),
+    wait_for_restart(Name, fun() ->
+        gen_server:cast(Name, foo)
+    end),
+    wait_for_restart(Name, fun() ->
+        whereis(Name) ! foo
+    end).
+
+
+wait_for_restart(Server, Fun) ->
+    Ref = erlang:monitor(process, whereis(Server)),
+    Fun(),
+    receive
+        {'DOWN', Ref, _, _, _} ->
+            ok
+    end,
+    ?assert(is_pid(test_util:wait_process(Server))).
diff --git a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/ddoc_cache_disabled_test.erl
new file mode 100644
index 0000000..ef73180
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_disabled_test.erl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_disabled_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    config:set("ddoc_cache", "max_size", "0", false),
+    Ctx.
+
+
+check_disabled_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun ddoc_cache_tutil:stop_couch/1,
+        {with, [
+            fun resp_ok/1,
+            fun resp_not_found/1
+        ]}
+    }.
+
+
+resp_ok({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
+
+
+resp_not_found({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>),
+    ?assertEqual({not_found, missing}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
new file mode 100644
index 0000000..62afc72
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
@@ -0,0 +1,120 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(<<"foo">>) ->
+    timer:sleep(30000);
+
+recover(DbName) ->
+    {ok, {DbName, such_custom}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_entry_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun cancel_and_replace_opener/1,
+            fun condenses_access_messages/1,
+            fun kill_opener_on_terminate/1,
+            fun open_dead_entry/1,
+            fun handles_bad_messages/1,
+            fun handles_code_change/1
+        ]}
+    }.
+
+
+cancel_and_replace_opener(_) ->
+    Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
+    true = ets:insert_new(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    Opener1 = element(4, sys:get_state(Entry)),
+    Ref1 = erlang:monitor(process, Opener1),
+    gen_server:cast(Entry, refresh),
+    receive {'DOWN', Ref1, _, _, _} -> ok end,
+    Opener2 = element(4, sys:get_state(Entry)),
+    ?assert(Opener2 /= Opener1),
+    ?assert(is_process_alive(Opener2)),
+    % Clean up after ourselves
+    unlink(Entry),
+    ddoc_cache_entry:shutdown(Entry).
+
+
+condenses_access_messages({DbName, _}) ->
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
+    true = ets:insert(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key),
+    erlang:suspend_process(Entry),
+    lists:foreach(fun(_) ->
+        gen_server:cast(Entry, accessed)
+    end, lists:seq(1, 100)),
+    erlang:resume_process(Entry),
+    meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000),
+    ?assertError(
+            timeout,
+            meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
+        ),
+    unlink(Entry),
+    ddoc_cache_entry:shutdown(Entry).
+
+
+kill_opener_on_terminate(_) ->
+    Pid = spawn(fun() -> receive _ -> ok end end),
+    ?assert(is_process_alive(Pid)),
+    St = {st, key, val, Pid, waiters, ts},
+    ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
+    ?assert(not is_process_alive(Pid)).
+
+
+open_dead_entry({DbName, _}) ->
+    Pid = spawn(fun() -> ok end),
+    Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
+    ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)).
+
+
+handles_bad_messages(_) ->
+    CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
+    CastExpect = {stop, {bad_cast, foo}, bar},
+    InfoExpect = {stop, {bad_info, foo}, bar},
+    ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)),
+    ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)),
+    ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)).
+
+
+handles_code_change(_) ->
+    CCExpect = {ok, bar},
+    ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)).
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/test/ddoc_cache_ev.erl
similarity index 61%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/test/ddoc_cache_ev.erl
index fb3c0b9..a451342 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/test/ddoc_cache_ev.erl
@@ -10,25 +10,12 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
-
+-module(ddoc_cache_ev).
 
 -export([
-    new_uuid/0
+    event/2
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+event(Name, Arg) ->
+    couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]).
diff --git a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
new file mode 100644
index 0000000..0b9f57b
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
@@ -0,0 +1,93 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_eviction_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, totes_custom}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_eviction_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun evict_all/1,
+            fun dont_evict_all_unrelated/1,
+            fun check_upgrade_clause/1
+        ]}
+    }.
+
+
+evict_all({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    ShardName = element(2, hd(mem3:shards(DbName))),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    {ok, _} = ddoc_cache:open_validation_funs(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(4, ets:info(?CACHE, size)),
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+dont_evict_all_unrelated({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    {ok, _} = ddoc_cache:open_validation_funs(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(4, ets:info(?CACHE, size)),
+    ShardName = <<"shards/00000000-ffffffff/test.1384769918">>,
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000),
+    ?assertEqual(4, ets:info(?CACHE, size)).
+
+
+check_upgrade_clause({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    gen_server:cast(ddoc_cache_opener, {do_evict, DbName}),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
new file mode 100644
index 0000000..f5cef16
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -0,0 +1,167 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(<<"pause", _/binary>>) ->
+    receive go -> ok end,
+    {ok, paused};
+
+recover(DbName) ->
+    {ok, DbName}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    config:set("ddoc_cache", "max_size", "5", false),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_not_started_test() ->
+    % Starting couch, but not ddoc_cache
+    Ctx = test_util:start_couch(),
+    try
+        Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
+        ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
+    after
+        test_util:stop_couch(Ctx)
+    end.
+
+
+check_lru_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun check_multi_start/1,
+            fun check_multi_open/1,
+            fun check_capped_size/1,
+            fun check_full_cache/1
+        ]}
+    }.
+
+
+check_multi_start(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
+    % These will all get sent through ddoc_cache_lru
+    Clients = lists:map(fun(_) ->
+        spawn_monitor(fun() ->
+            ddoc_cache_lru:open(Key)
+        end)
+    end, lists:seq(1, 10)),
+    meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+    lists:foreach(fun({Pid, _Ref}) ->
+        ?assert(is_process_alive(Pid))
+    end, Clients),
+    [#entry{pid = Pid}] = ets:tab2list(?CACHE),
+    Opener = element(4, sys:get_state(Pid)),
+    OpenerRef = erlang:monitor(process, Opener),
+    ?assert(is_process_alive(Opener)),
+    Opener ! go,
+    receive {'DOWN', OpenerRef, _, _, _} -> ok end,
+    lists:foreach(fun({CPid, Ref}) ->
+        receive
+            {'DOWN', Ref, _, _, normal} -> ok;
+            {'DOWN', Ref, _, _, Other} ->
+                io:format(standard_error, "OTHER: ~p~n", [Other]);
+            Other when not is_tuple(Other) orelse element(1, Other) /= 'DOWN' ->
+                io:format(standard_error, "MSG: ~p~n", [Other])
+        after 2000 ->
+            io:format(standard_error, "BLAH?!: ~p ~p", [CPid, process_info(CPid, current_stacktrace)])
+        end
+    end, Clients).
+
+
+check_multi_open(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
+    % We wait after the first client so that
+    % the rest of the clients go directly to
+    % ddoc_cache_entry bypassing ddoc_cache_lru
+    Client1 = spawn_monitor(fun() ->
+        ddoc_cache_lru:open(Key)
+    end),
+    meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+    Clients = [Client1] ++ lists:map(fun(_) ->
+        spawn_monitor(fun() ->
+            ddoc_cache_lru:open(Key)
+        end)
+    end, lists:seq(1, 9)),
+    lists:foreach(fun({Pid, _Ref}) ->
+        ?assert(is_process_alive(Pid))
+    end, Clients),
+    [#entry{pid = Pid}] = ets:tab2list(?CACHE),
+    Opener = element(4, sys:get_state(Pid)),
+    OpenerRef = erlang:monitor(process, Opener),
+    ?assert(is_process_alive(Opener)),
+    Opener ! go,
+    receive {'DOWN', OpenerRef, _, _, _} -> ok end,
+    lists:foreach(fun({_, Ref}) ->
+        receive {'DOWN', Ref, _, _, normal} -> ok end
+    end, Clients).
+
+
+check_capped_size(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    lists:foreach(fun(I) ->
+        DbName = list_to_binary(integer_to_list(I)),
+        ddoc_cache:open_custom(DbName, ?MODULE),
+        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+        ?assertEqual(I, ets:info(?CACHE, size))
+    end, lists:seq(1, 5)),
+    lists:foreach(fun(I) ->
+        DbName = list_to_binary(integer_to_list(I)),
+        ddoc_cache:open_custom(DbName, ?MODULE),
+        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+        ?assertEqual(5, ets:info(?CACHE, size))
+    end, lists:seq(6, 20)).
+
+
+check_full_cache(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    lists:foreach(fun(I) ->
+        DbSuffix = list_to_binary(integer_to_list(I)),
+        DbName = <<"pause", DbSuffix/binary>>,
+        spawn(fun() -> ddoc_cache:open_custom(DbName, ?MODULE) end),
+        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+        ?assertEqual(I, ets:info(?CACHE, size))
+    end, lists:seq(1, 5)),
+    lists:foreach(fun(I) ->
+        DbSuffix = list_to_binary(integer_to_list(I)),
+        DbName = <<"pause", DbSuffix/binary>>,
+        spawn(fun() -> ddoc_cache:open_custom(DbName, ?MODULE) end),
+        meck:wait(I - 5, ddoc_cache_ev, event, [full, '_'], 1000),
+        ?assertEqual(5, ets:info(?CACHE, size))
+    end, lists:seq(6, 20)).
diff --git a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
new file mode 100644
index 0000000..a5a5751
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
@@ -0,0 +1,78 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_no_cache_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+ddoc(DDocId) ->
+    {ok, #doc{
+        id = DDocId,
+        body = {[
+            {<<"ohai">>, null}
+        ]}
+    }}.
+
+
+not_found(_DDocId) ->
+    {not_found, missing}.
+
+
+return_error(_DDocId) ->
+    {error, timeout}.
+
+
+start(Resp) ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(fabric),
+    meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
+        Resp(DDocId)
+    end),
+    Ctx.
+
+
+stop(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+no_cache_open_ok_test() ->
+    Ctx = start(fun ddoc/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(ddoc(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
+
+
+no_cache_open_not_found_test() ->
+    Ctx = start(fun not_found/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(not_found(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
+
+
+no_cache_open_error_test() ->
+    Ctx = start(fun return_error/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(return_error(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
diff --git a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
new file mode 100644
index 0000000..0ac2390
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_open_error_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) ->
+        erlang:error(test_kaboom)
+    end),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_basic_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun handle_open_error/1
+        ]}
+    }.
+
+
+handle_open_error({DbName, _}) ->
+    ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)).
diff --git a/src/ddoc_cache/test/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/ddoc_cache_opener_test.erl
new file mode 100644
index 0000000..c384636
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_opener_test.erl
@@ -0,0 +1,33 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_opener_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+empty_hull_test() ->
+    InitExpect = {ok, nil},
+    TermExpect = ok,
+    CallExpect = {stop, {invalid_call, foo}, {invalid_call, foo}, baz},
+    CastExpect = {stop, {invalid_cast, foo}, bar},
+    InfoExpect = {stop, {invalid_info, foo}, bar},
+    CCExpect = {ok, bar},
+    ?assertEqual(InitExpect, ddoc_cache_opener:init(foo)),
+    ?assertEqual(TermExpect, ddoc_cache_opener:terminate(foo, bar)),
+    ?assertEqual(CallExpect, ddoc_cache_opener:handle_call(foo, bar, baz)),
+    ?assertEqual(CastExpect, ddoc_cache_opener:handle_cast(foo, bar)),
+    ?assertEqual(InfoExpect, ddoc_cache_opener:handle_info(foo, bar)),
+    ?assertEqual(CCExpect, ddoc_cache_opener:code_change(foo, bar, baz)).
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
new file mode 100644
index 0000000..7bc1704
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
@@ -0,0 +1,167 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_refresh_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, rand_string()}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_refresh_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun refresh_ddoc/1,
+            fun refresh_ddoc_rev/1,
+            fun refresh_vdu/1,
+            fun refresh_custom/1,
+            fun refresh_multiple/1,
+            fun check_upgrade_clause/1
+        ]}
+    }.
+
+
+refresh_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"baz">>}]}
+    },
+    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    Expect = NewDDoc#doc{
+        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
+    },
+    meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000),
+    ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"kazam">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    % We pass the rev explicitly so we assert that we're
+    % getting the same original response from the cache
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+    ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_vdu({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, [_]} = ddoc_cache:open_validation_funs(DbName),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
+    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000),
+    ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_custom({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?CUSTOM]),
+    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
+    ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_multiple({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    % Relying on the sort order of entry keys to make
+    % sure our entries line up for this test
+    [
+        #entry{key = NoRevKey, val = DDoc},
+        #entry{key = RevKey, val = DDoc}
+    ] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"kalamazoo">>}]}
+    },
+    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    Updated = NewDDoc#doc{
+        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
+    },
+    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
+    % We pass the rev explicitly so we assert that we're
+    % getting the same original response from the cache
+    ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+check_upgrade_clause({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
+
+
+rand_string() ->
+    Bin = crypto:rand_bytes(8),
+    to_hex(Bin, []).
+
+
+to_hex(<<>>, Acc) ->
+    list_to_binary(lists:reverse(Acc));
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+
+
+hexdig(C) when C >= 0, C =< 9 ->
+    C + $0;
+hexdig(C) when C >= 10, C =< 15 ->
+    C + $A - 10.
\ No newline at end of file
diff --git a/src/ddoc_cache/test/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
new file mode 100644
index 0000000..7596b99
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
@@ -0,0 +1,206 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_remove_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    case couch_util:get_value(<<"status">>, Body) of
+        <<"ok">> ->
+            {ok, yay};
+        <<"not_ok">> ->
+            {ruh, roh};
+        <<"error">> ->
+            erlang:error(thpppt)
+    end.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_refresh_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun remove_ddoc/1,
+            fun remove_ddoc_rev/1,
+            fun remove_ddoc_rev_only/1,
+            fun remove_custom_not_ok/1,
+            fun remove_custom_error/1
+        ]}
+    }.
+
+
+remove_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    [#entry{key = Key, val = DDoc}] = ets:tab2list(?CACHE),
+    NewDDoc = DDoc#doc{
+        deleted = true,
+        body = {[]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+remove_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
+    [#entry{key = Key, val = DDoc, pid = Pid}] = ets:tab2list(?CACHE),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"an">>, <<"update">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+    % Compact the database so that the old rev is removed
+    lists:foreach(fun(Shard) ->
+        do_compact(Shard#shard.name)
+    end, mem3:local_shards(DbName)),
+    % Trigger a refresh rather than wait for the timeout
+    ddoc_cache_entry:refresh(Pid),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertMatch(
+            {{not_found, missing}, _},
+            ddoc_cache:open_doc(DbName, ?VDU, Rev)
+        ),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+remove_ddoc_rev_only({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
+    % Relying on the sort order of keys to keep
+    % these lined up for testing
+    [
+        #entry{key = NoRevKey, val = DDoc, pid = NoRevPid},
+        #entry{key = RevKey, val = DDoc, pid = RevPid}
+    ] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"new">>, <<"awesomeness">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
+    % Compact the database so that the old rev is removed
+    lists:foreach(fun(Shard) ->
+        do_compact(Shard#shard.name)
+    end, mem3:local_shards(DbName)),
+    % Trigger a refresh rather than wait for the timeout
+    ddoc_cache_entry:refresh(NoRevPid),
+    ddoc_cache_entry:refresh(RevPid),
+    meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000),
+    ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)),
+    ?assertMatch(
+            {{not_found, missing}, _},
+            ddoc_cache:open_doc(DbName, ?VDU, Rev)
+        ),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+remove_custom_not_ok({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    init_custom_ddoc(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"status">>, <<"not_ok">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+remove_custom_error({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    init_custom_ddoc(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"status">>, <<"error">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+init_custom_ddoc(DbName) ->
+    Body = {[{<<"status">>, <<"ok">>}]},
+    {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDoc = Doc#doc{body = Body},
+    {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
+
+
+do_compact(ShardName) ->
+    {ok, Db} = couch_db:open_int(ShardName, []),
+    try
+        {ok, Pid} = couch_db:start_compact(Db),
+        Ref = erlang:monitor(process, Pid),
+        receive
+            {'DOWN', Ref, _, _, _} ->
+                ok
+        end
+    after
+        couch_db:close(Db)
+    end,
+    wait_for_compaction(ShardName).
+
+
+wait_for_compaction(ShardName) ->
+    {ok, Db} = couch_db:open_int(ShardName, []),
+    CompactRunning = try
+        {ok, Info} = couch_db:get_db_info(Db),
+        couch_util:get_value(compact_running, Info)
+    after
+        couch_db:close(Db)
+    end,
+    if not CompactRunning -> ok; true ->
+        timer:sleep(100),
+        wait_for_compaction(ShardName)
+    end.
\ No newline at end of file
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/test/ddoc_cache_test.hrl
similarity index 59%
rename from src/ddoc_cache/src/ddoc_cache_util.erl
rename to src/ddoc_cache/test/ddoc_cache_test.hrl
index fb3c0b9..73f7bc2 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/test/ddoc_cache_test.hrl
@@ -10,25 +10,17 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
 
+-define(CACHE, ddoc_cache_entries).
+-define(LRU, ddoc_cache_lru).
+-define(OPENERS, ddoc_cache_openers).
 
--export([
-    new_uuid/0
-]).
+-define(FOOBAR, <<"_design/foobar">>).
+-define(VDU, <<"_design/vdu">>).
+-define(CUSTOM, <<"_design/custom">>).
 
-
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+-record(entry, {
+    key,
+    val,
+    pid
+}).
diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
new file mode 100644
index 0000000..cdd372b
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_tutil).
+
+
+-compile(export_all).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+start_couch() ->
+    %purge_modules(),
+    Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
+    TmpDb = ?tempdb(),
+    ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
+    {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX]),
+    {TmpDb, Ctx}.
+
+
+stop_couch({_TmpDb, Ctx}) ->
+    test_util:stop_couch(Ctx).
+
+
+clear() ->
+    application:stop(ddoc_cache),
+    application:start(ddoc_cache).
+
+
+get_rev(DbName, DDocId) ->
+    {_, Ref} = erlang:spawn_monitor(fun() ->
+        {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]),
+        {Depth, [RevId | _]} = Revs,
+        exit({Depth, RevId})
+    end),
+    receive
+        {'DOWN', Ref, _, _, Rev} -> Rev
+    end.
+
+
+ddocs() ->
+    FooBar = #doc{
+        id = <<"_design/foobar">>,
+        body = {[
+            {<<"foo">>, <<"bar">>}
+        ]}
+    },
+    VDU = #doc{
+        id = <<"_design/vdu">>,
+        body = {[
+            {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
+        ]}
+    },
+    Custom = #doc{
+        id = <<"_design/custom">>,
+        body = {[
+            {<<"status">>, <<"ok">>},
+            {<<"custom">>, <<"hotrod">>}
+        ]}
+    },
+    [FooBar, VDU, Custom].
+
+
+purge_modules() ->
+    case application:get_key(ddoc_cache, modules) of
+        {ok, Mods} ->
+            lists:foreach(fun(Mod) ->
+                code:delete(Mod),
+                code:purge(Mod)
+            end, Mods);
+        undefined ->
+            ok
+    end.

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 12/17: FIXUP: Make the refresh timeout configurable

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit cd3687ac39ef42e602de5b8e35f169efc8d3fe1a
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 14:11:13 2017 -0500

    FIXUP: Make the refresh timeout configurable
---
 rel/overlay/etc/default.ini             |  8 ++++++++
 src/ddoc_cache/src/ddoc_cache_entry.erl | 13 ++++++++-----
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index eaa0801..be178e2 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -188,6 +188,14 @@ credentials = false
 ; List of accepted methods
 ; methods =
 
+; Configuration for the design document cache
+;[ddoc_cache]
+; The maximum number of entries to keep in the cache
+;max_size = 1000
+; The period, in milliseconds, that each cache entry waits before
+; automatically refreshing
+;refresh_timeout = 67000
+
 [x_frame_options]
 ; Settings same-origin will return X-Frame-Options: SAMEORIGIN.
 ; If same origin is set, it will ignore the hosts setting
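
As a hedged usage example (not part of the patch above): an operator who wanted
cache entries to refresh every 30 seconds while keeping the default cache size
could uncomment and override the settings documented in the default.ini hunk
above in their local.ini. The values here are illustrative only.

    [ddoc_cache]
    max_size = 1000
    refresh_timeout = 30000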
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 79c3dcf..db5e0b1 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -123,11 +123,10 @@ init({Key, Default}) ->
     NewTs = os:timestamp(),
     true = ets:update_element(?CACHE, Key, Updates),
     true = ets:insert(?LRU, {{NewTs, Key, self()}}),
-    Msg = {'$gen_cast', refresh},
     St = #st{
         key = Key,
         val = {open_ok, {ok, Default}},
-        opener = erlang:send_after(?REFRESH_TIMEOUT, self(), Msg),
+        opener = start_timer(),
         waiters = undefined,
         ts = NewTs,
         accessed = 1
@@ -230,11 +229,9 @@ handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
     case Resp of
         {open_ok, Key, {ok, Val}} ->
             update_cache(St, Val),
-            Msg = {'$gen_cast', refresh},
-            Timer = erlang:send_after(?REFRESH_TIMEOUT, self(), Msg),
             NewSt1 = St#st{
                 val = {open_ok, {ok, Val}},
-                opener = Timer
+                opener = start_timer()
             },
             NewSt2 = update_lru(NewSt1),
             if not is_list(St#st.waiters) -> ok; true ->
@@ -267,6 +264,12 @@ spawn_opener(Key) ->
     Pid.
 
 
+start_timer() ->
+    TimeOut = config:get_integer(
+            "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT),
+    erlang:send_after(TimeOut, self(), {'$gen_cast', refresh}).
+
+
 do_open(Key) ->
     try recover(Key) of
         Resp ->

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 15/17: FIXUP: Don't manually track cache size

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 66f03d2c1e6d179af22c7a427bd498f6c2074eed
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 16:37:23 2017 -0500

    FIXUP: Don't manually track cache size
    
    Not quite sure why I started doing this in the first place. I think it
    was because I was worried ets:info/2 might be expensive, but I can get
    away with only calling it once here, so it's no big deal any more.
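
A minimal sketch (not taken from this commit) of the pattern being adopted:
read the current size straight from ets when handling an insert, instead of
threading a size counter through the server state. The table name matches the
?CACHE macro used in the tests; the max size lookup mirrors the existing
config call in the diff below.

    cache_has_room() ->
        %% ets:info/2 with the 'size' item returns the number of objects
        %% currently stored in the table, so no manual bookkeeping is needed.
        MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
        CurSize = ets:info(ddoc_cache_entries, size),
        CurSize < MaxSize.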
---
 src/ddoc_cache/src/ddoc_cache_lru.erl            | 14 ++++-----
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl |  6 ++--
 src/ddoc_cache/test/ddoc_cache_lru_test.erl      | 38 ++++++++++++++++++++++--
 3 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 35173f4..2764959 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -42,7 +42,6 @@
 -record(st, {
     pids, % pid -> key
     dbs, % dbname -> docid -> key -> pid
-    size,
     evictor
 }).
 
@@ -93,7 +92,6 @@ init(_) ->
     {ok, #st{
         pids = Pids,
         dbs = Dbs,
-        size = 0,
         evictor = Evictor
     }}.
 
@@ -109,20 +107,20 @@ terminate(_Reason, St) ->
 handle_call({start, Key, Default}, _From, St) ->
     #st{
         pids = Pids,
-        dbs = Dbs,
-        size = CurSize
+        dbs = Dbs
     } = St,
     case ets:lookup(?CACHE, Key) of
         [] ->
             MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+            CurSize = ets:info(?CACHE, size),
             case trim(St, CurSize, max(0, MaxSize)) of
-                {ok, N} ->
+                ok ->
                     true = ets:insert_new(?CACHE, #entry{key = Key}),
                     {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
                     true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
                     ok = khash:put(Pids, Pid, Key),
                     store_key(Dbs, Key, Pid),
-                    {reply, {ok, Pid}, St#st{size = CurSize - N + 1}};
+                    {reply, {ok, Pid}, St};
                 full ->
                     ?EVENT(full, Key),
                     {reply, full, St}
@@ -251,7 +249,7 @@ trim(_, _, 0) ->
     full;
 
 trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
-    {ok, 0};
+    ok;
 
 trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
     case ets:first(?LRU) of
@@ -259,7 +257,7 @@ trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
             full;
         {_Ts, Key, Pid} ->
             remove_entry(St, Key, Pid),
-            {ok, 1}
+            trim(St, CurSize - 1, MaxSize)
     end.
 
 
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
index 395f560..91182ca 100644
--- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -33,7 +33,7 @@ coverage_test_() ->
 
 restart_lru() ->
     send_bad_messages(ddoc_cache_lru),
-    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c, d})),
+    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
     ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
 
 
@@ -47,7 +47,7 @@ restart_evictor() ->
     meck:new(ddoc_cache_ev, [passthrough]),
     try
         State = sys:get_state(ddoc_cache_lru),
-        Evictor = element(5, State),
+        Evictor = element(4, State),
         Ref = erlang:monitor(process, Evictor),
         exit(Evictor, shutdown),
         receive
@@ -57,7 +57,7 @@ restart_evictor() ->
         end,
         meck:wait(ddoc_cache_ev, event, [evictor_died, '_'], 1000),
         NewState = sys:get_state(ddoc_cache_lru),
-        NewEvictor = element(5, NewState),
+        NewEvictor = element(4, NewState),
         ?assertNotEqual(Evictor, NewEvictor)
     after
         meck:unload()
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index 77b39cd..dd77828 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -63,7 +63,8 @@ check_lru_test_() ->
             fun check_multi_start/1,
             fun check_multi_open/1,
             fun check_capped_size/1,
-            fun check_full_cache/1
+            fun check_full_cache/1,
+            fun check_cache_refill/1
         ]}
     }.
 
@@ -88,7 +89,7 @@ check_multi_start(_) ->
     ?assert(is_process_alive(Opener)),
     Opener ! go,
     receive {'DOWN', OpenerRef, _, _, _} -> ok end,
-    lists:foreach(fun({CPid, Ref}) ->
+    lists:foreach(fun({_, Ref}) ->
         receive
             {'DOWN', Ref, _, _, normal} -> ok
         end
@@ -159,3 +160,36 @@ check_full_cache(_) ->
         meck:wait(I - 5, ddoc_cache_ev, event, [full, '_'], 1000),
         ?assertEqual(5, ets:info(?CACHE, size))
     end, lists:seq(6, 20)).
+
+
+check_cache_refill({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+
+    InitDDoc = fun(I) ->
+        NumBin = list_to_binary(integer_to_list(I)),
+        DDocId = <<"_design/", NumBin/binary>>,
+        Doc = #doc{id = DDocId, body = {[]}},
+        {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
+        {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
+        {ddoc_cache_entry_ddocid, {DbName, DDocId}}
+    end,
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        couch_log:error("STARTED? ~p", [Key]),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+        ?assert(ets:info(?CACHE, size) > 0)
+    end, lists:seq(1, 5)),
+
+    ShardName = element(2, hd(mem3:shards(DbName))),
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+        ?assert(ets:info(?CACHE, size) > 0)
+    end, lists:seq(6, 10)).

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 14/17: FIXUP: Ignore unused variable

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 0743e3f6c5421bd206f0c4f10d8237e9870d7247
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 15:54:59 2017 -0500

    FIXUP: Ignore unused variable
---
 src/ddoc_cache/src/ddoc_cache_lru.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 6ae4de4..35173f4 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -177,7 +177,7 @@ handle_cast({do_refresh, DbName, DDocIdList}, St) ->
             lists:foreach(fun(DDocId) ->
                 case khash:lookup(DDocIds, DDocId) of
                     {value, Keys} ->
-                        khash:fold(Keys, fun(Key, Pid, _) ->
+                        khash:fold(Keys, fun(_, Pid, _) ->
                             ddoc_cache_entry:refresh(Pid)
                         end, nil);
                     not_found ->

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 13/17: FIXUP: Remember to clear out the waiters list

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9fbb086583639bb7a6c5e51419ad41ca3cfd8e46
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 6 14:12:01 2017 -0500

    FIXUP: Remember to clear out the waiters list
---
 src/ddoc_cache/src/ddoc_cache_entry.erl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index db5e0b1..2660293 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -231,7 +231,8 @@ handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
             update_cache(St, Val),
             NewSt1 = St#st{
                 val = {open_ok, {ok, Val}},
-                opener = start_timer()
+                opener = start_timer(),
+                waiters = undefined
             },
             NewSt2 = update_lru(NewSt1),
             if not is_list(St#st.waiters) -> ok; true ->

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 16/17: FIXUP: Crash the LRU if its evictor dies

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 69dcaa4598b447a6dddcbc3fb9055a43136b01bd
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Mon Jul 10 13:24:15 2017 -0500

    FIXUP: Crash the LRU if its evictor dies
    
    The time between the process death and restart is a window where we
    could miss eviction events, which would lead to a temporarily inconsistent
    cache. Given that we expect this to happen basically never, we'll just
    reuse the restart-the-world approach to maintaining consistency.
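
A rough sketch of the restart-the-world idea this leans on (the supervisor
layout, child order, and restart intensity here are assumptions, not taken
from this branch): letting the LRU stop when its evictor dies only keeps the
cache consistent because a supervisor above it restarts the cache as a unit,
rebuilding the tables instead of trying to reconcile missed eviction events.

    %% Hypothetical one_for_all supervisor illustrating "restart the world";
    %% the child specs and strategy are assumptions for this sketch only.
    init([]) ->
        Children = [
            {ddoc_cache_tables,
                {ddoc_cache_tables, start_link, []},
                permanent, 5000, worker, [ddoc_cache_tables]},
            {ddoc_cache_lru,
                {ddoc_cache_lru, start_link, []},
                permanent, 5000, worker, [ddoc_cache_lru]}
        ],
        %% one_for_all: if any child exits abnormally, restart them all.
        {ok, {{one_for_all, 5, 10}, Children}}.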
---
 src/ddoc_cache/src/ddoc_cache_lru.erl            |  9 +++------
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl | 18 ++++++++----------
 2 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 2764959..ff70342 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -89,6 +89,7 @@ init(_) ->
     {ok, Evictor} = couch_event:link_listener(
             ?MODULE, handle_db_event, nil, [all_dbs]
         ),
+    ?EVENT(lru_init, nil),
     {ok, #st{
         pids = Pids,
         dbs = Dbs,
@@ -191,12 +192,8 @@ handle_cast(Msg, St) ->
     {stop, {invalid_cast, Msg}, St}.
 
 
-handle_info({'EXIT', Pid, _Reason}, #st{evictor = Pid} = St) ->
-    ?EVENT(evictor_died, Pid),
-    {ok, Evictor} = couch_event:link_listener(
-            ?MODULE, handle_db_event, nil, [all_dbs]
-        ),
-    {noreply, St#st{evictor=Evictor}};
+handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
+    {stop, Reason, St};
 
 handle_info({'EXIT', Pid, normal}, St) ->
     % This clause handles when an entry starts
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
index 91182ca..57959f5 100644
--- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -26,7 +26,7 @@ coverage_test_() ->
         [
             fun restart_lru/0,
             fun restart_tables/0,
-            fun restart_evictor/0
+            fun stop_on_evictor_death/0
         ]
     }.
 
@@ -43,22 +43,20 @@ restart_tables() ->
     ?assertEqual({ok, foo}, ddoc_cache_tables:code_change(1, foo, [])).
 
 
-restart_evictor() ->
+stop_on_evictor_death() ->
     meck:new(ddoc_cache_ev, [passthrough]),
     try
-        State = sys:get_state(ddoc_cache_lru),
+        Lru = whereis(ddoc_cache_lru),
+        State = sys:get_state(Lru),
         Evictor = element(4, State),
-        Ref = erlang:monitor(process, Evictor),
+        Ref = erlang:monitor(process, Lru),
         exit(Evictor, shutdown),
         receive
             {'DOWN', Ref, _, _, Reason} ->
-                couch_log:error("MONITOR: ~p", [Reason]),
-                ok
+                ?assertEqual(shutdown, Reason)
         end,
-        meck:wait(ddoc_cache_ev, event, [evictor_died, '_'], 1000),
-        NewState = sys:get_state(ddoc_cache_lru),
-        NewEvictor = element(4, NewState),
-        ?assertNotEqual(Evictor, NewEvictor)
+        meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000),
+        ?assert(whereis(ddoc_cache_lru) /= Lru)
     after
         meck:unload()
     end.

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.