Posted to commits@couchdb.apache.org by da...@apache.org on 2017/07/25 16:17:27 UTC

[couchdb] branch optimize-ddoc-cache updated (99443c3 -> 8eee7b1)

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a change to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    omit 99443c3  FIXUP: Remove debug logging
    omit 876f5da  FIXUP: Remove bad assertions and debug log
    omit da5eca6  FIXUP: Use the shard record definition
    omit 61ae43a  FIXUP: Label state variable with a name
    omit 4d54571  FIXUP: Reorder clauses for readability
    omit b0e35a2  FIXUP: Factor out common code.
    omit 102b5f5  FIXUP: Remove old tables process
    omit cb6cf4c  FIXUP: Change from entry count to memory size
    omit 8e66b07  FIXUP: Process processes in the process of dying
    omit d538f02  Silence compiler warning
    omit c850ec5  FIXUP: Tyypoo
    omit 1b4cbf2  Fix test race with ddoc_cache
    omit 91ea494  FIXUP: Move invalidation to ddoc_cache_entry
    omit 652b263  TMP: soak-javascript target
    omit f3f3510  FIXUP: Prevent dirty reads from cache
    omit 3181e05  FIXUP: Don't send possibly large messages
    omit eefdc0d  FIXUP: Add vsn attribute
    omit 53f60c5  FIXUP: Remove unnecessary catch
    omit 72fd30b  FIXUP: Crash the LRU if its evictor dies
    omit 7cd7a8d  FIXUP: Don't manually track cache size
    omit 2ebb155  FIXUP: Ignore unused variable
    omit 7b00668  FIXUP: Remember to clear out the waiters list
    omit ae7873b  FIXUP: Make the refresh timeout configurable
    omit 45cf36c  FIXUP: Reuse fabric lookup results
    omit 02d45e8  FIXUP: Comment on use of no_ddocid
    omit 43b0ff8  FIXUP: Evict unused entries
    omit 877a1af  Remove debug logging
    omit 7000fa0  FIXUP: Automatically detect coverage runs
    omit d8ce868  FIXUP: Add tests for no VDU behavior
    omit d559d9a  FIXUP: Re-enable code purging
    omit 15fc956  FIXUP: Add gen_server behavior
    omit 0737d4a  TMP: Simple benchmark script
    omit 345d82b  Rewrite ddoc_cache to improve performance
    omit 0ea1058  Remove duplicated eviction messages
     add 70b9b81  Increase timeout of some replication tests
     add b245d5b  Temporarily disable Jenkins builds
     add 42f26d5  Explicitly mention Facebook "BSD+Patents" license in NOTICE per LEGAL-303
     add 07c3509  Improve JS restartServer() support function
     add 4b63ba8  Fix couch_peruser EUnit test
     add 9fbbbd7  bump all deps to tags
     add d5b4eba  Do not persist restart times setting in os_daemons_tests
     add dd56185  Bump config dep to 1.0.1 (ncrease timeouts for set and get).
     add 375adce  Restore Jenkins builds on master
     add 82559b0  Merge branch 'master' of https://github.com/apache/couchdb
     add a666d57  Fix link to changelog/whatsnew
     add 351679b  Increase timeout in couch's couch_db_mpr_tests module to 30 seconds
     add dbe4eba  Strip ?rev off of logfile-uploader's printed URL
     add e767b34  Update default.ini with all changes since 2.0
     new 22fe0b5  Remove duplicated eviction messages
     new 6b1a815  Rewrite ddoc_cache to improve performance
     new 8eee7b1  TMP: soak-javascript target

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (99443c3)
            \
             N -- N -- N   refs/heads/optimize-ddoc-cache (8eee7b1)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 Jenkinsfile                                        | 343 +++++++++++++++------
 NOTICE                                             |   2 +
 README.rst                                         |   4 +-
 build-aux/logfile-uploader.py                      |   2 +-
 rebar.config.script                                |  22 +-
 rel/overlay/etc/default.ini                        |  22 +-
 src/couch/test/couch_db_mpr_tests.erl              |  45 +--
 src/couch/test/couchdb_os_daemons_tests.erl        |   2 +-
 src/couch_peruser/test/couch_peruser_test.erl      |  60 ++--
 .../test/couch_replicator_large_atts_tests.erl     |   2 +-
 ...ch_replicator_small_max_request_size_target.erl |   2 +-
 src/ddoc_cache/src/ddoc_cache_speed.erl            |  61 ----
 test/javascript/test_setup.js                      |  26 +-
 13 files changed, 354 insertions(+), 239 deletions(-)
 delete mode 100644 src/ddoc_cache/src/ddoc_cache_speed.erl

-- 
To stop receiving notification emails like this one, please contact
['"commits@couchdb.apache.org" <co...@couchdb.apache.org>'].

[couchdb] 02/03: Rewrite ddoc_cache to improve performance

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 6b1a815b850ea58b4de440152870e68a9c3afd82
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 16 13:29:47 2017 -0500

    Rewrite ddoc_cache to improve performance
    
    There were a couple of issues with the previous ddoc_cache implementation
    that made it possible to tip over the ddoc_cache_opener process. First,
    a lot of messages flowed through a single gen_server. Second, the cache
    relied on periodically evicting entries to ensure that an entry was not
    cached forever after it had changed on disk.

    The new version makes two important changes. First, each entry now has an
    associated process that manages it. This process periodically refreshes
    the entry and, if the entry has changed or no longer exists, removes it
    from the cache.

    The second major change is that the cache entry process mutates the
    related ets table entries directly, so cache performance no longer
    depends on a single gen_server processing every update. Using a custom
    entry that does no work, the cache can now sustain roughly one million
    operations a second with twenty thousand clients fighting over a cache
    limited to one thousand items. In production this means that cache
    performance will likely be rate limited by other factors, such as
    loading design documents from disk.
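
    In outline, the per-entry refresher described above boils down to a small
    process that owns one ets row, re-reads the source of truth on a timer,
    and either updates the row in place or deletes it and exits. The sketch
    below is illustrative only and is not the code in this patch (the real
    modules are ddoc_cache_entry and ddoc_cache_lru in the diff that follows);
    the module name, the RecoverFun callback, and the flat {Key, Value} row
    layout are invented for the example, while the 67000 ms interval mirrors
    the refresh_timeout default added to default.ini.

    -module(ddoc_cache_entry_sketch).
    -export([start_link/3]).

    %% Spawn a refresher process that owns a single entry in a public ets table.
    start_link(Table, Key, RecoverFun) ->
        {ok, spawn_link(fun() -> loop(Table, Key, RecoverFun, undefined) end)}.

    loop(Table, Key, RecoverFun, LastVal) ->
        receive
            shutdown ->
                true = ets:delete(Table, Key)
        after 67000 ->  % refresh interval; cf. refresh_timeout in default.ini
            case catch RecoverFun(Key) of
                {ok, LastVal} ->
                    %% Unchanged since the last refresh: nothing to write.
                    loop(Table, Key, RecoverFun, LastVal);
                {ok, NewVal} ->
                    %% Changed on disk: update the shared ets entry directly,
                    %% without routing the write through a central gen_server.
                    true = ets:insert(Table, {Key, NewVal}),
                    loop(Table, Key, RecoverFun, NewVal);
                _NotFoundOrError ->
                    %% Deleted or unreadable: drop the entry and exit.
                    true = ets:delete(Table, Key)
            end
        end.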
---
 rel/overlay/etc/default.ini                        |  20 +-
 src/couch/src/couch_db_updater.erl                 |   2 +-
 src/ddoc_cache/src/ddoc_cache.app.src              |  23 +-
 src/ddoc_cache/src/ddoc_cache.erl                  |  79 ++---
 .../src/{ddoc_cache_util.erl => ddoc_cache.hrl}    |  50 +--
 src/ddoc_cache/src/ddoc_cache_entry.erl            | 352 +++++++++++++++++++++
 ..._cache_util.erl => ddoc_cache_entry_custom.erl} |  27 +-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |  46 +++
 ...he_util.erl => ddoc_cache_entry_ddocid_rev.erl} |  37 ++-
 ...il.erl => ddoc_cache_entry_validation_funs.erl} |  34 +-
 src/ddoc_cache/src/ddoc_cache_lru.erl              | 314 ++++++++++++++++++
 src/ddoc_cache/src/ddoc_cache_opener.erl           | 244 +-------------
 src/ddoc_cache/src/ddoc_cache_sup.erl              |  27 +-
 .../{ddoc_cache_util.erl => ddoc_cache_value.erl}  |  21 +-
 src/ddoc_cache/test/ddoc_cache_basic_test.erl      | 175 ++++++++++
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl   |  77 +++++
 src/ddoc_cache/test/ddoc_cache_disabled_test.erl   |  52 +++
 src/ddoc_cache/test/ddoc_cache_entry_test.erl      | 159 ++++++++++
 .../ddoc_cache_util.erl => test/ddoc_cache_ev.erl} |  21 +-
 src/ddoc_cache/test/ddoc_cache_eviction_test.erl   |  96 ++++++
 src/ddoc_cache/test/ddoc_cache_lru_test.erl        | 185 +++++++++++
 src/ddoc_cache/test/ddoc_cache_no_cache_test.erl   |  79 +++++
 src/ddoc_cache/test/ddoc_cache_open_error_test.erl |  46 +++
 src/ddoc_cache/test/ddoc_cache_opener_test.erl     |  33 ++
 src/ddoc_cache/test/ddoc_cache_refresh_test.erl    | 174 ++++++++++
 src/ddoc_cache/test/ddoc_cache_remove_test.erl     | 224 +++++++++++++
 .../ddoc_cache_test.hrl}                           |  30 +-
 src/ddoc_cache/test/ddoc_cache_tutil.erl           |  96 ++++++
 test/javascript/tests/rewrite.js                   |  14 +-
 29 files changed, 2279 insertions(+), 458 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index c6f4b99..af36cf9 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -95,8 +95,8 @@ enable_xframe_options = false
 ; CouchDB can optionally enforce a maximum uri length;
 ; max_uri_length = 8000
 ; changes_timeout = 60000
-; config_whitelist = 
-; max_uri_length = 
+; config_whitelist =
+; max_uri_length =
 ; rewrite_limit = 100
 ; x_forwarded_host = X-Forwarded-Host
 ; x_forwarded_proto = X-Forwarded-Proto
@@ -105,7 +105,7 @@ enable_xframe_options = false
 max_http_request_size = 67108864 ; 64 MB
 
 ; [httpd_design_handlers]
-; _view = 
+; _view =
 
 ; [ioq]
 ; concurrency = 10
@@ -119,7 +119,7 @@ port = 6984
 
 ; [chttpd_auth_cache]
 ; max_lifetime = 600000
-; max_objects = 
+; max_objects =
 ; max_size = 104857600
 
 ; [mem3]
@@ -130,7 +130,7 @@ port = 6984
 
 ; [fabric]
 ; all_docs_concurrency = 10
-; changes_duration = 
+; changes_duration =
 ; shard_timeout_factor = 2
 ; uuid_prefix_len = 7
 
@@ -163,7 +163,7 @@ iterations = 10 ; iterations for password hashing
 ; proxy_use_secret = false
 ; comma-separated list of public fields, 404 if empty
 ; public_fields =
-; secret = 
+; secret =
 ; users_db_public = false
 
 ; CSP (Content Security Policy) Support for _utils
@@ -194,6 +194,14 @@ credentials = false
 ; List of accepted methods
 ; methods =
 
+; Configuration for the design document cache
+;[ddoc_cache]
+; The maximum size of the cache in bytes
+;max_size = 104857600 ; 100 MiB
+; The period each cache entry should wait before
+; automatically refreshing in milliseconds
+;refresh_timeout = 67000
+
 [x_frame_options]
 ; Settings same-origin will return X-Frame-Options: SAMEORIGIN.
 ; If same origin is set, it will ignore the hosts setting
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index b4ad257..78e0b8c 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -319,7 +319,7 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
                     couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
                 end, UpdatedDDocIds),
                 couch_event:notify(Db2#db.name, ddoc_updated),
-                ddoc_cache:evict(Db2#db.name, UpdatedDDocIds),
+                ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
                 refresh_validate_doc_funs(Db2);
             false ->
                 Db2
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
index a64b2f5..c931ca0 100644
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ b/src/ddoc_cache/src/ddoc_cache.app.src
@@ -13,14 +13,8 @@
 {application, ddoc_cache, [
     {description, "Design Document Cache"},
     {vsn, git},
-    {modules, [
-        ddoc_cache,
-        ddoc_cache_app,
-        ddoc_cache_opener,
-        ddoc_cache_sup,
-        ddoc_cache_util
-    ]},
     {registered, [
+        ddoc_cache_tables,
         ddoc_cache_lru,
         ddoc_cache_opener
     ]},
@@ -28,17 +22,12 @@
         kernel,
         stdlib,
         crypto,
+        config,
         couch_event,
-        ets_lru,
-        mem3,
-        fabric,
         couch_log,
-        couch_stats
+        couch_stats,
+        mem3,
+        fabric
     ]},
-    {mod, {ddoc_cache_app, []}},
-    {env, [
-        {max_objects, unlimited},
-        {max_size, 104857600}, % 100M
-        {max_lifetime, 60000} % 1m
-    ]}
+    {mod, {ddoc_cache_app, []}}
 ]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
index ed93309..50cac30 100644
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ b/src/ddoc_cache/src/ddoc_cache.erl
@@ -12,86 +12,43 @@
 
 -module(ddoc_cache).
 
--export([
-    start/0,
-    stop/0
-]).
 
 -export([
     open_doc/2,
     open_doc/3,
     open_validation_funs/1,
-    evict/2,
+    open_custom/2,
+    refresh/2,
 
     %% deprecated
     open/2
 ]).
 
-start() ->
-    application:start(ddoc_cache).
-
-stop() ->
-    application:stop(ddoc_cache).
 
 open_doc(DbName, DocId) ->
-    Key = {DbName, DocId, '_'},
-    case ddoc_cache_opener:match_newest(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, DocId);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_doc(DbName, DocId)
-    end.
+    Key = {ddoc_cache_entry_ddocid, {DbName, DocId}},
+    ddoc_cache_lru:open(Key).
+
 
 open_doc(DbName, DocId, RevId) ->
-    Key = {DbName, DocId, RevId},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, DocId, RevId);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_doc(DbName, DocId, RevId)
-    end.
+    Key = {ddoc_cache_entry_ddocid_rev, {DbName, DocId, RevId}},
+    ddoc_cache_lru:open(Key).
+
 
 open_validation_funs(DbName) ->
-    Key = {DbName, validation_funs},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_validation_funs(DbName);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            ddoc_cache_opener:recover_validation_funs(DbName)
-    end.
+    Key = {ddoc_cache_entry_validation_funs, DbName},
+    ddoc_cache_lru:open(Key).
+
 
 open_custom(DbName, Mod) ->
-    Key = {DbName, Mod},
-    case ddoc_cache_opener:lookup(Key) of
-        {ok, _} = Resp ->
-            couch_stats:increment_counter([ddoc_cache, hit]),
-            Resp;
-        missing ->
-            couch_stats:increment_counter([ddoc_cache, miss]),
-            ddoc_cache_opener:open_doc(DbName, Mod);
-        recover ->
-            couch_stats:increment_counter([ddoc_cache, recovery]),
-            Mod:recover(DbName)
-    end.
-
-evict(ShardDbName, DDocIds) ->
+    Key = {ddoc_cache_entry_custom, {DbName, Mod}},
+    ddoc_cache_lru:open(Key).
+
+
+refresh(ShardDbName, DDocIds) when is_list(DDocIds) ->
     DbName = mem3:dbname(ShardDbName),
-    ddoc_cache_opener:evict_docs(DbName, DDocIds).
+    ddoc_cache_lru:refresh(DbName, DDocIds).
+
 
 open(DbName, validation_funs) ->
     open_validation_funs(DbName);
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache.hrl
similarity index 52%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache.hrl
index 24c4b0d..dba0d37 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache.hrl
@@ -10,25 +10,31 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
-
-
--export([
-    new_uuid/0
-]).
-
-
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+-type dbname() :: iodata().
+-type docid() :: iodata().
+-type doc_hash() :: <<_:128>>.
+-type revision() :: {pos_integer(), doc_hash()}.
+
+-define(CACHE, ddoc_cache_entries).
+-define(LRU, ddoc_cache_lru).
+-define(REFRESH_TIMEOUT, 67000).
+-define(SHUTDOWN_TIMEOUT, 1000).
+
+-record(entry, {
+    key,
+    val,
+    pid
+}).
+
+-record(opener, {
+    key,
+    pid,
+    clients
+}).
+
+
+-ifdef(TEST).
+-define(EVENT(Name, Arg), ddoc_cache_ev:event(Name, Arg)).
+-else.
+-define(EVENT(Name, Arg), ignore).
+-endif.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
new file mode 100644
index 0000000..79f67bd
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -0,0 +1,352 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    dbname/1,
+    ddocid/1,
+    recover/1,
+    insert/2,
+
+    start_link/2,
+    shutdown/1,
+    open/2,
+    accessed/1,
+    refresh/1
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+-export([
+    do_open/1
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-ifndef(TEST).
+-define(ENTRY_SHUTDOWN_TIMEOUT, 5000).
+-else.
+-define(ENTRY_SHUTDOWN_TIMEOUT, 500).
+-endif.
+
+
+-record(st, {
+    key,
+    val,
+    opener,
+    waiters,
+    ts,
+    accessed
+}).
+
+
+dbname({Mod, Arg}) ->
+    Mod:dbname(Arg).
+
+
+ddocid({Mod, Arg}) ->
+    Mod:ddocid(Arg).
+
+
+recover({Mod, Arg}) ->
+    Mod:recover(Arg).
+
+
+insert({Mod, Arg}, Value) ->
+    Mod:insert(Arg, Value).
+
+
+start_link(Key, Default) ->
+    Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
+    {ok, Pid}.
+
+
+shutdown(Pid) ->
+    Ref = erlang:monitor(process, Pid),
+    ok = gen_server:cast(Pid, shutdown),
+    receive
+        {'DOWN', Ref, process, Pid, normal} ->
+            ok;
+        {'DOWN', Ref, process, Pid, Reason} ->
+            erlang:exit(Reason)
+    after ?ENTRY_SHUTDOWN_TIMEOUT ->
+        erlang:demonitor(Ref, [flush]),
+        erlang:exit({timeout, {entry_shutdown, Pid}})
+    end.
+
+
+open(Pid, Key) ->
+    try
+        Resp = gen_server:call(Pid, open),
+        case Resp of
+            {open_ok, Val} ->
+                Val;
+            {open_error, {T, R, S}} ->
+                erlang:raise(T, R, S)
+        end
+    catch exit:_ ->
+        % It's possible that this process was evicted just
+        % before we tried talking to it. Just fall back
+        % to a standard recovery.
+        recover(Key)
+    end.
+
+
+accessed(Pid) ->
+    gen_server:cast(Pid, accessed).
+
+
+refresh(Pid) ->
+    gen_server:cast(Pid, force_refresh).
+
+
+init({Key, undefined}) ->
+    true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
+    St = #st{
+        key = Key,
+        opener = spawn_opener(Key),
+        waiters = [],
+        accessed = 1
+    },
+    ?EVENT(started, Key),
+    gen_server:enter_loop(?MODULE, [], St);
+
+init({Key, Wrapped}) ->
+    Default = ddoc_cache_value:unwrap(Wrapped),
+    Updates = [
+        {#entry.val, Default},
+        {#entry.pid, self()}
+    ],
+    NewTs = os:timestamp(),
+    true = ets:update_element(?CACHE, Key, Updates),
+    true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+    St = #st{
+        key = Key,
+        val = {open_ok, {ok, Default}},
+        opener = start_timer(),
+        waiters = [],
+        ts = NewTs,
+        accessed = 1
+    },
+    ?EVENT(default_started, Key),
+    gen_server:enter_loop(?MODULE, [], St, hibernate).
+
+
+terminate(_Reason, St) ->
+    #st{
+        key = Key,
+        opener = Pid,
+        ts = Ts
+    } = St,
+    % We may have already deleted our cache entry
+    % during shutdown
+    Pattern = #entry{key = Key, pid = self(), _ = '_'},
+    CacheMSpec = [{Pattern, [], [true]}],
+    true = ets:select_delete(?CACHE, CacheMSpec) < 2,
+    % We may have already deleted our LRU entry
+    % during shutdown
+    if Ts == undefined -> ok; true ->
+        LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+        true = ets:select_delete(?LRU, LruMSpec) < 2
+    end,
+    % Blow away any current opener if it exists
+    if not is_pid(Pid) -> ok; true ->
+        catch exit(Pid, kill)
+    end,
+    ok.
+
+
+handle_call(open, From, #st{opener = Pid} = St) when is_pid(Pid) ->
+    NewSt = St#st{
+        waiters = [From | St#st.waiters]
+    },
+    {noreply, NewSt};
+
+handle_call(open, _From, St) ->
+    {reply, St#st.val, St};
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(accessed, St) ->
+    ?EVENT(accessed, St#st.key),
+    drain_accessed(),
+    NewSt = St#st{
+        accessed = St#st.accessed + 1
+    },
+    {noreply, update_lru(NewSt)};
+
+handle_cast(force_refresh, St) ->
+    % If we had frequent design document updates
+    % they could end up racing accessed events and
+    % end up prematurely evicting this entry from
+    % cache. To prevent this we just make sure that
+    % accessed is set to at least 1 before we
+    % execute a refresh.
+    NewSt = if St#st.accessed > 0 -> St; true ->
+        St#st{accessed = 1}
+    end,
+    % We remove the cache entry value so that any
+    % new client comes to us for the refreshed
+    % value.
+    true = ets:update_element(?CACHE, St#st.key, {#entry.val, undefined}),
+    handle_cast(refresh, NewSt);
+
+handle_cast(refresh, #st{accessed = 0} = St) ->
+    {stop, normal, St};
+
+handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
+    #st{
+        key = Key
+    } = St,
+    erlang:cancel_timer(Ref),
+    NewSt = St#st{
+        opener = spawn_opener(Key),
+        accessed = 0
+    },
+    {noreply, NewSt};
+
+handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
+    catch exit(Pid, kill),
+    receive
+        {'DOWN', _, _, Pid, _} -> ok
+    end,
+    NewSt = St#st{
+        opener = spawn_opener(St#st.key),
+        accessed = 0
+    },
+    {noreply, NewSt};
+
+handle_cast(shutdown, St) ->
+    remove_from_cache(St),
+    {stop, normal, St};
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
+    case Resp of
+        {open_ok, Key, {ok, Val}} ->
+            update_cache(St, Val),
+            NewSt1 = St#st{
+                val = {open_ok, {ok, Val}},
+                opener = start_timer(),
+                waiters = []
+            },
+            NewSt2 = update_lru(NewSt1),
+            respond(St#st.waiters, {open_ok, {ok, Val}}),
+            {noreply, NewSt2};
+        {Status, Key, Other} ->
+            NewSt = St#st{
+                val = {Status, Other},
+                opener = undefined,
+                waiters = undefined
+            },
+            remove_from_cache(NewSt),
+            respond(St#st.waiters, {Status, Other}),
+            {stop, normal, NewSt}
+    end;
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_, St, _) ->
+    {ok, St}.
+
+
+spawn_opener(Key) ->
+    {Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
+    Pid.
+
+
+start_timer() ->
+    TimeOut = config:get_integer(
+            "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT),
+    erlang:send_after(TimeOut, self(), {'$gen_cast', refresh}).
+
+
+do_open(Key) ->
+    try recover(Key) of
+        Resp ->
+            erlang:exit({open_ok, Key, Resp})
+    catch T:R ->
+        S = erlang:get_stacktrace(),
+        erlang:exit({open_error, Key, {T, R, S}})
+    end.
+
+
+update_lru(#st{key = Key, ts = Ts} = St) ->
+    remove_from_lru(Ts, Key),
+    NewTs = os:timestamp(),
+    true = ets:insert(?LRU, {{NewTs, Key, self()}}),
+    St#st{ts = NewTs}.
+
+
+update_cache(#st{val = undefined} = St, Val) ->
+    true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
+    ?EVENT(inserted, St#st.key);
+
+update_cache(#st{val = V1} = _St, V2) when {open_ok, {ok, V2}} == V1 ->
+    ?EVENT(update_noop, _St#st.key);
+
+update_cache(St, Val) ->
+    true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
+    ?EVENT(updated, {St#st.key, Val}).
+
+
+remove_from_cache(St) ->
+    #st{
+        key = Key,
+        ts = Ts
+    } = St,
+    Pattern = #entry{key = Key, pid = self(), _ = '_'},
+    CacheMSpec = [{Pattern, [], [true]}],
+    1 = ets:select_delete(?CACHE, CacheMSpec),
+    remove_from_lru(Ts, Key),
+    ?EVENT(removed, St#st.key),
+    ok.
+
+
+remove_from_lru(Ts, Key) ->
+    if Ts == undefined -> ok; true ->
+        LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+        1 = ets:select_delete(?LRU, LruMSpec)
+    end.
+
+
+drain_accessed() ->
+    receive
+        {'$gen_cast', accessed} ->
+            drain_accessed()
+    after 0 ->
+        ok
+    end.
+
+
+respond(Waiters, Resp) ->
+    [gen_server:reply(W, Resp) || W <- Waiters].
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
similarity index 61%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_custom.erl
index 24c4b0d..9eaf16f 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
@@ -10,25 +10,28 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_custom).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1,
+    insert/2
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
+dbname({DbName, _}) ->
+    DbName.
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+ddocid(_) ->
+    no_ddocid.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+recover({DbName, Mod}) ->
+    Mod:recover(DbName).
+
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
new file mode 100644
index 0000000..5248469
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry_ddocid).
+
+
+-export([
+    dbname/1,
+    ddocid/1,
+    recover/1,
+    insert/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+dbname({DbName, _}) ->
+    DbName.
+
+
+ddocid({_, DDocId}) ->
+    DDocId.
+
+
+recover({DbName, DDocId}) ->
+    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
+
+
+insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
+    {Depth, [RevId | _]} = Revs,
+    Rev = {Depth, RevId},
+    Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, Rev}},
+    spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
similarity index 50%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 24c4b0d..868fa77 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -10,25 +10,38 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_ddocid_rev).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1,
+    insert/2
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
+-include_lib("couch/include/couch_db.hrl").
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+dbname({DbName, _, _}) ->
+    DbName.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+ddocid({_, DDocId, _}) ->
+    DDocId.
+
+
+recover({DbName, DDocId, Rev}) ->
+    Opts = [ejson_body, ?ADMIN_CTX],
+    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
+    Resp.
+
+
+insert({DbName, DDocId, _Rev}, {ok, #doc{} = DDoc}) ->
+    Key = {ddoc_cache_entry_ddocid, {DbName, DDocId}},
+    spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
+
+insert(_, _) ->
+    ok.
+
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
similarity index 55%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
index 24c4b0d..2182dea 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
@@ -10,25 +10,35 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_entry_validation_funs).
 
 
 -export([
-    new_uuid/0
+    dbname/1,
+    ddocid/1,
+    recover/1,
+    insert/2
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
+dbname(DbName) ->
+    DbName.
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+ddocid(_) ->
+    no_ddocid.
 
 
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+recover(DbName) ->
+    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
+    Funs = lists:flatmap(fun(DDoc) ->
+        case couch_doc:get_validate_doc_fun(DDoc) of
+            nil -> [];
+            Fun -> [Fun]
+        end
+    end, DDocs),
+    {ok, Funs}.
+
+
+insert(_, _) ->
+    ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
new file mode 100644
index 0000000..b387768
--- /dev/null
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -0,0 +1,314 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    open/1,
+    insert/2,
+    refresh/2
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+-export([
+    handle_db_event/3
+]).
+
+
+-include("ddoc_cache.hrl").
+
+
+-record(st, {
+    pids, % pid -> key
+    dbs, % dbname -> docid -> key -> pid
+    evictor
+}).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+open(Key) ->
+    try ets:lookup(?CACHE, Key) of
+        [] ->
+            lru_start(Key, true);
+        [#entry{pid = undefined}] ->
+            lru_start(Key, false);
+        [#entry{val = undefined, pid = Pid}] ->
+            couch_stats:increment_counter([ddoc_cache, miss]),
+            ddoc_cache_entry:open(Pid, Key);
+        [#entry{val = Val, pid = Pid}] ->
+            couch_stats:increment_counter([ddoc_cache, hit]),
+            ddoc_cache_entry:accessed(Pid),
+            {ok, Val}
+    catch _:_ ->
+        couch_stats:increment_counter([ddoc_cache, recovery]),
+        ddoc_cache_entry:recover(Key)
+    end.
+
+
+insert(Key, Value) ->
+    case ets:lookup(?CACHE, Key) of
+        [] ->
+            Wrapped = ddoc_cache_value:wrap(Value),
+            gen_server:call(?MODULE, {start, Key, Wrapped}, infinity);
+        [#entry{}] ->
+            ok
+    end.
+
+
+refresh(DbName, DDocIds) ->
+    gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
+
+
+init(_) ->
+    process_flag(trap_exit, true),
+    BaseOpts = [public, named_table],
+    CacheOpts = [
+        set,
+        {read_concurrency, true},
+        {keypos, #entry.key}
+    ] ++ BaseOpts,
+    ets:new(?CACHE, CacheOpts),
+    ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
+    {ok, Pids} = khash:new(),
+    {ok, Dbs} = khash:new(),
+    {ok, Evictor} = couch_event:link_listener(
+            ?MODULE, handle_db_event, nil, [all_dbs]
+        ),
+    ?EVENT(lru_init, nil),
+    {ok, #st{
+        pids = Pids,
+        dbs = Dbs,
+        evictor = Evictor
+    }}.
+
+
+terminate(_Reason, St) ->
+    case is_pid(St#st.evictor) of
+        true -> exit(St#st.evictor, kill);
+        false -> ok
+    end,
+    ok.
+
+
+handle_call({start, Key, Default}, _From, St) ->
+    #st{
+        pids = Pids,
+        dbs = Dbs
+    } = St,
+    case ets:lookup(?CACHE, Key) of
+        [] ->
+            MaxSize = config:get_integer("ddoc_cache", "max_size", 104857600),
+            case trim(St, max(0, MaxSize)) of
+                ok ->
+                    true = ets:insert_new(?CACHE, #entry{key = Key}),
+                    {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
+                    true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
+                    ok = khash:put(Pids, Pid, Key),
+                    store_key(Dbs, Key, Pid),
+                    {reply, {ok, Pid}, St};
+                full ->
+                    ?EVENT(full, Key),
+                    {reply, full, St}
+            end;
+        [#entry{pid = Pid}] ->
+            {reply, {ok, Pid}, St}
+    end;
+
+handle_call(Msg, _From, St) ->
+    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_cast({evict, DbName}, St) ->
+    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+    {noreply, St};
+
+handle_cast({refresh, DbName, DDocIds}, St) ->
+    gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
+    {noreply, St};
+
+handle_cast({do_evict, DbName}, St) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    ToRem = case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
+                khash:to_list(Keys) ++ Acc1
+            end, []),
+            ?EVENT(evicted, DbName),
+            AccOut;
+        not_found ->
+            ?EVENT(evict_noop, DbName),
+            []
+    end,
+    lists:foreach(fun({Key, Pid}) ->
+        remove_entry(St, Key, Pid)
+    end, ToRem),
+    khash:del(Dbs, DbName),
+    {noreply, St};
+
+handle_cast({do_refresh, DbName, DDocIdList}, St) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    % We prepend no_ddocid to the DDocIdList below
+    % so that we refresh all custom and validation
+    % function entries which load data from all
+    % design documents.
+    case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            lists:foreach(fun(DDocId) ->
+                case khash:lookup(DDocIds, DDocId) of
+                    {value, Keys} ->
+                        khash:fold(Keys, fun(_, Pid, _) ->
+                            ddoc_cache_entry:refresh(Pid)
+                        end, nil);
+                    not_found ->
+                        ok
+                end
+            end, [no_ddocid | DDocIdList]);
+        not_found ->
+            ok
+    end,
+    {noreply, St};
+
+handle_cast(Msg, St) ->
+    {stop, {invalid_cast, Msg}, St}.
+
+
+handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
+    {stop, Reason, St};
+
+handle_info({'EXIT', Pid, normal}, St) ->
+    % This clause handles when an entry starts
+    % up but encounters an error or uncacheable
+    % response from its recover call.
+    #st{
+        pids = Pids
+    } = St,
+    {value, Key} = khash:lookup(Pids, Pid),
+    khash:del(Pids, Pid),
+    remove_key(St, Key),
+    {noreply, St};
+
+handle_info(Msg, St) ->
+    {stop, {invalid_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+handle_db_event(ShardDbName, created, St) ->
+    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+    {ok, St};
+
+handle_db_event(ShardDbName, deleted, St) ->
+    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
+    {ok, St};
+
+handle_db_event(_DbName, _Event, St) ->
+    {ok, St}.
+
+
+lru_start(Key, DoInsert) ->
+    case gen_server:call(?MODULE, {start, Key, undefined}, infinity) of
+        {ok, Pid} ->
+            couch_stats:increment_counter([ddoc_cache, miss]),
+            Resp = ddoc_cache_entry:open(Pid, Key),
+            if not DoInsert -> ok; true ->
+                ddoc_cache_entry:insert(Key, Resp)
+            end,
+            Resp;
+        full ->
+            couch_stats:increment_counter([ddoc_cache, recovery]),
+            ddoc_cache_entry:recover(Key)
+    end.
+
+
+trim(_, 0) ->
+    full;
+
+trim(St, MaxSize) ->
+    CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
+    if CurSize =< MaxSize -> ok; true ->
+        case ets:first(?LRU) of
+            {_Ts, Key, Pid} ->
+                remove_entry(St, Key, Pid),
+                trim(St, MaxSize);
+            '$end_of_table' ->
+                full
+        end
+    end.
+
+
+remove_entry(St, Key, Pid) ->
+    #st{
+        pids = Pids
+    } = St,
+    unlink(Pid),
+    ddoc_cache_entry:shutdown(Pid),
+    khash:del(Pids, Pid),
+    remove_key(St, Key).
+
+
+store_key(Dbs, Key, Pid) ->
+    DbName = ddoc_cache_entry:dbname(Key),
+    DDocId = ddoc_cache_entry:ddocid(Key),
+    case khash:lookup(Dbs, DbName) of
+        {value, DDocIds} ->
+            case khash:lookup(DDocIds, DDocId) of
+                {value, Keys} ->
+                    khash:put(Keys, Key, Pid);
+                not_found ->
+                    {ok, Keys} = khash:from_list([{Key, Pid}]),
+                    khash:put(DDocIds, DDocId, Keys)
+            end;
+        not_found ->
+            {ok, Keys} = khash:from_list([{Key, Pid}]),
+            {ok, DDocIds} = khash:from_list([{DDocId, Keys}]),
+            khash:put(Dbs, DbName, DDocIds)
+    end.
+
+
+remove_key(St, Key) ->
+    #st{
+        dbs = Dbs
+    } = St,
+    DbName = ddoc_cache_entry:dbname(Key),
+    DDocId = ddoc_cache_entry:ddocid(Key),
+    {value, DDocIds} = khash:lookup(Dbs, DbName),
+    {value, Keys} = khash:lookup(DDocIds, DDocId),
+    khash:del(Keys, Key),
+    case khash:size(Keys) of
+        0 -> khash:del(DDocIds, DDocId);
+        _ -> ok
+    end,
+    case khash:size(DDocIds) of
+        0 -> khash:del(Dbs, DbName);
+        _ -> ok
+    end.
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
index b76a228..52de542 100644
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ b/src/ddoc_cache/src/ddoc_cache_opener.erl
@@ -14,279 +14,53 @@
 -behaviour(gen_server).
 -vsn(1).
 
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/include/mem3.hrl").
 
 -export([
     start_link/0
 ]).
+
 -export([
     init/1,
     terminate/2,
-
     handle_call/3,
     handle_cast/2,
     handle_info/2,
-
     code_change/3
 ]).
 
--export([
-    open_doc/2,
-    open_doc/3,
-    open_validation_funs/1,
-    evict_docs/2,
-    lookup/1,
-    match_newest/1,
-    recover_doc/2,
-    recover_doc/3,
-    recover_validation_funs/1
-]).
--export([
-    handle_db_event/3
-]).
--export([
-    fetch_doc_data/1
-]).
-
--define(CACHE, ddoc_cache_lru).
--define(OPENING, ddoc_cache_opening).
-
--type dbname() :: iodata().
--type docid() :: iodata().
--type doc_hash() :: <<_:128>>.
--type revision() :: {pos_integer(), doc_hash()}.
-
--record(opener, {
-    key,
-    pid,
-    clients
-}).
-
--record(st, {
-    db_ddocs,
-    evictor
-}).
 
 start_link() ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
 
--spec open_doc(dbname(), docid()) -> {ok, #doc{}}.
-open_doc(DbName, DocId) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, DocId}}, infinity),
-    handle_open_response(Resp).
-
--spec open_doc(dbname(), docid(), revision()) -> {ok, #doc{}}.
-open_doc(DbName, DocId, Rev) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, DocId, Rev}}, infinity),
-    handle_open_response(Resp).
-
--spec open_validation_funs(dbname()) -> {ok, [fun()]}.
-open_validation_funs(DbName) ->
-    Resp = gen_server:call(?MODULE, {open, {DbName, validation_funs}}, infinity),
-    handle_open_response(Resp).
-
--spec evict_docs(dbname(), [docid()]) -> ok.
-evict_docs(DbName, DocIds) ->
-    gen_server:cast(?MODULE, {evict, DbName, DocIds}).
-
-lookup(Key) ->
-    try ets_lru:lookup_d(?CACHE, Key) of
-        {ok, _} = Resp ->
-            Resp;
-        _ ->
-            missing
-    catch
-        error:badarg ->
-            recover
-    end.
-
-match_newest(Key) ->
-    try ets_lru:match_object(?CACHE, Key, '_') of
-        [] ->
-            missing;
-        Docs ->
-            Sorted = lists:sort(
-                fun (#doc{deleted=DelL, revs=L}, #doc{deleted=DelR, revs=R}) ->
-                    {not DelL, L} > {not DelR, R}
-                end, Docs),
-            {ok, hd(Sorted)}
-    catch
-        error:badarg ->
-            recover
-    end.
-
-recover_doc(DbName, DDocId) ->
-    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
-
-recover_doc(DbName, DDocId, Rev) ->
-    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], [ejson_body, ?ADMIN_CTX]),
-    Resp.
-
-recover_validation_funs(DbName) ->
-    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    {ok, Funs}.
-
-handle_db_event(ShardDbName, created, St) ->
-    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
-    {ok, St};
-handle_db_event(ShardDbName, deleted, St) ->
-    gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
-    {ok, St};
-handle_db_event(_DbName, _Event, St) ->
-    {ok, St}.
 
 init(_) ->
-    process_flag(trap_exit, true),
-    _ = ets:new(?OPENING, [set, protected, named_table, {keypos, #opener.key}]),
-    {ok, Evictor} = couch_event:link_listener(
-            ?MODULE, handle_db_event, nil, [all_dbs]
-        ),
-    {ok, #st{
-        evictor = Evictor
-    }}.
+    {ok, nil}.
 
-terminate(_Reason, St) ->
-    case is_pid(St#st.evictor) of
-        true -> exit(St#st.evictor, kill);
-        false -> ok
-    end,
+terminate(_Reason, _St) ->
     ok.
 
-handle_call({open, OpenerKey}, From, St) ->
-    case ets:lookup(?OPENING, OpenerKey) of
-        [#opener{clients=Clients}=O] ->
-            ets:insert(?OPENING, O#opener{clients=[From | Clients]}),
-            {noreply, St};
-        [] ->
-            Pid = spawn_link(?MODULE, fetch_doc_data, [OpenerKey]),
-            ets:insert(?OPENING, #opener{key=OpenerKey, pid=Pid, clients=[From]}),
-            {noreply, St}
-    end;
 
 handle_call(Msg, _From, St) ->
     {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
 
 
-handle_cast({evict, DbName}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+% The do_evict clauses are upgrades while we're
+% in a rolling reboot.
+handle_cast({do_evict, _} = Msg, St) ->
+    gen_server:cast(ddoc_cache_lru, Msg),
     {noreply, St};
 
-handle_cast({evict, DbName, DDocIds}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName, DDocIds}),
-    {noreply, St};
-
-handle_cast({do_evict, DbName}, St) ->
-    DDocIds = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1', '_'}, '_')),
-    handle_cast({do_evict, DbName, DDocIds}, St);
-
 handle_cast({do_evict, DbName, DDocIds}, St) ->
-    CustomKeys = lists:flatten(ets_lru:match(?CACHE, {DbName, '$1'}, '_')),
-    lists:foreach(fun(Mod) ->
-        ets_lru:remove(?CACHE, {DbName, Mod})
-    end, CustomKeys),
-    lists:foreach(fun(DDocId) ->
-        Revs = ets_lru:match(?CACHE, {DbName, DDocId, '$1'}, '_'),
-        lists:foreach(fun([Rev]) ->
-            ets_lru:remove(?CACHE, {DbName, DDocId, Rev})
-        end, Revs)
-    end, DDocIds),
+    gen_server:cast(ddoc_cache_lru, {do_refresh, DbName, DDocIds}),
     {noreply, St};
 
 handle_cast(Msg, St) ->
     {stop, {invalid_cast, Msg}, St}.
 
-handle_info({'EXIT', Pid, Reason}, #st{evictor=Pid}=St) ->
-    couch_log:error("ddoc_cache_opener evictor died ~w", [Reason]),
-    {ok, Evictor} = couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
-    {noreply, St#st{evictor=Evictor}};
-
-handle_info({'EXIT', _Pid, {open_ok, OpenerKey, Resp}}, St) ->
-    respond(OpenerKey, {open_ok, Resp}),
-    {noreply, St};
-
-handle_info({'EXIT', _Pid, {open_error, OpenerKey, Type, Error}}, St) ->
-    respond(OpenerKey, {open_error, Type, Error}),
-    {noreply, St};
-
-handle_info({'EXIT', Pid, Reason}, St) ->
-    Pattern = #opener{pid=Pid, _='_'},
-    case ets:match_object(?OPENING, Pattern) of
-        [#opener{key=OpenerKey, clients=Clients}] ->
-            _ = [gen_server:reply(C, {error, Reason}) || C <- Clients],
-            ets:delete(?OPENING, OpenerKey),
-            {noreply, St};
-        [] ->
-            {stop, {unknown_pid_died, {Pid, Reason}}, St}
-    end;
 
 handle_info(Msg, St) ->
     {stop, {invalid_info, Msg}, St}.
 
+
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
-
--spec fetch_doc_data({dbname(), validation_funs}) -> no_return();
-                    ({dbname(), atom()}) -> no_return();
-                    ({dbname(), docid()}) -> no_return();
-                    ({dbname(), docid(), revision()}) -> no_return().
-fetch_doc_data({DbName, validation_funs}=OpenerKey) ->
-    {ok, Funs} = recover_validation_funs(DbName),
-    ok = ets_lru:insert(?CACHE, OpenerKey, Funs),
-    exit({open_ok, OpenerKey, {ok, Funs}});
-fetch_doc_data({DbName, Mod}=OpenerKey) when is_atom(Mod) ->
-    % This is not actually a docid but rather a custom cache key.
-    % Treat the argument as a code module and invoke its recover function.
-    try Mod:recover(DbName) of
-        {ok, Result} ->
-            ok = ets_lru:insert(?CACHE, OpenerKey, Result),
-            exit({open_ok, OpenerKey, {ok, Result}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end;
-fetch_doc_data({DbName, DocId}=OpenerKey) ->
-    try recover_doc(DbName, DocId) of
-        {ok, Doc} ->
-            {RevDepth, [RevHash| _]} = Doc#doc.revs,
-            Rev = {RevDepth, RevHash},
-            ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
-            exit({open_ok, OpenerKey, {ok, Doc}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end;
-fetch_doc_data({DbName, DocId, Rev}=OpenerKey) ->
-    try recover_doc(DbName, DocId, Rev) of
-        {ok, Doc} ->
-            ok = ets_lru:insert(?CACHE, {DbName, DocId, Rev}, Doc),
-            exit({open_ok, OpenerKey, {ok, Doc}});
-        Else ->
-            exit({open_ok, OpenerKey, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, OpenerKey, Type, Reason})
-    end.
-
-handle_open_response(Resp) ->
-    case Resp of
-        {open_ok, Value} -> Value;
-        {open_error, throw, Error} -> throw(Error);
-        {open_error, error, Error} -> erlang:error(Error);
-        {open_error, exit, Error} -> exit(Error)
-    end.
-
-respond(OpenerKey, Resp) ->
-    [#opener{clients=Clients}] = ets:lookup(?OPENING, OpenerKey),
-    _ = [gen_server:reply(C, Resp) || C <- Clients],
-    ets:delete(?OPENING, OpenerKey).
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
index 85e90b3..6fff9ef 100644
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ b/src/ddoc_cache/src/ddoc_cache_sup.erl
@@ -28,11 +28,11 @@ init([]) ->
     Children = [
         {
             ddoc_cache_lru,
-            {ets_lru, start_link, [ddoc_cache_lru, lru_opts()]},
+            {ddoc_cache_lru, start_link, []},
             permanent,
             5000,
             worker,
-            [ets_lru]
+            [ddoc_cache_lru]
         },
         {
             ddoc_cache_opener,
@@ -43,25 +43,4 @@ init([]) ->
             [ddoc_cache_opener]
         }
     ],
-    {ok, {{one_for_one, 5, 10}, Children}}.
-
-
-lru_opts() ->
-    case application:get_env(ddoc_cache, max_objects) of
-        {ok, MxObjs} when is_integer(MxObjs), MxObjs >= 0 ->
-            [{max_objects, MxObjs}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_size) of
-        {ok, MxSize} when is_integer(MxSize), MxSize >= 0 ->
-            [{max_size, MxSize}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_lifetime) of
-        {ok, MxLT} when is_integer(MxLT), MxLT >= 0 ->
-            [{max_lifetime, MxLT}];
-        _ ->
-            []
-    end.
+    {ok, {{one_for_one, 25, 1}, Children}}.
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_value.erl
similarity index 61%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/src/ddoc_cache_value.erl
index 24c4b0d..21a5bb5 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/src/ddoc_cache_value.erl
@@ -10,25 +10,18 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
+-module(ddoc_cache_value).
 
 
 -export([
-    new_uuid/0
+    wrap/1,
+    unwrap/1
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
+wrap(Value) ->
+    {?MODULE, term_to_binary(Value)}.
 
 
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+unwrap({?MODULE, Bin}) when is_binary(Bin) ->
+    binary_to_term(Bin).
diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
new file mode 100644
index 0000000..7f6dbc9
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_basic_test.erl
@@ -0,0 +1,175 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_basic_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, totes_custom}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_basic_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun cache_ddoc/1,
+            fun cache_ddoc_rev/1,
+            fun cache_vdu/1,
+            fun cache_custom/1,
+            fun cache_ddoc_refresher_unchanged/1,
+            fun dont_cache_not_found/1,
+            fun deprecated_api_works/1
+        ]}
+    }.
+
+
+check_no_vdu_test_() ->
+    {
+        setup,
+        fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end,
+        fun ddoc_cache_tutil:stop_couch/1,
+        {with, [
+            fun cache_no_vdu_no_ddoc/1,
+            fun cache_no_vdu_empty_ddoc/1
+        ]}
+    }.
+
+
+cache_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+cache_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+
+    % Assert that the non-rev cache entry is separate
+    Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+cache_vdu({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_validation_funs(DbName),
+    ?assertMatch({ok, [_]}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+cache_custom({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    Resp1 = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertMatch({ok, {DbName, totes_custom}}, Resp1),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    Resp2 = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(Resp1, Resp2),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+cache_ddoc_refresher_unchanged({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ddoc_cache:open_doc(DbName, ?FOOBAR),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)),
+    ddoc_cache:open_doc(DbName, ?FOOBAR),
+    meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000),
+    Tab2 = lists:sort(ets:tab2list(?CACHE)),
+    ?assertEqual(Tab2, Tab1).
+
+
+dont_cache_not_found({DbName, _}) ->
+    DDocId = <<"_design/not_found">>,
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, DDocId),
+    ?assertEqual({not_found, missing}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
+
+
+deprecated_api_works({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    {ok, _} = ddoc_cache:open(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open(DbName, <<"foobar">>),
+    {ok, _} = ddoc_cache:open(DbName, ?MODULE),
+    {ok, _} = ddoc_cache:open(DbName, validation_funs).
+
+
+cache_no_vdu_no_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual({ok, []}, Resp),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    ?assertEqual(1, ets:info(?LRU, size)).
+
+
+cache_no_vdu_empty_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    DDoc = #doc{
+        id = <<"_design/no_vdu">>,
+        body = {[]}
+    },
+    {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]),
+    Resp = ddoc_cache:open_validation_funs(DbName),
+    ?assertEqual({ok, []}, Resp),
+    ?assertEqual(1, ets:info(?CACHE, size)),
+    ?assertEqual(1, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
new file mode 100644
index 0000000..b1a185b
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -0,0 +1,77 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_coverage_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+coverage_test_() ->
+    {
+        setup,
+        fun ddoc_cache_tutil:start_couch/0,
+        fun ddoc_cache_tutil:stop_couch/1,
+        [
+            fun restart_lru/0,
+            fun stop_on_evictor_death/0
+        ]
+    }.
+
+
+restart_lru() ->
+    send_bad_messages(ddoc_cache_lru),
+    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
+    ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
+
+
+stop_on_evictor_death() ->
+    meck:new(ddoc_cache_ev, [passthrough]),
+    try
+        Lru = whereis(ddoc_cache_lru),
+        State = sys:get_state(Lru),
+        Evictor = element(4, State),
+        Ref = erlang:monitor(process, Lru),
+        exit(Evictor, shutdown),
+        receive
+            {'DOWN', Ref, _, _, Reason} ->
+                ?assertEqual(shutdown, Reason)
+        end,
+        meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000),
+        ?assert(whereis(ddoc_cache_lru) /= Lru)
+    after
+        meck:unload()
+    end.
+
+
+send_bad_messages(Name) ->
+    wait_for_restart(Name, fun() ->
+        ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo))
+    end),
+    wait_for_restart(Name, fun() ->
+        gen_server:cast(Name, foo)
+    end),
+    wait_for_restart(Name, fun() ->
+        whereis(Name) ! foo
+    end).
+
+
+wait_for_restart(Server, Fun) ->
+    Ref = erlang:monitor(process, whereis(Server)),
+    Fun(),
+    receive
+        {'DOWN', Ref, _, _, _} ->
+            ok
+    end,
+    ?assert(is_pid(test_util:wait_process(Server))).
diff --git a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/ddoc_cache_disabled_test.erl
new file mode 100644
index 0000000..ef73180
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_disabled_test.erl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_disabled_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    config:set("ddoc_cache", "max_size", "0", false),
+    Ctx.
+
+
+check_disabled_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun ddoc_cache_tutil:stop_couch/1,
+        {with, [
+            fun resp_ok/1,
+            fun resp_not_found/1
+        ]}
+    }.
+
+
+resp_ok({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
+
+
+resp_not_found({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>),
+    ?assertEqual({not_found, missing}, Resp),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    ?assertEqual(0, ets:info(?LRU, size)).
diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
new file mode 100644
index 0000000..dd7a039
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_entry_test.erl
@@ -0,0 +1,159 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_entry_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(<<"foo">>) ->
+    timer:sleep(30000);
+
+recover(DbName) ->
+    {ok, {DbName, such_custom}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_entry_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun cancel_and_replace_opener/1,
+            fun condenses_access_messages/1,
+            fun kill_opener_on_terminate/1,
+            fun evict_when_not_accessed/1,
+            fun open_dead_entry/1,
+            fun handles_bad_messages/1,
+            fun handles_code_change/1
+        ]}
+    }.
+
+
+cancel_and_replace_opener(_) ->
+    Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
+    true = ets:insert_new(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
+    Opener1 = element(4, sys:get_state(Entry)),
+    Ref1 = erlang:monitor(process, Opener1),
+    gen_server:cast(Entry, force_refresh),
+    receive {'DOWN', Ref1, _, _, _} -> ok end,
+    Opener2 = element(4, sys:get_state(Entry)),
+    ?assert(Opener2 /= Opener1),
+    ?assert(is_process_alive(Opener2)),
+    % Clean up after ourselves
+    unlink(Entry),
+    ddoc_cache_entry:shutdown(Entry).
+
+
+condenses_access_messages({DbName, _}) ->
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
+    true = ets:insert(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
+    erlang:suspend_process(Entry),
+    lists:foreach(fun(_) ->
+        gen_server:cast(Entry, accessed)
+    end, lists:seq(1, 100)),
+    erlang:resume_process(Entry),
+    meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000),
+    ?assertError(
+            timeout,
+            meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
+        ),
+    unlink(Entry),
+    ddoc_cache_entry:shutdown(Entry).
+
+
+kill_opener_on_terminate(_) ->
+    Pid = spawn(fun() -> receive _ -> ok end end),
+    ?assert(is_process_alive(Pid)),
+    St = {st, key, val, Pid, waiters, ts, accessed},
+    ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
+    ?assert(not is_process_alive(Pid)).
+
+
+evict_when_not_accessed(_) ->
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
+    true = ets:insert_new(?CACHE, #entry{key = Key}),
+    {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
+    Ref = erlang:monitor(process, Entry),
+    AccessCount1 = element(7, sys:get_state(Entry)),
+    ?assertEqual(1, AccessCount1),
+    ok = gen_server:cast(Entry, refresh),
+
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+
+    AccessCount2 = element(7, sys:get_state(Entry)),
+    ?assertEqual(0, AccessCount2),
+    ok = gen_server:cast(Entry, refresh),
+    receive {'DOWN', Ref, _, _, Reason} -> Reason end,
+    ?assertEqual(normal, Reason),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+open_dead_entry({DbName, _}) ->
+    Pid = spawn(fun() -> ok end),
+    Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
+    ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)).
+
+
+handles_bad_messages(_) ->
+    CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
+    CastExpect = {stop, {bad_cast, foo}, bar},
+    InfoExpect = {stop, {bad_info, foo}, bar},
+    ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)),
+    ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)),
+    ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)).
+
+
+handles_code_change(_) ->
+    CCExpect = {ok, bar},
+    ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)).
+
+
+handles_bad_shutdown_test_() ->
+    {timeout, 10, ?_test(begin
+        ErrorPid = spawn(fun() ->
+            receive
+               _ -> exit(bad_shutdown)
+            end
+        end),
+        ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)),
+        NotDeadYetPid = spawn(fun() ->
+            timer:sleep(infinity)
+        end),
+        ?assertExit(
+                {timeout, {entry_shutdown, NotDeadYetPid}},
+                ddoc_cache_entry:shutdown(NotDeadYetPid)
+            )
+    end)}.
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/test/ddoc_cache_ev.erl
similarity index 61%
copy from src/ddoc_cache/src/ddoc_cache_util.erl
copy to src/ddoc_cache/test/ddoc_cache_ev.erl
index 24c4b0d..a451342 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/test/ddoc_cache_ev.erl
@@ -10,25 +10,12 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
-
+-module(ddoc_cache_ev).
 
 -export([
-    new_uuid/0
+    event/2
 ]).
 
 
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+event(Name, Arg) ->
+    couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]).
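The stub above is all the event hook does at runtime; the new test modules meck it with the
passthrough option and then block on meck:wait until ddoc_cache reports a named event. A minimal
sketch of that pattern, assuming a database that already contains the _design/foobar document
written by ddoc_cache_tutil (the helper name below is illustrative, not part of this change):

    wait_for_started(DbName) ->
        meck:new(ddoc_cache_ev, [passthrough]),
        try
            % Opening a ddoc makes the cache emit 'started' for the new entry;
            % meck:wait/4 blocks until the event fires or the timeout expires.
            {ok, _} = ddoc_cache:open_doc(DbName, <<"_design/foobar">>),
            meck:wait(ddoc_cache_ev, event, [started, '_'], 1000)
        after
            meck:unload(ddoc_cache_ev)
        end.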
diff --git a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
new file mode 100644
index 0000000..5a02a5c
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl
@@ -0,0 +1,96 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_eviction_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, totes_custom}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_eviction_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun evict_all/1,
+            fun dont_evict_all_unrelated/1,
+            fun check_upgrade_clause/1
+        ]}
+    }.
+
+
+evict_all({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    #shard{name = ShardName} = hd(mem3:shards(DbName)),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    {ok, _} = ddoc_cache:open_validation_funs(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(4, ets:info(?CACHE, size)),
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+dont_evict_all_unrelated({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    {ok, _} = ddoc_cache:open_validation_funs(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    ?assertEqual(4, ets:info(?CACHE, size)),
+    ShardName = <<"shards/00000000-ffffffff/test.1384769918">>,
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000),
+    ?assertEqual(4, ets:info(?CACHE, size)).
+
+
+check_upgrade_clause({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    gen_server:cast(ddoc_cache_opener, {do_evict, DbName}),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)).
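Note that evict_all and dont_evict_all_unrelated call ddoc_cache_lru:handle_db_event/3 directly
rather than publishing a database event, presumably to avoid depending on asynchronous event
delivery. A hedged sketch of reaching the same callback through couch_event, assuming the cache's
event listener is running and subscribed to the shard (the function name is illustrative):

    notify_shard_deleted(ShardName) ->
        % Publishing the event lets the cache's listener deliver it to
        % handle_db_event/3 asynchronously instead of the test calling it.
        couch_event:notify(ShardName, deleted).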
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
new file mode 100644
index 0000000..fab232b
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -0,0 +1,185 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_lru_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(<<"pause", _/binary>>) ->
+    receive go -> ok end,
+    {ok, paused};
+
+recover(<<"big", _/binary>>) ->
+    {ok, [random:uniform() || _ <- lists:seq(1, 8192)]};
+
+recover(DbName) ->
+    {ok, DbName}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_not_started_test() ->
+    % Starting couch, but not ddoc_cache
+    Ctx = test_util:start_couch(),
+    try
+        Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
+        ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
+    after
+        test_util:stop_couch(Ctx)
+    end.
+
+
+check_lru_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun check_multi_start/1,
+            fun check_multi_open/1,
+            fun check_capped_size/1,
+            fun check_cache_refill/1
+        ]}
+    }.
+
+
+check_multi_start(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
+    % These will all get sent through ddoc_cache_lru
+    Clients = lists:map(fun(_) ->
+        spawn_monitor(fun() ->
+            ddoc_cache_lru:open(Key)
+        end)
+    end, lists:seq(1, 10)),
+    meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+    lists:foreach(fun({Pid, _Ref}) ->
+        ?assert(is_process_alive(Pid))
+    end, Clients),
+    [#entry{pid = Pid}] = ets:tab2list(?CACHE),
+    Opener = element(4, sys:get_state(Pid)),
+    OpenerRef = erlang:monitor(process, Opener),
+    ?assert(is_process_alive(Opener)),
+    Opener ! go,
+    receive {'DOWN', OpenerRef, _, _, _} -> ok end,
+    lists:foreach(fun({_, Ref}) ->
+        receive
+            {'DOWN', Ref, _, _, normal} -> ok
+        end
+    end, Clients).
+
+
+check_multi_open(_) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
+    % We wait after the first client so that
+    % the rest of the clients go directly to
+    % ddoc_cache_entry bypassing ddoc_cache_lru
+    Client1 = spawn_monitor(fun() ->
+        ddoc_cache_lru:open(Key)
+    end),
+    meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+    Clients = [Client1] ++ lists:map(fun(_) ->
+        spawn_monitor(fun() ->
+            ddoc_cache_lru:open(Key)
+        end)
+    end, lists:seq(1, 9)),
+    lists:foreach(fun({Pid, _Ref}) ->
+        ?assert(is_process_alive(Pid))
+    end, Clients),
+    [#entry{pid = Pid}] = ets:tab2list(?CACHE),
+    Opener = element(4, sys:get_state(Pid)),
+    OpenerRef = erlang:monitor(process, Opener),
+    ?assert(is_process_alive(Opener)),
+    Opener ! go,
+    receive {'DOWN', OpenerRef, _, _, _} -> ok end,
+    lists:foreach(fun({_, Ref}) ->
+        receive {'DOWN', Ref, _, _, normal} -> ok end
+    end, Clients).
+
+
+check_capped_size(_) ->
+    % The extra factor of two in the size checks is
+    % a fudge factor. We don't reject entries from
+    % the cache if they would put us over the limit
+    % as we don't have the size information a
+    % priori.
+    config:set("ddoc_cache", "max_size", "1048576", false),
+    MaxSize = 1048576,
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    lists:foreach(fun(I) ->
+        DbName = list_to_binary("big_" ++ integer_to_list(I)),
+        ddoc_cache:open_custom(DbName, ?MODULE),
+        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+        ?assert(cache_size() < MaxSize * 2)
+    end, lists:seq(1, 25)),
+    lists:foreach(fun(I) ->
+        DbName = list_to_binary("big_" ++ integer_to_list(I)),
+        ddoc_cache:open_custom(DbName, ?MODULE),
+        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+        ?assert(cache_size() < MaxSize * 2)
+    end, lists:seq(26, 100)).
+
+
+check_cache_refill({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+
+    InitDDoc = fun(I) ->
+        NumBin = list_to_binary(integer_to_list(I)),
+        DDocId = <<"_design/", NumBin/binary>>,
+        Doc = #doc{id = DDocId, body = {[]}},
+        {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
+        {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
+        {ddoc_cache_entry_ddocid, {DbName, DDocId}}
+    end,
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
+    end, lists:seq(1, 5)),
+
+    ShardName = element(2, hd(mem3:shards(DbName))),
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
+    end, lists:seq(6, 10)).
+
+
+cache_size() ->
+    ets:info(?CACHE, memory) * erlang:system_info(wordsize).
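For reference, check_capped_size works in bytes: cache_size/0 converts the ETS table's memory
(reported in words) into bytes, and the assertions allow the factor-of-two slack described in the
comment because entries are only trimmed after insertion. A small sketch of the same check outside
the test, assuming the ddoc_cache_entries table exists and that max_size holds a byte count (the
default string below is illustrative):

    cache_within_limit() ->
        MaxSize = list_to_integer(config:get("ddoc_cache", "max_size", "104857600")),
        CacheBytes = ets:info(ddoc_cache_entries, memory) * erlang:system_info(wordsize),
        % Allow the same factor-of-two slack the test uses; the LRU trims
        % after insert rather than rejecting entries up front.
        CacheBytes < MaxSize * 2.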
diff --git a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
new file mode 100644
index 0000000..637a6e8
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl
@@ -0,0 +1,79 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_no_cache_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+ddoc(DDocId) ->
+    {ok, #doc{
+        id = DDocId,
+        revs = {1, [<<"deadbeefdeadbeef">>]},
+        body = {[
+            {<<"ohai">>, null}
+        ]}
+    }}.
+
+
+not_found(_DDocId) ->
+    {not_found, missing}.
+
+
+return_error(_DDocId) ->
+    {error, timeout}.
+
+
+start(Resp) ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(fabric),
+    meck:expect(fabric, open_doc, fun(_, DDocId, _) ->
+        Resp(DDocId)
+    end),
+    Ctx.
+
+
+stop(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+no_cache_open_ok_test() ->
+    Ctx = start(fun ddoc/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(ddoc(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
+
+
+no_cache_open_not_found_test() ->
+    Ctx = start(fun not_found/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(not_found(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
+
+
+no_cache_open_error_test() ->
+    Ctx = start(fun return_error/1),
+    try
+        Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
+        ?assertEqual(return_error(<<"bar">>), Resp)
+    after
+        stop(Ctx)
+    end.
diff --git a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
new file mode 100644
index 0000000..f3a9b10
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_open_error_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) ->
+        erlang:error(test_kaboom)
+    end),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_open_error_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun handle_open_error/1
+        ]}
+    }.
+
+
+handle_open_error({DbName, _}) ->
+    ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)).
diff --git a/src/ddoc_cache/test/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/ddoc_cache_opener_test.erl
new file mode 100644
index 0000000..c384636
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_opener_test.erl
@@ -0,0 +1,33 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_opener_test).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+empty_hull_test() ->
+    InitExpect = {ok, nil},
+    TermExpect = ok,
+    CallExpect = {stop, {invalid_call, foo}, {invalid_call, foo}, baz},
+    CastExpect = {stop, {invalid_cast, foo}, bar},
+    InfoExpect = {stop, {invalid_info, foo}, bar},
+    CCExpect = {ok, bar},
+    ?assertEqual(InitExpect, ddoc_cache_opener:init(foo)),
+    ?assertEqual(TermExpect, ddoc_cache_opener:terminate(foo, bar)),
+    ?assertEqual(CallExpect, ddoc_cache_opener:handle_call(foo, bar, baz)),
+    ?assertEqual(CastExpect, ddoc_cache_opener:handle_cast(foo, bar)),
+    ?assertEqual(InfoExpect, ddoc_cache_opener:handle_info(foo, bar)),
+    ?assertEqual(CCExpect, ddoc_cache_opener:code_change(foo, bar, baz)).
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
new file mode 100644
index 0000000..f145987
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
@@ -0,0 +1,174 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_refresh_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, {DbName, rand_string()}}.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_refresh_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun refresh_ddoc/1,
+            fun refresh_ddoc_rev/1,
+            fun refresh_vdu/1,
+            fun refresh_custom/1,
+            fun refresh_multiple/1,
+            fun check_upgrade_clause/1
+        ]}
+    }.
+
+
+refresh_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"baz">>}]}
+    },
+    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    Expect = NewDDoc#doc{
+        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
+    },
+    meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000),
+    ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+refresh_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"kazam">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    % We pass the rev explicitly so that we can assert that we're
+    % getting the same original response from the cache
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+    ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+refresh_vdu({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, [_]} = ddoc_cache:open_validation_funs(DbName),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
+    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000),
+    ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_custom({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
+    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
+    ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+refresh_multiple({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    % Relying on the sort order of entry keys to make
+    % sure our entries line up for this test
+    [
+        #entry{key = NoRevKey, val = DDoc},
+        #entry{key = RevKey, val = DDoc}
+    ] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"kalamazoo">>}]}
+    },
+    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    Updated = NewDDoc#doc{
+        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
+    },
+    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
+    % We pass the rev explicitly in the second call so that we can
+    % assert that we're getting the same original response from the cache
+    ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+check_upgrade_clause({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
+
+
+rand_string() ->
+    Bin = crypto:strong_rand_bytes(8),
+    to_hex(Bin, []).
+
+
+to_hex(<<>>, Acc) ->
+    list_to_binary(lists:reverse(Acc));
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+
+
+hexdig(C) when C >= 0, C =< 9 ->
+    C + $0;
+hexdig(C) when C >= 10, C =< 15 ->
+    C + $A - 10.
\ No newline at end of file
diff --git a/src/ddoc_cache/test/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
new file mode 100644
index 0000000..8787482
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
@@ -0,0 +1,224 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_remove_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    case couch_util:get_value(<<"status">>, Body) of
+        <<"ok">> ->
+            {ok, yay};
+        <<"not_ok">> ->
+            {ruh, roh};
+        <<"error">> ->
+            erlang:error(thpppt)
+    end.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+
+
+check_remove_test_() ->
+    {
+        setup,
+        fun start_couch/0,
+        fun stop_couch/1,
+        {with, [
+            fun remove_ddoc/1,
+            fun remove_ddoc_rev/1,
+            fun remove_ddoc_rev_only/1,
+            fun remove_custom_not_ok/1,
+            fun remove_custom_error/1
+        ]}
+    }.
+
+
+remove_ddoc({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE),
+    {Depth, [RevId | _]} = DDoc#doc.revs,
+    NewDDoc = DDoc#doc{
+        deleted = true,
+        body = {[]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+
+    DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}},
+    Rev = {Depth, RevId},
+    DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}},
+    meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000),
+
+    ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+remove_ddoc_rev({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
+
+    meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000),
+
+    % Notice the sort so that we know we're getting the
+    % revid version second.
+    [_, #entry{key = Key, val = DDoc, pid = Pid}]
+            = lists:sort(ets:tab2list(?CACHE)),
+
+    NewDDoc = DDoc#doc{
+        body = {[{<<"an">>, <<"update">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
+    % Compact the database so that the old rev is removed
+    lists:foreach(fun(Shard) ->
+        do_compact(Shard#shard.name)
+    end, mem3:local_shards(DbName)),
+    % Trigger a refresh rather than wait for the timeout
+    ddoc_cache_entry:refresh(Pid),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertMatch(
+            {{not_found, missing}, _},
+            ddoc_cache:open_doc(DbName, ?VDU, Rev)
+        ),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+
+remove_ddoc_rev_only({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev),
+    % Relying on the sort order of keys to keep
+    % these lined up for testing
+    [
+        #entry{key = NoRevKey, val = DDoc, pid = NoRevPid},
+        #entry{key = RevKey, val = DDoc, pid = RevPid}
+    ] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"new">>, <<"awesomeness">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
+    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
+    % Compact the database so that the old rev is removed
+    lists:foreach(fun(Shard) ->
+        do_compact(Shard#shard.name)
+    end, mem3:local_shards(DbName)),
+    % Trigger a refresh rather than wait for the timeout
+    ddoc_cache_entry:refresh(NoRevPid),
+    ddoc_cache_entry:refresh(RevPid),
+    meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000),
+    ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)),
+    ?assertMatch(
+            {{not_found, missing}, _},
+            ddoc_cache:open_doc(DbName, ?VDU, Rev)
+        ),
+    ?assertEqual(1, ets:info(?CACHE, size)).
+
+remove_custom_not_ok({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    init_custom_ddoc(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"status">>, <<"not_ok">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+remove_custom_error({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    init_custom_ddoc(DbName),
+    {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"status">>, <<"error">>}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
+    ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)),
+    ?assertEqual(0, ets:info(?CACHE, size)).
+
+
+init_custom_ddoc(DbName) ->
+    Body = {[{<<"status">>, <<"ok">>}]},
+    {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    NewDoc = Doc#doc{body = Body},
+    {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
+
+
+do_compact(ShardName) ->
+    {ok, Db} = couch_db:open_int(ShardName, []),
+    try
+        {ok, Pid} = couch_db:start_compact(Db),
+        Ref = erlang:monitor(process, Pid),
+        receive
+            {'DOWN', Ref, _, _, _} ->
+                ok
+        end
+    after
+        couch_db:close(Db)
+    end,
+    wait_for_compaction(ShardName).
+
+
+wait_for_compaction(ShardName) ->
+    {ok, Db} = couch_db:open_int(ShardName, []),
+    CompactRunning = try
+        {ok, Info} = couch_db:get_db_info(Db),
+        couch_util:get_value(compact_running, Info)
+    after
+        couch_db:close(Db)
+    end,
+    if not CompactRunning -> ok; true ->
+        timer:sleep(100),
+        wait_for_compaction(ShardName)
+    end.
\ No newline at end of file
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/test/ddoc_cache_test.hrl
similarity index 59%
rename from src/ddoc_cache/src/ddoc_cache_util.erl
rename to src/ddoc_cache/test/ddoc_cache_test.hrl
index 24c4b0d..73f7bc2 100644
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ b/src/ddoc_cache/test/ddoc_cache_test.hrl
@@ -10,25 +10,17 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
--module(ddoc_cache_util).
 
+-define(CACHE, ddoc_cache_entries).
+-define(LRU, ddoc_cache_lru).
+-define(OPENERS, ddoc_cache_openers).
 
--export([
-    new_uuid/0
-]).
+-define(FOOBAR, <<"_design/foobar">>).
+-define(VDU, <<"_design/vdu">>).
+-define(CUSTOM, <<"_design/custom">>).
 
-
-new_uuid() ->
-    to_hex(crypto:strong_rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
+-record(entry, {
+    key,
+    val,
+    pid
+}).
diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
new file mode 100644
index 0000000..6463b38
--- /dev/null
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -0,0 +1,96 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_tutil).
+
+
+-compile(export_all).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+start_couch() ->
+    start_couch([{write_ddocs, true}]).
+
+
+start_couch(Options) ->
+    WriteDDocs = couch_util:get_value(write_ddocs, Options, true),
+    purge_modules(),
+    Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
+    TmpDb = ?tempdb(),
+    ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
+    if not WriteDDocs -> ok; true ->
+        {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
+    end,
+    {TmpDb, Ctx}.
+
+
+stop_couch({_TmpDb, Ctx}) ->
+    test_util:stop_couch(Ctx).
+
+
+clear() ->
+    application:stop(ddoc_cache),
+    application:start(ddoc_cache).
+
+
+get_rev(DbName, DDocId) ->
+    {_, Ref} = erlang:spawn_monitor(fun() ->
+        {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]),
+        {Depth, [RevId | _]} = Revs,
+        exit({Depth, RevId})
+    end),
+    receive
+        {'DOWN', Ref, _, _, Rev} -> Rev
+    end.
+
+
+ddocs() ->
+    FooBar = #doc{
+        id = <<"_design/foobar">>,
+        body = {[
+            {<<"foo">>, <<"bar">>}
+        ]}
+    },
+    VDU = #doc{
+        id = <<"_design/vdu">>,
+        body = {[
+            {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
+        ]}
+    },
+    Custom = #doc{
+        id = <<"_design/custom">>,
+        body = {[
+            {<<"status">>, <<"ok">>},
+            {<<"custom">>, <<"hotrod">>}
+        ]}
+    },
+    [FooBar, VDU, Custom].
+
+
+purge_modules() ->
+    case application:get_key(ddoc_cache, modules) of
+        {ok, Mods} ->
+            lists:foreach(fun(Mod) ->
+                case code:which(Mod) of
+                    cover_compiled ->
+                        ok;
+                    _ ->
+                        code:delete(Mod),
+                        code:purge(Mod)
+                end
+            end, Mods);
+        undefined ->
+            ok
+    end.
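Taken together, the new test modules share one skeleton: export recover/1 so the module itself can
be used as a custom cache key via ddoc_cache:open_custom/2, and wrap the cases in a setup fixture
around ddoc_cache_tutil:start_couch/0. A condensed sketch of that skeleton, assuming the macros
from ddoc_cache_test.hrl above (module, function, and value names are illustrative):

    -module(ddoc_cache_example_test).

    -export([recover/1]).

    -include_lib("eunit/include/eunit.hrl").
    -include("ddoc_cache_test.hrl").

    % Called by the custom entry type when this module is used as the key.
    recover(DbName) ->
        {ok, {DbName, example_value}}.

    example_test_() ->
        {
            setup,
            fun ddoc_cache_tutil:start_couch/0,
            fun ddoc_cache_tutil:stop_couch/1,
            {with, [
                fun open_custom_is_cached/1
            ]}
        }.

    open_custom_is_cached({DbName, _}) ->
        ddoc_cache_tutil:clear(),
        Resp = ddoc_cache:open_custom(DbName, ?MODULE),
        ?assertEqual({ok, {DbName, example_value}}, Resp),
        ?assertEqual(Resp, ddoc_cache:open_custom(DbName, ?MODULE)),
        ?assertEqual(1, ets:info(?CACHE, size)).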
diff --git a/test/javascript/tests/rewrite.js b/test/javascript/tests/rewrite.js
index 8ff3229..a984936 100644
--- a/test/javascript/tests/rewrite.js
+++ b/test/javascript/tests/rewrite.js
@@ -416,13 +416,15 @@ couchTests.rewrite = function(debug) {
         T(typeof(result['_revs_info']) === "object");
 
         // test path relative to server
-        designDoc.rewrites.push({
-          "from": "uuids",
+        T(db.save({
+          _id: "_design/test2",
+          rewrites: [{
+            "from": "uuids",
           "to": "../../../_uuids"
-        });
-        T(db.save(designDoc).ok);
+          }]
+        }).ok);
         
-        var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/uuids");
+        var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test2/_rewrite/uuids");
         T(xhr.status == 500);
         var result = JSON.parse(xhr.responseText);
         T(result.error == "insecure_rewrite_rule");
@@ -432,7 +434,7 @@ couchTests.rewrite = function(debug) {
             key: "secure_rewrites",
             value: "false"}],
           function() {
-            var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/uuids?cache=bust");
+            var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test2/_rewrite/uuids?cache=bust");
             T(xhr.status == 200);
             var result = JSON.parse(xhr.responseText);
             T(result.uuids.length == 1);

-- 
To stop receiving notification emails like this one, please contact
"commits@couchdb.apache.org" <co...@couchdb.apache.org>.

[couchdb] 03/03: TMP: soak-javascript target

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 8eee7b14b3b06aa497f2b0d1b925c5bfab921069
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jul 11 18:25:12 2017 -0500

    TMP: soak-javascript target
---
 Makefile | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/Makefile b/Makefile
index 9b1701e..1fba02c 100644
--- a/Makefile
+++ b/Makefile
@@ -124,6 +124,21 @@ endif
             -c 'startup_jitter=0' \
             test/javascript/run $(suites)
 
+.PHONY: soak-javascript
+soak-javascript:
+	@mkdir -p share/www/script/test
+ifeq ($(IN_RELEASE), true)
+	@cp test/javascript/tests/lorem*.txt share/www/script/test/
+else
+	@mkdir -p src/fauxton/dist/release/test
+	@cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
+endif
+	@rm -rf dev/lib
+	while [ $$? -eq 0 ]; do \
+		dev/run -n 1 -q --with-admin-party-please \
+				-c 'startup_jitter=0' \
+				test/javascript/run $(suites); \
+	done
 
 .PHONY: check-qs
 # target: check-qs - Run query server tests (ruby and rspec required!)


[couchdb] 01/03: Remove duplicated eviction messages

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 22fe0b57a061a4502264ced2fb34c9a5a348f934
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jun 20 10:20:01 2017 -0500

    Remove duplicated eviction messages
    
    This is an old merge artifact that caused the event notifications
    to be sent twice per design document update.
---
 src/couch/src/couch_db_updater.erl | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 8a0fb8c..b4ad257 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -923,16 +923,7 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
         (_) -> []
     end, Ids),
 
-    Db4 = case length(UpdatedDDocIds) > 0 of
-        true ->
-            couch_event:notify(Db3#db.name, ddoc_updated),
-            ddoc_cache:evict(Db3#db.name, UpdatedDDocIds),
-            refresh_validate_doc_funs(Db3);
-        false ->
-            Db3
-    end,
-
-    {ok, commit_data(Db4, not FullCommit), UpdatedDDocIds}.
+    {ok, commit_data(Db3, not FullCommit), UpdatedDDocIds}.
 
 update_local_docs(Db, []) ->
     {ok, Db};
