You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by da...@apache.org on 2017/07/20 21:18:26 UTC

[couchdb] branch optimize-ddoc-cache updated: FIXUP: Change from entry count to memory size

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git


The following commit(s) were added to refs/heads/optimize-ddoc-cache by this push:
     new cb6cf4c  FIXUP: Change from entry count to memory size
cb6cf4c is described below

commit cb6cf4c71a7e994ccb7d7eb43a9d67e40c5a694e
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 20 16:17:11 2017 -0500

    FIXUP: Change from entry count to memory size
    
    This changes cache eviction from a count of entries to the total amount
    of memory used by the cache (as approximated by the memory used by ets
    for the ?CACHE table). This reverts the behavior to match the thresholds
    used by the previous version and lets users make more direct choices
    about how large to make their design document caches.
---
 rel/overlay/etc/default.ini                 | 16 +++++-----
 src/ddoc_cache/src/ddoc_cache_lru.erl       | 28 ++++++++---------
 src/ddoc_cache/test/ddoc_cache_lru_test.erl | 47 ++++++++++++-----------------
 3 files changed, 42 insertions(+), 49 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index be178e2..b9d0f85 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -91,15 +91,15 @@ enable_xframe_options = false
 ; CouchDB can optionally enforce a maximum uri length;
 ; max_uri_length = 8000
 ; changes_timeout = 60000
-; config_whitelist = 
-; max_uri_length = 
+; config_whitelist =
+; max_uri_length =
 ; rewrite_limit = 100
 ; x_forwarded_host = X-Forwarded-Host
 ; x_forwarded_proto = X-Forwarded-Proto
 ; x_forwarded_ssl = X-Forwarded-Ssl
 
 ; [httpd_design_handlers]
-; _view = 
+; _view =
 
 ; [ioq]
 ; concurrency = 10
@@ -113,7 +113,7 @@ port = 6984
 
 ; [chttpd_auth_cache]
 ; max_lifetime = 600000
-; max_objects = 
+; max_objects =
 ; max_size = 104857600
 
 ; [mem3]
@@ -124,7 +124,7 @@ port = 6984
 
 ; [fabric]
 ; all_docs_concurrency = 10
-; changes_duration = 
+; changes_duration =
 ; shard_timeout_factor = 2
 ; uuid_prefix_len = 7
 
@@ -157,7 +157,7 @@ iterations = 10 ; iterations for password hashing
 ; proxy_use_secret = false
 ; comma-separated list of public fields, 404 if empty
 ; public_fields =
-; secret = 
+; secret =
 ; users_db_public = false
 
 ; CSP (Content Security Policy) Support for _utils
@@ -190,8 +190,8 @@ credentials = false
 
 ; Configuration for the design document cache
 ;[ddoc_cache]
-; The maximum number of entries to keep in the cache
-;max_size = 1000
+; The maximum size of the cache in bytes
+;max_size = 104857600 ; 100MiB
 ; The period each cache entry should wait before
 ; automatically refreshing in milliseconds
 ;refresh_timeout = 67000
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 12f5d10..95329f6 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -113,9 +113,8 @@ handle_call({start, Key, Default}, _From, St) ->
     } = St,
     case ets:lookup(?CACHE, Key) of
         [] ->
-            MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
-            CurSize = ets:info(?CACHE, size),
-            case trim(St, CurSize, max(0, MaxSize)) of
+            MaxSize = config:get_integer("ddoc_cache", "max_size", 104857600),
+            case trim(St, max(0, MaxSize)) of
                 ok ->
                     true = ets:insert_new(?CACHE, #entry{key = Key}),
                     {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
@@ -243,19 +242,20 @@ lru_start(Key, DoInsert) ->
     end.
 
 
-trim(_, _, 0) ->
+trim(_, 0) ->
     full;
 
-trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
-    ok;
-
-trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
-    case ets:first(?LRU) of
-        '$end_of_table' ->
-            full;
-        {_Ts, Key, Pid} ->
-            remove_entry(St, Key, Pid),
-            trim(St, CurSize - 1, MaxSize)
+trim(St, MaxSize) ->
+    CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
+    couch_log:error("SIZE: ~b :: ~b~n", [CurSize, MaxSize]),
+    if CurSize =< MaxSize -> ok; true ->
+        case ets:first(?LRU) of
+            '$end_of_table' ->
+                full;
+            {_Ts, Key, Pid} ->
+                remove_entry(St, Key, Pid),
+                trim(St, MaxSize)
+        end
     end.
 
 
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index dd77828..0653afb 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -27,13 +27,15 @@ recover(<<"pause", _/binary>>) ->
     receive go -> ok end,
     {ok, paused};
 
+recover(<<"big", _/binary>>) ->
+    {ok, [random:uniform() || _ <- lists:seq(1, 8192)]};
+
 recover(DbName) ->
     {ok, DbName}.
 
 
 start_couch() ->
     Ctx = ddoc_cache_tutil:start_couch(),
-    config:set("ddoc_cache", "max_size", "5", false),
     meck:new(ddoc_cache_ev, [passthrough]),
     Ctx.
 
@@ -63,7 +65,6 @@ check_lru_test_() ->
             fun check_multi_start/1,
             fun check_multi_open/1,
             fun check_capped_size/1,
-            fun check_full_cache/1,
             fun check_cache_refill/1
         ]}
     }.
@@ -127,39 +128,27 @@ check_multi_open(_) ->
 
 
 check_capped_size(_) ->
+    % The extra factor of two in the size checks is
+    % a fudge factor. We don't reject entries from
+    % the cache if they would put us over the limit
+    % as we don't have the size information a
+    % priori.
+    config:set("ddoc_cache", "max_size", "1048576", false),
+    MaxSize = 1048576,
     ddoc_cache_tutil:clear(),
     meck:reset(ddoc_cache_ev),
     lists:foreach(fun(I) ->
-        DbName = list_to_binary(integer_to_list(I)),
+        DbName = list_to_binary("big_" ++ integer_to_list(I)),
         ddoc_cache:open_custom(DbName, ?MODULE),
         meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
-        ?assertEqual(I, ets:info(?CACHE, size))
-    end, lists:seq(1, 5)),
+        ?assert(cache_size() < MaxSize * 2)
+    end, lists:seq(1, 25)),
     lists:foreach(fun(I) ->
-        DbName = list_to_binary(integer_to_list(I)),
+        DbName = list_to_binary("big_" ++ integer_to_list(I)),
         ddoc_cache:open_custom(DbName, ?MODULE),
         meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
-        ?assertEqual(5, ets:info(?CACHE, size))
-    end, lists:seq(6, 20)).
-
-
-check_full_cache(_) ->
-    ddoc_cache_tutil:clear(),
-    meck:reset(ddoc_cache_ev),
-    lists:foreach(fun(I) ->
-        DbSuffix = list_to_binary(integer_to_list(I)),
-        DbName = <<"pause", DbSuffix/binary>>,
-        spawn(fun() -> ddoc_cache:open_custom(DbName, ?MODULE) end),
-        meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
-        ?assertEqual(I, ets:info(?CACHE, size))
-    end, lists:seq(1, 5)),
-    lists:foreach(fun(I) ->
-        DbSuffix = list_to_binary(integer_to_list(I)),
-        DbName = <<"pause", DbSuffix/binary>>,
-        spawn(fun() -> ddoc_cache:open_custom(DbName, ?MODULE) end),
-        meck:wait(I - 5, ddoc_cache_ev, event, [full, '_'], 1000),
-        ?assertEqual(5, ets:info(?CACHE, size))
-    end, lists:seq(6, 20)).
+        ?assert(cache_size() < MaxSize * 2)
+    end, lists:seq(26, 100)).
 
 
 check_cache_refill({DbName, _}) ->
@@ -193,3 +182,7 @@ check_cache_refill({DbName, _}) ->
         meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
         ?assert(ets:info(?CACHE, size) > 0)
     end, lists:seq(6, 10)).
+
+
+cache_size() ->
+    ets:info(?CACHE, memory) * erlang:system_info(wordsize).

-- 
To stop receiving notification emails like this one, please contact
commits@couchdb.apache.org.