Posted to commits@couchdb.apache.org by da...@apache.org on 2017/02/03 16:28:34 UTC

[1/4] couch commit: updated refs/heads/COUCHDB-3288-remove-public-db-record to c9fc361 [Forced Update!]

Repository: couchdb-couch
Updated Branches:
  refs/heads/COUCHDB-3288-remove-public-db-record 997bc616d -> c9fc36129 (forced update)


Fix compaction daemon tests

There was a race condition in the compaction daemon tests: the
compaction daemon would consider a compaction finished, but the test
would grab file size info before the compacted file had been swapped
in, which broke the assertion that fragmentation was reduced.

There was also a slight error in the test suite's fragmentation
calculation: it used the wrong size value (external instead of active).
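
In outline, the rewritten tests stop polling
couch_compaction_daemon:in_progress/0 and instead block on the mocked
modules until the compacted files have actually been swapped in. A
minimal sketch of that synchronization, assuming the meck mocks the
diff installs in setup/0 (the helper name wait_until_swapped and the
Timeout argument are illustrative; the test's design doc is
<<"_design/foo">>):

    %% Block until both the db updater and the view compactor have
    %% swapped in their compacted files for DbName.
    wait_until_swapped(DbName, Timeout) ->
        DbPid = couch_util:with_db(DbName, fun(Db) ->
            couch_db:get_pid(Db)
        end),
        {ok, ViewPid} = couch_index_server:get_index(couch_mrview_index,
            DbName, <<"_design/foo">>),
        meck:wait(1, couch_db_updater, handle_cast,
            [{compact_done, '_'}, '_'], DbPid, Timeout),
        meck:wait(1, couch_mrview_compactor, swap_compacted,
            ['_', '_'], ViewPid, Timeout).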


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/09aa667c
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/09aa667c
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/09aa667c

Branch: refs/heads/COUCHDB-3288-remove-public-db-record
Commit: 09aa667c8ff1e4799c7216f40135837d1ea6da32
Parents: 87b35c4
Author: Paul J. Davis <pa...@gmail.com>
Authored: Thu Feb 2 12:33:55 2017 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Fri Feb 3 09:48:12 2017 -0600

----------------------------------------------------------------------
 test/couchdb_compaction_daemon_tests.erl | 119 +++++++++++++++++---------
 1 file changed, 79 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/09aa667c/test/couchdb_compaction_daemon_tests.erl
----------------------------------------------------------------------
diff --git a/test/couchdb_compaction_daemon_tests.erl b/test/couchdb_compaction_daemon_tests.erl
index 4c35fb7..20879a7 100644
--- a/test/couchdb_compaction_daemon_tests.erl
+++ b/test/couchdb_compaction_daemon_tests.erl
@@ -26,6 +26,10 @@ start() ->
     Ctx.
 
 setup() ->
+    ok = meck:new(couch_db_updater, [passthrough]),
+    ok = meck:new(couch_mrview_compactor, [passthrough]),
+    ok = meck:new(couch_compaction_daemon, [passthrough]),
+
     DbName = ?tempdb(),
     {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
     create_design_doc(Db),
@@ -40,6 +44,11 @@ teardown(DbName) ->
         end,
         Configs),
     couch_server:delete(DbName, [?ADMIN_CTX]),
+
+    (catch meck:unload(couch_compaction_daemon)),
+    (catch meck:unload(couch_mrview_compactor)),
+    (catch meck:unload(couch_db_updater)),
+
     ok.
 
 
@@ -66,6 +75,8 @@ should_compact_by_default_rule(DbName) ->
         {ok, Db} = couch_db:open_int(DbName, []),
         populate(DbName, 70, 70, 200 * 1024),
 
+        CompactionMonitor = spawn_compaction_monitor(DbName),
+
         {_, DbFileSize} = get_db_frag(DbName),
         {_, ViewFileSize} = get_view_frag(DbName),
 
@@ -75,8 +86,7 @@ should_compact_by_default_rule(DbName) ->
                 false)
         end),
 
-        wait_compaction_started(DbName),
-        wait_compaction_finished(DbName),
+        wait_for_compaction(CompactionMonitor),
 
         with_config_change(DbName, fun() ->
             ok = config:delete("compactions", "_default", false)
@@ -100,6 +110,8 @@ should_compact_by_dbname_rule(DbName) ->
         {ok, Db} = couch_db:open_int(DbName, []),
         populate(DbName, 70, 70, 200 * 1024),
 
+        CompactionMonitor = spawn_compaction_monitor(DbName),
+
         {_, DbFileSize} = get_db_frag(DbName),
         {_, ViewFileSize} = get_view_frag(DbName),
 
@@ -109,8 +121,7 @@ should_compact_by_dbname_rule(DbName) ->
                 false)
         end),
 
-        wait_compaction_started(DbName),
-        wait_compaction_finished(DbName),
+        wait_for_compaction(CompactionMonitor),
 
         with_config_change(DbName, fun() ->
             ok = config:delete("compactions", ?b2l(DbName), false)
@@ -190,7 +201,7 @@ get_db_frag(DbName) ->
     {ok, Info} = couch_db:get_db_info(Db),
     couch_db:close(Db),
     FileSize = get_size(file, Info),
-    DataSize = get_size(external, Info),
+    DataSize = get_size(active, Info),
     {round((FileSize - DataSize) / FileSize * 100), FileSize}.
 
 get_view_frag(DbName) ->
@@ -198,49 +209,77 @@ get_view_frag(DbName) ->
     {ok, Info} = couch_mrview:get_info(Db, <<"_design/foo">>),
     couch_db:close(Db),
     FileSize = get_size(file, Info),
-    DataSize = get_size(external, Info),
+    DataSize = get_size(active, Info),
     {round((FileSize - DataSize) / FileSize * 100), FileSize}.
 
 get_size(Kind, Info) ->
     couch_util:get_nested_json_value({Info}, [sizes, Kind]).
 
-wait_compaction_started(DbName) ->
-    WaitFun = fun() ->
-        case is_compaction_running(DbName) of
-            false -> wait;
-            true ->  ok
-        end
-    end,
-    case test_util:wait(WaitFun, ?TIMEOUT) of
-        timeout ->
-            erlang:error({assertion_failed,
-                          [{module, ?MODULE},
-                           {line, ?LINE},
-                           {reason, "Compaction starting timeout"}]});
-        _ ->
-            ok
-    end.
-
-wait_compaction_finished(DbName) ->
-    WaitFun = fun() ->
-        case is_compaction_running(DbName) of
-            true -> wait;
-            false -> ok
-        end
+spawn_compaction_monitor(DbName) ->
+    TestPid = self(),
+    {Pid, Ref} = spawn_monitor(fun() ->
+        DaemonPid = whereis(couch_compaction_daemon),
+        DbPid = couch_util:with_db(DbName, fun(Db) ->
+            couch_db:get_pid(Db)
+        end),
+        {ok, ViewPid} = couch_index_server:get_index(couch_mrview_index,
+                DbName, <<"_design/foo">>),
+        TestPid ! {self(), started},
+        receive
+            {TestPid, go} -> ok
+        after ?TIMEOUT ->
+            erlang:error(timeout)
+        end,
+        meck:wait(
+                1,
+                couch_compaction_daemon,
+                handle_cast,
+                [{config_update, '_', '_'}, '_'],
+                DaemonPid,
+                ?TIMEOUT
+            ),
+        meck:wait(
+                1,
+                couch_db_updater,
+                handle_cast,
+                [{compact_done, '_'}, '_'],
+                DbPid,
+                ?TIMEOUT
+            ),
+        meck:wait(
+                1,
+                couch_mrview_compactor,
+                swap_compacted,
+                ['_', '_'],
+                ViewPid,
+                ?TIMEOUT
+            )
+    end),
+    receive
+        {Pid, started} -> ok;
+        {'DOWN', Ref, _, _, _} -> erlang:error(monitor_failure)
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed, [
+                {module, ?MODULE},
+                {line, ?LINE},
+                {reason, "Compaction starting timeout"}
+            ]})
     end,
-    case test_util:wait(WaitFun, ?TIMEOUT) of
-        timeout ->
-            erlang:error({assertion_failed,
-                          [{module, ?MODULE},
-                           {line, ?LINE},
-                           {reason, "Compaction timeout"}]});
-        _ ->
-            ok
+    {Pid, Ref}.
+
+wait_for_compaction({Pid, Ref}) ->
+    Pid ! {self(), go},
+    receive
+        {'DOWN', Ref, _, _, normal} -> ok;
+        {'DOWN', Ref, _, _, Other} -> erlang:error(Other)
+    after ?TIMEOUT ->
+        erlang:error({assertion_failed, [
+                {module, ?MODULE},
+                {line, ?LINE},
+                {reason, "Compaction finishing timeout"}
+            ]})
     end.
 
-is_compaction_running(_DbName) ->
-    couch_compaction_daemon:in_progress() /= [].
-
 is_idle(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
     Monitors = couch_db:monitored_by(Db),


[3/4] couch commit: updated refs/heads/COUCHDB-3288-remove-public-db-record to c9fc361

Posted by da...@apache.org.
Update couch_server to not use the db record

This removes introspection of the #db record by couch_server. While it's
required for the pluggable storage engine upgrade, it's also nice to
remove the hacky overloading of #db record fields for couch_server
logic.

COUCHDB-3288
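
Concretely, couch_server now keeps a dedicated #entry record (defined
in the new couch_server_int.hrl below) in its couch_dbs ets table
instead of repurposing #db fields such as fd, compactor_pid and
fd_monitor. A rough sketch of the cache-hit path under that scheme
(lookup_open_db is an illustrative helper; the real code lives inline
in couch_server:open/2):

    %% Requires -include("couch_server_int.hrl") for #entry{}.
    %% A locked entry means the LRU is in the middle of closing the db.
    lookup_open_db(DbName, UserCtx) ->
        case ets:lookup(couch_dbs, DbName) of
            [#entry{db = Db0, lock = Lock}] when Lock =/= locked ->
                {ok, Db1} = couch_db:incref(Db0),
                couch_db:set_user_ctx(Db1, UserCtx);  % returns {ok, Db2}
            _ ->
                not_cached
        end.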


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/e4b4b57d
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/e4b4b57d
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/e4b4b57d

Branch: refs/heads/COUCHDB-3288-remove-public-db-record
Commit: e4b4b57dd01615be86f36044a24db31e38a9ae1c
Parents: 3f8a0d0
Author: Paul J. Davis <pa...@gmail.com>
Authored: Fri Feb 3 10:20:30 2017 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Fri Feb 3 10:24:04 2017 -0600

----------------------------------------------------------------------
 src/couch_db.erl         |  23 +++++++++
 src/couch_lru.erl        |  14 ++---
 src/couch_server.erl     | 115 ++++++++++++++++++++++--------------------
 src/couch_server_int.hrl |  23 +++++++++
 4 files changed, 113 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e4b4b57d/src/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_db.erl b/src/couch_db.erl
index 06a4e3a..5d06278 100644
--- a/src/couch_db.erl
+++ b/src/couch_db.erl
@@ -19,6 +19,9 @@
     reopen/1,
     close/1,
 
+    incref/1,
+    decref/1,
+
     monitor/1,
     monitored_by/1,
     is_idle/1,
@@ -34,7 +37,9 @@
     get_db_info/1,
     get_doc_count/1,
     get_epochs/1,
+    get_instance_start_time/1,
     get_last_purged/1,
+    get_pid/1,
     get_revs_limit/1,
     get_security/1,
     get_update_seq/1,
@@ -46,6 +51,7 @@
     increment_update_seq/1,
     set_revs_limit/2,
     set_security/2,
+    set_user_ctx/2,
 
     ensure_full_commit/1,
     ensure_full_commit/2,
@@ -181,6 +187,14 @@ reopen(#db{main_pid = Pid, fd = Fd, fd_monitor = OldRef, user_ctx = UserCtx}) ->
         {ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}}
     end.
 
+incref(#db{fd = Fd} = Db) ->
+    Ref = erlang:monitor(process, Fd),
+    {ok, Db#db{fd_monitor = Ref}}.
+
+decref(#db{fd_monitor = Monitor}) ->
+    erlang:demonitor(Monitor, [flush]),
+    ok.
+
 is_system_db(#db{options = Options}) ->
     lists:member(sys_db, Options).
 
@@ -381,6 +395,9 @@ get_last_purged(#db{}=Db) ->
             couch_file:pread_term(Db#db.fd, Pointer)
     end.
 
+get_pid(#db{main_pid = Pid}) ->
+    Pid.
+
 get_doc_count(Db) ->
     {ok, {Count, _, _}} = couch_btree:full_reduce(Db#db.id_tree),
     {ok, Count}.
@@ -393,6 +410,9 @@ get_epochs(#db{}=Db) ->
     validate_epochs(Epochs),
     Epochs.
 
+get_instance_start_time(#db{instance_start_time = IST}) ->
+    IST.
+
 get_compacted_seq(#db{}=Db) ->
     couch_db_header:compacted_seq(Db#db.header).
 
@@ -585,6 +605,9 @@ set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
 set_security(_, _) ->
     throw(bad_request).
 
+set_user_ctx(#db{} = Db, UserCtx) ->
+    {ok, Db#db{user_ctx = UserCtx}}.
+
 validate_security_object(SecProps) ->
     Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
     % we fallback to readers here for backwards compatibility

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e4b4b57d/src/couch_lru.erl
----------------------------------------------------------------------
diff --git a/src/couch_lru.erl b/src/couch_lru.erl
index d58eb69..6d7baa9 100644
--- a/src/couch_lru.erl
+++ b/src/couch_lru.erl
@@ -13,7 +13,7 @@
 -module(couch_lru).
 -export([new/0, insert/2, update/2, close/1]).
 
--include_lib("couch/include/couch_db.hrl").
+-include("couch_server_int.hrl").
 
 new() ->
     {gb_trees:empty(), dict:new()}.
@@ -42,16 +42,18 @@ close({Tree, _} = Cache) ->
 close_int(none, _) ->
     erlang:error(all_dbs_active);
 close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) ->
-    case ets:update_element(couch_dbs, DbName, {#db.fd_monitor, locked}) of
+    case ets:update_element(couch_dbs, DbName, {#entry.lock, locked}) of
     true ->
-        [#db{main_pid = Pid} = Db] = ets:lookup(couch_dbs, DbName),
+        [#entry{db = Db}] = ets:lookup(couch_dbs, DbName),
         case couch_db:is_idle(Db) of true ->
+            DbPid = couch_db:get_pid(Db),
             true = ets:delete(couch_dbs, DbName),
-            true = ets:delete(couch_dbs_pid_to_name, Pid),
-            exit(Pid, kill),
+            true = ets:delete(couch_dbs_pid_to_name, DbPid),
+            exit(DbPid, kill),
             {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)};
         false ->
-            true = ets:update_element(couch_dbs, DbName, {#db.fd_monitor, nil}),
+            ElemSpec = {#entry.lock, unlocked},
+            true = ets:update_element(couch_dbs, DbName, ElemSpec),
             couch_stats:increment_counter([couchdb, couch_server, lru_skip]),
             close_int(gb_trees:next(Iter), update(DbName, Cache))
         end;

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e4b4b57d/src/couch_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_server.erl b/src/couch_server.erl
index 59bffa5..f365718 100644
--- a/src/couch_server.erl
+++ b/src/couch_server.erl
@@ -26,6 +26,7 @@
 -export([handle_config_change/5, handle_config_terminate/3]).
 
 -include_lib("couch/include/couch_db.hrl").
+-include("couch_server_int.hrl").
 
 -define(MAX_DBS_OPEN, 100).
 -define(RELISTEN_DELAY, 5000).
@@ -74,16 +75,18 @@ open(DbName, Options0) ->
     Options = maybe_add_sys_db_callbacks(DbName, Options0),
     Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
     case ets:lookup(couch_dbs, DbName) of
-    [#db{fd=Fd, fd_monitor=Lock} = Db] when Lock =/= locked ->
+    [#entry{db = Db0, lock = Lock}] when Lock =/= locked ->
         update_lru(DbName, Options),
-        {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
+        {ok, Db1} = couch_db:incref(Db0),
+        couch_db:set_user_ctx(Db1, Ctx);
     _ ->
         Timeout = couch_util:get_value(timeout, Options, infinity),
         Create = couch_util:get_value(create_if_missing, Options, false),
         case gen_server:call(couch_server, {open, DbName, Options}, Timeout) of
-        {ok, #db{fd=Fd} = Db} ->
+        {ok, Db0} ->
             update_lru(DbName, Options),
-            {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
+            {ok, Db1} = couch_db:incref(Db0),
+            couch_db:set_user_ctx(Db1, Ctx);
         {not_found, no_db_file} when Create ->
             couch_log:warning("creating missing database: ~s", [DbName]),
             couch_server:create(DbName, Options);
@@ -104,9 +107,10 @@ close_lru() ->
 create(DbName, Options0) ->
     Options = maybe_add_sys_db_callbacks(DbName, Options0),
     case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
-    {ok, #db{fd=Fd} = Db} ->
+    {ok, Db0} ->
         Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-        {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
+        {ok, Db1} = couch_db:incref(Db0),
+        couch_db:set_user_ctx(Db1, Ctx);
     Error ->
         Error
     end.
@@ -188,7 +192,7 @@ init([]) ->
     ok = config:listen_for_changes(?MODULE, nil),
     ok = couch_file:init_delete_dir(RootDir),
     hash_admin_passwords(),
-    ets:new(couch_dbs, [set, protected, named_table, {keypos, #db.name}]),
+    ets:new(couch_dbs, [set, protected, named_table, {keypos, #entry.name}]),
     ets:new(couch_dbs_pid_to_name, [set, protected, named_table]),
     process_flag(trap_exit, true),
     {ok, #server{root_dir=RootDir,
@@ -200,8 +204,9 @@ terminate(Reason, Srv) ->
     couch_log:error("couch_server terminating with ~p, state ~2048p",
                     [Reason,
                      Srv#server{lru = redacted}]),
-    ets:foldl(fun(#db{main_pid=Pid}, _) -> couch_util:shutdown_sync(Pid) end,
-        nil, couch_dbs),
+    ets:foldl(fun(Db, _) ->
+        couch_util:shutdown_sync(couch_db:get_pid(Db))
+    end, nil, couch_dbs),
     ok.
 
 handle_config_change("couchdb", "database_dir", _, _, _) ->
@@ -306,15 +311,13 @@ open_async(Server, From, DbName, Filepath, Options) ->
         true -> create;
         false -> open
     end,
-    % icky hack of field values - compactor_pid used to store clients
-    % and fd used for opening request info
-    true = ets:insert(couch_dbs, #db{
+    true = ets:insert(couch_dbs, #entry{
         name = DbName,
-        fd = ReqType,
-        main_pid = Opener,
-        compactor_pid = [From],
-        fd_monitor = locked,
-        options = Options
+        pid = Opener,
+        lock = locked,
+        waiters = [From],
+        req_type = ReqType,
+        db_options = Options
     }),
     true = ets:insert(couch_dbs_pid_to_name, {Opener, DbName}),
     db_opened(Server, Options).
@@ -337,16 +340,15 @@ handle_call({open_result, T0, DbName, {ok, Db}}, {FromPid, _Tag}, Server) ->
     true = ets:delete(couch_dbs_pid_to_name, FromPid),
     OpenTime = timer:now_diff(os:timestamp(), T0) / 1000,
     couch_stats:update_histogram([couchdb, db_open_time], OpenTime),
-    % icky hack of field values - compactor_pid used to store clients
-    % and fd used to possibly store a creation request
+    DbPid = couch_db:get_pid(Db),
     case ets:lookup(couch_dbs, DbName) of
         [] ->
             % db was deleted during async open
-            exit(Db#db.main_pid, kill),
+            exit(DbPid, kill),
             {reply, ok, Server};
-        [#db{fd=ReqType, compactor_pid=Froms}] ->
-            link(Db#db.main_pid),
-            [gen_server:reply(From, {ok, Db}) || From <- Froms],
+        [#entry{req_type = ReqType, waiters = Waiters} = Entry] ->
+            link(DbPid),
+            [gen_server:reply(Waiter, {ok, Db}) || Waiter <- Waiters],
             % Cancel the creation request if it exists.
             case ReqType of
                 {create, DbName, _Filepath, _Options, CrFrom} ->
@@ -354,8 +356,15 @@ handle_call({open_result, T0, DbName, {ok, Db}}, {FromPid, _Tag}, Server) ->
                 _ ->
                     ok
             end,
-            true = ets:insert(couch_dbs, Db),
-            true = ets:insert(couch_dbs_pid_to_name, {Db#db.main_pid, DbName}),
+            true = ets:insert(couch_dbs, #entry{
+                name = DbName,
+                db = Db,
+                pid = DbPid,
+                lock = unlocked,
+                db_options = Entry#entry.db_options,
+                start_time = couch_db:get_instance_start_time(Db)
+            }),
+            true = ets:insert(couch_dbs_pid_to_name, {DbPid, DbName}),
             Lru = case couch_db:is_system_db(Db) of
                 false ->
                     couch_lru:insert(DbName, Server#server.lru);
@@ -367,13 +376,12 @@ handle_call({open_result, T0, DbName, {ok, Db}}, {FromPid, _Tag}, Server) ->
 handle_call({open_result, T0, DbName, {error, eexist}}, From, Server) ->
     handle_call({open_result, T0, DbName, file_exists}, From, Server);
 handle_call({open_result, _T0, DbName, Error}, {FromPid, _Tag}, Server) ->
-    % icky hack of field values - compactor_pid used to store clients
     case ets:lookup(couch_dbs, DbName) of
         [] ->
             % db was deleted during async open
             {reply, ok, Server};
-        [#db{fd=ReqType, compactor_pid=Froms}=Db] ->
-            [gen_server:reply(From, Error) || From <- Froms],
+        [#entry{req_type = ReqType, waiters = Waiters} = Entry] ->
+            [gen_server:reply(Waiter, Error) || Waiter <- Waiters],
             couch_log:info("open_result error ~p for ~s", [Error, DbName]),
             true = ets:delete(couch_dbs, DbName),
             true = ets:delete(couch_dbs_pid_to_name, FromPid),
@@ -383,7 +391,7 @@ handle_call({open_result, _T0, DbName, Error}, {FromPid, _Tag}, Server) ->
                 _ ->
                     Server
             end,
-            {reply, ok, db_closed(NewServer, Db#db.options)}
+            {reply, ok, db_closed(NewServer, Entry#entry.db_options)}
     end;
 handle_call({open, DbName, Options}, From, Server) ->
     case ets:lookup(couch_dbs, DbName) of
@@ -401,15 +409,14 @@ handle_call({open, DbName, Options}, From, Server) ->
         Error ->
             {reply, Error, Server}
         end;
-    [#db{compactor_pid = Froms} = Db] when is_list(Froms) ->
-        % icky hack of field values - compactor_pid used to store clients
-        true = ets:insert(couch_dbs, Db#db{compactor_pid = [From|Froms]}),
-        if length(Froms) =< 10 -> ok; true ->
+    [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
+        true = ets:insert(couch_dbs, Entry#entry{waiters = [From | Waiters]}),
+        if length(Waiters) =< 10 -> ok; true ->
             Fmt = "~b clients waiting to open db ~s",
-            couch_log:info(Fmt, [length(Froms), DbName])
+            couch_log:info(Fmt, [length(Waiters), DbName])
         end,
         {noreply, Server};
-    [#db{} = Db] ->
+    [#entry{db = Db}] ->
         {reply, {ok, Db}, Server}
     end;
 handle_call({create, DbName, Options}, From, Server) ->
@@ -426,14 +433,13 @@ handle_call({create, DbName, Options}, From, Server) ->
             CloseError ->
                 {reply, CloseError, Server}
             end;
-        [#db{fd=open}=Db] ->
+        [#entry{req_type = open} = Entry] ->
             % We're trying to create a database while someone is in
             % the middle of trying to open it. We allow one creator
             % to wait while we figure out if it'll succeed.
-            % icky hack of field values - fd used to store create request
             CrOptions = [create | Options],
-            NewDb = Db#db{fd={create, DbName, Filepath, CrOptions, From}},
-            true = ets:insert(couch_dbs, NewDb),
+            Req = {create, DbName, Filepath, CrOptions, From},
+            true = ets:insert(couch_dbs, Entry#entry{req_type = Req}),
             {noreply, Server};
         [_AlreadyRunningDb] ->
             {reply, file_exists, Server}
@@ -449,18 +455,17 @@ handle_call({delete, DbName, Options}, _From, Server) ->
         Server2 =
         case ets:lookup(couch_dbs, DbName) of
         [] -> Server;
-        [#db{main_pid=Pid, compactor_pid=Froms} = Db] when is_list(Froms) ->
-            % icky hack of field values - compactor_pid used to store clients
+        [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
             true = ets:delete(couch_dbs, DbName),
             true = ets:delete(couch_dbs_pid_to_name, Pid),
             exit(Pid, kill),
-            [gen_server:reply(F, not_found) || F <- Froms],
-            db_closed(Server, Db#db.options);
-        [#db{main_pid=Pid} = Db] ->
+            [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
+            db_closed(Server, Entry#entry.db_options);
+        [#entry{pid = Pid} = Entry] ->
             true = ets:delete(couch_dbs, DbName),
             true = ets:delete(couch_dbs_pid_to_name, Pid),
             exit(Pid, kill),
-            db_closed(Server, Db#db.options)
+            db_closed(Server, Entry#entry.db_options)
         end,
 
         %% Delete any leftover compaction files. If we don't do this a
@@ -486,11 +491,12 @@ handle_call({delete, DbName, Options}, _From, Server) ->
     Error ->
         {reply, Error, Server}
     end;
-handle_call({db_updated, #db{}=Db}, _From, Server0) ->
-    #db{name = DbName, instance_start_time = StartTime} = Db,
-    Server = try ets:lookup_element(couch_dbs, DbName, #db.instance_start_time) of
+handle_call({db_updated, Db}, _From, Server0) ->
+    DbName = couch_db:name(Db),
+    StartTime = couch_db:get_instance_start_time(Db),
+    Server = try ets:lookup_element(couch_dbs, DbName, #entry.start_time) of
         StartTime ->
-            true = ets:insert(couch_dbs, Db),
+            true = ets:update_element(couch_dbs, DbName, {#entry.db, Db}),
             Lru = case couch_db:is_system_db(Db) of
                 false -> couch_lru:update(DbName, Server0#server.lru);
                 true -> Server0#server.lru
@@ -518,22 +524,19 @@ handle_info({'EXIT', _Pid, config_change}, Server) ->
 handle_info({'EXIT', Pid, Reason}, Server) ->
     case ets:lookup(couch_dbs_pid_to_name, Pid) of
     [{Pid, DbName}] ->
-        [#db{compactor_pid=Froms}=Db] = ets:lookup(couch_dbs, DbName),
+        [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs, DbName),
         if Reason /= snappy_nif_not_loaded -> ok; true ->
             Msg = io_lib:format("To open the database `~s`, Apache CouchDB "
                 "must be built with Erlang OTP R13B04 or higher.", [DbName]),
             couch_log:error(Msg, [])
         end,
         couch_log:info("db ~s died with reason ~p", [DbName, Reason]),
-        % icky hack of field values - compactor_pid used to store clients
-        if is_list(Froms) ->
-            [gen_server:reply(From, Reason) || From <- Froms];
-        true ->
-            ok
+        if not is_list(Waiters) -> ok; true ->
+            [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
         end,
         true = ets:delete(couch_dbs, DbName),
         true = ets:delete(couch_dbs_pid_to_name, Pid),
-        {noreply, db_closed(Server, Db#db.options)};
+        {noreply, db_closed(Server, Entry#entry.db_options)};
     [] ->
         {noreply, Server}
     end;

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e4b4b57d/src/couch_server_int.hrl
----------------------------------------------------------------------
diff --git a/src/couch_server_int.hrl b/src/couch_server_int.hrl
new file mode 100644
index 0000000..537a6ab
--- /dev/null
+++ b/src/couch_server_int.hrl
@@ -0,0 +1,23 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-record(entry, {
+    name,
+    db,
+    pid,
+    lock,
+    waiters,
+    req_type,
+    db_options,
+    start_time
+}).


[4/4] couch commit: updated refs/heads/COUCHDB-3288-remove-public-db-record to c9fc361

Posted by da...@apache.org.
Remove public access to the db record

This completes the removal of public access to the db record from the
couch application. The large majority of the change removes direct
access to the #db.name, #db.main_pid, and #db.update_seq fields.

COUCHDB-3288
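
In practice, code outside couch_db now goes through accessor functions
instead of matching on #db{} fields directly. A small before/after
sketch of the pattern applied throughout this commit (describe_db is an
illustrative helper; the accessors are the ones exported from
src/couch_db.erl):

    %% Before: DbName = Db#db.name, Pid = Db#db.main_pid,
    %%         Seq = Db#db.update_seq
    %% After: use the couch_db accessors.
    describe_db(Db) ->
        {couch_db:name(Db),
         couch_db:get_pid(Db),
         couch_db:get_update_seq(Db)}.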


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/c9fc3612
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/c9fc3612
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/c9fc3612

Branch: refs/heads/COUCHDB-3288-remove-public-db-record
Commit: c9fc3612946b8aedf89fa2978c6e1259fdbad41d
Parents: e4b4b57
Author: Paul J. Davis <pa...@gmail.com>
Authored: Wed Feb 1 15:15:09 2017 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Fri Feb 3 10:27:05 2017 -0600

----------------------------------------------------------------------
 include/couch_db.hrl                     | 27 -------------
 src/couch_auth_cache.erl                 | 17 +++++---
 src/couch_changes.erl                    | 56 ++++++++++++++-------------
 src/couch_compaction_daemon.erl          |  4 +-
 src/couch_db.erl                         | 55 ++++++++++++++++++++++++++
 src/couch_db_int.hrl                     | 38 ++++++++++++++++++
 src/couch_db_plugin.erl                  |  6 ++-
 src/couch_db_updater.erl                 |  1 +
 src/couch_httpd_db.erl                   | 12 +++---
 src/couch_users_db.erl                   |  8 ++--
 src/couch_util.erl                       | 15 +++++--
 test/couch_auth_cache_tests.erl          |  2 +-
 test/couch_changes_tests.erl             |  2 +-
 test/couch_db_plugin_tests.erl           | 13 ++++---
 test/couch_server_tests.erl              | 11 ++++--
 test/couchdb_compaction_daemon_tests.erl |  2 +-
 test/couchdb_views_tests.erl             | 21 +++++-----
 17 files changed, 192 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/include/couch_db.hrl
----------------------------------------------------------------------
diff --git a/include/couch_db.hrl b/include/couch_db.hrl
index e7cd85d..5abb316 100644
--- a/include/couch_db.hrl
+++ b/include/couch_db.hrl
@@ -128,33 +128,6 @@
     handler
 }).
 
--record(db, {
-    main_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-    fd,
-    fd_monitor,
-    header = couch_db_header:new(),
-    committed_update_seq,
-    id_tree,
-    seq_tree,
-    local_tree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = undefined,
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    options = [],
-    compression,
-    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
-    after_doc_read = nil    % nil | fun(Doc, Db) -> NewDoc
-}).
-
 -record(view_fold_helper_funs, {
     reduce_count,
     passed_end,

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_auth_cache.erl
----------------------------------------------------------------------
diff --git a/src/couch_auth_cache.erl b/src/couch_auth_cache.erl
index 9b00a9d..54a6794 100644
--- a/src/couch_auth_cache.erl
+++ b/src/couch_auth_cache.erl
@@ -289,8 +289,9 @@ reinit_cache(#state{db_mon_ref = Ref, closed = Closed} = State) ->
     true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
     AuthDb = open_auth_db(),
     true = ets:insert(?STATE, {auth_db, AuthDb}),
+    DbPid = couch_db:get_pid(AuthDb),
     NewState#state{closed = [Ref|Closed],
-                   db_mon_ref = erlang:monitor(process, AuthDb#db.main_pid)}.
+                   db_mon_ref = erlang:monitor(process, DbPid)}.
 
 
 add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) ->
@@ -331,13 +332,15 @@ refresh_entries(AuthDb) ->
     nil ->
         ok;
     AuthDb2 ->
-        case AuthDb2#db.update_seq > AuthDb#db.update_seq of
+        AuthDbSeq = couch_db:get_update_seq(AuthDb),
+        AuthDb2Seq = couch_db:get_update_seq(AuthDb2),
+        case AuthDb2Seq > AuthDbSeq of
         true ->
             {ok, _, _} = couch_db:enum_docs_since(
                 AuthDb2,
-                AuthDb#db.update_seq,
+                AuthDbSeq,
                 fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
-                AuthDb#db.update_seq,
+                AuthDbSeq,
                 []
             ),
             true = ets:insert(?STATE, {auth_db, AuthDb2});
@@ -395,7 +398,9 @@ cache_needs_refresh() ->
             nil ->
                 false;
             AuthDb2 ->
-                AuthDb2#db.update_seq > AuthDb#db.update_seq
+                AuthDbSeq = couch_db:get_update_seq(AuthDb),
+                AuthDb2Seq = couch_db:get_update_seq(AuthDb2),
+                AuthDb2Seq > AuthDbSeq
             end
         end,
         false
@@ -416,7 +421,7 @@ exec_if_auth_db(Fun) ->
 
 exec_if_auth_db(Fun, DefRes) ->
     case ets:lookup(?STATE, auth_db) of
-    [{auth_db, #db{} = AuthDb}] ->
+    [{auth_db, AuthDb}] ->
         Fun(AuthDb);
     _ ->
         DefRes

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_changes.erl
----------------------------------------------------------------------
diff --git a/src/couch_changes.erl b/src/couch_changes.erl
index 52ff39d..ea7f65c 100644
--- a/src/couch_changes.erl
+++ b/src/couch_changes.erl
@@ -78,9 +78,10 @@ handle_changes(Args1, Req, Db0, Type) ->
         _ ->
             {false, undefined, undefined}
     end,
+    DbName = couch_db:name(Db0),
     {StartListenerFun, View} = if UseViewChanges ->
         {ok, {_, View0, _}, _, _} = couch_mrview_util:get_view(
-                Db0#db.name, DDocName, ViewName, #mrargs{}),
+                DbName, DDocName, ViewName, #mrargs{}),
         case View0#mrview.seq_btree of
             #btree{} ->
                 ok;
@@ -89,14 +90,14 @@ handle_changes(Args1, Req, Db0, Type) ->
         end,
         SNFun = fun() ->
             couch_event:link_listener(
-                 ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, Db0#db.name}]
+                 ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, DbName}]
             )
         end,
         {SNFun, View0};
     true ->
         SNFun = fun() ->
             couch_event:link_listener(
-                 ?MODULE, handle_db_event, self(), [{dbname, Db0#db.name}]
+                 ?MODULE, handle_db_event, self(), [{dbname, DbName}]
             )
         end,
         {SNFun, undefined}
@@ -111,7 +112,7 @@ handle_changes(Args1, Req, Db0, Type) ->
         end,
         View2 = if UseViewChanges ->
             {ok, {_, View1, _}, _, _} = couch_mrview_util:get_view(
-                    Db0#db.name, DDocName, ViewName, #mrargs{}),
+                    DbName, DDocName, ViewName, #mrargs{}),
             View1;
         true ->
             undefined
@@ -219,11 +220,11 @@ configure_filter("_view", Style, Req, Db) ->
             catch _:_ ->
                 view
             end,
-            case Db#db.id_tree of
-                undefined ->
+            case couch_db:is_clustered(Db) of
+                true ->
                     DIR = fabric_util:doc_id_and_rev(DDoc),
                     {fetch, FilterType, Style, DIR, VName};
-                _ ->
+                false ->
                     {FilterType, Style, DDoc, VName}
             end;
         [] ->
@@ -242,11 +243,11 @@ configure_filter(FilterName, Style, Req, Db) ->
         [DName, FName] ->
             {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
             check_member_exists(DDoc, [<<"filters">>, FName]),
-            case Db#db.id_tree of
-                undefined ->
+            case couch_db:is_clustered(Db) of
+                true ->
                     DIR = fabric_util:doc_id_and_rev(DDoc),
                     {fetch, custom, Style, Req, DIR, FName};
-                _ ->
+                false->
                     {custom, Style, Req, DDoc, FName}
             end;
 
@@ -395,15 +396,19 @@ check_fields(_Fields) ->
     throw({bad_request, "Selector error: fields must be JSON array"}).
 
 
-open_ddoc(#db{name=DbName, id_tree=undefined}, DDocId) ->
-    case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
-        {ok, _} = Resp -> Resp;
-        Else -> throw(Else)
-    end;
 open_ddoc(Db, DDocId) ->
-    case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-        {ok, _} = Resp -> Resp;
-        Else -> throw(Else)
+    DbName = couch_db:name(Db),
+    case couch_db:is_clustered(Db) of
+        true ->
+            case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
+                {ok, _} = Resp -> Resp;
+                Else -> throw(Else)
+            end;
+        false ->
+            case couch_db:open_doc(Db, DDocId, [ejson_body]) of
+                {ok, _} = Resp -> Resp;
+                Else -> throw(Else)
+            end
     end.
 
 
@@ -566,7 +571,7 @@ can_optimize(_, _) ->
 
 
 send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
-    Lookups = couch_btree:lookup(Db#db.id_tree, DocIds),
+    Lookups = couch_db:get_full_doc_infos(Db, DocIds),
     FullInfos = lists:foldl(fun
         ({ok, FDI}, Acc) -> [FDI | Acc];
         (not_found, Acc) -> Acc
@@ -575,11 +580,9 @@ send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
 
 
 send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
-    FoldFun = fun(FullDocInfo, _, Acc) ->
-        {ok, [FullDocInfo | Acc]}
-    end,
+    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
     KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
-    {ok, _, FullInfos} = couch_btree:fold(Db#db.id_tree, FoldFun, [], KeyOpts),
+    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], KeyOpts),
     send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
 
 
@@ -640,8 +643,8 @@ keep_sending_changes(Args, Acc0, FirstRound) ->
     true ->
         case wait_updated(Timeout, TimeoutFun, UserAcc2) of
         {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
-            case couch_db:open(Db#db.name, DbOptions1) of
+            DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
+            case couch_db:open(couch_db:name(Db), DbOptions1) of
             {ok, Db2} ->
                 keep_sending_changes(
                   Args#changes_args{limit=NewLimit},
@@ -665,7 +668,8 @@ keep_sending_changes(Args, Acc0, FirstRound) ->
 maybe_refresh_view(_, undefined, undefined) ->
     undefined;
 maybe_refresh_view(Db, DDocName, ViewName) ->
-    {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(Db#db.name, DDocName, ViewName, #mrargs{}),
+    DbName = couch_db:name(Db),
+    {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(DbName, DDocName, ViewName, #mrargs{}),
     View.
 
 end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_compaction_daemon.erl
----------------------------------------------------------------------
diff --git a/src/couch_compaction_daemon.erl b/src/couch_compaction_daemon.erl
index 8f95eb2..f3b646d 100644
--- a/src/couch_compaction_daemon.erl
+++ b/src/couch_compaction_daemon.erl
@@ -319,7 +319,7 @@ can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
         {Frag, SpaceRequired} = frag(DbInfo),
         couch_log:debug("Fragmentation for database `~s` is ~p%, estimated"
                         " space for compaction is ~p bytes.",
-                        [Db#db.name, Frag, SpaceRequired]),
+                        [couch_db:name(Db), Frag, SpaceRequired]),
         case check_frag(Threshold, Frag) of
         false ->
             false;
@@ -332,7 +332,7 @@ can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
                 couch_log:warning("Compaction daemon - skipping database `~s` "
                     "compaction: the estimated necessary disk space is about ~p"
                     " bytes but the currently available disk space is ~p bytes.",
-                    [Db#db.name, SpaceRequired, Free]),
+                    [couch_db:name(Db), SpaceRequired, Free]),
                 false
             end
         end

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_db.erl b/src/couch_db.erl
index 5d06278..8cf647d 100644
--- a/src/couch_db.erl
+++ b/src/couch_db.erl
@@ -22,6 +22,9 @@
     incref/1,
     decref/1,
 
+    clustered_db/2,
+    clustered_db/3,
+
     monitor/1,
     monitored_by/1,
     is_idle/1,
@@ -32,21 +35,28 @@
 
     name/1,
     compression/1,
+    get_after_doc_read_fun/1,
+    get_before_doc_update_fun/1,
     get_committed_update_seq/1,
     get_compacted_seq/1,
+    get_compactor_pid/1,
     get_db_info/1,
     get_doc_count/1,
     get_epochs/1,
+    get_filepath/1,
     get_instance_start_time/1,
     get_last_purged/1,
     get_pid/1,
     get_revs_limit/1,
     get_security/1,
     get_update_seq/1,
+    get_user_ctx/1,
     get_uuid/1,
     get_purge_seq/1,
 
+    is_db/1,
     is_system_db/1,
+    is_clustered/1,
 
     increment_update_seq/1,
     set_revs_limit/2,
@@ -80,6 +90,8 @@
 
     with_stream/3,
 
+    fold_docs/4,
+    fold_local_docs/4,
     enum_docs/4,
     enum_docs_reduce_to_count/1,
 
@@ -113,6 +125,7 @@
 
 
 -include_lib("couch/include/couch_db.hrl").
+-include("couch_db_int.hrl").
 
 -define(DBNAME_REGEX,
     "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
@@ -187,6 +200,12 @@ reopen(#db{main_pid = Pid, fd = Fd, fd_monitor = OldRef, user_ctx = UserCtx}) ->
         {ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}}
     end.
 
+clustered_db(DbName, UserCtx) ->
+    clustered_db(DbName, UserCtx, []).
+
+clustered_db(DbName, UserCtx, SecProps) ->
+    {ok, #db{name = DbName, user_ctx = UserCtx, security = SecProps}}.
+
 incref(#db{fd = Fd} = Db) ->
     Ref = erlang:monitor(process, Fd),
     {ok, Db#db{fd_monitor = Ref}}.
@@ -195,9 +214,19 @@ decref(#db{fd_monitor = Monitor}) ->
     erlang:demonitor(Monitor, [flush]),
     ok.
 
+is_db(#db{}) ->
+    true;
+is_db(_) ->
+    false.
+
 is_system_db(#db{options = Options}) ->
     lists:member(sys_db, Options).
 
+is_clustered(#db{main_pid = nil}) ->
+    true;
+is_clustered(#db{}) ->
+    false.
+
 ensure_full_commit(#db{main_pid=Pid, instance_start_time=StartTime}) ->
     ok = gen_server:call(Pid, full_commit, infinity),
     {ok, StartTime}.
@@ -378,12 +407,21 @@ increment_update_seq(#db{main_pid=Pid}) ->
 purge_docs(#db{main_pid=Pid}, IdsRevs) ->
     gen_server:call(Pid, {purge_docs, IdsRevs}).
 
+get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
+    Fun.
+
+get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
+    Fun.
+
 get_committed_update_seq(#db{committed_update_seq=Seq}) ->
     Seq.
 
 get_update_seq(#db{update_seq=Seq})->
     Seq.
 
+get_user_ctx(#db{user_ctx = UserCtx}) ->
+    UserCtx.
+
 get_purge_seq(#db{}=Db) ->
     couch_db_header:purge_seq(Db#db.header).
 
@@ -410,12 +448,18 @@ get_epochs(#db{}=Db) ->
     validate_epochs(Epochs),
     Epochs.
 
+get_filepath(#db{filepath = FilePath}) ->
+    FilePath.
+
 get_instance_start_time(#db{instance_start_time = IST}) ->
     IST.
 
 get_compacted_seq(#db{}=Db) ->
     couch_db_header:compacted_seq(Db#db.header).
 
+get_compactor_pid(#db{compactor_pid = Pid}) ->
+    Pid.
+
 get_db_info(Db) ->
     #db{fd=Fd,
         header=Header,
@@ -1365,6 +1409,17 @@ enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
             [{start_key, SinceSeq + 1} | Options]),
     {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
 
+
+fold_docs(Db, InFun, InAcc, Opts) ->
+    Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
+    {ok, _, AccOut} = couch_btree:fold(Db#db.id_tree, Wrapper, InAcc, Opts),
+    {ok, AccOut}.
+
+fold_local_docs(Db, InFun, InAcc, Opts) ->
+    Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
+    {ok, _, AccOut} = couch_btree:fold(Db#db.local_tree, Wrapper, InAcc, Opts),
+    {ok, AccOut}.
+
 enum_docs(Db, InFun, InAcc, Options0) ->
     {NS, Options} = extract_namespace(Options0),
     enum_docs(Db, NS, InFun, InAcc, Options).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_db_int.hrl
----------------------------------------------------------------------
diff --git a/src/couch_db_int.hrl b/src/couch_db_int.hrl
new file mode 100644
index 0000000..fc739b7
--- /dev/null
+++ b/src/couch_db_int.hrl
@@ -0,0 +1,38 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(db, {
+    main_pid = nil,
+    compactor_pid = nil,
+    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+    fd,
+    fd_monitor,
+    header = couch_db_header:new(),
+    committed_update_seq,
+    id_tree,
+    seq_tree,
+    local_tree,
+    update_seq,
+    name,
+    filepath,
+    validate_doc_funs = undefined,
+    security = [],
+    security_ptr = nil,
+    user_ctx = #user_ctx{},
+    waiting_delayed_commit = nil,
+    revs_limit = 1000,
+    fsync_options = [],
+    options = [],
+    compression,
+    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
+    after_doc_read = nil    % nil | fun(Doc, Db) -> NewDoc
+}).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_db_plugin.erl
----------------------------------------------------------------------
diff --git a/src/couch_db_plugin.erl b/src/couch_db_plugin.erl
index 774e9e0..740b812 100644
--- a/src/couch_db_plugin.erl
+++ b/src/couch_db_plugin.erl
@@ -32,13 +32,15 @@
 validate_dbname(DbName, Normalized, Default) ->
     maybe_handle(validate_dbname, [DbName, Normalized], Default).
 
-before_doc_update(#db{before_doc_update = Fun} = Db, Doc0) ->
+before_doc_update(Db, Doc0) ->
+    Fun = couch_db:get_before_doc_update_fun(Db),
     case with_pipe(before_doc_update, [Doc0, Db]) of
         [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
         [Doc1, _Db] -> Doc1
     end.
 
-after_doc_read(#db{after_doc_read = Fun} = Db, Doc0) ->
+after_doc_read(Db, Doc0) ->
+    Fun = couch_db:get_after_doc_read_fun(Db),
     case with_pipe(after_doc_read, [Doc0, Db]) of
         [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
         [Doc1, _Db] -> Doc1

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_db_updater.erl
----------------------------------------------------------------------
diff --git a/src/couch_db_updater.erl b/src/couch_db_updater.erl
index 7872635..1970b78 100644
--- a/src/couch_db_updater.erl
+++ b/src/couch_db_updater.erl
@@ -20,6 +20,7 @@
 -export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
 
 -include_lib("couch/include/couch_db.hrl").
+-include("couch_db_int.hrl").
 
 -record(comp_header, {
     db_header,

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_httpd_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_db.erl b/src/couch_httpd_db.erl
index 3793a06..fe42dfe 100644
--- a/src/couch_httpd_db.erl
+++ b/src/couch_httpd_db.erl
@@ -70,7 +70,8 @@ handle_changes_req(#httpd{method='GET'}=Req, Db, ChangesArgs, ChangesFun) ->
 handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) ->
     couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST").
 
-handle_changes_req1(Req, #db{name=DbName}=Db, ChangesArgs, ChangesFun) ->
+handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) ->
+    DbName = couch_db:name(Db),
     AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
     case AuthDbName of
     DbName ->
@@ -287,7 +288,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) -
         RequiredSeq > CommittedSeq ->
             couch_db:ensure_full_commit(Db);
         true ->
-            {ok, Db#db.instance_start_time}
+            {ok, couch_db:get_instance_start_time(Db)}
         end
     end,
     send_json(Req, 201, {[
@@ -733,7 +734,8 @@ update_doc_result_to_json(DocId, Error) ->
 
 
 update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
-    Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(Db#db.name) ++ "/" ++ couch_util:url_encode(DocId)),
+    DbName = couch_db:name(Db),
+    Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)),
     update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
 update_doc(Req, Db, DocId, Doc) ->
     update_doc(Req, Db, DocId, Doc, []).
@@ -1033,7 +1035,7 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN
         [];
     _ ->
         [{"Location", absolute_uri(Req, "/" ++
-            couch_util:url_encode(Db#db.name) ++ "/" ++
+            couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
             couch_util:url_encode(DocId) ++ "/" ++
             couch_util:url_encode(FileName)
         )}]
@@ -1145,7 +1147,7 @@ parse_changes_query(Req, Db) ->
         {"descending", "true"} ->
             Args#changes_args{dir=rev};
         {"since", "now"} ->
-            UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
+            UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
                                         couch_db:get_update_seq(WDb)
                                 end),
             Args#changes_args{since=UpdateSeq};

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_users_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_users_db.erl b/src/couch_users_db.erl
index 6f7b9af..c7b41f1 100644
--- a/src/couch_users_db.erl
+++ b/src/couch_users_db.erl
@@ -39,8 +39,8 @@
 %   -> 404 // Not Found
 % Else
 %   -> save_doc
-before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
+before_doc_update(Doc, Db) ->
+    #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
     DocName = get_doc_name(Doc),
     case (catch couch_db:check_is_admin(Db)) of
     ok ->
@@ -108,8 +108,8 @@ after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
         throw({forbidden,
         <<"Only administrators can view design docs in the users database.">>})
     end;
-after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
+after_doc_read(Doc, Db) ->
+    #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
     DocName = get_doc_name(Doc),
     case (catch couch_db:check_is_admin(Db)) of
     ok ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/src/couch_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_util.erl b/src/couch_util.erl
index 6001ae2..d688c12 100644
--- a/src/couch_util.erl
+++ b/src/couch_util.erl
@@ -198,7 +198,9 @@ json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
 json_apply_field({Key, NewValue}, [], Acc) ->
     {[{Key, NewValue}|Acc]}.
 
-json_user_ctx(#db{name=ShardName, user_ctx=Ctx}) ->
+json_user_ctx(Db) ->
+    ShardName = couch_db:name(Db),
+    Ctx = couch_db:get_user_ctx(Db),
     {[{<<"db">>, mem3:dbname(ShardName)},
             {<<"name">>,Ctx#user_ctx.name},
             {<<"roles">>,Ctx#user_ctx.roles}]}.
@@ -455,9 +457,7 @@ encode_doc_id(Id) ->
     url_encode(Id).
 
 
-with_db(Db, Fun) when is_record(Db, db) ->
-    Fun(Db);
-with_db(DbName, Fun) ->
+with_db(DbName, Fun)  when is_binary(DbName) ->
     case couch_db:open_int(DbName, [?ADMIN_CTX]) of
         {ok, Db} ->
             try
@@ -467,6 +467,13 @@ with_db(DbName, Fun) ->
             end;
         Else ->
             throw(Else)
+    end;
+with_db(Db, Fun) ->
+    case couch_db:is_db(Db) of
+        true ->
+            Fun(Db);
+        false ->
+            erlang:error({invalid_db, Db})
     end.
 
 rfc1123_date() ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couch_auth_cache_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_auth_cache_tests.erl b/test/couch_auth_cache_tests.erl
index 76179de..08aecd1 100644
--- a/test/couch_auth_cache_tests.erl
+++ b/test/couch_auth_cache_tests.erl
@@ -265,7 +265,7 @@ hash_password(Password) ->
 shutdown_db(DbName) ->
     {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
     ok = couch_db:close(AuthDb),
-    couch_util:shutdown_sync(AuthDb#db.main_pid),
+    couch_util:shutdown_sync(couch_db:get_pid(AuthDb)),
     ok = timer:sleep(1000).
 
 get_doc_rev(DbName, UserName) ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couch_changes_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_changes_tests.erl b/test/couch_changes_tests.erl
index 3c0e5f6..494d90f 100644
--- a/test/couch_changes_tests.erl
+++ b/test/couch_changes_tests.erl
@@ -645,7 +645,7 @@ should_filter_by_user_ctx({DbName, _}) ->
             ]}),
             ChArgs = #changes_args{filter = "app/valid"},
             UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
-            DbRec = #db{name = DbName, user_ctx = UserCtx},
+            {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
             Req = {json_req, {[{
                 <<"userCtx">>, couch_util:json_user_ctx(DbRec)
             }]}},

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couch_db_plugin_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_db_plugin_tests.erl b/test/couch_db_plugin_tests.erl
index ea9b230..94dd3df 100644
--- a/test/couch_db_plugin_tests.erl
+++ b/test/couch_db_plugin_tests.erl
@@ -43,6 +43,7 @@ data_providers() -> [].
 data_subscriptions() -> [].
 processes() -> [].
 notify(_, _, _) -> ok.
+fake_db() -> element(2, couch_db:clustered_db(fake, totes_fake)).
 
 setup() ->
     couch_tests:setup([
@@ -133,33 +134,33 @@ validate_dbname_pass() ->
 before_doc_update_match() ->
     ?assertMatch(
         {true, [before_doc_update, doc]},
-        couch_db_plugin:before_doc_update(#db{}, {true, [doc]})).
+        couch_db_plugin:before_doc_update(fake_db(), {true, [doc]})).
 
 before_doc_update_no_match() ->
     ?assertMatch(
         {false, [doc]},
-        couch_db_plugin:before_doc_update(#db{}, {false, [doc]})).
+        couch_db_plugin:before_doc_update(fake_db(), {false, [doc]})).
 
 before_doc_update_throw() ->
     ?assertThrow(
         before_doc_update,
-        couch_db_plugin:before_doc_update(#db{}, {fail, [doc]})).
+        couch_db_plugin:before_doc_update(fake_db(), {fail, [doc]})).
 
 
 after_doc_read_match() ->
     ?assertMatch(
         {true, [after_doc_read, doc]},
-        couch_db_plugin:after_doc_read(#db{}, {true, [doc]})).
+        couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})).
 
 after_doc_read_no_match() ->
     ?assertMatch(
         {false, [doc]},
-        couch_db_plugin:after_doc_read(#db{}, {false, [doc]})).
+        couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})).
 
 after_doc_read_throw() ->
     ?assertThrow(
         after_doc_read,
-        couch_db_plugin:after_doc_read(#db{}, {fail, [doc]})).
+        couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})).
 
 
 validate_docid_match() ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couch_server_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_server_tests.erl b/test/couch_server_tests.erl
index c8f8381..4fd7ff2 100644
--- a/test/couch_server_tests.erl
+++ b/test/couch_server_tests.erl
@@ -32,8 +32,7 @@ setup(_) ->
     setup().
 
 teardown(Db) ->
-    (catch couch_db:close(Db)),
-    (catch file:delete(Db#db.filepath)).
+    (catch couch_db:close(Db)).
 
 teardown(rename, Db) ->
     config:set("couchdb", "enable_database_recovery", "false", false),
@@ -61,7 +60,9 @@ make_test_case(Mod, Funs) ->
         {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
     }.
 
-should_rename_on_delete(_, #db{filepath = Origin, name = DbName}) ->
+should_rename_on_delete(_, Db) ->
+    DbName = couch_db:name(Db),
+    Origin = couch_db:get_filepath(Db),
     ?_test(begin
         ?assert(filelib:is_regular(Origin)),
         ?assertMatch(ok, couch_server:delete(DbName, [])),
@@ -74,7 +75,9 @@ should_rename_on_delete(_, #db{filepath = Origin, name = DbName}) ->
         ?assert(filelib:is_regular(Renamed))
     end).
 
-should_delete(_, #db{filepath = Origin, name = DbName}) ->
+should_delete(_, Db) ->
+    DbName = couch_db:name(Db),
+    Origin = couch_db:get_filepath(Db),
     ?_test(begin
         ?assert(filelib:is_regular(Origin)),
         ?assertMatch(ok, couch_server:delete(DbName, [])),

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couchdb_compaction_daemon_tests.erl
----------------------------------------------------------------------
diff --git a/test/couchdb_compaction_daemon_tests.erl b/test/couchdb_compaction_daemon_tests.erl
index 20879a7..6d423d9 100644
--- a/test/couchdb_compaction_daemon_tests.erl
+++ b/test/couchdb_compaction_daemon_tests.erl
@@ -182,7 +182,7 @@ update(DbName) ->
     lists:foreach(fun(_) ->
         Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
         {ok, _} = couch_db:update_docs(Db, [Doc]),
-        query_view(Db#db.name)
+        query_view(couch_db:name(Db))
     end, lists:seq(1, 200)),
     couch_db:close(Db).
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c9fc3612/test/couchdb_views_tests.erl
----------------------------------------------------------------------
diff --git a/test/couchdb_views_tests.erl b/test/couchdb_views_tests.erl
index 7b04e85..69277e6 100644
--- a/test/couchdb_views_tests.erl
+++ b/test/couchdb_views_tests.erl
@@ -340,7 +340,7 @@ couchdb_1283() ->
         ]}),
         {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
         ok = populate_db(MDb1, 100, 100),
-        query_view(MDb1#db.name, "foo", "foo"),
+        query_view(couch_db:name(MDb1), "foo", "foo"),
         ok = couch_db:close(MDb1),
 
         {ok, Db1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
@@ -350,8 +350,8 @@ couchdb_1283() ->
         {ok, Db3} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
         ok = couch_db:close(Db3),
 
-        Writer1 = spawn_writer(Db1#db.name),
-        Writer2 = spawn_writer(Db2#db.name),
+        Writer1 = spawn_writer(couch_db:name(Db1)),
+        Writer2 = spawn_writer(couch_db:name(Db2)),
 
         ?assert(is_process_alive(Writer1)),
         ?assert(is_process_alive(Writer2)),
@@ -361,16 +361,16 @@ couchdb_1283() ->
 
         %% Below we do exactly what couch_mrview:compact does internally,
         %% because we need access to the compaction Pid, not a Ref.
-        %% {ok, MonRef} = couch_mrview:compact(MDb1#db.name, <<"_design/foo">>,
+        %% {ok, MonRef} = couch_mrview:compact(MDb1, <<"_design/foo">>,
         %%                                     [monitor]),
         {ok, Pid} = couch_index_server:get_index(
-            couch_mrview_index, MDb1#db.name, <<"_design/foo">>),
+            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
         {ok, CPid} = gen_server:call(Pid, compact),
         %% By suspending the compaction process we ensure that compaction
         %% can't finish too early and make the get_writer_status assertion fail.
         erlang:suspend_process(CPid),
         MonRef = erlang:monitor(process, CPid),
-        Writer3 = spawn_writer(Db3#db.name),
+        Writer3 = spawn_writer(couch_db:name(Db3)),
         ?assert(is_process_alive(Writer3)),
         ?assertEqual({error, all_dbs_active}, get_writer_status(Writer3)),
 
@@ -528,7 +528,8 @@ view_cleanup(DbName) ->
 
 count_users(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
-    {monitored_by, Monitors} = erlang:process_info(Db#db.main_pid, monitored_by),
+    DbPid = couch_db:get_pid(Db),
+    {monitored_by, Monitors} = erlang:process_info(DbPid, monitored_by),
     CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined],
     ok = couch_db:close(Db),
     length(lists:usort(Monitors) -- [self() | CouchFiles]).
@@ -554,7 +555,8 @@ restore_backup_db_file(DbName) ->
 
     {ok, Db} = couch_db:open_int(DbName, []),
     ok = couch_db:close(Db),
-    exit(Db#db.main_pid, shutdown),
+    DbPid = couch_db:get_pid(Db),
+    exit(DbPid, shutdown),
 
     DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
     ok = file:delete(DbFile),
@@ -575,7 +577,8 @@ wait_db_compact_done(_DbName, 0) ->
 wait_db_compact_done(DbName, N) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     ok = couch_db:close(Db),
-    case is_pid(Db#db.compactor_pid) of
+    CompactorPid = couch_db:get_compactor_pid(Db),
+    case is_pid(CompactorPid) of
     false ->
         ok;
     true ->
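
The test changes above all follow the same pattern: fields that used to be
read by matching the #db record directly are now read through the couch_db
accessor functions (couch_db:name/1, couch_db:get_filepath/1,
couch_db:get_pid/1, couch_db:get_compactor_pid/1). A minimal sketch of that
pattern, assuming DbName names an existing database; inspect_db is an
illustrative helper and not part of the patch:

    %% Illustrative only: read db metadata through the public couch_db API
    %% rather than the #db record fields used previously.
    inspect_db(DbName) ->
        {ok, Db} = couch_db:open_int(DbName, []),
        Name = couch_db:name(Db),
        FilePath = couch_db:get_filepath(Db),
        DbPid = couch_db:get_pid(Db),
        CompactorPid = couch_db:get_compactor_pid(Db),
        ok = couch_db:close(Db),
        {Name, FilePath, DbPid, CompactorPid}.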


[2/4] couch commit: updated refs/heads/COUCHDB-3288-remove-public-db-record to c9fc361

Posted by da...@apache.org.
Move calculate_start_seq and owner_of

These functions were originally implemented in fabric_rpc.erl, where they
didn't really belong. Moving them to couch_db.erl lets us keep their unit
tests intact instead of deleting them now that the #db record is being made
private.

COUCHDB-3288
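
Since calculate_start_seq/3 and owner_of/2 are now exported from couch_db,
callers such as fabric_rpc can resolve a client-supplied since-sequence
through the public API instead of reaching into the #db record. A minimal
sketch of that usage, assuming Db is an open database handle and ClientSeq
is either an integer or a {Seq, Uuid} vector; start_seq_for is a
hypothetical helper and the fabric_rpc call sites are not part of this
commit:

    %% Illustrative caller: rewind a client-supplied update sequence to a
    %% safe starting point on the local node via the public couch_db API.
    start_seq_for(Db, ClientSeq) ->
        StartSeq = couch_db:calculate_start_seq(Db, node(), ClientSeq),
        %% owner_of/2 accepts either a db handle or an epochs list; with the
        %% epochs [{bar, 100}, {foo, 1}] from is_owner_test/0, sequence 50
        %% is owned by foo and sequence 150 by bar.
        Owner = couch_db:owner_of(Db, StartSeq),
        {StartSeq, Owner}.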


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/3f8a0d0d
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/3f8a0d0d
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/3f8a0d0d

Branch: refs/heads/COUCHDB-3288-remove-public-db-record
Commit: 3f8a0d0d94dcaff170b7d81429c5ccb58cd055a2
Parents: 09aa667
Author: Paul J. Davis <pa...@gmail.com>
Authored: Fri Feb 3 09:59:23 2017 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Fri Feb 3 10:11:13 2017 -0600

----------------------------------------------------------------------
 src/couch_db.erl | 103 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/3f8a0d0d/src/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_db.erl b/src/couch_db.erl
index 2adcd33..06a4e3a 100644
--- a/src/couch_db.erl
+++ b/src/couch_db.erl
@@ -83,6 +83,9 @@
     changes_since/5,
     count_changes_since/2,
 
+    calculate_start_seq/3,
+    owner_of/2,
+
     start_compact/1,
     cancel_compact/1,
     wait_for_compaction/1,
@@ -386,7 +389,9 @@ get_uuid(#db{}=Db) ->
     couch_db_header:uuid(Db#db.header).
 
 get_epochs(#db{}=Db) ->
-    couch_db_header:epochs(Db#db.header).
+    Epochs = couch_db_header:epochs(Db#db.header),
+    validate_epochs(Epochs),
+    Epochs.
 
 get_compacted_seq(#db{}=Db) ->
     couch_db_header:compacted_seq(Db#db.header).
@@ -1360,6 +1365,78 @@ enum_docs(Db, NS, InFun, InAcc, Options0) ->
         Db#db.id_tree, FoldFun, InAcc, Options),
     {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
 
+
+calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
+    Seq;
+calculate_start_seq(Db, Node, {Seq, Uuid}) ->
+    % Treat the current node as the epoch node
+    calculate_start_seq(Db, Node, {Seq, Uuid, Node});
+calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
+    case is_prefix(Uuid, get_uuid(Db)) of
+        true ->
+            case is_owner(EpochNode, Seq, get_epochs(Db)) of
+                true -> Seq;
+                false -> 0
+            end;
+        false ->
+            %% The file was rebuilt, most likely in a different
+            %% order, so rewind.
+            0
+    end;
+calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
+    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
+        true ->
+            start_seq(get_epochs(Db), OriginalNode, Seq);
+        false ->
+            {replace, OriginalNode, Uuid, Seq}
+    end.
+
+
+validate_epochs(Epochs) ->
+    %% Assert uniqueness.
+    case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
+        true  -> ok;
+        false -> erlang:error(duplicate_epoch)
+    end,
+    %% Assert order.
+    case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
+        true  -> ok;
+        false -> erlang:error(epoch_order)
+    end.
+
+
+is_prefix(Pattern, Subject) ->
+     binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
+
+
+is_owner(Node, Seq, Epochs) ->
+    Node =:= owner_of(Epochs, Seq).
+
+
+owner_of(Db, Seq) when not is_list(Db) ->
+    owner_of(get_epochs(Db), Seq);
+owner_of([], _Seq) ->
+    undefined;
+owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
+    EpochNode;
+owner_of([_ | Rest], Seq) ->
+    owner_of(Rest, Seq).
+
+
+start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
+    %% OrigNode is the owner of the Seq so we can safely stream from there
+    Seq;
+start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
+    %% We transferred this file before Seq was written on OrigNode, so we need
+    %% to stream from the beginning of the next epoch. Note that it is _not_
+    %% necessary for the current node to own the epoch beginning at NewSeq
+    NewSeq;
+start_seq([_ | Rest], OrigNode, Seq) ->
+    start_seq(Rest, OrigNode, Seq);
+start_seq([], OrigNode, Seq) ->
+    erlang:error({epoch_mismatch, OrigNode, Seq}).
+
+
 extract_namespace(Options0) ->
     case proplists:split(Options0, [namespace]) of
         {[[{namespace, NS}]], Options} ->
@@ -1698,6 +1775,30 @@ should_fail_validate_dbname(DbName) ->
         ok
     end)}.
 
+calculate_start_seq_test() ->
+    %% uuid mismatch is always a rewind.
+    Hdr1 = couch_db_header:new(),
+    Hdr2 = couch_db_header:set(Hdr1, [{epochs, [{node1, 1}]}, {uuid, <<"uuid1">>}]),
+    ?assertEqual(0, calculate_start_seq(#db{header=Hdr2}, node1, {1, <<"uuid2">>})),
+    %% uuid matches and seq is owned by node.
+    Hdr3 = couch_db_header:set(Hdr2, [{epochs, [{node1, 1}]}]),
+    ?assertEqual(2, calculate_start_seq(#db{header=Hdr3}, node1, {2, <<"uuid1">>})),
+    %% uuids match but seq is not owned by node.
+    Hdr4 = couch_db_header:set(Hdr2, [{epochs, [{node2, 2}, {node1, 1}]}]),
+    ?assertEqual(0, calculate_start_seq(#db{header=Hdr4}, node1, {3, <<"uuid1">>})),
+    %% return integer if we didn't get a vector.
+    ?assertEqual(4, calculate_start_seq(#db{}, foo, 4)).
+
+is_owner_test() ->
+    ?assertNot(is_owner(foo, 1, [])),
+    ?assertNot(is_owner(foo, 1, [{foo, 1}])),
+    ?assert(is_owner(foo, 2, [{foo, 1}])),
+    ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
+    ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
+    ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
+    ?assertError(duplicate_epoch, validate_epochs([{foo, 1}, {bar, 1}])),
+    ?assertError(epoch_order, validate_epochs([{foo, 100}, {bar, 200}])).
+
 to_binary(DbName) when is_list(DbName) ->
     ?l2b(DbName);
 to_binary(DbName) when is_binary(DbName) ->