Posted to commits@couchdb.apache.org by ko...@apache.org on 2020/01/06 18:14:31 UTC

[couchdb-ioq] branch basic-bypass created (now e641a74)

This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a change to branch basic-bypass
in repository https://gitbox.apache.org/repos/asf/couchdb-ioq.git.


      at e641a74  Enable users to bypass IOQ for certain IO classes

This branch includes the following new commits:

     new e641a74  Enable users to bypass IOQ for certain IO classes

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[couchdb-ioq] 01/01: Enable users to bypass IOQ for certain IO classes

Posted by ko...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

kocolosk pushed a commit to branch basic-bypass
in repository https://gitbox.apache.org/repos/asf/couchdb-ioq.git

commit e641a740978447f0b29785580e46d2e30e822001
Author: Adam Kocoloski <ko...@apache.org>
AuthorDate: Wed Dec 18 15:22:18 2019 -0500

    Enable users to bypass IOQ for certain IO classes
    
    This patch allows an administrator to configure a "bypass" which
    will cause a particular class of IO to be submitted directly to the
    file descriptor or OS process instead of going through the IO queueing
    mechanism. Installing a bypass can result in higher throughput and
    lower latency, at the cost of reduced control over the stability of
    the system.
    
    A bypass is configured via the `ioq.bypass` configuration block:
    
    [ioq.bypass]
    read = true
    write = true
    compaction = false
    
    This configuration will cause user-submitted read and write IO to be
    submitted directly, while compaction IO continues to be queued. At this
    time the following classes are available (a sketch of toggling a bypass
    at runtime follows the list):
    
    - os_process
    - read
    - write
    - view_update
    - shard_sync
    - compaction
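    
    A bypass can also be flipped at runtime from a remote shell attached to
    the node. The following is a minimal sketch, assuming the standard
    `config` application API (config:set/4 with a persist flag and
    config:get/2) that the bypass check in this patch consults:
    
    %% Enable the read bypass without persisting it to the ini files.
    ok = config:set("ioq.bypass", "read", "true", false).
    %% Confirm the value the same way the bypass check reads it.
    "true" = config:get("ioq.bypass", "read").
    %% Turn the bypass back off.
    ok = config:set("ioq.bypass", "read", "false", false).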
    
    This also expands the "compaction" queue to be a general-purpose
    "background" queue that handles IO for both compaction and internal
    replication (aka shard_sync). The other four classes are handled by the
    "interactive" queue. As before, the [ioq] ratio setting determines the
    likelihood that background IO will be selected ahead of interactive IO
    when both queues are non-empty.
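    
    Putting the two knobs together, a sketch of the relevant local.ini
    sections is shown below; the ratio value is only an illustrative
    example, not a default introduced by this patch:
    
    [ioq]
    ratio = 0.01
    
    [ioq.bypass]
    read = true
    write = true
    shard_sync = false
    compaction = false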
---
 src/ioq.erl | 49 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)

diff --git a/src/ioq.erl b/src/ioq.erl
index 9ca2656..81d94a3 100644
--- a/src/ioq.erl
+++ b/src/ioq.erl
@@ -26,7 +26,7 @@
     concurrency,
     ratio,
     interactive=queue:new(),
-    compaction=queue:new(),
+    background=queue:new(),
     running=[]
 }).
 
@@ -41,7 +41,38 @@
 start_link() ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
 
-call(Fd, Msg, Priority) ->
+call(Fd, Msg, Metadata) ->
+    Priority = io_class(Msg, Metadata),
+    case bypass(Priority) of
+        true ->
+            gen_server:call(Fd, Msg);
+        false ->
+            queued_call(Fd, Msg, Priority)
+    end.
+
+bypass(Priority) ->
+    config:get("ioq.bypass", atom_to_list(Priority)) =:= "true".
+
+io_class({prompt, _}, _) ->
+    os_process;
+io_class({data, _}, _) ->
+    os_process;
+io_class(_, {interactive, _}) ->
+    read;
+io_class(_, {db_update, _}) ->
+    write;
+io_class(_, {view_update, _, _}) ->
+    view_update;
+io_class(_, {internal_repl, _}) ->
+    shard_sync;
+io_class(_, {db_compact, _}) ->
+    compaction;
+io_class(_, {view_compact, _, _}) ->
+    compaction;
+io_class(_, _) ->
+    other.
+
+queued_call(Fd, Msg, Priority) ->
     Request = #request{fd=Fd, msg=Msg, priority=Priority, from=self()},
     try
         gen_server:call(?MODULE, Request, infinity)
@@ -107,10 +138,10 @@ code_change(_Vsn, State, _Extra) ->
 terminate(_Reason, _State) ->
     ok.
 
-enqueue_request(#request{priority={db_compact, _}}=Request, #state{}=State) ->
-    State#state{compaction=queue:in(Request, State#state.compaction)};
-enqueue_request(#request{priority={view_compact, _, _}}=Request, #state{}=State) ->
-    State#state{compaction=queue:in(Request, State#state.compaction)};
+enqueue_request(#request{priority=compaction}=Request, #state{}=State) ->
+    State#state{background=queue:in(Request, State#state.background)};
+enqueue_request(#request{priority=shard_sync}=Request, #state{}=State) ->
+    State#state{background=queue:in(Request, State#state.background)};
 enqueue_request(#request{}=Request, #state{}=State) ->
     State#state{interactive=queue:in(Request, State#state.interactive)}.
 
@@ -128,17 +159,17 @@ maybe_submit_request(State) ->
     State.
 
 make_next_request(#state{}=State) ->
-    case {queue:is_empty(State#state.compaction), queue:is_empty(State#state.interactive)} of
+    case {queue:is_empty(State#state.background), queue:is_empty(State#state.interactive)} of
         {true, true} ->
             State;
         {true, false} ->
             choose_next_request(#state.interactive, State);
         {false, true} ->
-            choose_next_request(#state.compaction, State);
+            choose_next_request(#state.background, State);
         {false, false} ->
             case couch_rand:uniform() < State#state.ratio of
                 true ->
-                    choose_next_request(#state.compaction, State);
+                    choose_next_request(#state.background, State);
                 false ->
                     choose_next_request(#state.interactive, State)
             end