Posted to commits@couchdb.apache.org by rn...@apache.org on 2019/10/28 13:13:05 UTC

[couchdb] branch 1523-bye-bye-5986-rnewson updated (e921863 -> 69c93c4)

This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a change to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


 discard e921863  add handle_request/1
 discard 9f4e761  extract get_httpd_handlers function
 discard f4a8c30  Move get_stats/0
 discard 8b96ef9  Remove global _system handler
 discard a77d4cb  Move _node handler to new module
     add 5a27256  Update Fauxton to 1.2.2
     add 1dd00d6  Merge pull request #2257 from apache/fauxton-1.2.2
     add 0a85b75  export get_servers_from_env/1 for ken
     add 78a7ff2  Merge pull request #2260 from apache/ken-query-servers
     add 110d968  Update ken to 1.0.5
     add a8891d4  Merge pull request #2262 from apache/ken-1.0.5
     add 1f22451  update ken to 1.0.6
     add ae29ffc  Merge pull request #2266 from apache/ken-1.0.6
     add 79cfc1d  Avoid churning replication jobs if there is enough room to run pending jobs
     add a47f0fa  Make changes feed return bad request for invalid heartbeat values
     add be2364d  Merge pull request #2270 from bessbd/changes-feed-input-validation
     add fc6cc98  Remove old clause which is no longer used
     add b99d6e0  Merge pull request #2276 from cloudant/remove-inets-client-remains
     new 62134d8  Move _node handler to new module
     new c6531fe  Remove global _system handler
     new ea440c2  Move get_stats/0
     new 8126e2f  extract get_httpd_handlers function
     new 5262f84  add handle_request/1
     new 209cb61  Add /_node//_all_dbs
     new 69c93c4  Include proxy host and port in connection pool key

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (e921863)
            \
             N -- N -- N   refs/heads/1523-bye-bye-5986-rnewson (69c93c4)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 7 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 rebar.config.script                                |  4 +-
 src/chttpd/src/chttpd.erl                          |  3 --
 src/chttpd/src/chttpd_db.erl                       |  9 +++-
 src/chttpd/src/chttpd_node.erl                     |  8 ++++
 src/couch/src/couch_httpd.erl                      |  3 --
 src/couch/src/couch_proc_manager.erl               |  3 +-
 .../src/couch_replicator_connection.erl            | 48 ++++++++++++++--------
 .../src/couch_replicator_httpc.erl                 |  8 +---
 .../src/couch_replicator_httpc_pool.erl            | 16 +++++---
 .../src/couch_replicator_scheduler.erl             | 36 +++++++++++++++-
 test/elixir/test/changes_test.exs                  | 43 +++++++++++++++++++
 test/javascript/tests/design_docs.js               | 32 +--------------
 test/javascript/tests/view_update_seq.js           |  2 +-
 13 files changed, 142 insertions(+), 73 deletions(-)
 create mode 100644 test/elixir/test/changes_test.exs


[couchdb] 02/07: Remove global _system handler

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c6531fecdc649bd28d8b5e02a63c121c1d8c2b89
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Fri Oct 11 18:04:39 2019 +0100

    Remove global _system handler
---
 src/chttpd/src/chttpd_misc.erl | 8 --------
 src/couch/src/couch.app.src    | 3 +--
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index dd24712..a75f108 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -19,7 +19,6 @@
     handle_favicon_req/2,
     handle_replicate_req/1,
     handle_reload_query_servers_req/1,
-    handle_system_req/1,
     handle_task_status_req/1,
     handle_up_req/1,
     handle_utils_dir_req/1,
@@ -274,13 +273,6 @@ handle_uuids_req(Req) ->
     couch_httpd_misc_handlers:handle_uuids_req(Req).
 
 
-% Note: this resource is exposed on the backdoor interface, but it's in chttpd
-% because it's not couch trunk
-handle_system_req(Req) ->
-    Stats = get_stats(),
-    EJSON = couch_stats_httpd:to_ejson(Stats),
-    send_json(Req, EJSON).
-
 get_stats() ->
     Other = erlang:memory(system) - lists:sum([X || {_,X} <-
         erlang:memory([atom, code, binary, ets])]),
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 706b439..2b642c0 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -60,8 +60,7 @@
             {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
             {"_stats", "{couch_stats_httpd, handle_stats_req}"},
             {"_session", "{couch_httpd_auth, handle_session_req}"},
-            {"_plugins", "{couch_plugins_httpd, handle_req}"},
-            {"_system", "{chttpd_misc, handle_system_req}"}
+            {"_plugins", "{couch_plugins_httpd, handle_req}"}
         ]},
           { httpd_db_handlers, [
             {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},


[couchdb] 07/07: Include proxy host and port in connection pool key

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 69c93c448bbf48031077d8d040f0eb60945480d9
Author: Robert Newson <rn...@apache.org>
AuthorDate: Thu Oct 24 21:30:02 2019 +0100

    Include proxy host and port in connection pool key
---
 .../src/couch_replicator_connection.erl            | 48 ++++++++++++++--------
 .../src/couch_replicator_httpc.erl                 |  8 +---
 .../src/couch_replicator_httpc_pool.erl            | 16 +++++---
 3 files changed, 43 insertions(+), 29 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index f3e4a86..cee6fb9 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -29,7 +29,7 @@
 ]).
 
 -export([
-   acquire/1,
+   acquire/2,
    release/1
 ]).
 
@@ -53,6 +53,8 @@
     worker,
     host,
     port,
+    proxy_host,
+    proxy_port,
     mref
 }).
 
@@ -73,18 +75,22 @@ init([]) ->
     {ok, #state{close_interval=Interval, timer=Timer}}.
 
 
-acquire(URL) when is_binary(URL) ->
-    acquire(binary_to_list(URL));
+acquire(Url, ProxyUrl) when is_binary(Url) ->
+    acquire(binary_to_list(Url), ProxyUrl);
 
-acquire(URL0) ->
-    URL = couch_util:url_strip_password(URL0),
-    case gen_server:call(?MODULE, {acquire, URL}) of
+acquire(Url, ProxyUrl) when is_binary(ProxyUrl) ->
+    acquire(Url, binary_to_list(ProxyUrl));
+
+acquire(Url0, ProxyUrl0) ->
+    Url = couch_util:url_strip_password(Url0),
+    ProxyUrl = couch_util:url_strip_password(ProxyUrl0),
+    case gen_server:call(?MODULE, {acquire, Url, ProxyUrl}) of
         {ok, Worker} ->
             link(Worker),
             {ok, Worker};
         {error, all_allocated} ->
-            {ok, Pid} = ibrowse:spawn_link_worker_process(URL),
-            ok = gen_server:call(?MODULE, {create, URL, Pid}),
+            {ok, Pid} = ibrowse:spawn_link_worker_process(Url),
+            ok = gen_server:call(?MODULE, {create, Url, ProxyUrl, Pid}),
             {ok, Pid};
         {error, Reason} ->
             {error, Reason}
@@ -96,11 +102,14 @@ release(Worker) ->
     gen_server:cast(?MODULE, {release, Worker}).
 
 
-handle_call({acquire, URL}, From, State) ->
+handle_call({acquire, Url, ProxyUrl}, From, State) ->
     {Pid, _Ref} = From,
-    case ibrowse_lib:parse_url(URL) of
-        #url{host=Host, port=Port} ->
-            Pat = #connection{host=Host, port=Port, mref=undefined, _='_'},
+    case {ibrowse_lib:parse_url(Url), ibrowse_lib:parse_url(ProxyUrl)} of
+        {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
+            Pat = #connection{
+	        host=Host, port=Port,
+		proxy_host=ProxyHost, proxy_port=ProxyPort,
+		mref=undefined, _='_'},
             case ets:match_object(?MODULE, Pat, 1) of
                 '$end_of_table' ->
                     {reply, {error, all_allocated}, State};
@@ -111,20 +120,25 @@ handle_call({acquire, URL}, From, State) ->
                         Pid)}),
                     {reply, {ok, Worker#connection.worker}, State}
             end;
-        {error, invalid_uri} ->
+        {{error, invalid_uri}, _} ->
+            {reply, {error, invalid_uri}, State};
+        {_, {error, invalid_uri}} ->
             {reply, {error, invalid_uri}, State}
     end;
 
-handle_call({create, URL, Worker}, From, State) ->
+handle_call({create, Url, ProxyUrl, Worker}, From, State) ->
     {Pid, _Ref} = From,
-    case ibrowse_lib:parse_url(URL) of
-        #url{host=Host, port=Port} ->
+    case {ibrowse_lib:parse_url(Url), ibrowse_lib:parse_url(ProxyUrl)} of
+        {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
             link(Worker),
             couch_stats:increment_counter([couch_replicator, connection,
                 creates]),
             true = ets:insert_new(
                 ?MODULE,
-                #connection{host=Host, port=Port, worker=Worker,
+                #connection{
+		    host=Host, port=Port,
+		    proxy_host=ProxyHost, proxy_port=ProxyPort,
+		    worker=Worker,
                     mref=monitor(process, Pid)}
             ),
             {reply, ok, State}
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index e4cf116..4dce319 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -45,13 +45,9 @@ setup(Db) ->
         httpc_pool = nil,
         url = Url,
         http_connections = MaxConns,
-        proxy_url = ProxyURL
+        proxy_url = ProxyUrl
     } = Db,
-    HttpcURL = case ProxyURL of
-        undefined -> Url;
-        _ when is_list(ProxyURL) -> ProxyURL
-    end,
-    {ok, Pid} = couch_replicator_httpc_pool:start_link(HttpcURL,
+    {ok, Pid} = couch_replicator_httpc_pool:start_link(Url, ProxyUrl,
         [{max_connections, MaxConns}]),
     case couch_replicator_auth:initialize(Db#httpdb{httpc_pool = Pid}) of
         {ok, Db1} ->
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
index 33fb61f..377be50 100644
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
@@ -15,7 +15,7 @@
 -vsn(1).
 
 % public API
--export([start_link/2, stop/1]).
+-export([start_link/3, stop/1]).
 -export([get_worker/1, release_worker/2, release_worker_sync/2]).
 
 % gen_server API
@@ -30,6 +30,7 @@
 
 -record(state, {
     url,
+    proxy_url,
     limit,                  % max # of workers allowed
     workers = [],
     waiting = queue:new(),  % blocked clients waiting for a worker
@@ -37,8 +38,8 @@
 }).
 
 
-start_link(Url, Options) ->
-    gen_server:start_link(?MODULE, {Url, Options}, []).
+start_link(Url, ProxyUrl, Options) ->
+    gen_server:start_link(?MODULE, {Url, ProxyUrl, Options}, []).
 
 stop(Pool) ->
     ok = gen_server:call(Pool, stop, infinity).
@@ -54,10 +55,11 @@ release_worker(Pool, Worker) ->
 release_worker_sync(Pool, Worker) ->
     ok = gen_server:call(Pool, {release_worker_sync, Worker}).
 
-init({Url, Options}) ->
+init({Url, ProxyUrl, Options}) ->
     process_flag(trap_exit, true),
     State = #state{
         url = Url,
+	proxy_url = ProxyUrl,
         limit = get_value(max_connections, Options)
     },
     {ok, State}.
@@ -68,6 +70,7 @@ handle_call(get_worker, From, State) ->
         waiting = Waiting,
         callers = Callers,
         url = Url,
+	proxy_url = ProxyUrl,
         limit = Limit,
         workers = Workers
     } = State,
@@ -77,7 +80,7 @@ handle_call(get_worker, From, State) ->
     false ->
         % If the call to acquire fails, the worker pool will crash with a
         % badmatch.
-        {ok, Worker} = couch_replicator_connection:acquire(Url),
+        {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
         NewState = State#state{
             workers = [Worker | Workers],
             callers = monitor_client(Callers, Worker, From)
@@ -97,6 +100,7 @@ handle_cast({release_worker, Worker}, State) ->
 handle_info({'EXIT', Pid, _Reason}, State) ->
     #state{
         url = Url,
+	proxy_url = ProxyUrl,
         workers = Workers,
         waiting = Waiting,
         callers = Callers
@@ -111,7 +115,7 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
                     {noreply, State#state{workers = Workers2,
                         callers = NewCallers0}};
                 {{value, From}, Waiting2} ->
-                    {ok, Worker} = couch_replicator_connection:acquire(Url),
+                    {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
                     NewCallers1 = monitor_client(NewCallers0, Worker, From),
                     gen_server:reply(From, {ok, Worker}),
                     NewState = State#state{
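
For illustration, a minimal sketch (not part of the commit; the URLs are
invented) of the effect of the wider pool key: two jobs that replicate to
the same endpoint through different proxies no longer share ibrowse
workers, because the ets match pattern now includes the proxy host and
port.

    {ok, W1} = couch_replicator_connection:acquire(
        <<"https://db.example.com/db">>, <<"http://proxy-a.example.com:8080">>),
    {ok, W2} = couch_replicator_connection:acquire(
        <<"https://db.example.com/db">>, <<"http://proxy-b.example.com:8080">>),
    %% W1 and W2 come from distinct #connection entries: proxy_host and
    %% proxy_port differ even though host and port are the same.
    couch_replicator_connection:release(W1),
    couch_replicator_connection:release(W2).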


[couchdb] 05/07: add handle_request/1

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5262f84f9e49774201394925f42d54c0964e7f4e
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Mon Oct 14 16:44:54 2019 +0100

    add handle_request/1
---
 src/couch/src/couch_httpd.erl | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1085a5b..f71ff8d 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -37,6 +37,7 @@
 -export([validate_host/1]).
 -export([validate_bind_address/1]).
 -export([check_max_request_length/1]).
+-export([handle_request/1]).
 
 
 -define(HANDLER_NAME_IN_MODULE_POS, 6).
@@ -219,6 +220,11 @@ make_arity_3_fun(SpecStr) ->
 make_fun_spec_strs(SpecStr) ->
     re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
 
+handle_request(MochiReq) ->
+    DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+    EmptyDict = dict:new(),
+    handle_request(MochiReq, DefaultFun, EmptyDict, EmptyDict, EmptyDict).
+
 handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
     DesignUrlHandlers) ->
     %% reset rewrite count for new request
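
For illustration, a minimal sketch (not part of the commit; the listener
name and port are invented, and real startup still goes through
couch_httpd:start_link/2) of what the new arity-1 export allows: wiring a
bare MochiWeb listener straight to the default couch_httpd_db handler,
with no per-prefix handler tables configured.

    Loop = fun(MochiReq) -> couch_httpd:handle_request(MochiReq) end,
    {ok, _Pid} = mochiweb_http:start([{name, backdoor_http},
                                      {port, 15986},
                                      {loop, Loop}]).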


[couchdb] 01/07: Move _node handler to new module

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 62134d87353cd6042468054a9ea4074d591b98dd
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Fri Oct 11 16:40:08 2019 +0100

    Move _node handler to new module
---
 src/chttpd/src/chttpd_httpd_handlers.erl |   2 +-
 src/chttpd/src/chttpd_misc.erl           | 127 --------------------------
 src/chttpd/src/chttpd_node.erl           | 149 +++++++++++++++++++++++++++++++
 3 files changed, 150 insertions(+), 128 deletions(-)

diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 000f29b..5e86ea8 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -21,7 +21,7 @@ url_handler(<<"_all_dbs">>)        -> fun chttpd_misc:handle_all_dbs_req/1;
 url_handler(<<"_dbs_info">>)       -> fun chttpd_misc:handle_dbs_info_req/1;
 url_handler(<<"_active_tasks">>)   -> fun chttpd_misc:handle_task_status_req/1;
 url_handler(<<"_scheduler">>)      -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>)           -> fun chttpd_misc:handle_node_req/1;
+url_handler(<<"_node">>)           -> fun chttpd_node:handle_node_req/1;
 url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
 url_handler(<<"_replicate">>)      -> fun chttpd_misc:handle_replicate_req/1;
 url_handler(<<"_uuids">>)          -> fun chttpd_misc:handle_uuids_req/1;
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 17122bf..dd24712 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -15,7 +15,6 @@
 -export([
     handle_all_dbs_req/1,
     handle_dbs_info_req/1,
-    handle_node_req/1,
     handle_favicon_req/1,
     handle_favicon_req/2,
     handle_replicate_req/1,
@@ -275,132 +274,6 @@ handle_uuids_req(Req) ->
     couch_httpd_misc_handlers:handle_uuids_req(Req).
 
 
-% Node-specific request handler (_config and _stats)
-% Support _local meaning this node
-handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
-    send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
-    handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
-% GET /_node/$node/_config
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
-    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
-        case dict:is_key(Section, Acc) of
-        true ->
-            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
-        false ->
-            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
-        end
-    end, dict:new(), call_node(Node, config, all, [])),
-    KVs = dict:fold(fun(Section, Values, Acc) ->
-        [{list_to_binary(Section), {Values}} | Acc]
-    end, [], Grouped),
-    send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
-    send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
-    KVs = [{list_to_binary(Key), list_to_binary(Value)}
-            || {Key, Value} <- call_node(Node, config, get, [Section])],
-    send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
-    send_method_not_allowed(Req, "GET");
-% PUT /_node/$node/_config/Section/Key
-% "value"
-handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
-    couch_util:check_config_blacklist(Section),
-    Value = couch_util:trim(chttpd:json_body(Req)),
-    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
-    OldValue = call_node(Node, config, get, [Section, Key, ""]),
-    case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
-        ok ->
-            send_json(Req, 200, list_to_binary(OldValue));
-        {error, Reason} ->
-            chttpd:send_error(Req, {bad_request, Reason})
-    end;
-% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
-    case call_node(Node, config, get, [Section, Key, undefined]) of
-    undefined ->
-        throw({not_found, unknown_config_value});
-    Value ->
-        send_json(Req, 200, list_to_binary(Value))
-    end;
-% DELETE /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
-    couch_util:check_config_blacklist(Section),
-    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
-    case call_node(Node, config, get, [Section, Key, undefined]) of
-    undefined ->
-        throw({not_found, unknown_config_value});
-    OldValue ->
-        case call_node(Node, config, delete, [Section, Key, Persist]) of
-            ok ->
-                send_json(Req, 200, list_to_binary(OldValue));
-            {error, Reason} ->
-                chttpd:send_error(Req, {bad_request, Reason})
-        end
-    end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
-    send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
-    chttpd:send_error(Req, not_found);
-% GET /_node/$node/_stats
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
-    flush(Node, Req),
-    Stats0 = call_node(Node, couch_stats, fetch, []),
-    Stats = couch_stats_httpd:transform_stats(Stats0),
-    Nested = couch_stats_httpd:nest(Stats),
-    EJSON0 = couch_stats_httpd:to_ejson(Nested),
-    EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
-    chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
-    send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_system
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
-    Stats = call_node(Node, chttpd_misc, get_stats, []),
-    EJSON = couch_stats_httpd:to_ejson(Stats),
-    send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
-    send_method_not_allowed(Req, "GET");
-% POST /_node/$node/_restart
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
-    call_node(Node, init, restart, []),
-    send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
-    send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_]}=Req) ->
-    chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
-    chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(Req) ->
-    chttpd:send_error(Req, not_found).
-
-
-call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
-    Node1 = try
-                list_to_existing_atom(?b2l(Node0))
-            catch
-                error:badarg ->
-                    throw({not_found, <<"no such node: ", Node0/binary>>})
-            end,
-    call_node(Node1, Mod, Fun, Args);
-call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
-    case rpc:call(Node, Mod, Fun, Args) of
-        {badrpc, nodedown} ->
-            Reason = ?l2b(io_lib:format("~s is down", [Node])),
-            throw({error, {nodedown, Reason}});
-        Else ->
-            Else
-    end.
-
-flush(Node, Req) ->
-    case couch_util:get_value("flush", chttpd:qs(Req)) of
-        "true" ->
-            call_node(Node, couch_stats_aggregator, flush, []);
-        _Else ->
-            ok
-    end.
-
 % Note: this resource is exposed on the backdoor interface, but it's in chttpd
 % because it's not couch trunk
 handle_system_req(Req) ->
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
new file mode 100644
index 0000000..0f0b7e9
--- /dev/null
+++ b/src/chttpd/src/chttpd_node.erl
@@ -0,0 +1,149 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_node).
+
+-export([
+    handle_node_req/1
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-import(chttpd,
+    [send_json/2,send_json/3,send_method_not_allowed/2,
+    send_chunk/2,start_chunked_response/3]).
+
+% Node-specific request handler (_config and _stats)
+% Support _local meaning this node
+handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
+    send_json(Req, 200, {[{name, node()}]});
+handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
+    handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
+% GET /_node/$node/_config
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
+    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+        case dict:is_key(Section, Acc) of
+        true ->
+            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+        false ->
+            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+        end
+    end, dict:new(), call_node(Node, config, all, [])),
+    KVs = dict:fold(fun(Section, Values, Acc) ->
+        [{list_to_binary(Section), {Values}} | Acc]
+    end, [], Grouped),
+    send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
+    send_method_not_allowed(Req, "GET");
+% GET /_node/$node/_config/Section
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
+    KVs = [{list_to_binary(Key), list_to_binary(Value)}
+            || {Key, Value} <- call_node(Node, config, get, [Section])],
+    send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
+    send_method_not_allowed(Req, "GET");
+% PUT /_node/$node/_config/Section/Key
+% "value"
+handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+    couch_util:check_config_blacklist(Section),
+    Value = couch_util:trim(chttpd:json_body(Req)),
+    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+    OldValue = call_node(Node, config, get, [Section, Key, ""]),
+    case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+        ok ->
+            send_json(Req, 200, list_to_binary(OldValue));
+        {error, Reason} ->
+            chttpd:send_error(Req, {bad_request, Reason})
+    end;
+% GET /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+    case call_node(Node, config, get, [Section, Key, undefined]) of
+    undefined ->
+        throw({not_found, unknown_config_value});
+    Value ->
+        send_json(Req, 200, list_to_binary(Value))
+    end;
+% DELETE /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+    couch_util:check_config_blacklist(Section),
+    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+    case call_node(Node, config, get, [Section, Key, undefined]) of
+    undefined ->
+        throw({not_found, unknown_config_value});
+    OldValue ->
+        case call_node(Node, config, delete, [Section, Key, Persist]) of
+            ok ->
+                send_json(Req, 200, list_to_binary(OldValue));
+            {error, Reason} ->
+                chttpd:send_error(Req, {bad_request, Reason})
+        end
+    end;
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
+    send_method_not_allowed(Req, "GET,PUT,DELETE");
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
+    chttpd:send_error(Req, not_found);
+% GET /_node/$node/_stats
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
+    flush(Node, Req),
+    Stats0 = call_node(Node, couch_stats, fetch, []),
+    Stats = couch_stats_httpd:transform_stats(Stats0),
+    Nested = couch_stats_httpd:nest(Stats),
+    EJSON0 = couch_stats_httpd:to_ejson(Nested),
+    EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
+    chttpd:send_json(Req, EJSON1);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
+    send_method_not_allowed(Req, "GET");
+% GET /_node/$node/_system
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
+    Stats = call_node(Node, chttpd_misc, get_stats, []),
+    EJSON = couch_stats_httpd:to_ejson(Stats),
+    send_json(Req, EJSON);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
+    send_method_not_allowed(Req, "GET");
+% POST /_node/$node/_restart
+handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
+    call_node(Node, init, restart, []),
+    send_json(Req, 200, {[{ok, true}]});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
+    send_method_not_allowed(Req, "POST");
+handle_node_req(#httpd{path_parts=[_]}=Req) ->
+    chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
+handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
+    chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
+handle_node_req(Req) ->
+    chttpd:send_error(Req, not_found).
+
+
+call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
+    Node1 = try
+                list_to_existing_atom(?b2l(Node0))
+            catch
+                error:badarg ->
+                    throw({not_found, <<"no such node: ", Node0/binary>>})
+            end,
+    call_node(Node1, Mod, Fun, Args);
+call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
+    case rpc:call(Node, Mod, Fun, Args) of
+        {badrpc, nodedown} ->
+            Reason = ?l2b(io_lib:format("~s is down", [Node])),
+            throw({error, {nodedown, Reason}});
+        Else ->
+            Else
+    end.
+
+flush(Node, Req) ->
+    case couch_util:get_value("flush", chttpd:qs(Req)) of
+        "true" ->
+            call_node(Node, couch_stats_aggregator, flush, []);
+        _Else ->
+            ok
+    end.
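
For illustration, a minimal sketch (not part of the commit) of what the
move changes for dispatch: the _node prefix now resolves to the new
module, and the "_local" alias is rewritten to the local node name by the
first two handle_node_req/1 clauses before any further matching.

    %% the router clause shown above now yields the relocated handler
    Fun = chttpd_httpd_handlers:url_handler(<<"_node">>),
    %% Fun =:= fun chttpd_node:handle_node_req/1, so a request such as
    %% GET /_node/_local/_stats is served as /_node/<node()>/_stats.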


[couchdb] 03/07: Move get_stats/0

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ea440c2d17bb6060ffd14bee7692ed986e9b8efb
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Fri Oct 11 18:07:10 2019 +0100

    Move get_stats/0
---
 src/chttpd/src/chttpd_misc.erl | 89 +----------------------------------------
 src/chttpd/src/chttpd_node.erl | 91 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 90 insertions(+), 90 deletions(-)

diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index a75f108..ffb5295 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -25,8 +25,7 @@
     handle_utils_dir_req/2,
     handle_uuids_req/1,
     handle_welcome_req/1,
-    handle_welcome_req/2,
-    get_stats/0
+    handle_welcome_req/2
 ]).
 
 -include_lib("couch/include/couch_db.hrl").
@@ -273,85 +272,6 @@ handle_uuids_req(Req) ->
     couch_httpd_misc_handlers:handle_uuids_req(Req).
 
 
-get_stats() ->
-    Other = erlang:memory(system) - lists:sum([X || {_,X} <-
-        erlang:memory([atom, code, binary, ets])]),
-    Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
-        processes_used, binary, code, ets])],
-    {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
-    {{input, Input}, {output, Output}} = statistics(io),
-    {CF, CDU} = db_pid_stats(),
-    MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
-    MessageQueues = MessageQueues0 ++ message_queues(registered()),
-    [
-        {uptime, couch_app:uptime() div 1000},
-        {memory, {Memory}},
-        {run_queue, statistics(run_queue)},
-        {ets_table_count, length(ets:all())},
-        {context_switches, element(1, statistics(context_switches))},
-        {reductions, element(1, statistics(reductions))},
-        {garbage_collection_count, NumberOfGCs},
-        {words_reclaimed, WordsReclaimed},
-        {io_input, Input},
-        {io_output, Output},
-        {os_proc_count, couch_proc_manager:get_proc_count()},
-        {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
-        {process_count, erlang:system_info(process_count)},
-        {process_limit, erlang:system_info(process_limit)},
-        {message_queues, {MessageQueues}},
-        {internal_replication_jobs, mem3_sync:get_backlog()},
-        {distribution, {get_distribution_stats()}}
-    ].
-
-db_pid_stats() ->
-    {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
-    Candidates = [Pid || {process, Pid} <- M],
-    CouchFiles = db_pid_stats(couch_file, Candidates),
-    CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
-    {CouchFiles, CouchDbUpdaters}.
-
-db_pid_stats(Mod, Candidates) ->
-    Mailboxes = lists:foldl(
-        fun(Pid, Acc) ->
-            case process_info(Pid, [message_queue_len, dictionary]) of
-                undefined ->
-                    Acc;
-                PI ->
-                    Dictionary = proplists:get_value(dictionary, PI, []),
-                    case proplists:get_value('$initial_call', Dictionary) of
-                        {Mod, init, 1} ->
-                            case proplists:get_value(message_queue_len, PI) of
-                                undefined -> Acc;
-                                Len -> [Len|Acc]
-                            end;
-                        _  ->
-                            Acc
-                    end
-            end
-        end, [], Candidates
-    ),
-    format_pid_stats(Mailboxes).
-
-format_pid_stats([]) ->
-    [];
-format_pid_stats(Mailboxes) ->
-    Sorted = lists:sort(Mailboxes),
-    Count = length(Sorted),
-    [
-        {count, Count},
-        {min, hd(Sorted)},
-        {max, lists:nth(Count, Sorted)},
-        {'50', lists:nth(round(Count * 0.5), Sorted)},
-        {'90', lists:nth(round(Count * 0.9), Sorted)},
-        {'99', lists:nth(round(Count * 0.99), Sorted)}
-    ].
-
-get_distribution_stats() ->
-    lists:map(fun({Node, Socket}) ->
-        {ok, Stats} = inet:getstat(Socket),
-        {Node, {Stats}}
-    end, erlang:system_info(dist_ctrl)).
-
 handle_up_req(#httpd{method='GET'} = Req) ->
     case config:get("couchdb", "maintenance_mode") of
     "true" ->
@@ -371,13 +291,6 @@ handle_up_req(#httpd{method='GET'} = Req) ->
 handle_up_req(Req) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
-message_queues(Registered) ->
-    lists:map(fun(Name) ->
-        Type = message_queue_len,
-        {Type, Length} = process_info(whereis(Name), Type),
-        {Name, Length}
-    end, Registered).
-
 get_docroot() ->
     % if the env var isn’t set, let’s not throw an error, but
     % assume the current working dir is what we want
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 0f0b7e9..6381a4e 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -13,7 +13,8 @@
 -module(chttpd_node).
 
 -export([
-    handle_node_req/1
+    handle_node_req/1,
+    get_stats/0
 ]).
 
 -include_lib("couch/include/couch_db.hrl").
@@ -104,7 +105,7 @@ handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
     send_method_not_allowed(Req, "GET");
 % GET /_node/$node/_system
 handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
-    Stats = call_node(Node, chttpd_misc, get_stats, []),
+    Stats = call_node(Node, chttpd_node, get_stats, []),
     EJSON = couch_stats_httpd:to_ejson(Stats),
     send_json(Req, EJSON);
 handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
@@ -147,3 +148,89 @@ flush(Node, Req) ->
         _Else ->
             ok
     end.
+
+get_stats() ->
+    Other = erlang:memory(system) - lists:sum([X || {_,X} <-
+        erlang:memory([atom, code, binary, ets])]),
+    Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
+        processes_used, binary, code, ets])],
+    {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
+    {{input, Input}, {output, Output}} = statistics(io),
+    {CF, CDU} = db_pid_stats(),
+    MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
+    MessageQueues = MessageQueues0 ++ message_queues(registered()),
+    [
+        {uptime, couch_app:uptime() div 1000},
+        {memory, {Memory}},
+        {run_queue, statistics(run_queue)},
+        {ets_table_count, length(ets:all())},
+        {context_switches, element(1, statistics(context_switches))},
+        {reductions, element(1, statistics(reductions))},
+        {garbage_collection_count, NumberOfGCs},
+        {words_reclaimed, WordsReclaimed},
+        {io_input, Input},
+        {io_output, Output},
+        {os_proc_count, couch_proc_manager:get_proc_count()},
+        {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
+        {process_count, erlang:system_info(process_count)},
+        {process_limit, erlang:system_info(process_limit)},
+        {message_queues, {MessageQueues}},
+        {internal_replication_jobs, mem3_sync:get_backlog()},
+        {distribution, {get_distribution_stats()}}
+    ].
+
+db_pid_stats() ->
+    {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
+    Candidates = [Pid || {process, Pid} <- M],
+    CouchFiles = db_pid_stats(couch_file, Candidates),
+    CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
+    {CouchFiles, CouchDbUpdaters}.
+
+db_pid_stats(Mod, Candidates) ->
+    Mailboxes = lists:foldl(
+        fun(Pid, Acc) ->
+            case process_info(Pid, [message_queue_len, dictionary]) of
+                undefined ->
+                    Acc;
+                PI ->
+                    Dictionary = proplists:get_value(dictionary, PI, []),
+                    case proplists:get_value('$initial_call', Dictionary) of
+                        {Mod, init, 1} ->
+                            case proplists:get_value(message_queue_len, PI) of
+                                undefined -> Acc;
+                                Len -> [Len|Acc]
+                            end;
+                        _  ->
+                            Acc
+                    end
+            end
+        end, [], Candidates
+    ),
+    format_pid_stats(Mailboxes).
+
+format_pid_stats([]) ->
+    [];
+format_pid_stats(Mailboxes) ->
+    Sorted = lists:sort(Mailboxes),
+    Count = length(Sorted),
+    [
+        {count, Count},
+        {min, hd(Sorted)},
+        {max, lists:nth(Count, Sorted)},
+        {'50', lists:nth(round(Count * 0.5), Sorted)},
+        {'90', lists:nth(round(Count * 0.9), Sorted)},
+        {'99', lists:nth(round(Count * 0.99), Sorted)}
+    ].
+
+get_distribution_stats() ->
+    lists:map(fun({Node, Socket}) ->
+        {ok, Stats} = inet:getstat(Socket),
+        {Node, {Stats}}
+    end, erlang:system_info(dist_ctrl)).
+
+message_queues(Registered) ->
+    lists:map(fun(Name) ->
+        Type = message_queue_len,
+        {Type, Length} = process_info(whereis(Name), Type),
+        {Name, Length}
+    end, Registered).
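
For illustration, a minimal sketch (not part of the commit) of what the
relocation means for the _system endpoint: node-local stats are now
fetched from chttpd_node rather than chttpd_misc, and call_node/4 reduces
to a plain rpc:call/4 against the target node.

    Stats = rpc:call(Node, chttpd_node, get_stats, []),
    EJSON = couch_stats_httpd:to_ejson(Stats),
    chttpd:send_json(Req, EJSON).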


[couchdb] 04/07: extract get_httpd_handlers function

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 8126e2f746f7ec7946aa94986df08f770d49adc1
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Mon Oct 14 16:42:26 2019 +0100

    extract get_httpd_handlers function
---
 src/couch/src/couch_httpd.erl | 58 ++++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 28 deletions(-)

diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 10b44d1..1085a5b 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -104,38 +104,14 @@ start_link(Name, Options) ->
                       Else -> Else
                   end,
     ok = validate_bind_address(BindAddress),
-    DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
-
-    {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
-
-    UrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
-        end, HttpdGlobalHandlers),
-
-    {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
-
-    DbUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
-        end, HttpdDbHandlers),
-
-    {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
 
-    DesignUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
-        end, HttpdDesignHandlers),
-
-    UrlHandlers = dict:from_list(UrlHandlersList),
-    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
-    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
     {ok, ServerOptions} = couch_util:parse_term(
         config:get("httpd", "server_options", "[]")),
     {ok, SocketOptions} = couch_util:parse_term(
         config:get("httpd", "socket_options", "[]")),
 
     set_auth_handlers(),
+    Handlers = get_httpd_handlers(),
 
     % ensure uuid is set so that concurrent replications
     % get the same value.
@@ -148,9 +124,7 @@ start_link(Name, Options) ->
         _ ->
             ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
         end,
-        apply(?MODULE, handle_request, [
-            Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
-        ])
+        apply(?MODULE, handle_request, [Req | Handlers])
     end,
 
     % set mochiweb options
@@ -187,6 +161,34 @@ set_auth_handlers() ->
 auth_handler_name(SpecStr) ->
     lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
 
+get_httpd_handlers() ->
+    {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
+
+    UrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
+        end, HttpdGlobalHandlers),
+
+    {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
+
+    DbUrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
+        end, HttpdDbHandlers),
+
+    {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
+
+    DesignUrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
+        end, HttpdDesignHandlers),
+
+    UrlHandlers = dict:from_list(UrlHandlersList),
+    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
+    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
+    DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+    [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
+
 % SpecStr is a string like "{my_module, my_fun}"
 %  or "{my_module, my_fun, <<"my_arg">>}"
 make_arity_1_fun(SpecStr) ->
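
For illustration, a minimal sketch (not part of the commit) of the shape
of the refactor: get_httpd_handlers/0 returns the default handler fun and
the three handler dicts as a list, so the request loop simply appends it
to the request.

    Handlers = get_httpd_handlers(),
    %% Handlers = [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers]
    apply(?MODULE, handle_request, [Req | Handlers]).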


[couchdb] 06/07: Add /_node//_all_dbs

Posted by rn...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rnewson pushed a commit to branch 1523-bye-bye-5986-rnewson
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 209cb61f2d50c319315f3907f850d2caefb6dda5
Author: Joan Touzet <jo...@atypical.net>
AuthorDate: Thu Oct 24 17:03:14 2019 +0100

    Add /_node//_all_dbs
---
 src/chttpd/src/chttpd_node.erl | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 6381a4e..1715e56 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -116,6 +116,14 @@ handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req)
     send_json(Req, 200, {[{ok, true}]});
 handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
     send_method_not_allowed(Req, "POST");
+
+% GET /_node/$node/_all_dbs
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_all_dbs">>]}=Req) ->
+    {ok, DbNames} = call_node(Node, couch_server, all_databases, []),
+    send_json(Req, DbNames);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_all_dbs">>]}=Req) ->
+    send_method_not_allowed(Req, "GET");
+
 handle_node_req(#httpd{path_parts=[_]}=Req) ->
     chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
 handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
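
For illustration, a minimal sketch (not part of the commit) of what the
new clause exposes: a node-scoped listing of the databases couch_server
knows about on that node, consistent with this branch's goal of retiring
the port-5986 backdoor interface.

    %% GET /_node/_local/_all_dbs, in effect:
    {ok, DbNames} = rpc:call(node(), couch_server, all_databases, []),
    chttpd:send_json(Req, DbNames).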