Posted to commits@couchdb.apache.org by ja...@apache.org on 2018/03/26 09:03:56 UTC

[couchdb] branch master updated (45da9f3 -> e7c48b3)

This is an automated email from the ASF dual-hosted git repository.

jan pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    from 45da9f3  Validate password_scheme in user doc
     new 3d702d8  Revert "Revert "re-enable "flaky" test in quest to nail down #745""
     new e7c48b3  Improve 413 response handling

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 src/couch/src/couch_httpd.erl                      | 13 ++++++++++
 .../src/couch_replicator_httpc.erl                 | 20 ++++++++++------
 ...ch_replicator_small_max_request_size_target.erl | 28 +++++++++++-----------
 3 files changed, 40 insertions(+), 21 deletions(-)

-- 
To stop receiving notification emails like this one, please contact
jan@apache.org.

[couchdb] 02/02: Improve 413 response handling

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e7c48b3d2db46e05f651567d611fb51f8ec7db1f
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Fri Mar 23 18:03:05 2018 -0400

    Improve 413 response handling
    
    Previously, when the server decided that the client had sent too much data
    in a request, it would immediately send a 413 response and close the
    socket. In the meantime there could still be unread data on the socket,
    since the client keeps streaming. When that happens the connection is reset
    instead of going through the regular close sequence. The client,
    specifically the replicator client, detected the reset before it had a
    chance to process the 413 response. This led to a retry, since the reset
    was interpreted as a generic network error rather than a proper 413 HTTP
    error.
    
    The improvement is to flush the receive socket before and after sending
    the 413 response, then close the connection (a minimal sketch of this
    pattern follows below). This reduces the chance of the socket being closed
    with unread data, avoids a TCP reset, and gives the client a better chance
    of parsing the 413 response. It is mostly geared toward the replicator
    client but should help other clients as well.
    
    The connection is also closed on both the server and the client side after
    a 413 event. This avoids a few race conditions where it is not clear how
    much data is left on the socket after the 413 is processed. On the server
    side, the `close` response header is set and the socket is closed. On the
    client side, a flag is set so that the worker is stopped right before it
    would be released back to the pool, which closes the socket.
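
For illustration only, here is a minimal sketch of the drain-respond-drain
idea described above, written against plain gen_tcp rather than the mochiweb
request and socket APIs the actual change uses; the function name and the
hard-coded HTTP text are assumptions, not code from the commit.

    %% Illustrative sketch, assuming a passive-mode gen_tcp socket.
    %% Drain whatever the client has already streamed, send the 413 with
    %% "Connection: close", drain again, then close the socket.
    reject_too_large(Socket) ->
        drain(Socket),
        ok = gen_tcp:send(Socket,
            "HTTP/1.1 413 Request Entity Too Large\r\n"
            "Connection: close\r\n"
            "Content-Length: 0\r\n\r\n"),
        drain(Socket),
        gen_tcp:close(Socket).

    %% Read and discard any bytes already buffered on the socket; stop as
    %% soon as nothing is immediately available (timeout 0).
    drain(Socket) ->
        case gen_tcp:recv(Socket, 0, 0) of
            {ok, _Data} -> drain(Socket);
            {error, _}  -> ok
        end.
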
---
 src/couch/src/couch_httpd.erl                       | 13 +++++++++++++
 src/couch_replicator/src/couch_replicator_httpc.erl | 20 +++++++++++++-------
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1694ac8..050282a 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -1170,6 +1170,19 @@ before_response(Req0, Code0, Headers0, Args0) ->
 
 respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
     MochiReq:start_response({Code, Headers});
+respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
+    % Special handling for the 413 response. Make sure the socket is closed as
+    % we don't know how much data was read before the error was thrown. Also
+    % drain all the data in the receive buffer to avoid the connection being
+    % reset before the 413 response is parsed by the client. This is still
+    % racy; it just increases the chances of the 413 being detected correctly
+    % by the client (rather than getting a brutal TCP reset).
+    erlang:put(mochiweb_request_force_close, true),
+    Socket = MochiReq:get(socket),
+    mochiweb_socket:recv(Socket, 0, 0),
+    Result = MochiReq:Type({413, Headers, Args}),
+    mochiweb_socket:recv(Socket, 0, 0),
+    Result;
 respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
     MochiReq:Type({Code, Headers, Args}).
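
As a hypothetical way to observe the effect of the clause above (the URL and
body below are made up, not part of the commit): with the server draining the
socket around the 413, a replicator-style ibrowse call is far more likely to
see the status code instead of a transport-level error.

    %% Hypothetical check; Url and BigBody are invented for illustration.
    %% Before this change an oversized request often surfaced as a socket
    %% error such as {error, connection_closed}; with the drain in place the
    %% client can usually read the 413 status before the socket goes away.
    Url = "http://127.0.0.1:5984/db/doc1",
    BigBody = binary:copy(<<"x">>, 10 * 1024 * 1024),
    case ibrowse:send_req(Url, [{"Content-Type", "application/json"}],
                          put, BigBody, []) of
        {ok, "413", _Headers, _Body} ->
            request_too_large;   % a proper HTTP error the caller can act on
        {error, _Reason} ->
            transport_error      % the pre-change failure mode
    end.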
 
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index 6e78751..2f865c6 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -28,7 +28,7 @@
 -define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
 -define(MAX_WAIT, 5 * 60 * 1000).
 -define(STREAM_STATUS, ibrowse_stream_status).
-
+-define(STOP_HTTP_WORKER, stop_http_worker).
 
 % This limit is for the number of messages we're willing to discard
 % from an HTTP stream in clean_mailbox/1 before killing the worker
@@ -78,10 +78,14 @@ send_req(HttpDb, Params1, Callback) ->
         throw:{retry, NewHttpDb0, NewParams0} ->
             {retry, NewHttpDb0, NewParams0}
     after
-        ok = couch_replicator_httpc_pool:release_worker(
-            HttpDb#httpdb.httpc_pool,
-            Worker
-        ),
+        Pool = HttpDb#httpdb.httpc_pool,
+        case get(?STOP_HTTP_WORKER) of
+            stop ->
+                ok = stop_and_release_worker(Pool, Worker),
+                erase(?STOP_HTTP_WORKER);
+            undefined ->
+                ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+        end,
         clean_mailbox(Response)
     end,
     % This is necessary to keep this tail-recursive. Calling
@@ -138,7 +142,7 @@ stop_and_release_worker(Pool, Worker) ->
     ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Worker).
 
 process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
-    stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
+    put(?STOP_HTTP_WORKER, stop),
     maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
 
 
@@ -147,7 +151,7 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
 %% and closes the socket, ibrowse will detect that error when it sends
 %% next request.
 process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
-    stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
+    put(?STOP_HTTP_WORKER, stop),
     maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
 
 process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
@@ -167,6 +171,7 @@ process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
             ?JSON_DECODE(Json)
         end,
         process_auth_response(HttpDb, Ok, Headers, Params),
+        if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
         Callback(Ok, Headers, EJson);
     R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
         backoff_success(HttpDb, Params),
@@ -194,6 +199,7 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
                 stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
             end,
             put(?STREAM_STATUS, {streaming, Worker}),
+            if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
             ibrowse:stream_next(ReqId),
             try
                 Ret = Callback(Ok, Headers, StreamDataFun),
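
To make the client-side change easier to follow, here is a compact,
hypothetical sketch of the deferred-stop pattern used above: while a response
is being handled the code only records the decision in the process dictionary,
and the single cleanup point in send_req's `after` clause decides whether to
stop the worker or hand it back to the pool. The mark_for_stop/0 and cleanup/2
names are invented; the macro, stop_and_release_worker/2, and the pool call
are the module's own.

    %% Sketch only; in couch_replicator_httpc the "mark" happens inside
    %% process_response/5 and the "cleanup" inside send_req/3's `after` block.
    -define(STOP_HTTP_WORKER, stop_http_worker).

    %% While handling a response: never touch the worker, just mark it.
    mark_for_stop() ->
        put(?STOP_HTTP_WORKER, stop).

    %% Exactly one cleanup point per request: stop the worker (closing its
    %% socket) if it was marked, otherwise release it back to the pool.
    cleanup(Pool, Worker) ->
        case erase(?STOP_HTTP_WORKER) of
            stop      -> ok = stop_and_release_worker(Pool, Worker);
            undefined -> ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
        end.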


[couchdb] 01/02: Revert "Revert "re-enable "flaky" test in quest to nail down #745""

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 3d702d8e85068d65272dc10b4fa7015bf66fedf5
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Tue Mar 13 15:18:41 2018 -0400

    Revert "Revert "re-enable "flaky" test in quest to nail down #745""
    
    This reverts commit ba624ea7c26984b8ea52ec5e20534dc5f8508d3a.
---
 ...ch_replicator_small_max_request_size_target.erl | 28 +++++++++++-----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
index 6f3308c..af3a285 100644
--- a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
@@ -61,8 +61,8 @@ reduce_max_request_size_test_() ->
             % attachment which exceed maximum request size are simply
             % closed instead of returning a 413 request. That makes these
             % tests flaky.
-            % ++ [{Pair, fun should_replicate_one_with_attachment/2}
-            %  || Pair <- Pairs]
+            ++ [{Pair, fun should_replicate_one_with_attachment/2}
+             || Pair <- Pairs]
         }
     }.
 
@@ -90,12 +90,12 @@ should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
 % POST-ing individual documents directly and skip bulk_docs. Test that case
 % separately
 % See note in main test function why this was disabled.
-% should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
-%    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-%     {inorder, [should_populate_source_one_large_attachment(Source),
-%                should_populate_source(Source),
-%                should_replicate(Source, Target),
-%                should_compare_databases(Source, Target, [<<"doc0">>])]}}.
+should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
+   {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+    {inorder, [should_populate_source_one_large_attachment(Source),
+               should_populate_source(Source),
+               should_replicate(Source, Target),
+               should_compare_databases(Source, Target, [<<"doc0">>])]}}.
 
 
 should_populate_source({remote, Source}) ->
@@ -112,11 +112,11 @@ should_populate_source_one_large_one_small(Source) ->
     {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
 
 
-% should_populate_source_one_large_attachment({remote, Source}) ->
-%    should_populate_source_one_large_attachment(Source);
+should_populate_source_one_large_attachment({remote, Source}) ->
+   should_populate_source_one_large_attachment(Source);
 
-% should_populate_source_one_large_attachment(Source) ->
-%   {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
+should_populate_source_one_large_attachment(Source) ->
+  {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
 
 
 should_replicate({remote, Source}, Target) ->
@@ -156,8 +156,8 @@ one_large_one_small(DbName, Large, Small) ->
     add_doc(DbName, <<"doc1">>, Small, 0).
 
 
-% one_large_attachment(DbName, Size, AttSize) ->
-%    add_doc(DbName, <<"doc0">>, Size, AttSize).
+one_large_attachment(DbName, Size, AttSize) ->
+   add_doc(DbName, <<"doc0">>, Size, AttSize).
 
 
 add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
