Posted to commits@couchdb.apache.org by da...@apache.org on 2014/02/12 07:20:50 UTC

[01/33] Replacement of inets with ibrowse. Fixes COUCHDB-179 and enhances replication. Thanks Jason Davies and Adam Kocoloski for the fix, Maximillian Dornseif for reporting.

Updated Branches:
  refs/heads/import-master [created] 1167b0e3c
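
A minimal sketch of the kind of call the replicator now makes through ibrowse instead of inets' httpc (the URL is a placeholder; ibrowse:send_req/6 takes Url, Headers, Method, Body, Options and Timeout, exactly as used in ibrowse_test.erl further down):

    %% Assumes the ibrowse application is already running.
    Url = "http://127.0.0.1:5984/db/doc",   %% placeholder URL
    case ibrowse:send_req(Url, [], get, [], [], 10000) of
        {ok, Status, Headers, Body} ->
            io:format("HTTP ~s, ~p headers, ~p body bytes~n",
                      [Status, length(Headers), iolist_size(Body)]);
        {error, Reason} ->
            io:format("request failed: ~p~n", [Reason])
    end.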


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
new file mode 100644
index 0000000..67c5eee
--- /dev/null
+++ b/ibrowse_lib.erl
@@ -0,0 +1,399 @@
+%%% File    : ibrowse_lib.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : 
+%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%% @doc Module with a few useful functions
+
+-module(ibrowse_lib).
+-vsn('$Id: ibrowse_lib.erl,v 1.6 2008/03/27 01:35:50 chandrusf Exp $ ').
+-author('chandru').
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-include("ibrowse.hrl").
+
+-export([
+	 get_trace_status/2,
+	 do_trace/2,
+	 do_trace/3,
+	 url_encode/1,
+	 decode_rfc822_date/1,
+	 status_code/1,
+	 dec2hex/2,
+	 drv_ue/1,
+	 drv_ue/2,
+	 encode_base64/1,
+	 decode_base64/1,
+	 get_value/2,
+	 get_value/3,
+	 parse_url/1,
+	 printable_date/0
+	]).
+
+get_trace_status(Host, Port) ->
+    ibrowse:get_config_value({trace, Host, Port}, false).
+
+drv_ue(Str) ->
+    [{port, Port}| _] = ets:lookup(ibrowse_table, port),
+    drv_ue(Str, Port).
+drv_ue(Str, Port) ->
+    case erlang:port_control(Port, 1, Str) of
+	[] ->
+	    Str;
+	Res ->
+	    Res
+    end.
+
+%% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
+%% @spec url_encode(Str) -> UrlEncodedStr
+%% Str = string()
+%% UrlEncodedStr = string()
+url_encode(Str) when list(Str) ->
+    url_encode_char(lists:reverse(Str), []).
+
+url_encode_char([X | T], Acc) when X >= $0, X =< $9 ->
+    url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $a, X =< $z ->
+    url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $A, X =< $Z ->
+    url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X == $-; X == $_; X == $. ->
+    url_encode_char(T, [X | Acc]);
+url_encode_char([32 | T], Acc) ->
+    url_encode_char(T, [$+ | Acc]);
+url_encode_char([X | T], Acc) ->
+    url_encode_char(T, [$%, d2h(X bsr 4), d2h(X band 16#0f) | Acc]);
+url_encode_char([], Acc) ->
+    Acc.
+
+d2h(N) when N<10 -> N+$0;
+d2h(N) -> N+$a-10.
+
+decode_rfc822_date(String) when list(String) ->
+    case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
+	{'EXIT', _} ->
+	    {error, invalid_date};
+	Res ->
+	    Res
+    end.
+
+% TODO: Have to handle the Zone
+decode_rfc822_date_1([_,DayInt,Month,Year, Time,Zone]) ->
+    decode_rfc822_date_1([DayInt,Month,Year, Time,Zone]);
+decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
+    DayI = list_to_integer(Day),
+    MonthI = month_int(Month),
+    YearI = list_to_integer(Year),
+    TimeTup = case string:tokens(Time, ":") of
+		  [H,M] ->
+		      {list_to_integer(H),
+		       list_to_integer(M),
+		       0};
+		  [H,M,S] ->
+		      {list_to_integer(H),
+		       list_to_integer(M),
+		       list_to_integer(S)}
+	      end,
+    {{YearI,MonthI,DayI}, TimeTup}.
+
+month_int("Jan") -> 1;
+month_int("Feb") -> 2;
+month_int("Mar") -> 3;
+month_int("Apr") -> 4;
+month_int("May") -> 5;
+month_int("Jun") -> 6;
+month_int("Jul") -> 7;
+month_int("Aug") -> 8;
+month_int("Sep") -> 9;
+month_int("Oct") -> 10;
+month_int("Nov") -> 11;
+month_int("Dec") -> 12.
+
+%% @doc Given a status code, returns an atom describing the status code. 
+%% @spec status_code(StatusCode::status_code()) -> StatusDescription
+%% status_code() = string() | integer()
+%% StatusDescription = atom()
+status_code(100) -> continue;
+status_code(101) -> switching_protocols;
+status_code(102) -> processing;
+status_code(200) -> ok;
+status_code(201) -> created;
+status_code(202) -> accepted;
+status_code(203) -> non_authoritative_information;
+status_code(204) -> no_content;
+status_code(205) -> reset_content;
+status_code(206) -> partial_content;
+status_code(207) -> multi_status;
+status_code(300) -> multiple_choices;
+status_code(301) -> moved_permanently;
+status_code(302) -> found;
+status_code(303) -> see_other;
+status_code(304) -> not_modified;
+status_code(305) -> use_proxy;
+status_code(306) -> unused;
+status_code(307) -> temporary_redirect;
+status_code(400) -> bad_request;
+status_code(401) -> unauthorized;
+status_code(402) -> payment_required;
+status_code(403) -> forbidden;
+status_code(404) -> not_found;
+status_code(405) -> method_not_allowed;
+status_code(406) -> not_acceptable;
+status_code(407) -> proxy_authentication_required;
+status_code(408) -> request_timeout;
+status_code(409) -> conflict;
+status_code(410) -> gone;
+status_code(411) -> length_required;
+status_code(412) -> precondition_failed;
+status_code(413) -> request_entity_too_large;
+status_code(414) -> request_uri_too_long;
+status_code(415) -> unsupported_media_type;
+status_code(416) -> requested_range_not_satisfiable;
+status_code(417) -> expectation_failed;
+status_code(422) -> unprocessable_entity;
+status_code(423) -> locked;
+status_code(424) -> failed_dependency;
+status_code(500) -> internal_server_error;
+status_code(501) -> not_implemented;
+status_code(502) -> bad_gateway;
+status_code(503) -> service_unavailable;
+status_code(504) -> gateway_timeout;
+status_code(505) -> http_version_not_supported;
+status_code(507) -> insufficient_storage;
+status_code(X) when is_list(X) -> status_code(list_to_integer(X));
+status_code(_)   -> unknown_status_code.
+
+%% @doc dec2hex taken from gtk.erl in std dist
+%% M = integer() -- number of hex digits required
+%% N = integer() -- the number to represent as hex
+%% @spec dec2hex(M::integer(), N::integer()) -> string()
+dec2hex(M,N) -> dec2hex(M,N,[]).
+
+dec2hex(0,_N,Ack) -> Ack;
+dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
+
+%% @doc Implements the base64 encoding algorithm. The output data type matches the input data type.
+%% @spec encode_base64(In) -> Out
+%% In = string() | binary()
+%% Out = string() | binary()
+encode_base64(List) when list(List) ->
+    encode_base64_1(list_to_binary(List));
+encode_base64(Bin) when binary(Bin) ->
+    List = encode_base64_1(Bin),
+    list_to_binary(List).
+
+encode_base64_1(<<A:6, B:6, C:6, D:6, Rest/binary>>) ->
+    [int_to_b64(A), int_to_b64(B),
+     int_to_b64(C), int_to_b64(D) | encode_base64_1(Rest)];
+encode_base64_1(<<A:6, B:6, C:4>>) ->
+    [int_to_b64(A), int_to_b64(B), int_to_b64(C bsl 2), $=];
+encode_base64_1(<<A:6, B:2>>) ->
+    [int_to_b64(A), int_to_b64(B bsl 4), $=, $=];
+encode_base64_1(<<>>) ->
+    [].
+
+%% @doc Implements the base64 decoding algorithm. The output data type matches the input data type.
+%% @spec decode_base64(In) -> Out | exit({error, invalid_input})
+%% In = string() | binary()
+%% Out = string() | binary()
+decode_base64(List) when list(List) ->
+    decode_base64_1(List, []);
+decode_base64(Bin) when binary(Bin) ->
+    List = decode_base64_1(binary_to_list(Bin), []),
+    list_to_binary(List).
+
+decode_base64_1([H | T], Acc) when ((H == $\t) or
+				    (H == 32) or
+				    (H == $\r) or
+				    (H == $\n)) ->
+    decode_base64_1(T, Acc);
+
+decode_base64_1([$=, $=], Acc) ->
+    lists:reverse(Acc);
+decode_base64_1([$=, _ | _], _Acc) ->
+    exit({error, invalid_input});
+
+decode_base64_1([A1, B1, $=, $=], Acc) ->
+    A = b64_to_int(A1),
+    B = b64_to_int(B1),
+    Oct1 = (A bsl 2) bor (B bsr 4),
+    decode_base64_1([], [Oct1 | Acc]);
+decode_base64_1([A1, B1, C1, $=], Acc) ->
+    A = b64_to_int(A1),
+    B = b64_to_int(B1),
+    C = b64_to_int(C1),
+    Oct1 = (A bsl 2) bor (B bsr 4),
+    Oct2 = ((B band 16#f) bsl 6) bor (C bsr 2),
+    decode_base64_1([], [Oct2, Oct1 | Acc]);
+decode_base64_1([A1, B1, C1, D1 | T], Acc) ->
+    A = b64_to_int(A1),
+    B = b64_to_int(B1),
+    C = b64_to_int(C1),
+    D = b64_to_int(D1),
+    Oct1 = (A bsl 2) bor (B bsr 4),
+    Oct2 = ((B band 16#f) bsl 4) bor (C bsr 2),
+    Oct3 = ((C band 2#11) bsl 6) bor D,
+    decode_base64_1(T, [Oct3, Oct2, Oct1 | Acc]);
+decode_base64_1([], Acc) ->
+    lists:reverse(Acc).
+
+%% Taken from httpd_util.erl
+int_to_b64(X) when X >= 0, X =< 25 -> X + $A;
+int_to_b64(X) when X >= 26, X =< 51 -> X - 26 + $a;
+int_to_b64(X) when X >= 52, X =< 61 -> X - 52 + $0;
+int_to_b64(62) -> $+;
+int_to_b64(63) -> $/.
+
+%% Taken from httpd_util.erl
+b64_to_int(X) when X >= $A, X =< $Z -> X - $A;
+b64_to_int(X) when X >= $a, X =< $z -> X - $a + 26;
+b64_to_int(X) when X >= $0, X =< $9 -> X - $0 + 52;
+b64_to_int($+) -> 62;
+b64_to_int($/) -> 63.
+
+get_value(Tag, TVL, DefVal) ->
+    case lists:keysearch(Tag, 1, TVL) of
+	false ->
+	    DefVal;
+	{value, {_, Val}} ->
+	    Val
+    end.
+
+get_value(Tag, TVL) ->
+    {value, {_, V}} = lists:keysearch(Tag,1,TVL),
+    V.
+
+parse_url(Url) ->
+    parse_url(Url, get_protocol, #url{abspath=Url}, []).
+
+parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
+    {invalid_uri_1, Url};
+parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
+    Prot = list_to_atom(lists:reverse(TmpAcc)),
+    parse_url(T, get_username, 
+	      Url#url{protocol = Prot},
+	      []);
+parse_url([$/ | T], get_username, Url, TmpAcc) ->
+    %% No username/password. No  port number
+    Url#url{host = lists:reverse(TmpAcc),
+	    port = default_port(Url#url.protocol),
+	    path = [$/ | T]};
+parse_url([$: | T], get_username, Url, TmpAcc) ->
+    %% It is possible that no username/password has been
+    %% specified. But we'll continue with the assumption that there is
+    %% a username/password. If we encounter a '@' later on, there is a
+    %% username/password indeed. If we encounter a '/', it was
+    %% actually the hostname
+    parse_url(T, get_password, 
+	      Url#url{username = lists:reverse(TmpAcc)},
+	      []);
+parse_url([$@ | T], get_username, Url, TmpAcc) ->
+    parse_url(T, get_host, 
+	      Url#url{username = lists:reverse(TmpAcc),
+		      password = ""},
+	      []);
+parse_url([$@ | T], get_password, Url, TmpAcc) ->
+    parse_url(T, get_host, 
+	      Url#url{password = lists:reverse(TmpAcc)},
+	      []);
+parse_url([$/ | T], get_password, Url, TmpAcc) ->
+    %% Ok, what we thought was the username/password was the hostname
+    %% and portnumber
+    #url{username=User} = Url,
+    Port = list_to_integer(lists:reverse(TmpAcc)),
+    Url#url{host = User,
+	    port = Port,
+	    username = undefined,
+	    password = undefined,
+	    path = [$/ | T]};
+parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
+    parse_url(T, get_port, 
+	      Url#url{host = lists:reverse(TmpAcc)},
+	      []);
+parse_url([$/ | T], get_host, #url{protocol=Prot} = Url, TmpAcc) ->
+    Url#url{host = lists:reverse(TmpAcc),
+	    port = default_port(Prot),
+	    path = [$/ | T]};
+parse_url([$/ | T], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+    Port = case TmpAcc of
+	       [] ->
+		   default_port(Prot);
+	       _ ->
+		   list_to_integer(lists:reverse(TmpAcc))
+	   end,
+    Url#url{port = Port, path = [$/ | T]};
+parse_url([H | T], State, Url, TmpAcc) ->
+    parse_url(T, State, Url, [H | TmpAcc]);
+parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
+    Url#url{host = lists:reverse(TmpAcc),
+	    port = default_port(Url#url.protocol),
+	    path = "/"};
+parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
+    Url#url{host = lists:reverse(TmpAcc),
+	    port = default_port(Url#url.protocol),
+	    path = "/"};
+parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+    Port = case TmpAcc of
+	       [] ->
+		   default_port(Prot);
+	       _ ->
+		   list_to_integer(lists:reverse(TmpAcc))
+	   end,
+    Url#url{port = Port, 
+	    path = "/"};
+parse_url([], get_password, Url, TmpAcc) ->
+    %% Ok, what we thought was the username/password was the hostname
+    %% and portnumber
+    #url{username=User} = Url,
+    Port = case TmpAcc of
+	       [] ->
+		   default_port(Url#url.protocol);
+	       _ ->
+		   list_to_integer(lists:reverse(TmpAcc))
+	   end,
+    Url#url{host = User,
+	    port = Port,
+	    username = undefined,
+	    password = undefined,
+	    path = "/"};
+parse_url([], State, Url, TmpAcc) ->
+    {invalid_uri_2, State, Url, TmpAcc}.
+
+default_port(http)  -> 80;
+default_port(https) -> 443;
+default_port(ftp)   -> 21.
+
+printable_date() ->
+    {{Y,Mo,D},{H, M, S}} = calendar:local_time(),
+    {_,_,MicroSecs} = now(),
+    [integer_to_list(Y),
+     $-,
+     integer_to_list(Mo),
+     $-,
+     integer_to_list(D),
+     $_,
+     integer_to_list(H),
+     $:,
+     integer_to_list(M),
+     $:,
+     integer_to_list(S),
+     $:,
+     integer_to_list(MicroSecs div 1000)].
+
+do_trace(Fmt, Args) ->
+    do_trace(get(my_trace_flag), Fmt, Args).
+
+-ifdef(DEBUG).
+do_trace(_, Fmt, Args) ->
+    io:format("~s -- (~s) - "++Fmt,
+	      [printable_date(), 
+	       get(ibrowse_trace_token) | Args]).
+-else.
+do_trace(true, Fmt, Args) ->
+    io:format("~s -- (~s) - "++Fmt,
+	      [printable_date(), 
+	       get(ibrowse_trace_token) | Args]);
+do_trace(_, _, _) ->
+    ok.
+-endif.
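
The helpers above are pure functions, so their behaviour can be read straight off the clauses. A few illustrative shell results (a sketch; the values follow from the code above, the prompt numbers are arbitrary):

    1> ibrowse_lib:url_encode("a b&c").
    "a+b%26c"
    2> ibrowse_lib:status_code(404).
    not_found
    3> ibrowse_lib:status_code("503").
    service_unavailable
    4> ibrowse_lib:encode_base64("foo").
    "Zm9v"
    5> ibrowse_lib:decode_base64(<<"Zm9v">>).
    <<"foo">>
    6> ibrowse_lib:dec2hex(4, 255).
    "00ff"

parse_url/1 fills a #url{} record the same way: "http://user:pw@example.com:8080/db" parses to protocol http, username "user", password "pw", host "example.com", port 8080 and path "/db", and default_port/1 supplies 80, 443 or 21 when the URL omits the port.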

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_sup.erl
----------------------------------------------------------------------
diff --git a/ibrowse_sup.erl b/ibrowse_sup.erl
new file mode 100644
index 0000000..300435d
--- /dev/null
+++ b/ibrowse_sup.erl
@@ -0,0 +1,65 @@
+%%%-------------------------------------------------------------------
+%%% File    : ibrowse_sup.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : 
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_sup).
+-vsn('$Id: ibrowse_sup.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
+
+-behaviour(supervisor).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+	 start_link/0
+        ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+	 init/1
+        ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+-define(SERVER, ?MODULE).
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the supervisor
+%%--------------------------------------------------------------------
+start_link() ->
+    supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: init/1
+%% Returns: {ok,  {SupFlags,  [ChildSpec]}} |
+%%          ignore                          |
+%%          {error, Reason}   
+%%--------------------------------------------------------------------
+init([]) ->
+    AChild = {ibrowse,{ibrowse,start_link,[]},
+	      permanent,2000,worker,[ibrowse, ibrowse_http_client]},
+    {ok,{{one_for_all,10,1}, [AChild]}}.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
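
init/1 above uses the classic tuple form of a child specification. The same spec, annotated (a readability sketch, not a change):

    %% {Id,      {Module, Function, Args},   Restart,  Shutdown, Type,  Modules}
    AChild = {ibrowse, {ibrowse, start_link, []},
              permanent,                        %% always restart the worker if it dies
              2000,                             %% allow 2000 ms for a graceful shutdown
              worker,
              [ibrowse, ibrowse_http_client]},  %% modules, used for release handling
    %% Restart strategy {one_for_all, 10, 1}: restart all children when one dies,
    %% giving up if more than 10 restarts happen within 1 second.
    {ok, {{one_for_all, 10, 1}, [AChild]}}.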

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
new file mode 100644
index 0000000..b4429c9
--- /dev/null
+++ b/ibrowse_test.erl
@@ -0,0 +1,226 @@
+%%% File    : ibrowse_test.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : Test ibrowse
+%%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+
+-module(ibrowse_test).
+-vsn('$Id: ibrowse_test.erl,v 1.3 2008/05/21 15:28:11 chandrusf Exp $ ').
+-export([
+	 load_test/3,
+	 send_reqs_1/3,
+	 do_send_req/2,
+	 unit_tests/0,
+	 unit_tests/1,
+	 drv_ue_test/0,
+	 drv_ue_test/1,
+	 ue_test/0,
+	 ue_test/1
+	]).
+
+-import(ibrowse_lib, [printable_date/0]).
+
+%% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
+%% tweak settings before running the load test. The defaults are 10 and 10.
+load_test(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
+                                                  is_integer(NumWorkers),
+                                                  is_integer(NumReqsPerWorker),
+                                                  NumWorkers > 0,
+                                                  NumReqsPerWorker > 0 ->
+    proc_lib:spawn(?MODULE, send_reqs_1, [Url, NumWorkers, NumReqsPerWorker]).
+
+send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
+    Start_time = now(),
+    ets:new(pid_table, [named_table, public]),
+    ets:new(ibrowse_test_results, [named_table, public]),
+    ets:new(ibrowse_errors, [named_table, public, ordered_set]),
+    init_results(),
+    process_flag(trap_exit, true),
+    log_msg("Starting spawning of workers...~n", []),
+    spawn_workers(Url, NumWorkers, NumReqsPerWorker),
+    log_msg("Finished spawning workers...~n", []),
+    do_wait(),
+    End_time = now(),
+    log_msg("All workers are done...~n", []),
+    log_msg("ibrowse_test_results table: ~n~p~n", [ets:tab2list(ibrowse_test_results)]),
+    log_msg("Start time: ~1000.p~n", [calendar:now_to_local_time(Start_time)]),
+    log_msg("End time  : ~1000.p~n", [calendar:now_to_local_time(End_time)]),
+    Elapsed_time_secs = trunc(timer:now_diff(End_time, Start_time) / 1000000),
+    log_msg("Elapsed   : ~p~n", [Elapsed_time_secs]),
+    log_msg("Reqs/sec  : ~p~n", [(NumWorkers*NumReqsPerWorker) / Elapsed_time_secs]),
+    dump_errors().
+
+init_results() ->
+    ets:insert(ibrowse_test_results, {crash, 0}),
+    ets:insert(ibrowse_test_results, {send_failed, 0}),
+    ets:insert(ibrowse_test_results, {other_error, 0}),
+    ets:insert(ibrowse_test_results, {success, 0}),
+    ets:insert(ibrowse_test_results, {retry_later, 0}),
+    ets:insert(ibrowse_test_results, {trid_mismatch, 0}),
+    ets:insert(ibrowse_test_results, {success_no_trid, 0}),
+    ets:insert(ibrowse_test_results, {failed, 0}),
+    ets:insert(ibrowse_test_results, {timeout, 0}),
+    ets:insert(ibrowse_test_results, {req_id, 0}).
+
+spawn_workers(_Url, 0, _) ->
+    ok;
+spawn_workers(Url, NumWorkers, NumReqsPerWorker) ->
+    Pid = proc_lib:spawn_link(?MODULE, do_send_req, [Url, NumReqsPerWorker]),
+    ets:insert(pid_table, {Pid, []}),
+    spawn_workers(Url, NumWorkers - 1, NumReqsPerWorker).
+
+do_wait() ->
+    receive
+	{'EXIT', _, normal} ->
+	    do_wait();
+	{'EXIT', Pid, Reason} ->
+	    ets:delete(pid_table, Pid),
+	    ets:insert(ibrowse_errors, {Pid, Reason}),
+	    ets:update_counter(ibrowse_test_results, crash, 1),
+	    do_wait();
+	Msg ->
+	    io:format("Recvd unknown message...~p~n", [Msg]),
+	    do_wait()
+    after 1000 ->
+	    case ets:info(pid_table, size) of
+		0 ->
+		    done;
+		_ ->
+		    do_wait()
+	    end
+    end.
+		     
+do_send_req(Url, NumReqs) ->
+    do_send_req_1(Url, NumReqs).
+
+do_send_req_1(_Url, 0) ->
+    ets:delete(pid_table, self());
+do_send_req_1(Url, NumReqs) ->
+    Counter = integer_to_list(ets:update_counter(ibrowse_test_results, req_id, 1)),
+    case ibrowse:send_req(Url, [{"ib_req_id", Counter}], get, [], [], 10000) of
+	{ok, _Status, Headers, _Body} ->
+	    case lists:keysearch("ib_req_id", 1, Headers) of
+		{value, {_, Counter}} ->
+		    ets:update_counter(ibrowse_test_results, success, 1);
+		{value, _} ->
+		    ets:update_counter(ibrowse_test_results, trid_mismatch, 1);
+		false ->
+		    ets:update_counter(ibrowse_test_results, success_no_trid, 1)
+	    end;
+	{error, req_timedout} ->
+	    ets:update_counter(ibrowse_test_results, timeout, 1);
+	{error, send_failed} ->
+	    ets:update_counter(ibrowse_test_results, send_failed, 1);
+	{error, retry_later} ->
+	    ets:update_counter(ibrowse_test_results, retry_later, 1);
+	Err ->
+	    ets:insert(ibrowse_errors, {now(), Err}),
+	    ets:update_counter(ibrowse_test_results, other_error, 1),
+	    ok
+    end,
+    do_send_req_1(Url, NumReqs-1).
+
+dump_errors() ->
+    case ets:info(ibrowse_errors, size) of
+	0 ->
+	    ok;
+	_ ->
+	    {A, B, C} = now(),
+	    Filename = lists:flatten(
+			 io_lib:format("ibrowse_errors_~p_~p_~p.txt" , [A, B, C])),
+	    case file:open(Filename, [write, delayed_write, raw]) of
+		{ok, Iod} ->
+		    dump_errors(ets:first(ibrowse_errors), Iod);
+		Err ->
+		    io:format("failed to create file ~s. Reason: ~p~n", [Filename, Err]),
+		    ok
+	    end
+    end.
+
+dump_errors('$end_of_table', Iod) ->
+    file:close(Iod);
+dump_errors(Key, Iod) ->
+    [{_, Term}] = ets:lookup(ibrowse_errors, Key),
+    file:write(Iod, io_lib:format("~p~n", [Term])),
+    dump_errors(ets:next(ibrowse_errors, Key), Iod).
+
+%%------------------------------------------------------------------------------
+%% Unit Tests
+%%------------------------------------------------------------------------------
+-define(TEST_LIST, [{"http://intranet/messenger", get},
+		    {"http://www.google.co.uk", get},
+		    {"http://www.google.com", get},
+		    {"http://www.google.com", options}, 
+		    {"http://www.sun.com", get},
+		    {"http://www.oracle.com", get},
+		    {"http://www.bbc.co.uk", get},
+		    {"http://www.bbc.co.uk", trace},
+		    {"http://www.bbc.co.uk", options},
+		    {"http://yaws.hyber.org", get},
+		    {"http://jigsaw.w3.org/HTTP/ChunkedScript", get},
+		    {"http://jigsaw.w3.org/HTTP/TE/foo.txt", get},
+		    {"http://jigsaw.w3.org/HTTP/TE/bar.txt", get},
+		    {"http://jigsaw.w3.org/HTTP/connection.html", get},
+		    {"http://jigsaw.w3.org/HTTP/cc.html", get},
+		    {"http://jigsaw.w3.org/HTTP/cc-private.html", get},
+		    {"http://jigsaw.w3.org/HTTP/cc-proxy-revalidate.html", get},
+		    {"http://jigsaw.w3.org/HTTP/cc-nocache.html", get},
+		    {"http://jigsaw.w3.org/HTTP/h-content-md5.html", get},
+		    {"http://jigsaw.w3.org/HTTP/h-retry-after.html", get},
+		    {"http://jigsaw.w3.org/HTTP/h-retry-after-date.html", get},
+		    {"http://jigsaw.w3.org/HTTP/neg", get},
+		    {"http://jigsaw.w3.org/HTTP/negbad", get},
+		    {"http://jigsaw.w3.org/HTTP/400/toolong/", get},
+		    {"http://jigsaw.w3.org/HTTP/300/", get},
+		    {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
+		    {"http://jigsaw.w3.org/HTTP/CL/", get}
+		   ]).
+
+unit_tests() ->
+    unit_tests([]).
+
+unit_tests(Options) ->
+    lists:foreach(fun({Url, Method}) ->
+			  execute_req(Url, Method, Options);
+		     ({Url, Method, X_Opts}) ->
+			  execute_req(Url, Method, X_Opts ++ Options)
+		  end, ?TEST_LIST).
+
+execute_req(Url, Method) ->
+    execute_req(Url, Method, []).
+
+execute_req(Url, Method, Options) ->
+    io:format("~s, ~p: ", [Url, Method]),
+    Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
+    case Result of 
+	{ok, SCode, _H, _B} ->
+	    io:format("Status code: ~p~n", [SCode]);
+	Err ->
+	    io:format("Err -> ~p~n", [Err])
+    end.
+
+drv_ue_test() ->
+    drv_ue_test(lists:duplicate(1024, 127)).
+drv_ue_test(Data) ->
+    [{port, Port}| _] = ets:lookup(ibrowse_table, port),
+%     erl_ddll:unload_driver("ibrowse_drv"),
+%     timer:sleep(1000),
+%     erl_ddll:load_driver("../priv", "ibrowse_drv"),
+%     Port = open_port({spawn, "ibrowse_drv"}, []),
+    {Time, Res} = timer:tc(ibrowse_lib, drv_ue, [Data, Port]),
+    io:format("Time -> ~p~n", [Time]),
+    io:format("Data Length -> ~p~n", [length(Data)]),
+    io:format("Res Length -> ~p~n", [length(Res)]).
+%    io:format("Result -> ~s~n", [Res]).
+
+ue_test() ->
+    ue_test(lists:duplicate(1024, $?)).
+ue_test(Data) ->
+    {Time, Res} = timer:tc(ibrowse_lib, url_encode, [Data]),
+    io:format("Time -> ~p~n", [Time]),
+    io:format("Data Length -> ~p~n", [length(Data)]),
+    io:format("Res Length -> ~p~n", [length(Res)]).
+%    io:format("Result -> ~s~n", [Res]).
+
+log_msg(Fmt, Args) ->
+    io:format("~s -- " ++ Fmt,
+	      [ibrowse_lib:printable_date() | Args]).
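
The comment above load_test/3 points at ibrowse's tuning knobs. A hedged usage sketch (host, port and limits are placeholders; the Host, Port, Value argument order for the two setters is the commonly documented one and is not shown in this diff):

    %% Allow up to 20 connections to the target host, each pipelining up to
    %% 5 requests, then run 10 workers doing 100 GETs each.
    ibrowse:set_max_sessions("www.example.com", 80, 20),
    ibrowse:set_max_pipeline_size("www.example.com", 80, 5),
    ibrowse_test:load_test("http://www.example.com/", 10, 100).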


[18/33] Bumping the ibrowse library to version 1.6.2 (latest). It has a few important bug fixes and new features, for example:

Posted by da...@apache.org.
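
The hunks below add, among other things, per-request socket options, saving a response straight to a caller-chosen file, raw status-line/header passthrough, and HTTPS through a proxy via a CONNECT tunnel. A hedged sketch of how those options are passed (the option names are taken from the diff; host names and values are placeholders):

    Options = [{socket_options, [{keepalive, true}]},     %% merged into the connect options, minus active/packet/list
               {save_response_to_file, "/tmp/reply.bin"}, %% 1.6.2 accepts a filename, not just 'true'
               {give_raw_headers, true},                  %% reply with the raw status line and headers
               {proxy_host, "proxy.example.com"},         %% with an https URL this triggers the CONNECT tunnel path
               {proxy_port, 8080}],
    ibrowse:send_req("https://secure.example.com/", [], get, [], Options, 30000).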
http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 65d9cb9..1633e5b 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -6,8 +6,6 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 -module(ibrowse_http_client).
--vsn('$Id: ibrowse_http_client.erl,v 1.19 2009/07/01 22:43:19 chandrusf Exp $ ').
-
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
 %% Include files
@@ -16,11 +14,11 @@
 %%--------------------------------------------------------------------
 %% External exports
 -export([
-	 start_link/1,
-	 start/1,
-	 stop/1,
-	 send_req/7
-	]).
+         start_link/1,
+         start/1,
+         stop/1,
+         send_req/7
+        ]).
 
 -ifdef(debug).
 -compile(export_all).
@@ -28,41 +26,45 @@
 
 %% gen_server callbacks
 -export([
-	 init/1,
-	 handle_call/3,
-	 handle_cast/2,
-	 handle_info/2,
-	 terminate/2,
-	 code_change/3
-	]).
+         init/1,
+         handle_call/3,
+         handle_cast/2,
+         handle_info/2,
+         terminate/2,
+         code_change/3
+        ]).
 
 -include("ibrowse.hrl").
 
--record(state, {host, port,
-		use_proxy = false, proxy_auth_digest,
-		ssl_options = [], is_ssl = false, socket,
-		reqs=queue:new(), cur_req, status=idle, http_status_code,
-		reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
-		recvd_headers=[],
-		is_closing, send_timer, content_length,
-		deleted_crlf = false, transfer_encoding,
-		chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
-		lb_ets_tid, cur_pipeline_size = 0, prev_req_id
-	       }).
+-record(state, {host, port, connect_timeout,
+                use_proxy = false, proxy_auth_digest,
+                ssl_options = [], is_ssl = false, socket,
+                proxy_tunnel_setup = false,
+                tunnel_setup_queue = [],
+                reqs=queue:new(), cur_req, status=idle, http_status_code,
+                reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
+                recvd_headers=[],
+                status_line, raw_headers, 
+                is_closing, send_timer, content_length,
+                deleted_crlf = false, transfer_encoding,
+                chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
+                lb_ets_tid, cur_pipeline_size = 0, prev_req_id
+               }).
 
 -record(request, {url, method, options, from,
-		  stream_to, caller_controls_socket = false,
-		  req_id,
-		  stream_chunk_size,
-		  save_response_to_file = false,
-		  tmp_file_name, tmp_file_fd,
-		  response_format}).
+                  stream_to, caller_controls_socket = false, 
+                  caller_socket_options = [],
+                  req_id,
+                  stream_chunk_size,
+                  save_response_to_file = false, 
+                  tmp_file_name, tmp_file_fd,
+                  response_format}).
 
 -import(ibrowse_lib, [
-		      get_value/2,
-		      get_value/3,
-		      do_trace/2
-		     ]).
+                      get_value/2,
+                      get_value/3,
+                      do_trace/2
+                     ]).
 
 -define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
 
@@ -80,7 +82,8 @@ start_link(Args) ->
     gen_server:start_link(?MODULE, Args, []).
 
 stop(Conn_pid) ->
-    gen_server:call(Conn_pid, stop).
+    catch gen_server:call(Conn_pid, stop),
+    ok.
 
 send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
     gen_server:call(
@@ -101,26 +104,23 @@ send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
 %%--------------------------------------------------------------------
 init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
     State = #state{host = Host,
-		   port = Port,
-		   ssl_options = SSLOptions,
-		   is_ssl = Is_ssl,
-		   lb_ets_tid = Lb_Tid},
+                   port = Port,
+                   ssl_options = SSLOptions,
+                   is_ssl = Is_ssl,
+                   lb_ets_tid = Lb_Tid},
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
     {ok, State};
+init(Url) when is_list(Url) ->
+    case catch ibrowse_lib:parse_url(Url) of
+        #url{protocol = Protocol} = Url_rec ->
+            init({undefined, Url_rec, {[], Protocol == https}});
+        {'EXIT', _} ->
+            {error, invalid_url}
+    end;
 init({Host, Port}) ->
     State = #state{host = Host,
-		   port = Port},
-    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
-    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    {ok, State};
-init(#url{host=Host, port=Port, protocol=Protocol}) ->
-    State = #state{
-        host = Host,
-        port = Port,
-        is_ssl = (Protocol == https),
-        ssl_options = [{ssl_imp, new}, {depth, 9}]
-    },
+                   port = Port},
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
     {ok, State}.
@@ -141,13 +141,13 @@ handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
     {reply, {error, connection_closing}, State};
 
 handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
-	    From, State) ->
+            From, State) ->
     send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
 
 handle_call(stop, _From, State) ->
     do_close(State),
     do_error_reply(State, closing_on_request),
-    {stop, normal, ok, State#state{socket=undefined}};
+    {stop, normal, ok, State};
 
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
@@ -177,9 +177,8 @@ handle_info({ssl, _Sock, Data}, State) ->
     handle_sock_data(Data, State);
 
 handle_info({stream_next, Req_id}, #state{socket = Socket,
-					  is_ssl = Is_ssl,
-					  cur_req = #request{req_id = Req_id}} = State) ->
-    do_setopts(Socket, [{active, once}], Is_ssl),
+                                          cur_req = #request{req_id = Req_id}} = State) ->
+    do_setopts(Socket, [{active, once}], State),
     {noreply, State};
 
 handle_info({stream_next, _Req_id}, State) ->
@@ -204,13 +203,13 @@ handle_info({ssl_error, _Sock}, State) ->
     {stop, normal, State};
 
 handle_info({req_timedout, From}, State) ->
-    case lists:keysearch(From, #request.from, queue:to_list(State#state.reqs)) of
-	false ->
-	    {noreply, State};
-	{value, _} ->
-	    shutting_down(State),
-	    do_error_reply(State, req_timedout),
-	    {stop, normal, State}
+    case lists:keymember(From, #request.from, queue:to_list(State#state.reqs)) of
+        false ->
+            {noreply, State};
+        true ->
+            shutting_down(State),
+            do_error_reply(State, req_timedout),
+            {stop, normal, State}
     end;
 
 handle_info(timeout, State) ->
@@ -224,7 +223,7 @@ handle_info({trace, Bool}, State) ->
 
 handle_info(Info, State) ->
     io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
-	      [State#state.host, State#state.port, Info]),
+              [State#state.host, State#state.port, Info]),
     io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
     {noreply, State}.
 
@@ -260,133 +259,132 @@ handle_sock_data(Data, #state{status=idle}=State) ->
 
 handle_sock_data(Data, #state{status = get_header}=State) ->
     case parse_response(Data, State) of
-	{error, _Reason} ->
-	    shutting_down(State),
-	    {stop, normal, State};
-	stop ->
-	    shutting_down(State),
-	    {stop, normal, State};
-	State_1 ->
-	    active_once(State_1),
-	    {noreply, State_1, get_inac_timeout(State_1)}
+        {error, _Reason} ->
+            shutting_down(State),
+            {stop, normal, State};
+        State_1 ->
+            active_once(State_1),
+            set_inac_timer(State_1),
+            {noreply, State_1}
     end;
 
 handle_sock_data(Data, #state{status           = get_body,
-			      content_length   = CL,
-			      http_status_code = StatCode,
-			      recvd_headers    = Headers,
-			      chunk_size       = CSz} = State) ->
+                              content_length   = CL,
+                              http_status_code = StatCode,
+                              recvd_headers    = Headers,
+                              chunk_size       = CSz} = State) ->
     case (CL == undefined) and (CSz == undefined) of
-	true ->
-	    case accumulate_response(Data, State) of
-		{error, Reason} ->
-		    shutting_down(State),
-		    fail_pipelined_requests(State,
-					    {error, {Reason, {stat_code, StatCode}, Headers}}),
-		    {stop, normal, State};
-		State_1 ->
-		    active_once(State_1),
-		    {noreply, State_1, get_inac_timeout(State_1)}
-	    end;
-	_ ->
-	    case parse_11_response(Data, State) of
-		{error, Reason} ->
-		    shutting_down(State),
-		    fail_pipelined_requests(State,
-					    {error, {Reason, {stat_code, StatCode}, Headers}}),
-		    {stop, normal, State};
-		stop ->
-		    shutting_down(State),
-		    {stop, normal, State};
-		State_1 ->
-		    active_once(State_1),
-		    {noreply, State_1, get_inac_timeout(State_1)}
-	    end
+        true ->
+            case accumulate_response(Data, State) of
+                {error, Reason} ->
+                    shutting_down(State),
+                    fail_pipelined_requests(State,
+                                            {error, {Reason, {stat_code, StatCode}, Headers}}),
+                    {stop, normal, State};
+                State_1 ->
+                    active_once(State_1),
+                    set_inac_timer(State_1),
+                    {noreply, State_1}
+            end;
+        _ ->
+            case parse_11_response(Data, State) of
+                {error, Reason} ->
+                    shutting_down(State),
+                    fail_pipelined_requests(State,
+                                            {error, {Reason, {stat_code, StatCode}, Headers}}),
+                    {stop, normal, State};
+                State_1 ->
+                    active_once(State_1),
+                    set_inac_timer(State_1),
+                    {noreply, State_1}
+            end
     end.
 
 accumulate_response(Data,
-		    #state{
-		      cur_req = #request{save_response_to_file = true,
-					 tmp_file_fd = undefined} = CurReq,
-		      http_status_code=[$2 | _]}=State) ->
-    TmpFilename = make_tmp_filename(),
+                    #state{
+                      cur_req = #request{save_response_to_file = Srtf,
+                                         tmp_file_fd = undefined} = CurReq,
+                      http_status_code=[$2 | _]}=State) when Srtf /= false ->
+    TmpFilename = make_tmp_filename(Srtf),
     case file:open(TmpFilename, [write, delayed_write, raw]) of
-	{ok, Fd} ->
-	    accumulate_response(Data, State#state{
-					cur_req = CurReq#request{
-						    tmp_file_fd = Fd,
-						    tmp_file_name = TmpFilename}});
-	{error, Reason} ->
-	    {error, {file_open_error, Reason}}
+        {ok, Fd} ->
+            accumulate_response(Data, State#state{
+                                        cur_req = CurReq#request{
+                                                    tmp_file_fd = Fd,
+                                                    tmp_file_name = TmpFilename}});
+        {error, Reason} ->
+            {error, {file_open_error, Reason}}
     end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
-						    tmp_file_fd = Fd},
-				 transfer_encoding=chunked,
-				 reply_buffer = Reply_buf,
-				 http_status_code=[$2 | _]
-				} = State) ->
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+                                                    tmp_file_fd = Fd},
+                                 transfer_encoding=chunked,
+                                 reply_buffer = Reply_buf,
+                                 http_status_code=[$2 | _]
+                                } = State) when Srtf /= false ->
     case file:write(Fd, [Reply_buf, Data]) of
-	ok ->
-	    State#state{reply_buffer = <<>>};
-	{error, Reason} ->
-	    {error, {file_write_error, Reason}}
+        ok ->
+            State#state{reply_buffer = <<>>};
+        {error, Reason} ->
+            {error, {file_write_error, Reason}}
     end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
-						    tmp_file_fd = Fd},
-				 reply_buffer = RepBuf,
-				 http_status_code=[$2 | _]
-				} = State) ->
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+                                                    tmp_file_fd = Fd},
+                                 reply_buffer = RepBuf,
+                                 http_status_code=[$2 | _]
+                                } = State) when Srtf /= false ->
     case file:write(Fd, [RepBuf, Data]) of
-	ok ->
-	    State#state{reply_buffer = <<>>};
-	{error, Reason} ->
-	    {error, {file_write_error, Reason}}
+        ok ->
+            State#state{reply_buffer = <<>>};
+        {error, Reason} ->
+            {error, {file_write_error, Reason}}
     end;
 accumulate_response(<<>>, State) ->
     State;
 accumulate_response(Data, #state{reply_buffer = RepBuf,
-				 rep_buf_size = RepBufSize,
-				 streamed_size = Streamed_size,
-				 cur_req = CurReq}=State) ->
+                                 rep_buf_size = RepBufSize,
+                                 streamed_size = Streamed_size,
+                                 cur_req = CurReq}=State) ->
     #request{stream_to=StreamTo, req_id=ReqId,
-	     stream_chunk_size = Stream_chunk_size,
-	     response_format = Response_format,
-	     caller_controls_socket = Caller_controls_socket} = CurReq,
+             stream_chunk_size = Stream_chunk_size,
+             response_format = Response_format,
+             caller_controls_socket = Caller_controls_socket} = CurReq,
     RepBuf_1 = list_to_binary([RepBuf, Data]),
     New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
-	undefined ->
-	    State#state{reply_buffer = RepBuf_1};
-	_ when Caller_controls_socket == true ->
-	    do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
-	    State#state{reply_buffer = <<>>,
-			streamed_size = Streamed_size + size(RepBuf_1)};
-	_ when New_data_size >= Stream_chunk_size ->
-	    {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
-	    do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
-	    accumulate_response(
-	      Rem_data,
-	      State#state{
-		reply_buffer = <<>>,
-		streamed_size = Streamed_size + Stream_chunk_size});
-	_ ->
-	    State#state{reply_buffer = RepBuf_1}
+        undefined ->
+            State#state{reply_buffer = RepBuf_1};
+        _ when Caller_controls_socket == true ->
+            do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
+            State#state{reply_buffer = <<>>, 
+                        streamed_size = Streamed_size + size(RepBuf_1)};
+        _ when New_data_size >= Stream_chunk_size ->
+            {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
+            do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
+            accumulate_response(
+              Rem_data,
+              State#state{
+                reply_buffer = <<>>,
+                streamed_size = Streamed_size + Stream_chunk_size});
+        _ ->
+            State#state{reply_buffer = RepBuf_1}
     end.
 
-make_tmp_filename() ->
+make_tmp_filename(true) ->
     DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
     {A,B,C} = now(),
     filename:join([DownloadDir,
-		   "ibrowse_tmp_file_"++
-		   integer_to_list(A) ++
-		   integer_to_list(B) ++
-		   integer_to_list(C)]).
+                   "ibrowse_tmp_file_"++
+                   integer_to_list(A) ++
+                   integer_to_list(B) ++
+                   integer_to_list(C)]);
+make_tmp_filename(File) when is_list(File) ->
+    File.
 
 
 %%--------------------------------------------------------------------
 %% Handles the case when the server closes the socket
 %%--------------------------------------------------------------------
-handle_sock_closed(#state{status=get_header}=State) ->
+handle_sock_closed(#state{status=get_header} = State) ->
     shutting_down(State),
     do_error_reply(State, connection_closed);
 
@@ -397,40 +395,73 @@ handle_sock_closed(#state{cur_req=undefined} = State) ->
 %% Connection-Close header and has closed the socket to indicate end
 %% of response. There may be pipelined requests which still need a response.
 handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
-			  is_closing = IsClosing,
-			  cur_req = #request{tmp_file_name=TmpFilename,
-					     tmp_file_fd=Fd} = CurReq,
-			  status = get_body, recvd_headers = Headers}=State) ->
+                          is_closing = IsClosing,
+                          cur_req = #request{tmp_file_name=TmpFilename,
+                                             tmp_file_fd=Fd} = CurReq,
+                          status = get_body,
+                          recvd_headers = Headers,
+                          status_line = Status_line,
+                          raw_headers = Raw_headers
+                         }=State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
-	     response_format = Resp_format} = CurReq,
+             response_format = Resp_format,
+             options = Options} = CurReq,
     case IsClosing of
-	true ->
-	    {_, Reqs_1} = queue:out(Reqs),
-	    case TmpFilename of
-		undefined ->
-		    do_reply(State, From, StreamTo, ReqId, Resp_format,
-			     {ok, SC, Headers, Buf});
-		_ ->
-		    file:close(Fd),
-		    do_reply(State, From, StreamTo, ReqId, Resp_format,
-			     {ok, SC, Headers, {file, TmpFilename}})
-	    end,
-	    do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
-	    State;
-	_ ->
-	    do_error_reply(State, connection_closed),
-	    State
+        true ->
+            {_, Reqs_1} = queue:out(Reqs),
+            Body = case TmpFilename of
+                       undefined ->
+                           Buf;
+                       _ ->
+                           file:close(Fd),
+                           {file, TmpFilename}
+                   end,
+            Reply = case get_value(give_raw_headers, Options, false) of
+                          true ->
+                            {ok, Status_line, Raw_headers, Body};
+                        false ->
+                            {ok, SC, Headers, Buf}
+                    end,
+            do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+            do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
+            State;
+        _ ->
+            do_error_reply(State, connection_closed),
+            State
     end.
 
-do_connect(Host, Port, _Options, #state{is_ssl=true, ssl_options=SSLOptions}, Timeout) ->
+do_connect(Host, Port, Options, #state{is_ssl      = true,
+                                       use_proxy   = false,
+                                       ssl_options = SSLOptions},
+           Timeout) ->
+    Caller_socket_options = get_value(socket_options, Options, []),
+    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options),
     ssl:connect(Host, Port,
-		[binary, {nodelay, true}, {active, false} | SSLOptions],
-		Timeout);
-do_connect(Host, Port, _Options, _State, Timeout) ->
-    gen_tcp:connect(Host, Port,
-		    [binary, {nodelay, true}, {active, false}],
-		    Timeout).
-
+                [binary, {nodelay, true}, {active, false} | Other_sock_options],
+                Timeout);
+do_connect(Host, Port, Options, _State, Timeout) ->
+    Caller_socket_options = get_value(socket_options, Options, []),
+    Other_sock_options = filter_sock_options(Caller_socket_options),
+    gen_tcp:connect(Host, to_integer(Port),
+                    [binary, {nodelay, true}, {active, false} | Other_sock_options],
+                    Timeout).
+
+%% We don't want the caller to specify certain options
+filter_sock_options(Opts) ->
+    lists:filter(fun({active, _}) ->
+                         false;
+                    ({packet, _}) ->
+                         false;
+                    (list) ->
+                         false;
+                    (_) ->
+                         true
+                 end, Opts).
+
+do_send(Req, #state{socket = Sock,
+                    is_ssl = true,
+                    use_proxy = true,
+                    proxy_tunnel_setup = Pts}) when Pts /= done ->  gen_tcp:send(Sock, Req);
 do_send(Req, #state{socket = Sock, is_ssl = true})  ->  ssl:send(Sock, Req);
 do_send(Req, #state{socket = Sock, is_ssl = false}) ->  gen_tcp:send(Sock, Req).
 
@@ -450,261 +481,328 @@ do_send_body(Body, State) ->
 
 do_send_body1(Source, Resp, State) ->
     case Resp of
-	{ok, Data} ->
-	    do_send(Data, State),
-	    do_send_body({Source}, State);
-	{ok, Data, New_source_state} ->
-	    do_send(Data, State),
-	    do_send_body({Source, New_source_state}, State);
-	eof ->
-	    ok;
-	Err ->
-	    Err
+        {ok, Data} ->
+            do_send(Data, State),
+            do_send_body({Source}, State);
+        {ok, Data, New_source_state} ->
+            do_send(Data, State),
+            do_send_body({Source, New_source_state}, State);
+        eof ->
+            ok;
+        Err ->
+            Err
     end.
 
 do_close(#state{socket = undefined})            ->  ok;
+do_close(#state{socket = Sock,
+                is_ssl = true,
+                use_proxy = true,
+                proxy_tunnel_setup = Pts
+               }) when Pts /= done ->  gen_tcp:close(Sock);
 do_close(#state{socket = Sock, is_ssl = true})  ->  ssl:close(Sock);
 do_close(#state{socket = Sock, is_ssl = false}) ->  gen_tcp:close(Sock).
 
 active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
     ok;
-active_once(#state{socket = Socket, is_ssl = Is_ssl}) ->
-    do_setopts(Socket, [{active, once}], Is_ssl).
+active_once(#state{socket = Socket} = State) ->
+    do_setopts(Socket, [{active, once}], State).
 
-do_setopts(Sock, Opts, true)  ->  ssl:setopts(Sock, Opts);
-do_setopts(Sock, Opts, false) ->  inet:setopts(Sock, Opts).
+do_setopts(_Sock, [],   _)    ->  ok;
+do_setopts(Sock, Opts, #state{is_ssl = true,
+                              use_proxy = true,
+                              proxy_tunnel_setup = Pts}
+                             ) when Pts /= done ->  inet:setopts(Sock, Opts);
+do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
+do_setopts(Sock, Opts, _) ->  inet:setopts(Sock, Opts).
 
 check_ssl_options(Options, State) ->
     case get_value(is_ssl, Options, false) of
-	false ->
-	    State;
-	true ->
-	    State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
+        false ->
+            State;
+        true ->
+            State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
     end.
 
 send_req_1(From,
-	   #url{host = Host,
-		port = Port} = Url,
-	   Headers, Method, Body, Options, Timeout,
-	   #state{socket = undefined} = State) ->
+           #url{host = Host,
+                port = Port} = Url,
+           Headers, Method, Body, Options, Timeout,
+           #state{socket = undefined} = State) ->
     {Host_1, Port_1, State_1} =
-	case get_value(proxy_host, Options, false) of
-	    false ->
-		{Host, Port, State};
-	    PHost ->
-		ProxyUser     = get_value(proxy_user, Options, []),
-		ProxyPassword = get_value(proxy_password, Options, []),
-		Digest        = http_auth_digest(ProxyUser, ProxyPassword),
-		{PHost, get_value(proxy_port, Options, 80),
-		 State#state{use_proxy = true,
-			     proxy_auth_digest = Digest}}
-	end,
+        case get_value(proxy_host, Options, false) of
+            false ->
+                {Host, Port, State};
+            PHost ->
+                ProxyUser     = get_value(proxy_user, Options, []),
+                ProxyPassword = get_value(proxy_password, Options, []),
+                Digest        = http_auth_digest(ProxyUser, ProxyPassword),
+                {PHost, get_value(proxy_port, Options, 80),
+                 State#state{use_proxy = true,
+                             proxy_auth_digest = Digest}}
+        end,
     State_2 = check_ssl_options(Options, State_1),
     do_trace("Connecting...~n", []),
     Start_ts = now(),
     Conn_timeout = get_value(connect_timeout, Options, Timeout),
     case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
-	{ok, Sock} ->
-	    do_trace("Connected!~n", []),
-	    End_ts = now(),
-	    Timeout_1 = case Timeout of
-			    infinity ->
-				infinity;
-			    _ ->
-				Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
-			end,
-	    State_3 = State_2#state{socket = Sock},
-	    send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
-	Err ->
-	    shutting_down(State_2),
-	    do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
-	    gen_server:reply(From, {error, conn_failed}),
-	    {stop, normal, State_2}
+        {ok, Sock} ->
+            do_trace("Connected!~n", []),
+            End_ts = now(),
+            Timeout_1 = case Timeout of
+                            infinity ->
+                                infinity;
+                            _ ->
+                                Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
+                        end,
+            State_3 = State_2#state{socket = Sock,
+                                    connect_timeout = Conn_timeout},
+            send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
+        Err ->
+            shutting_down(State_2),
+            do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+            gen_server:reply(From, {error, conn_failed}),
+            {stop, normal, State_2}
     end;
+
+%% Send a CONNECT request.
+%% Wait for 200 OK
+%% Upgrade to SSL connection
+%% Then send request
+
 send_req_1(From,
-	   #url{abspath = AbsPath,
-		host    = Host,
-		port    = Port,
-		path    = RelPath} = Url,
-	   Headers, Method, Body, Options, Timeout,
-	   #state{status = Status} = State) ->
+           #url{
+                host    = Server_host,
+                port    = Server_port
+                } = Url,
+           Headers, Method, Body, Options, Timeout,
+           #state{
+                  proxy_tunnel_setup = false,
+                  use_proxy = true,
+                  is_ssl    = true} = State) ->
+    NewReq = #request{
+      method                 = connect,
+      options                = Options
+     },
+    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+    Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
+    Path = [Server_host, $:, integer_to_list(Server_port)],
+    {Req, Body_1} = make_request(connect, Pxy_auth_headers,
+                                 Path, Path,
+                                 [], Options, State_1),
+    trace_request(Req),
+    case do_send(Req, State) of
+        ok ->
+            case do_send_body(Body_1, State_1) of
+                ok ->
+                    active_once(State_1),
+                    Ref = case Timeout of
+                              infinity ->
+                                  undefined;
+                              _ ->
+                                  erlang:send_after(Timeout, self(), {req_timedout, From})
+                          end,
+                    State_2 = State_1#state{status     = get_header,
+                                            cur_req    = NewReq,
+                                            send_timer = Ref,
+                                            proxy_tunnel_setup = in_progress,
+                                            tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
+                    set_inac_timer(State_1),
+                    {noreply, State_2};
+                Err ->
+                    shutting_down(State_1),
+                    do_trace("Send failed... Reason: ~p~n", [Err]),
+                    gen_server:reply(From, {error, send_failed}),
+                    {stop, normal, State_1}
+            end;
+        Err ->
+            shutting_down(State_1),
+            do_trace("Send failed... Reason: ~p~n", [Err]),
+            gen_server:reply(From, {error, send_failed}),
+            {stop, normal, State_1}
+    end;
+
+send_req_1(From, Url, Headers, Method, Body, Options, Timeout, 
+           #state{proxy_tunnel_setup = in_progress,
+                  tunnel_setup_queue = Q} = State) ->
+    do_trace("Queued SSL request awaiting tunnel setup: ~n"
+             "URL     : ~s~n"
+             "Method  : ~p~n"
+             "Headers : ~p~n", [Url, Method, Headers]),
+    {noreply, State#state{tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout} | Q]}};
+
+send_req_1(From,
+           #url{abspath = AbsPath,
+                path    = RelPath} = Url,
+           Headers, Method, Body, Options, Timeout,
+           #state{status    = Status,
+                  socket    = Socket,
+                  is_ssl    = Is_ssl} = State) ->
     ReqId = make_req_id(),
     Resp_format = get_value(response_format, Options, list),
+    Caller_socket_options = get_value(socket_options, Options, []),
     {StreamTo, Caller_controls_socket} =
-	case get_value(stream_to, Options, undefined) of
-	    {Caller, once} when is_pid(Caller) or
-				is_atom(Caller) ->
-		Async_pid_rec = {{req_id_pid, ReqId}, self()},
-		true = ets:insert(ibrowse_stream, Async_pid_rec),
-		{Caller, true};
-	    undefined ->
-		{undefined, false};
-	    Caller when is_pid(Caller) or
-			is_atom(Caller) ->
-		{Caller, false};
-	    Stream_to_inv ->
-		exit({invalid_option, {stream_to, Stream_to_inv}})
-	end,
+        case get_value(stream_to, Options, undefined) of
+            {Caller, once} when is_pid(Caller) or
+                                is_atom(Caller) ->
+                Async_pid_rec = {{req_id_pid, ReqId}, self()},
+                true = ets:insert(ibrowse_stream, Async_pid_rec), 
+                {Caller, true};
+            undefined ->
+                {undefined, false};
+            Caller when is_pid(Caller) or
+                        is_atom(Caller) ->
+                {Caller, false};
+            Stream_to_inv ->
+                exit({invalid_option, {stream_to, Stream_to_inv}})
+        end,
     SaveResponseToFile = get_value(save_response_to_file, Options, false),
     NewReq = #request{url                    = Url,
-		      method                 = Method,
-		      stream_to              = StreamTo,
-		      caller_controls_socket = Caller_controls_socket,
-		      options                = Options,
-		      req_id                 = ReqId,
-		      save_response_to_file  = SaveResponseToFile,
-		      stream_chunk_size      = get_stream_chunk_size(Options),
-		      response_format        = Resp_format,
-		      from                   = From},
+                      method                 = Method,
+                      stream_to              = StreamTo,
+                      caller_controls_socket = Caller_controls_socket,
+                      caller_socket_options  = Caller_socket_options,
+                      options                = Options,
+                      req_id                 = ReqId,
+                      save_response_to_file  = SaveResponseToFile,
+                      stream_chunk_size      = get_stream_chunk_size(Options),
+                      response_format        = Resp_format,
+                      from                   = From},
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
-    Headers_1 = add_auth_headers(Url, Options, Headers, State),
-    HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
-			  false ->
-			      case Port of
-				  80 -> Host;
-				  _ -> [Host, ":", integer_to_list(Port)]
-			      end;
-			  {value, {_, Host_h_val}} ->
-			      Host_h_val
-		      end,
+    Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
     {Req, Body_1} = make_request(Method,
-				 [{"Host", HostHeaderValue} | Headers_1],
-				 AbsPath, RelPath, Body, Options, State#state.use_proxy),
-    case get(my_trace_flag) of
-	true ->
-	    %%Avoid the binary operations if trace is not on...
-	    NReq = binary_to_list(list_to_binary(Req)),
-	    do_trace("Sending request: ~n"
-		     "--- Request Begin ---~n~s~n"
-		     "--- Request End ---~n", [NReq]);
-	_ -> ok
-    end,
-    case do_send(Req, State) of
-	ok ->
-	    case do_send_body(Body_1, State) of
-		ok ->
-		    State_2 = inc_pipeline_counter(State_1),
-		    active_once(State_1),
-		    Ref = case Timeout of
-			      infinity ->
-				  undefined;
-			      _ ->
-				  erlang:send_after(Timeout, self(), {req_timedout, From})
-			  end,
-		    State_3 = case Status of
-				  idle ->
-				      State_2#state{status     = get_header,
-						    cur_req    = NewReq,
-						    send_timer = Ref};
-				  _ ->
-				      State_2#state{send_timer = Ref}
-			      end,
-		    case StreamTo of
-			undefined ->
-			    ok;
-			_ ->
-			    gen_server:reply(From, {ibrowse_req_id, ReqId})
-		    end,
-		    {noreply, State_3, get_inac_timeout(State_3)};
-		Err ->
-		    shutting_down(State_1),
-		    do_trace("Send failed... Reason: ~p~n", [Err]),
-		    gen_server:reply(From, {error, send_failed}),
-		    {stop, normal, State_1}
-	    end;
-	Err ->
-	    shutting_down(State_1),
-	    do_trace("Send failed... Reason: ~p~n", [Err]),
-	    gen_server:reply(From, {error, send_failed}),
-	    {stop, normal, State_1}
+                                 Headers_1,
+                                 AbsPath, RelPath, Body, Options, State_1),
+    trace_request(Req),
+    do_setopts(Socket, Caller_socket_options, Is_ssl),
+    case do_send(Req, State_1) of
+        ok ->
+            case do_send_body(Body_1, State_1) of
+                ok ->
+                    State_2 = inc_pipeline_counter(State_1),
+                    active_once(State_2),
+                    Ref = case Timeout of
+                              infinity ->
+                                  undefined;
+                              _ ->
+                                  erlang:send_after(Timeout, self(), {req_timedout, From})
+                          end,
+                    State_3 = case Status of
+                                  idle ->
+                                      State_2#state{status     = get_header,
+                                                    cur_req    = NewReq,
+                                                    send_timer = Ref};
+                                  _ ->
+                                      State_2#state{send_timer = Ref}
+                              end,
+                    case StreamTo of
+                        undefined ->
+                            ok;
+                        _ ->
+                            gen_server:reply(From, {ibrowse_req_id, ReqId})
+                    end,
+                    set_inac_timer(State_1),
+                    {noreply, State_3};
+                Err ->
+                    shutting_down(State_1),
+                    do_trace("Send failed... Reason: ~p~n", [Err]),
+                    gen_server:reply(From, {error, send_failed}),
+                    {stop, normal, State_1}
+            end;
+        Err ->
+            shutting_down(State_1),
+            do_trace("Send failed... Reason: ~p~n", [Err]),
+            gen_server:reply(From, {error, send_failed}),
+            {stop, normal, State_1}
+    end.
+
+maybe_modify_headers(#url{}, connect, _, Headers, State) ->
+    add_proxy_auth_headers(State, Headers);
+maybe_modify_headers(#url{host = Host, port = Port} = Url,
+                     _Method,
+                     Options, Headers, State) ->
+    case get_value(headers_as_is, Options, false) of
+        false ->
+            Headers_1 = add_auth_headers(Url, Options, Headers, State),
+            HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
+                                  false ->
+                                      case Port of
+                                          80 -> Host;
+                                          _ -> [Host, ":", integer_to_list(Port)]
+                                      end;
+                                  {value, {_, Host_h_val}} ->
+                                      Host_h_val
+                              end,
+            [{"Host", HostHeaderValue} | Headers_1];
+        true ->
+            Headers
     end.
 
 add_auth_headers(#url{username = User,
-		      password = UPw},
-		 Options,
-		 Headers,
-		 #state{use_proxy = UseProxy,
-		        proxy_auth_digest = ProxyAuthDigest}) ->
+                      password = UPw},
+                 Options,
+                 Headers,
+                 State) ->
     Headers_1 = case User of
-		    undefined ->
-			case get_value(basic_auth, Options, undefined) of
-			    undefined ->
-				Headers;
-			    {U,P} ->
-				[{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
-			end;
-		    _ ->
-			[{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
-		end,
-    case UseProxy of
-	false ->
-	    Headers_1;
-	true when ProxyAuthDigest == [] ->
-	    Headers_1;
-	true ->
-	    [{"Proxy-Authorization", ["Basic ", ProxyAuthDigest]} | Headers_1]
-    end.
+                    undefined ->
+                        case get_value(basic_auth, Options, undefined) of
+                            undefined ->
+                                Headers;
+                            {U,P} ->
+                                [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
+                        end;
+                    _ ->
+                        [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
+                end,
+    add_proxy_auth_headers(State, Headers_1).
+
+add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
+    Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
+    Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
+    [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
 
 http_auth_digest([], []) ->
     [];
 http_auth_digest(Username, Password) ->
-    encode_base64(Username ++ [$: | Password]).
+    ibrowse_lib:encode_base64(Username ++ [$: | Password]).
 
-encode_base64([]) ->
-    [];
-encode_base64([A]) ->
-    [e(A bsr 2), e((A band 3) bsl 4), $=, $=];
-encode_base64([A,B]) ->
-    [e(A bsr 2), e(((A band 3) bsl 4) bor (B bsr 4)), e((B band 15) bsl 2), $=];
-encode_base64([A,B,C|Ls]) ->
-    encode_base64_do(A,B,C, Ls).
-encode_base64_do(A,B,C, Rest) ->
-    BB = (A bsl 16) bor (B bsl 8) bor C,
-    [e(BB bsr 18), e((BB bsr 12) band 63),
-     e((BB bsr 6) band 63), e(BB band 63)|encode_base64(Rest)].
-
-e(X) when X >= 0, X < 26 -> X+65;
-e(X) when X>25, X<52     -> X+71;
-e(X) when X>51, X<62     -> X-4;
-e(62)                    -> $+;
-e(63)                    -> $/;
-e(X)                     -> exit({bad_encode_base64_token, X}).
-
-make_request(Method, Headers, AbsPath, RelPath, Body, Options, UseProxy) ->
+make_request(Method, Headers, AbsPath, RelPath, Body, Options,
+             #state{use_proxy = UseProxy}) ->
     HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
     Headers_1 =
-	case get_value(content_length, Headers, false) of
-	    false when (Body == []) or
-	               (Body == <<>>) or
-	               is_tuple(Body) or
-	               is_function(Body) ->
-		Headers;
-	    false when is_binary(Body) ->
-		[{"content-length", integer_to_list(size(Body))} | Headers];
-	    false ->
-		[{"content-length", integer_to_list(length(Body))} | Headers];
-	    _ ->
-		Headers
-	end,
+        case get_value(content_length, Headers, false) of
+            false when (Body == []) or
+                       (Body == <<>>) or
+                       is_tuple(Body) or
+                       is_function(Body) ->
+                Headers;
+            false when is_binary(Body) ->
+                [{"content-length", integer_to_list(size(Body))} | Headers];
+            false ->
+                [{"content-length", integer_to_list(length(Body))} | Headers];
+            _ ->
+                Headers
+        end,
     {Headers_2, Body_1} =
-	case get_value(transfer_encoding, Options, false) of
-	    false ->
-		{Headers_1, Body};
-	    {chunked, ChunkSize} ->
-		{[{X, Y} || {X, Y} <- Headers_1,
-			    X /= "Content-Length",
-			    X /= "content-length",
-			    X /= content_length] ++
-		 [{"Transfer-Encoding", "chunked"}],
-		 chunk_request_body(Body, ChunkSize)}
-	end,
+        case get_value(transfer_encoding, Options, false) of
+            false ->
+                {Headers_1, Body};
+            {chunked, ChunkSize} ->
+                {[{X, Y} || {X, Y} <- Headers_1,
+                            X /= "Content-Length",
+                            X /= "content-length",
+                            X /= content_length] ++
+                 [{"Transfer-Encoding", "chunked"}],
+                 chunk_request_body(Body, ChunkSize)}
+        end,
     Headers_3 = cons_headers(Headers_2),
     Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
-	      true ->
-		  AbsPath;
-	      false ->
-		  RelPath
-	  end,
+              true ->
+                  AbsPath;
+              false ->
+                  RelPath
+          end,
     {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
 
 http_vsn_string({0,9}) -> "HTTP/0.9";
@@ -717,7 +815,7 @@ cons_headers([], Acc) ->
     encode_headers(Acc);
 cons_headers([{basic_auth, {U,P}} | T], Acc) ->
     cons_headers(T, [{"Authorization",
-		      ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
+                      ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
 cons_headers([{cookie, Cookie} | T], Acc) ->
     cons_headers(T, [{"Cookie", Cookie} | Acc]);
 cons_headers([{content_length, L} | T], Acc) ->
@@ -748,24 +846,23 @@ chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
                                               size(Body) >= ChunkSize ->
     <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
     Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
-	     ChunkBody, "\r\n"],
+             ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
     BodySize = size(Body),
     Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
-	     Body, "\r\n"],
+             Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when is_list(Body),
-                                              length(Body) >= ChunkSize ->
+chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
     {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
     Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
-	     ChunkBody, "\r\n"],
+             ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     BodySize = length(Body),
     Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
-	     Body, "\r\n"],
+             Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
 
@@ -773,114 +870,172 @@ chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
 parse_response(_Data, #state{cur_req = undefined}=State) ->
     State#state{status = idle};
 parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
-			    cur_req = CurReq} = State) ->
+                            cur_req = CurReq} = State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
-	     method=Method, response_format = Resp_format} = CurReq,
+             method=Method, response_format = Resp_format,
+             options = Options
+            } = CurReq,
     MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
     case scan_header(Acc, Data) of
-	{yes, Headers, Data_1}  ->
-	    do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
-	    do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
-	    {HttpVsn, StatCode, Headers_1} = parse_headers(Headers),
-	    do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
-	    LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
-	    ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
-	    IsClosing = is_connection_closing(HttpVsn, ConnClose),
-	    case IsClosing of
-		true ->
+        {yes, Headers, Data_1}  ->
+            do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
+            do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
+            {HttpVsn, StatCode, Headers_1, Status_line, Raw_headers} = parse_headers(Headers),
+            do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
+            LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
+            ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
+            IsClosing = is_connection_closing(HttpVsn, ConnClose),
+            case IsClosing of
+                true ->
                     shutting_down(State);
-		false ->
-		    ok
-	    end,
-	    State_1 = State#state{recvd_headers=Headers_1, status=get_body,
-				  reply_buffer = <<>>,
-				  http_status_code=StatCode, is_closing=IsClosing},
-	    put(conn_close, ConnClose),
-	    TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
-	    case get_value("content-length", LCHeaders, undefined) of
-		_ when Method == head ->
-		    {_, Reqs_1} = queue:out(Reqs),
-		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
-					 {ok, StatCode, Headers_1, []}),
-		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
-		    State_2 = reset_state(State_1_1),
-		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
-		    parse_response(Data_1, State_3);
-		_ when hd(StatCode) == $1 ->
-		    %% No message body is expected. Server may send
-		    %% one or more 1XX responses before a proper
-		    %% response.
-		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
-		    parse_response(Data_1, State_1#state{recvd_headers = [],
-							 status = get_header});
-		_ when StatCode == "204";
-		       StatCode == "304" ->
-		    %% No message body is expected for these Status Codes.
-		    %% RFC2616 - Sec 4.4
-		    {_, Reqs_1} = queue:out(Reqs),
-		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
-					 {ok, StatCode, Headers_1, []}),
-		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
-		    State_2 = reset_state(State_1_1),
-		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
-		    parse_response(Data_1, State_3);
-		_ when TransferEncoding == "chunked" ->
-		    do_trace("Chunked encoding detected...~n",[]),
-		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
-								 chunk_size=chunk_start,
-								 reply_buffer = <<>>}) of
-			{error, Reason} ->
-			    fail_pipelined_requests(State_1,
-						    {error, {Reason,
-							     {stat_code, StatCode}, Headers_1}}),
-			    {error, Reason};
-			State_2 ->
-			    State_2
-		    end;
-		undefined when HttpVsn == "HTTP/1.0";
-			       ConnClose == "close" ->
-		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1#state{reply_buffer = Data_1};
-		undefined ->
-		    fail_pipelined_requests(State_1,
-					    {error, {content_length_undefined,
-						     {stat_code, StatCode}, Headers}}),
-		    {error, content_length_undefined};
-		V ->
-		    case catch list_to_integer(V) of
-			V_1 when is_integer(V_1), V_1 >= 0 ->
-			    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-			    do_trace("Recvd Content-Length of ~p~n", [V_1]),
-			    State_2 = State_1#state{rep_buf_size=0,
-						    reply_buffer = <<>>,
-						    content_length=V_1},
-			    case parse_11_response(Data_1, State_2) of
-				{error, Reason} ->
-				    fail_pipelined_requests(State_1,
-							    {error, {Reason,
-								     {stat_code, StatCode}, Headers_1}}),
-				    {error, Reason};
-				State_3 ->
-				    State_3
-			    end;
-			_ ->
-			    fail_pipelined_requests(State_1,
-					    {error, {content_length_undefined,
-						     {stat_code, StatCode}, Headers}}),
-			    {error, content_length_undefined}
-		    end
-	    end;
-	{no, Acc_1} when MaxHeaderSize == infinity ->
-	    State#state{reply_buffer = Acc_1};
-	{no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
-	    State#state{reply_buffer = Acc_1};
-	{no, _Acc_1} ->
-	    fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
-	    {error, max_headers_size_exceeded}
+                false ->
+                    ok
+            end,
+            Give_raw_headers = get_value(give_raw_headers, Options, false),
+            State_1 = case Give_raw_headers of
+                          true ->
+                              State#state{recvd_headers=Headers_1, status=get_body,
+                                          reply_buffer = <<>>,
+                                          status_line = Status_line,
+                                          raw_headers = Raw_headers,
+                                          http_status_code=StatCode, is_closing=IsClosing};
+                          false ->
+                              State#state{recvd_headers=Headers_1, status=get_body,
+                                          reply_buffer = <<>>,
+                                          http_status_code=StatCode, is_closing=IsClosing}
+                      end,
+            put(conn_close, ConnClose),
+            TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+            case get_value("content-length", LCHeaders, undefined) of
+                _ when Method == connect, 
+                       hd(StatCode) == $2 ->
+                    cancel_timer(State#state.send_timer),
+                    {_, Reqs_1} = queue:out(Reqs),
+                    upgrade_to_ssl(set_cur_request(State#state{reqs = Reqs_1,
+                                                               recvd_headers = [],
+                                                               status = idle
+                                                              }));
+                _ when Method == connect ->
+                    {_, Reqs_1} = queue:out(Reqs),
+                    do_error_reply(State#state{reqs = Reqs_1},
+                                   {error, proxy_tunnel_failed}),
+                    {error, proxy_tunnel_failed};
+                _ when Method == head ->
+                    {_, Reqs_1} = queue:out(Reqs),
+                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+                                         {ok, StatCode, Headers_1, []}),
+                    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+                    State_2 = reset_state(State_1_1),
+                    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+                    parse_response(Data_1, State_3);
+                _ when hd(StatCode) =:= $1 ->
+                    %% No message body is expected. Server may send
+                    %% one or more 1XX responses before a proper
+                    %% response.
+                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                    do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
+                    parse_response(Data_1, State_1#state{recvd_headers = [],
+                                                         status = get_header});
+                _ when StatCode =:= "204";
+                       StatCode =:= "304" ->
+                    %% No message body is expected for these Status Codes.
+                    %% RFC2616 - Sec 4.4
+                    {_, Reqs_1} = queue:out(Reqs),
+                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+                                         {ok, StatCode, Headers_1, []}),
+                    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+                    State_2 = reset_state(State_1_1),
+                    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+                    parse_response(Data_1, State_3);
+                _ when TransferEncoding =:= "chunked" ->
+                    do_trace("Chunked encoding detected...~n",[]),
+                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                    case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
+                                                                 chunk_size=chunk_start,
+                                                                 reply_buffer = <<>>}) of
+                        {error, Reason} ->
+                            fail_pipelined_requests(State_1,
+                                                    {error, {Reason,
+                                                             {stat_code, StatCode}, Headers_1}}),
+                            {error, Reason};
+                        State_2 ->
+                            State_2
+                    end;
+                undefined when HttpVsn =:= "HTTP/1.0";
+                               ConnClose =:= "close" ->
+                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                    State_1#state{reply_buffer = Data_1};
+                undefined ->
+                    fail_pipelined_requests(State_1,
+                                            {error, {content_length_undefined,
+                                                     {stat_code, StatCode}, Headers}}),
+                    {error, content_length_undefined};
+                V ->
+                    case catch list_to_integer(V) of
+                        V_1 when is_integer(V_1), V_1 >= 0 ->
+                            send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+                            do_trace("Recvd Content-Length of ~p~n", [V_1]),
+                            State_2 = State_1#state{rep_buf_size=0,
+                                                    reply_buffer = <<>>,
+                                                    content_length=V_1},
+                            case parse_11_response(Data_1, State_2) of
+                                {error, Reason} ->
+                                    fail_pipelined_requests(State_1,
+                                                            {error, {Reason,
+                                                                     {stat_code, StatCode}, Headers_1}}),
+                                    {error, Reason};
+                                State_3 ->
+                                    State_3
+                            end;
+                        _ ->
+                            fail_pipelined_requests(State_1,
+                                                    {error, {content_length_undefined,
+                                                             {stat_code, StatCode}, Headers}}),
+                            {error, content_length_undefined}
+                    end
+            end;
+        {no, Acc_1} when MaxHeaderSize == infinity ->
+            State#state{reply_buffer = Acc_1};
+        {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
+            State#state{reply_buffer = Acc_1};
+        {no, _Acc_1} ->
+            fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
+            {error, max_headers_size_exceeded}
+    end.
+
+upgrade_to_ssl(#state{socket = Socket, 
+                      connect_timeout = Conn_timeout,
+                      ssl_options = Ssl_options,
+                      tunnel_setup_queue = Q} = State) ->
+    case ssl:connect(Socket, Ssl_options, Conn_timeout) of
+        {ok, Ssl_socket} ->
+            do_trace("Upgraded to SSL socket!!~n", []),
+            State_1 = State#state{socket = Ssl_socket,
+                                  proxy_tunnel_setup = done},
+            send_queued_requests(lists:reverse(Q), State_1);
+        Err ->
+            do_trace("Upgrade to SSL socket failed. Reason: ~p~n", [Err]),
+            do_error_reply(State, {error, send_failed}),
+            {error, send_failed}
+    end.
+
+send_queued_requests([], State) ->
+    do_trace("Sent all queued requests via SSL connection~n", []),
+    State#state{tunnel_setup_queue = done};
+send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
+                     State) ->
+    case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
+        {noreply, State_1} ->
+            send_queued_requests(Q, State_1);
+        _ ->
+            do_trace("Error sending queued SSL request: ~n"
+                     "URL     : ~s~n"
+                     "Method  : ~p~n"
+                     "Headers : ~p~n", [Url, Method, Headers]),
+            do_error_reply(State, {error, send_failed}),
+            {error, send_failed}
     end.
 
 is_connection_closing("HTTP/0.9", _)       -> true;
@@ -890,200 +1045,215 @@ is_connection_closing(_, _)                -> false.
 
 %% This clause determines the chunk size when given data from the beginning of the chunk
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked,
-			 chunk_size = chunk_start,
-			 chunk_size_buffer = Chunk_sz_buf
-			} = State) ->
+                  #state{transfer_encoding = chunked, 
+                         chunk_size = chunk_start,
+                         chunk_size_buffer = Chunk_sz_buf
+                        } = State) ->
     case scan_crlf(Chunk_sz_buf, DataRecvd) of
-	{yes, ChunkHeader, Data_1} ->
-	    case parse_chunk_header(ChunkHeader) of
-		{error, Reason} ->
-		    {error, Reason};
-		ChunkSize ->
-		    %%
-		    %% Do we have to preserve the chunk encoding when
-		    %% streaming? NO. This should be transparent to the client
-		    %% process. Chunked encoding was only introduced to make
-		    %% it efficient for the server.
-		    %%
-		    RemLen = size(Data_1),
-		    do_trace("Determined chunk size: ~p. Already recvd: ~p~n", [ChunkSize, RemLen]),
-		    parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
-							  deleted_crlf = true,
-							  recvd_chunk_size = 0,
-							  chunk_size = ChunkSize})
-	    end;
-	{no, Data_1} ->
-	    State#state{chunk_size_buffer = Data_1}
+        {yes, ChunkHeader, Data_1} ->
+            ChunkSize = parse_chunk_header(ChunkHeader),
+            %%
+            %% Do we have to preserve the chunk encoding when
+            %% streaming? NO. This should be transparent to the client
+            %% process. Chunked encoding was only introduced to make
+            %% it efficient for the server.
+            %%
+            RemLen = size(Data_1),
+            do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
+                     [ChunkSize, RemLen]),
+            parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
+                                                  deleted_crlf = true,
+                                                  recvd_chunk_size = 0,
+                                                  chunk_size = ChunkSize});
+        {no, Data_1} ->
+            State#state{chunk_size_buffer = Data_1}
     end;
 
 %% This clause is to remove the CRLF between two chunks
 %%
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked,
-			 chunk_size = tbd,
-			 chunk_size_buffer = Buf}=State) ->
+                  #state{transfer_encoding = chunked, 
+                         chunk_size = tbd,
+                         chunk_size_buffer = Buf}=State) ->
     case scan_crlf(Buf, DataRecvd) of
-	{yes, _, NextChunk} ->
-	    State_1 = State#state{chunk_size = chunk_start,
-				  chunk_size_buffer = <<>>,
-				  deleted_crlf = true},
-	    parse_11_response(NextChunk, State_1);
-	{no, Data_1} ->
-	    State#state{chunk_size_buffer = Data_1}
+        {yes, _, NextChunk} ->
+            State_1 = State#state{chunk_size = chunk_start,
+                                  chunk_size_buffer = <<>>,
+                                  deleted_crlf = true},
+            parse_11_response(NextChunk, State_1);
+        {no, Data_1} ->
+            State#state{chunk_size_buffer = Data_1}
     end;
 
 %% This clause deals with the end of a chunked transfer. ibrowse does
 %% not support Trailers in the Chunked Transfer encoding. Any trailer
 %% received is silently discarded.
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked, chunk_size = 0,
-			 cur_req = CurReq,
-			 deleted_crlf = DelCrlf,
-			 chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
+                  #state{transfer_encoding = chunked, chunk_size = 0, 
+                         cur_req = CurReq,
+                         deleted_crlf = DelCrlf,
+                         chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
     do_trace("Detected end of chunked transfer...~n", []),
     DataRecvd_1 = case DelCrlf of
-		      false ->
-			  DataRecvd;
-		      true ->
-			  <<$\r, $\n, DataRecvd/binary>>
+                      false ->
+                          DataRecvd;
+                      true ->
+                          <<$\r, $\n, DataRecvd/binary>>
                   end,
     case scan_header(Trailer, DataRecvd_1) of
-	{yes, _TEHeaders, Rem} ->
-	    {_, Reqs_1} = queue:out(Reqs),
-	    State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
-	    parse_response(Rem, reset_state(State_1));
-	{no, Rem} ->
-	    State#state{chunk_size_buffer = Rem, deleted_crlf = false}
+        {yes, _TEHeaders, Rem} ->
+            {_, Reqs_1} = queue:out(Reqs),
+            State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
+            parse_response(Rem, reset_state(State_1));
+        {no, Rem} ->
+            State#state{chunk_size_buffer = Rem, deleted_crlf = false}
     end;
 
 %% This clause extracts a chunk, given the size.
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked,
-			 chunk_size = CSz,
-			 recvd_chunk_size = Recvd_csz,
-			 rep_buf_size = RepBufSz} = State) ->
+                  #state{transfer_encoding = chunked,
+                         chunk_size = CSz,
+                         recvd_chunk_size = Recvd_csz,
+                         rep_buf_size = RepBufSz} = State) ->
     NeedBytes = CSz - Recvd_csz,
     DataLen = size(DataRecvd),
     do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
     case DataLen >= NeedBytes of
-	true ->
-	    {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
-	    do_trace("Recvd another chunk...~n", []),
-	    do_trace("RemData -> ~p~n", [RemData]),
-	    case accumulate_response(RemChunk, State) of
-		{error, Reason} ->
-		    do_trace("Error accumulating response --> ~p~n", [Reason]),
-		    {error, Reason};
-		#state{} = State_1 ->
-		    State_2 = State_1#state{chunk_size=tbd},
-		    parse_11_response(RemData, State_2)
-	    end;
-	false ->
-	    accumulate_response(DataRecvd,
-				State#state{rep_buf_size = RepBufSz + DataLen,
-					    recvd_chunk_size = Recvd_csz + DataLen})
+        true ->
+            {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
+            do_trace("Recvd another chunk...~n", []),
+            do_trace("RemData -> ~p~n", [RemData]),
+            case accumulate_response(RemChunk, State) of
+                {error, Reason} ->
+                    do_trace("Error accumulating response --> ~p~n", [Reason]),
+                    {error, Reason};
+                #state{} = State_1 ->
+                    State_2 = State_1#state{chunk_size=tbd},
+                    parse_11_response(RemData, State_2)
+            end;
+        false ->
+            accumulate_response(DataRecvd,
+                                State#state{rep_buf_size = RepBufSz + DataLen,
+                                            recvd_chunk_size = Recvd_csz + DataLen})
     end;
 
 %% This clause to extract the body when Content-Length is specified
 parse_11_response(DataRecvd,
-		  #state{content_length=CL, rep_buf_size=RepBufSz,
-			 reqs=Reqs}=State) ->
+                  #state{content_length=CL, rep_buf_size=RepBufSz,
+                         reqs=Reqs}=State) ->
     NeedBytes = CL - RepBufSz,
     DataLen = size(DataRecvd),
     case DataLen >= NeedBytes of
-	true ->
-	    {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
-	    {_, Reqs_1} = queue:out(Reqs),
-	    State_1 = accumulate_response(RemBody, State),
-	    State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
-	    State_3 = reset_state(State_2),
-	    parse_response(Rem, State_3);
-	false ->
-	    accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
+        true ->
+            {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
+            {_, Reqs_1} = queue:out(Reqs),
+            State_1 = accumulate_response(RemBody, State),
+            State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
+            State_3 = reset_state(State_2),
+            parse_response(Rem, State_3);
+        false ->
+            accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
     end.
 
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-			 response_format = Resp_format,
-			 save_response_to_file = SaveResponseToFile,
-			 tmp_file_name = TmpFilename,
-			 tmp_file_fd = Fd
-			},
-		#state{http_status_code = SCode,
-		       send_timer = ReqTimer,
-		       reply_buffer = RepBuf,
-		       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
+                         response_format = Resp_format,
+                         save_response_to_file = SaveResponseToFile,
+                         tmp_file_name = TmpFilename,
+                         tmp_file_fd = Fd,
+                         options       = Options
+                        },
+                #state{http_status_code = SCode,
+                       status_line   = Status_line,
+                       raw_headers   = Raw_headers,
+                       send_timer    = ReqTimer,
+                       reply_buffer  = RepBuf,
+                       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
     State_1 = set_cur_request(State),
     file:close(Fd),
     ResponseBody = case TmpFilename of
-		       undefined ->
-			   Body;
-		       _ ->
-			   {file, TmpFilename}
-		   end,
-    State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
-		       {ok, SCode, RespHeaders, ResponseBody}),
+                       undefined ->
+                           Body;
+                       _ ->
+                           {file, TmpFilename}
+                   end,
+    Reply = case get_value(give_raw_headers, Options, false) of
+                true ->
+                    {ok, Status_line, Raw_headers, ResponseBody};
+                false ->
+                    {ok, SCode, RespHeaders, ResponseBody}
+            end,
+    State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format, Reply),
     cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
     State_2;
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-			 response_format = Resp_format},
-		#state{http_status_code=SCode, recvd_headers=RespHeaders,
-		       reply_buffer = RepBuf,
-		       send_timer=ReqTimer}=State) ->
+                         response_format = Resp_format,
+                         options = Options},
+                #state{http_status_code = SCode,
+                       status_line      = Status_line,
+                       raw_headers      = Raw_headers,
+                       recvd_headers    = RespHeaders,
+                       reply_buffer     = RepBuf,
+                       send_timer       = ReqTimer} = State) ->
     Body = RepBuf,
 %%    State_1 = set_cur_request(State),
+    Reply = case get_value(give_raw_headers, Options, false) of
+                true ->
+                    {ok, Status_line, Raw_headers, Body};
+                false ->
+                    {ok, SCode, RespHeaders, Body}
+            end,
     State_1 = case get(conn_close) of
-	"close" ->
-	    do_reply(State, From, StreamTo, ReqId, Resp_format,
-		     {ok, SCode, RespHeaders, Body}),
-	    exit(normal);
-	_ ->
-	    State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format,
-				 {ok, SCode, RespHeaders, Body}),
-	    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-	    State_1_1
+        "close" ->
+            do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+            exit(normal);
+        _ ->
+            State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+            cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+            State_1_1
     end,
     set_cur_request(State_1).
 
 reset_state(State) ->
     State#state{status            = get_header,
-		rep_buf_size      = 0,
-		streamed_size     = 0,
-		content_length    = undefined,
-		reply_buffer      = <<>>,
-		chunk_size_buffer = <<>>,
-		recvd_headers     = [],
-		deleted_crlf      = false,
-		http_status_code  = undefined,
-		chunk_size        = undefined,
-		transfer_encoding = undefined}.
+                rep_buf_size      = 0,
+                streamed_size     = 0,
+                content_length    = undefined,
+                reply_buffer      = <<>>,
+                chunk_size_buffer = <<>>,
+                recvd_headers     = [],
+                status_line       = undefined,
+                raw_headers       = undefined,
+                deleted_crlf      = false,
+                http_status_code  = undefined,
+                chunk_size        = undefined,
+                transfer_encoding = undefined}.
 
 set_cur_request(#state{reqs = Reqs} = State) ->
     case queue:to_list(Reqs) of
-	[] ->
-	    State#state{cur_req = undefined};
-	[NextReq | _] ->
-	    State#state{cur_req = NextReq}
+        [] ->
+            State#state{cur_req = undefined};
+        [NextReq | _] ->
+            State#state{cur_req = NextReq}
     end.
 
 parse_headers(Headers) ->
     case scan_crlf(Headers) of
-	{yes, StatusLine, T} ->
-	    parse_headers(StatusLine, T);
-	{no, StatusLine} ->
-	    parse_headers(StatusLine, <<>>)
+        {yes, StatusLine, T} ->
+            parse_headers(StatusLine, T);
+        {no, StatusLine} ->
+            parse_headers(StatusLine, <<>>)
     end.
 
 parse_headers(StatusLine, Headers) ->
     Headers_1 = parse_headers_1(Headers),
     case parse_status_line(StatusLine) of
-	{ok, HttpVsn, StatCode, _Msg} ->
-	    put(http_prot_vsn, HttpVsn),
-	    {HttpVsn, StatCode, Headers_1};
-	_ -> %% A HTTP 0.9 response?
-	    put(http_prot_vsn, "HTTP/0.9"),
-	    {"HTTP/0.9", undefined, Headers}
+        {ok, HttpVsn, StatCode, _Msg} ->
+            put(http_prot_vsn, HttpVsn),
+            {HttpVsn, StatCode, Headers_1, StatusLine, Headers};
+        _ -> %% A HTTP 0.9 response?
+            put(http_prot_vsn, "HTTP/0.9"),
+            {"HTTP/0.9", undefined, Headers, StatusLine, Headers}
     end.
 
 % From RFC 2616
@@ -1094,22 +1264,22 @@ parse_headers(StatusLine, Headers) ->
 %    SP. A recipient MAY replace any linear white space with a single
 %    SP before interpreting the field value or forwarding the message
 %    downstream.
-	parse_headers_1(B) when is_binary(B) ->
-					   parse_headers_1(binary_to_list(B));
-	parse_headers_1(String) ->
-					   parse_headers_1(String, [], []).
+parse_headers_1(B) when is_binary(B) ->
+    parse_headers_1(binary_to_list(B));
+parse_headers_1(String) ->
+    parse_headers_1(String, [], []).
 
-parse_headers_1([$\n, H |T], [$\r | L], Acc) when H == 32;
-						  H == $\t ->
+parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
+                                                  H =:= $\t ->
     parse_headers_1(lists:dropwhile(fun(X) ->
-					    is_whitespace(X)
-				    end, T), [32 | L], Acc);
+                                            is_whitespace(X)
+                                    end, T), [32 | L], Acc);
 parse_headers_1([$\n|T], [$\r | L], Acc) ->
     case parse_header(lists:reverse(L)) of
-	invalid ->
-	    parse_headers_1(T, [], Acc);
-	NewHeader ->
-	    parse_headers_1(T, [], [NewHeader | Acc])
+        invalid ->
+            parse_headers_1(T, [], Acc);
+        NewHeader ->
+            parse_headers_1(T, [], [NewHeader | Acc])
     end;
 parse_headers_1([H|T],  L, Acc) ->
     parse_headers_1(T, [H|L], Acc);
@@ -1117,11 +1287,11 @@ parse_headers_1([], [], Acc) ->
     lists:reverse(Acc);
 parse_headers_1([], L, Acc) ->
     Acc_1 = case parse_header(lists:reverse(L)) of
-		invalid ->
-		    Acc;
-		NewHeader ->
-		    [NewHeader | Acc]
-	    end,
+                invalid ->
+                    Acc;
+                NewHeader ->
+                    [NewHeader | Acc]
+            end,
     lists:reverse(Acc_1).
 
 parse_status_line(Line) when is_binary(Line) ->
@@ -1139,10 +1309,9 @@ parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
 parse_status_line([], _, _, _) ->
     http_09.
 
-parse_header(B) when is_binary(B) ->
-    parse_header(binary_to_list(B));
 parse_header(L) ->
     parse_header(L, []).
+
 parse_header([$: | V], Acc) ->
     {lists:reverse(Acc), string:strip(V)};
 parse_header([H | T], Acc) ->
@@ -1152,11 +1321,11 @@ parse_header([], _) ->
 
 scan_header(Bin) ->
     case get_crlf_crlf_pos(Bin, 0) of
-	{yes, Pos} ->
-	    {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
-	    {yes, Headers, Body};
-	no ->
-	    {no, Bin}
+        {yes, Pos} ->
+            {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
+            {yes, Headers, Body};
+        no ->
+            {no, Bin}
     end.
 
 scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
@@ -1168,11 +1337,11 @@ scan_header(Bin1, Bin2) ->
     <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
     Bin_to_scan = <<Rest/binary, Bin2/binary>>,
     case get_crlf_crlf_pos(Bin_to_scan, 0) of
-	{yes, Pos} ->
-	    {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
-	    {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
-	no ->
-	    {no, <<Bin1/binary, Bin2/binary>>}
+        {yes, Pos} ->
+            {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+            {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+        no ->
+            {no, <<Bin1/binary, Bin2/binary>>}
     end.
 
 get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
@@ -1181,11 +1350,11 @@ get_crlf_crlf_pos(<<>>, _)                               -> no.
 
 scan_crlf(Bin) ->
     case get_crlf_pos(Bin) of
-	{yes, Pos} ->
-	    {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
-	    {yes, Prefix, Suffix};
-	no ->
-	    {no, Bin}
+        {yes, Pos} ->
+            {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+            {yes, Prefix, Suffix};
+        no ->
+            {no, Bin}
     end.
 
 scan_crlf(<<>>, Bin2) ->
@@ -1199,11 +1368,11 @@ scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
     <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
     Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
     case get_crlf_pos(Bin3) of
-	{yes, Pos} ->
-	    {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
-	    {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
-	no ->
-	    {no, list_to_binary([Bin1, Bin2])}
+        {yes, Pos} ->
+            {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+            {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
+        no ->
+            {no, list_to_binary([Bin1, Bin2])}
     end.
 
 get_crlf_pos(Bin) ->
@@ -1213,13 +1382,6 @@ get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
 get_crlf_pos(<<_, Rest/binary>>, Pos)     -> get_crlf_pos(Rest, Pos + 1);
 get_crlf_pos(<<>>, _)                     -> no.
 
-%% scan_crlf(<<$\n, T/binary>>, [$\r | L]) -> {yes, lists:reverse(L), T};
-%% scan_crlf(<<H, T/binary>>,  L)          -> scan_crlf(T, [H|L]);
-%% scan_crlf(<<>>, L)                      -> {no, L};
-%% scan_crlf([$\n|T], [$\r | L])           -> {yes, lists:reverse(L), T};
-%% scan_crlf([H|T],  L)                    -> scan_crlf(T, [H|L]);
-%% scan_crlf([], L)                        -> {no, L}.
-
 fmt_val(L) when is_list(L)    -> L;
 fmt_val(I) when is_integer(I) -> integer_to_list(I);
 fmt_val(A) when is_atom(A)    -> atom_to_list(A);
@@ -1240,7 +1402,8 @@ method(proppatch) -> "PROPPATCH";
 method(lock)      -> "LOCK";
 method(unlock)    -> "UNLOCK";
 method(move)      -> "MOVE";
-method(copy)      -> "COPY".
+method(copy)      -> "COPY";
+method(connect)   -> "CONNECT".
 
 %% From RFC 2616
 %%
@@ -1250,19 +1413,19 @@ method(copy)      -> "COPY".
 % fields. This allows dynamically produced content to be transferred
 % along with the information necessary for the recipient to verify
 % that it has received the full message.
-% 	Chunked-Body = 	*chunk
-% 			last-chunk
-% 			trailer
-% 			CRLF
-% 	chunk = chunk-size [ chunk-extension ] CRLF
-% 		chunk-data CRLF
-% 	chunk-size = 1*HEX
-% 	last-chunk = 1*("0") [ chunk-extension ] CRLF
-% 	chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
-% 	chunk-ext-name = token
-% 	chunk-ext-val = token | quoted-string
-% 	chunk-data = chunk-size(OCTET)
-% 	trailer = *(entity-header CRLF)
+%       Chunked-Body =  *chunk
+%                       last-chunk
+%                       trailer
+%                       CRLF
+%       chunk = chunk-size [ chunk-extension ] CRLF
+%               chunk-data CRLF
+%       chunk-size = 1*HEX
+%       last-chunk = 1*("0") [ chunk-extension ] CRLF
+%       chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+%       chunk-ext-name = token
+%       chunk-ext-val = token | quoted-string
+%       chunk-data = chunk-size(OCTET)
+%       trailer = *(entity-header CRLF)
 % The chunk-size field is a string of hex digits indicating the size
 % of the chunk. The chunked encoding is ended by any chunk whose size
 % is zero, followed by the trailer, which is terminated by an empty
@@ -1271,8 +1434,6 @@ method(copy)      -> "COPY".
 %% The parsing implemented here discards all chunk extensions. It also
 %% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
 %% sending them.
-parse_chunk_header([]) ->
-    throw({error, invalid_chunk_size});
 parse_chunk_header(ChunkHeader) ->
     parse_chunk_header(ChunkHeader, []).
 
@@ -1280,10 +1441,10 @@ parse_chunk_header(<<$;, _/binary>>, Acc) ->
     hexlist_to_integer(lists:reverse(Acc));
 parse_chunk_header(<<H, T/binary>>, Acc) ->
     case is_whitespace(H) of
-	true ->
-	    parse_chunk_header(T, Acc);
-	false ->
-	    parse_chunk_header(T, [H | Acc])
+        true ->
+            parse_chunk_header(T, Acc);
+        false ->
+            parse_chunk_header(T, [H | Acc])
     end;
 parse_chunk_header(<<>>, Acc) ->
     hexlist_to_integer(lists:reverse(Acc)).
@@ -1294,24 +1455,31 @@ is_whitespace($\n) -> true;
 is_whitespace($\t) -> true;
 is_whitespace(_)   -> false.
 
-
-send_async_headers(_ReqId, undefined, _StatCode, _Headers) ->
+send_async_headers(_ReqId, undefined, _, _State) ->
     ok;
-send_async_headers(ReqId, StreamTo, StatCode, Headers) ->
-    catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers}.
+send_async_headers(ReqId, StreamTo, Give_raw_headers, 
+                   #state{status_line = Status_line, raw_headers = Raw_headers, 
+                          recvd_headers = Headers, http_status_code = StatCode
+                          }) ->
+    case Give_raw_headers of
+        false ->
+            catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers};
+        true ->
+            catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers}
+    end.
 
 format_response_data(Resp_format, Body) ->
     case Resp_format of
-	list when is_list(Body) ->
-	    flatten(Body);
-	list when is_binary(Body) ->
-	    binary_to_list(Body);
-	binary when is_list(Body) ->
-	    list_to_binary(Body);
-	_ ->
-	    %% This is to cater for sending messages such as
-	    %% {chunk_start, _}, chunk_end etc
-	    Body
+        list when is_list(Body) ->
+            flatten(Body);
+        list when is_binary(Body) ->
+            binary_to_list(Body);
+        binary when is_list(Body) ->
+            list_to_binary(Body);
+        _ ->
+            %% This is to cater for sending messages such as
+            %% {chunk_start, _}, chunk_end etc
+            Body
     end.
 
 do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
@@ -1322,14 +1490,14 @@ do_reply(State, From, undefined, _, _, Msg) ->
     gen_server:reply(From, Msg),
     dec_pipeline_counter(State);
 do_reply(#state{prev_req_id = Prev_req_id} = State,
-	 _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
+         _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
     State_1 = dec_pipeline_counter(State),
     case Body of
-	[] ->
-	    ok;
-	_ ->
-	    Body_1 = format_response_data(Resp_format, Body),
-	    catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
+        [] ->
+            ok;
+        _ ->
+            Body_1 = format_response_data(Resp_format, Body),
+            catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
     end,
     catch StreamTo ! {ibrowse_async_response_end, ReqId},
     %% We don't want to delete the Req-id to Pid mapping straightaway
@@ -1356,23 +1524,28 @@ do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
     Msg_1 = format_response_data(Response_format, Msg),
     catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
 
-do_error_reply(#state{reqs = Reqs} = State, Err) ->
+do_error_reply(#state{reqs = Reqs, tunnel_setup_queue = Tun_q} = State, Err) ->
     ReqList = queue:to_list(Reqs),
     lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-			       response_format = Resp_format}) ->
-			  ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
+                               response_format = Resp_format}) ->
+                          ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
                           do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
-		  end, ReqList).
+                  end, ReqList),
+    lists:foreach(
+      fun({From, _Url, _Headers, _Method, _Body, _Options, _Timeout}) ->
+              do_reply(State, From, undefined, undefined, undefined, Err)
+      end, Tun_q).
 
 fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
     {_, Reqs_1} = queue:out(Reqs),
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
-	     response_format = Resp_format} = CurReq,
+             response_format = Resp_format} = CurReq,
     do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
     do_error_reply(State#state{reqs = Reqs_1}, previous_request_failed).
 
 split_list_at(List, N) ->
     split_list_at(List, N, []).
+
 split_list_at([], _, Acc) ->
     {lists:reverse(Acc), []};
 split_list_at(List2, 0, List1) ->
@@ -1382,6 +1555,7 @@ split_list_at([H | List2], N, List1) ->
 
 hexlist_to_integer(List) ->
     hexlist_to_integer(lists:reverse(List), 1, 0).
+
 hexlist_to_integer([H | T], Multiplier, Acc) ->
     hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
 hexlist_to_integer([], _, Acc) ->
@@ -1416,10 +1590,10 @@ cancel_timer(Ref)       -> erlang:cancel_timer(Ref).
 cancel_timer(Ref, {eat_message, Msg}) ->
     cancel_timer(Ref),
     receive
-	Msg ->
-	    ok
+        Msg ->
+            ok
     after 0 ->
-	    ok
+            ok
     end.
 
 make_req_id() ->
@@ -1437,7 +1611,7 @@ to_lower([], Acc) ->
 shutting_down(#state{lb_ets_tid = undefined}) ->
     ok;
 shutting_down(#state{lb_ets_tid = Tid,
-		     cur_pipeline_size = Sz}) ->
+                     cur_pipeline_size = Sz}) ->
     catch ets:delete(Tid, {Sz, self()}).
 
 inc_pipeline_counter(#state{is_closing = true} = State) ->
@@ -1450,7 +1624,7 @@ dec_pipeline_counter(#state{is_closing = true} = State) ->
 dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
     State;
 dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
-			    lb_ets_tid = Tid} = State) ->
+                            lb_ets_tid = Tid} = State) ->
     ets:delete(Tid, {Pipe_sz, self()}),
     ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
     State#state{cur_pipeline_size = Pipe_sz - 1}.
@@ -1464,13 +1638,35 @@ flatten([]) ->
 
 get_stream_chunk_size(Options) ->
     case lists:keysearch(stream_chunk_size, 1, Options) of
-	{value, {_, V}} when V > 0 ->
-	    V;
-	_ ->
-	    ?DEFAULT_STREAM_CHUNK_SIZE
+        {value, {_, V}} when V > 0 ->
+            V;
+        _ ->
+            ?DEFAULT_STREAM_CHUNK_SIZE
     end.
 
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
+set_inac_timer(State) ->
+    set_inac_timer(State, get_inac_timeout(State)).
+
+set_inac_timer(_State, Timeout) when is_integer(Timeout) ->
+    erlang:send_after(Timeout, self(), timeout);
+set_inac_timer(_, _) ->
+    undefined.
+
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) -> 
     get_value(inactivity_timeout, Opts, infinity);
 get_inac_timeout(#state{cur_req = undefined}) ->
     infinity.
+
+trace_request(Req) ->
+    case get(my_trace_flag) of
+        true ->
+            %%Avoid the binary operations if trace is not on...
+            NReq = binary_to_list(list_to_binary(Req)),
+            do_trace("Sending request: ~n"
+                     "--- Request Begin ---~n~s~n"
+                     "--- Request End ---~n", [NReq]);
+        _ -> ok
+    end.
+
+to_integer(X) when is_list(X)    -> list_to_integer(X); 
+to_integer(X) when is_integer(X) -> X.
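
The proxy tunnelling introduced in this patch works in two stages: send_req_1/8 first issues a CONNECT request to the proxy and parks the real requests in tunnel_setup_queue; when parse_response/2 sees a 2xx reply to that CONNECT, upgrade_to_ssl/1 hands the already-connected TCP socket to ssl:connect/3 and then replays the queued requests over the encrypted link. No second TCP connection is opened: the socket the proxy accepted is the one that gets wrapped. A standalone sketch of the same flow (not part of the patch; the module name, arguments and timeouts are placeholders, and a real client keeps reading until the full response head has arrived, as ibrowse does) looks like this:

    %% Minimal sketch of the CONNECT-then-upgrade flow shown above.
    -module(connect_tunnel_sketch).
    -export([open/4]).

    open(ProxyHost, ProxyPort, DestHost, DestPort) ->
        ssl:start(),
        {ok, Sock} = gen_tcp:connect(ProxyHost, ProxyPort,
                                     [binary, {packet, raw}, {active, false}]),
        Req = ["CONNECT ", DestHost, ":", integer_to_list(DestPort),
               " HTTP/1.1\r\nHost: ", DestHost, "\r\n\r\n"],
        ok = gen_tcp:send(Sock, Req),
        %% Simplified: assumes the whole status line arrives in one packet.
        {ok, Resp} = gen_tcp:recv(Sock, 0, 30000),
        case Resp of
            <<"HTTP/1.", _, " 2", _/binary>> ->
                %% Proxy accepted the tunnel: upgrade the same socket,
                %% exactly as upgrade_to_ssl/1 does with ssl:connect/3.
                ssl:connect(Sock, [], 30000);
            _ ->
                gen_tcp:close(Sock),
                {error, proxy_tunnel_failed}
        end.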

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 834054a..6bc600b 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -1,13 +1,11 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_lb.erl
 %%% Author  : chandru <ch...@t-mobile.co.uk>
-%%% Description :
+%%% Description : 
 %%%
 %%% Created :  6 Mar 2008 by chandru <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 -module(ibrowse_lb).
-
--vsn('$Id: ibrowse_lb.erl,v 1.2 2009/07/01 22:43:19 chandrusf Exp $ ').
 -author(chandru).
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
@@ -101,14 +99,14 @@ spawn_connection(Lb_pid, Url,
 % 	    #state{max_sessions = Max_sess,
 % 		   ets_tid = Tid,
 % 		   max_pipeline_size = Max_pipe_sz,
-% 		   num_cur_sessions = Num} = State)
+% 		   num_cur_sessions = Num} = State) 
 %     when Num >= Max ->
 %     Reply = find_best_connection(Tid),
 %     {reply, sorry_dude_reuse, State};
 
 %% Update max_sessions in #state with supplied value
 handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
-	    #state{num_cur_sessions = Num} = State)
+	    #state{num_cur_sessions = Num} = State) 
     when Num >= Max_sess ->
     State_1 = maybe_create_ets(State),
     Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),


[20/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Correcting ibrowse version number.


git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@997675 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/cb51bb13
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/cb51bb13
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/cb51bb13

Branch: refs/heads/import-master
Commit: cb51bb13946ea51b2ccc060a5ce69a04838a2eda
Parents: b7fafdc
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Thu Sep 16 10:08:17 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Thu Sep 16 10:08:17 2010 +0000

----------------------------------------------------------------------
 Makefile.am | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/cb51bb13/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 510f36a..b517486 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-1.5.2/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-1.6.2/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \


[30/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Whitespace


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/64f1b8c5
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/64f1b8c5
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/64f1b8c5

Branch: refs/heads/import-master
Commit: 64f1b8c5a0244be05f7ecad967be44dae2e1c700
Parents: ca1ed96
Author: Bob Dionne <bi...@apache.org>
Authored: Tue Oct 23 19:50:08 2012 -0400
Committer: Bob Dionne <bo...@cloudant.com>
Committed: Tue Oct 23 19:56:22 2012 -0400

----------------------------------------------------------------------
 ibrowse_http_client.erl | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/64f1b8c5/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index eb2bf31..fd91d40 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -46,7 +46,7 @@
                 reqs=queue:new(), cur_req, status=idle, http_status_code,
                 reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
                 recvd_headers=[],
-                status_line, raw_headers, 
+                status_line, raw_headers,
                 is_closing, send_timer, content_length,
                 deleted_crlf = false, transfer_encoding,
                 chunk_size, chunk_size_buffer = <<>>,
@@ -55,11 +55,11 @@
                }).
 
 -record(request, {url, method, options, from,
-                  stream_to, caller_controls_socket = false, 
+                  stream_to, caller_controls_socket = false,
                   caller_socket_options = [],
                   req_id,
                   stream_chunk_size,
-                  save_response_to_file = false, 
+                  save_response_to_file = false,
                   tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
                   response_format}).
 
@@ -208,7 +208,7 @@ handle_info({stream_close, _Req_id}, State) ->
     do_error_reply(State, closing_on_request),
     {stop, normal, State};
 
-handle_info({tcp_closed, _Sock}, State) ->    
+handle_info({tcp_closed, _Sock}, State) ->
     do_trace("TCP connection closed by peer!~n", []),
     handle_sock_closed(State),
     {stop, normal, State};
@@ -405,7 +405,7 @@ accumulate_response(Data, #state{reply_buffer      = RepBuf,
             State#state{reply_buffer = RepBuf_1};
         _ when Caller_controls_socket == true ->
             do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
-            State#state{reply_buffer = <<>>, 
+            State#state{reply_buffer = <<>>,
                         interim_reply_sent = true,
                         streamed_size = Streamed_size + size(RepBuf_1)};
         _ when New_data_size >= Stream_chunk_size ->
@@ -703,7 +703,7 @@ send_req_1(From,
             {stop, normal, State_1}
     end;
 
-send_req_1(From, Url, Headers, Method, Body, Options, Timeout, 
+send_req_1(From, Url, Headers, Method, Body, Options, Timeout,
            #state{proxy_tunnel_setup = in_progress,
                   tunnel_setup_queue = Q} = State) ->
     do_trace("Queued SSL request awaiting tunnel setup: ~n"
@@ -727,7 +727,7 @@ send_req_1(From,
             {Caller, once} when is_pid(Caller) or
                                 is_atom(Caller) ->
                 Async_pid_rec = {{req_id_pid, ReqId}, self()},
-                true = ets:insert(ibrowse_stream, Async_pid_rec), 
+                true = ets:insert(ibrowse_stream, Async_pid_rec),
                 {Caller, true};
             undefined ->
                 {undefined, false};
@@ -916,7 +916,7 @@ is_chunked_encoding_specified(Options) ->
     case get_value(transfer_encoding, Options, false) of
         false ->
             false;
-        {chunked, _} -> 
+        {chunked, _} ->
             true;
         chunked ->
             true
@@ -1027,7 +1027,7 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
             put(conn_close, ConnClose),
             TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
             case get_value("content-length", LCHeaders, undefined) of
-                _ when Method == connect, 
+                _ when Method == connect,
                        hd(StatCode) == $2 ->
                     cancel_timer(State#state.send_timer),
                     {_, Reqs_1} = queue:out(Reqs),
@@ -1125,7 +1125,7 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
             {error, max_headers_size_exceeded}
     end.
 
-upgrade_to_ssl(#state{socket = Socket, 
+upgrade_to_ssl(#state{socket = Socket,
                       connect_timeout = Conn_timeout,
                       ssl_options = Ssl_options,
                       tunnel_setup_queue = Q} = State) ->
@@ -1165,7 +1165,7 @@ is_connection_closing(_, _)                -> false.
 
 %% This clause determines the chunk size when given data from the beginning of the chunk
 parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked, 
+                  #state{transfer_encoding = chunked,
                          chunk_size        = chunk_start,
                          chunk_size_buffer = Chunk_sz_buf
                         } = State) ->
@@ -1193,7 +1193,7 @@ parse_11_response(DataRecvd,
 %% This clause is to remove the CRLF between two chunks
 %%
 parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked, 
+                  #state{transfer_encoding = chunked,
                          chunk_size = tbd,
                          chunk_size_buffer = Buf
                         } = State) ->
@@ -1212,7 +1212,7 @@ parse_11_response(DataRecvd,
 %% not support Trailers in the Chunked Transfer encoding. Any trailer
 %% received is silently discarded.
 parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked, chunk_size = 0, 
+                  #state{transfer_encoding = chunked, chunk_size = 0,
                          cur_req           = CurReq,
                          deleted_crlf      = DelCrlf,
                          chunk_size_buffer = Trailer,
@@ -1301,9 +1301,9 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
     case Fd of
-        undefined -> 
+        undefined ->
             ok;
-        _ -> 
+        _ ->
             ok = file:close(Fd)
     end,
     ResponseBody = case TmpFilename of
@@ -1595,8 +1595,8 @@ is_whitespace(_)   -> false.
 
 send_async_headers(_ReqId, undefined, _, _State) ->
     ok;
-send_async_headers(ReqId, StreamTo, Give_raw_headers, 
-                   #state{status_line = Status_line, raw_headers = Raw_headers, 
+send_async_headers(ReqId, StreamTo, Give_raw_headers,
+                   #state{status_line = Status_line, raw_headers = Raw_headers,
                           recvd_headers = Headers, http_status_code = StatCode,
                           cur_req = #request{options = Opts}
                          }) ->
@@ -1808,7 +1808,7 @@ set_inac_timer(State, Timeout) when is_integer(Timeout) ->
 set_inac_timer(State, _) ->
     State.
 
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) -> 
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
     get_value(inactivity_timeout, Opts, infinity);
 get_inac_timeout(#state{cur_req = undefined}) ->
     case ibrowse:get_config_value(inactivity_timeout, undefined) of
@@ -1851,5 +1851,5 @@ trace_request_body(Body) ->
             ok
     end.
 
-to_binary(X) when is_list(X)   -> list_to_binary(X); 
+to_binary(X) when is_list(X)   -> list_to_binary(X);
 to_binary(X) when is_binary(X) -> X.


[22/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Fix ibrowse 2.0.x inactivity timeouts not getting cleared.
Patch submitted upstream: http://github.com/cmullaparthi/ibrowse/issues/#issue/17



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1030534 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/a284c874
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/a284c874
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/a284c874

Branch: refs/heads/import-master
Commit: a284c874f0c72361dbe7362e8a15a67c969d80a0
Parents: db7f903
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Wed Nov 3 17:03:05 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Wed Nov 3 17:03:05 2010 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/a284c874/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 16d9b87..2dd209d 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -1713,7 +1713,15 @@ set_inac_timer(State) ->
     set_inac_timer(State, get_inac_timeout(State)).
 
 set_inac_timer(_State, Timeout) when is_integer(Timeout) ->
-    erlang:send_after(Timeout, self(), timeout);
+    TimerRef = erlang:send_after(Timeout, self(), timeout),
+    case erlang:put(inac_timer, TimerRef) of
+    OldTimer when is_reference(OldTimer) ->
+        erlang:cancel_timer(OldTimer),
+        receive timeout -> ok after 0 -> ok end;
+    _ ->
+        ok
+    end,
+    TimerRef;
 set_inac_timer(_, _) ->
     undefined.
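
A minimal sketch (not part of the patch) of the timer-reset pattern introduced
above: the connection process keeps the latest inactivity timer reference in
its process dictionary, cancels any previously armed timer, and flushes a
stale 'timeout' message, so at most one inactivity timer is ever pending.
The helper name reset_inactivity_timer/1 is illustrative only.

reset_inactivity_timer(Timeout) when is_integer(Timeout) ->
    %% Arm a fresh timer that delivers 'timeout' to this process.
    NewRef = erlang:send_after(Timeout, self(), timeout),
    %% Remember it; clean up whatever timer was armed before.
    case erlang:put(inac_timer, NewRef) of
        OldRef when is_reference(OldRef) ->
            erlang:cancel_timer(OldRef),
            %% Drop a 'timeout' that may already have been delivered.
            receive timeout -> ok after 0 -> ok end;
        _ ->
            ok
    end,
    NewRef.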
 


[07/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
ibrowse now allows the user to control the socket. Thanks again, Chandru.

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@790953 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/50228c10
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/50228c10
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/50228c10

Branch: refs/heads/import-master
Commit: 50228c10f6d2f393febad425452b04ba03c56c06
Parents: 7292757
Author: Adam Kocoloski <ko...@apache.org>
Authored: Fri Jul 3 15:56:51 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Fri Jul 3 15:56:51 2009 +0000

----------------------------------------------------------------------
 ibrowse.erl             |  37 ++--
 ibrowse_http_client.erl | 441 ++++++++++++++++++++++---------------------
 ibrowse_test.erl        |  46 ++++-
 3 files changed, 289 insertions(+), 235 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50228c10/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 1b0daad..0d3478b 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -89,6 +89,7 @@
 	 send_req_direct/5,
 	 send_req_direct/6,
 	 send_req_direct/7,
+	 stream_next/1,
 	 set_max_sessions/3,
 	 set_max_pipeline_size/3,
 	 set_dest/3,
@@ -150,7 +151,8 @@ stop() ->
 %% respHeader() = {headerName(), headerValue()}
 %% headerName() = string()
 %% headerValue() = string()
-%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {error, Reason}
+%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, req_id() } | {error, Reason}
+%% req_id = term()
 %% ResponseBody = string() | {file, Filename}
 %% Reason = term()
 send_req(Url, Headers, Method) ->
@@ -425,7 +427,20 @@ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
 	Err ->
 	    {error, {url_parsing_failed, Err}}
     end.
-    
+
+%% @doc Tell ibrowse to stream the next chunk of data to the
+%% caller. Should be used in conjunction with the
+%% <code>stream_to</code> option
+%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_next(Req_id) ->    
+    case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+	[] ->
+	    {error, unknown_req_id};
+	[{_, Pid}] ->
+	    catch Pid ! {stream_next, Req_id},
+	    ok
+    end.
+
 %% @doc Turn tracing on for the ibrowse process
 trace_on() ->
     ibrowse ! {trace, true}.
@@ -522,6 +537,7 @@ init(_) ->
     put(ibrowse_trace_token, "ibrowse"),
     ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
     ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
+    ets:new(ibrowse_stream, [named_table, public]),
     import_config(),
     {ok, #state{}}.
 
@@ -539,9 +555,9 @@ import_config(Filename) ->
 	{ok, Terms} ->
 	    ets:delete_all_objects(ibrowse_conf),
 	    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
-		     when list(Host), integer(Port),
-		     integer(MaxSess), MaxSess > 0,
-		     integer(MaxPipe), MaxPipe > 0, list(Options) ->
+		     when is_list(Host), is_integer(Port),
+		          is_integer(MaxSess), MaxSess > 0,
+		          is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
 			  I = [{{max_sessions, Host, Port}, MaxSess},
 			       {{max_pipeline_size, Host, Port}, MaxPipe},
 			       {{options, Host, Port}, Options}],
@@ -641,13 +657,6 @@ handle_info(all_trace_off, State) ->
 		      true ->
 			  catch Pid ! {trace, false}
 		  end;
-	     (#client_conn{key = {H, P, Pid}}, _) ->
-		  case lists:member({H, P}, Trace_on_dests) of
-		      false ->
-			  ok;
-		      true ->
-			  catch Pid ! {trace, false}
-		  end;
 	     (_, Acc) ->
 		  Acc
 	  end,
@@ -664,10 +673,6 @@ handle_info({trace, Bool, Host, Port}, State) ->
 	     when H == Host,
 		  P == Port ->
 		  catch Pid ! {trace, Bool};
-	     (#client_conn{key = {H, P, Pid}}, _)
-	     when H == Host,
-		  P == Port ->
-		  catch Pid ! {trace, Bool};
 	     (_, Acc) ->
 		  Acc
 	  end,
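
For readers skimming the patch, here is a minimal sketch (not part of the
commit) of the caller-controlled streaming flow that stream_next/1 enables:
request with {stream_to, {self(), once}} and pull each message explicitly.
The URL, timeout and function names below are placeholders.

fetch_once(Url) ->
    {ibrowse_req_id, Req_id} =
        ibrowse:send_req(Url, [], get, [],
                         [{stream_to, {self(), once}}], 30000),
    %% Nothing is delivered until the caller asks for it.
    ok = ibrowse:stream_next(Req_id),
    collect(Req_id, []).

collect(Req_id, Acc) ->
    receive
        {ibrowse_async_headers, Req_id, _Status, _Headers} ->
            ok = ibrowse:stream_next(Req_id),
            collect(Req_id, Acc);
        {ibrowse_async_response, Req_id, {error, Reason}} ->
            {error, Reason};
        {ibrowse_async_response, Req_id, Body_part} ->
            %% Ask for the next chunk only after handling this one.
            ok = ibrowse:stream_next(Req_id),
            collect(Req_id, [Body_part | Acc]);
        {ibrowse_async_response_end, Req_id} ->
            {ok, lists:reverse(Acc)}
    after 30000 ->
            {error, timeout}
    end.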

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50228c10/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 24214ff..3cacf39 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -47,11 +47,12 @@
 		is_closing, send_timer, content_length,
 		deleted_crlf = false, transfer_encoding,
 		chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
-		lb_ets_tid, cur_pipeline_size = 0
+		lb_ets_tid, cur_pipeline_size = 0, prev_req_id
 	       }).
 
 -record(request, {url, method, options, from,
-		  stream_to, req_id,
+		  stream_to, caller_controls_socket = false, 
+		  req_id,
 		  stream_chunk_size,
 		  save_response_to_file = false, 
 		  tmp_file_name, tmp_file_fd,
@@ -126,144 +127,15 @@ init({Host, Port}) ->
 %%--------------------------------------------------------------------
 %% Received a request when the remote server has already sent us a
 %% Connection: Close header
-handle_call({send_req, _},
-	    _From,
-	    #state{is_closing=true}=State) ->
+handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
     {reply, {error, connection_closing}, State};
 
 handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
-	    From,
-	    #state{socket=undefined,
-		   host=Host, port=Port}=State) ->
-    Resp_format = get_value(response_format, Options, list),
-    {Host_1, Port_1, State_1} =
-	case get_value(proxy_host, Options, false) of
-	    false ->
-		{Host, Port, State};
-	    PHost ->
-		ProxyUser = get_value(proxy_user, Options, []),
-		ProxyPassword = get_value(proxy_password, Options, []),
-		Digest = http_auth_digest(ProxyUser, ProxyPassword),
-		{PHost, get_value(proxy_port, Options, 80),
-		 State#state{use_proxy = true,
-			     proxy_auth_digest = Digest}}
-	end,
-    StreamTo = get_value(stream_to, Options, undefined),
-    ReqId = make_req_id(),
-    SaveResponseToFile = get_value(save_response_to_file, Options, false),
-    NewReq = #request{url=Url,
-		      method=Method,
-		      stream_to=StreamTo,
-		      options=Options,
-		      req_id=ReqId,
-		      save_response_to_file = SaveResponseToFile,
-		      stream_chunk_size = get_stream_chunk_size(Options),
-		      response_format = Resp_format,
-		      from=From},
-    Reqs = queue:in(NewReq, State#state.reqs),
-    State_2 = check_ssl_options(Options, State_1#state{reqs = Reqs}),
-    do_trace("Connecting...~n", []),
-    Start_ts = now(),
-    Conn_timeout = get_value(connect_timeout, Options, Timeout),
-    case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
-	{ok, Sock} ->
-	    do_trace("Connected!~n", []),
-	    End_ts = now(),
-	    Ref = case Timeout of
-		      infinity ->
-			  undefined;
-		      _ ->
-			  Rem_time = Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000)),
-			  case Rem_time > 0 of
-			      true ->
-				  erlang:send_after(Rem_time, self(), {req_timedout, From});
-			      false ->
-				  shutting_down(State_2),
-				  do_error_reply(State_2, req_timedout),
-				  exit(normal)
-			  end
-		  end,
-	    case send_req_1(Url, Headers, Method, Body, Options, Sock, State_2) of
-		ok ->
-		    do_setopts(Sock, [{active, once}], State_2#state.is_ssl),
-		    case StreamTo of
-			undefined ->
-			    ok;
-			_ ->
-			    gen_server:reply(From, {ibrowse_req_id, ReqId})
-		    end,
-		    State_3 = inc_pipeline_counter(State_2#state{socket = Sock,
-								 send_timer = Ref,
-								 cur_req = NewReq,
-								 status = get_header}),
-		    {noreply, State_3, get_inac_timeout(State_3)};
-		Err ->
-		    shutting_down(State_2),
-		    do_trace("Send failed... Reason: ~p~n", [Err]),
-		    gen_server:reply(From, {error, send_failed}),
-		    {stop, normal, State_2}
-	    end;
-	Err ->
-	    shutting_down(State_2),
-	    do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
-	    gen_server:reply(From, {error, conn_failed}),
-	    {stop, normal, State_2}
-    end;
-
-%% Request which is to be pipelined
-handle_call({send_req, {Url, Headers, Method,
-			 Body, Options, Timeout}},
-	    From,
-	    #state{socket=Sock, status=Status, reqs=Reqs}=State) ->
-    do_trace("Recvd request in connected state. Status -> ~p NumPending: ~p~n", [Status, length(queue:to_list(Reqs))]),
-    Resp_format = get_value(response_format, Options, list),
-    StreamTo = get_value(stream_to, Options, undefined),
-    SaveResponseToFile = get_value(save_response_to_file, Options, false),
-    ReqId = make_req_id(),
-    NewReq = #request{url=Url,
-		      stream_to=StreamTo,
-		      method=Method,
-		      options=Options,
-		      req_id=ReqId,
-		      save_response_to_file = SaveResponseToFile,
-		      stream_chunk_size = get_stream_chunk_size(Options),
-		      response_format = Resp_format,
-		      from=From},
-    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
-    case send_req_1(Url, Headers, Method, Body, Options, Sock, State_1) of
-	ok ->
-	    State_2 = inc_pipeline_counter(State_1),
-	    do_setopts(Sock, [{active, once}], State#state.is_ssl),
-	    case Timeout of
-		infinity ->
-		    ok;
-		_ ->
-		    erlang:send_after(Timeout, self(), {req_timedout, From})
-	    end,
-	    State_3 = case Status of
-			  idle ->
-			      State_2#state{status = get_header,
-					    cur_req = NewReq};
-			  _ ->
-			      State_2
-		      end,
-	    case StreamTo of
-		undefined ->
-		    ok;
-		_ ->
-		    gen_server:reply(From, {ibrowse_req_id, ReqId})
-	    end,
-	    {noreply, State_3, get_inac_timeout(State_3)};
-	Err ->
-	    shutting_down(State_1),
-	    do_trace("Send request failed: Reason: ~p~n", [Err]),
-	    gen_server:reply(From, {error, send_failed}),
-	    do_error_reply(State, send_failed),
-	    {stop, normal, State_1}
-    end;
+	    From, State) ->
+    send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
 
-handle_call(stop, _From, #state{socket = Socket, is_ssl = Is_ssl} = State) ->
-    do_close(Socket, Is_ssl),
+handle_call(stop, _From, State) ->
+    do_close(State),
     do_error_reply(State, closing_on_request),
     {stop, normal, State};
 
@@ -294,6 +166,15 @@ handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
 handle_info({ssl, _Sock, Data}, State) ->
     handle_sock_data(Data, State);
 
+handle_info({stream_next, Req_id}, #state{socket = Socket,
+					  is_ssl = Is_ssl,
+					  cur_req = #request{req_id = Req_id}} = State) ->
+    do_setopts(Socket, [{active, once}], Is_ssl),
+    {noreply, State};
+
+handle_info({stream_next, _Req_id}, State) ->
+    {noreply, State};
+
 handle_info({tcp_closed, _Sock}, State) ->
     do_trace("TCP connection closed by peer!~n", []),
     handle_sock_closed(State),
@@ -332,12 +213,7 @@ handle_info(Info, State) ->
 %% Returns: any (ignored by gen_server)
 %%--------------------------------------------------------------------
 terminate(_Reason, State) ->
-    case State#state.socket of
-	undefined ->
-	    ok;
-	Sock ->
-	    do_close(Sock, State#state.is_ssl)
-    end.
+    do_close(State).
 
 %%--------------------------------------------------------------------
 %% Func: code_change/3
@@ -358,10 +234,10 @@ handle_sock_data(Data, #state{status=idle}=State) ->
     do_trace("Data recvd on socket in state idle!. ~1000.p~n", [Data]),
     shutting_down(State),
     do_error_reply(State, data_in_status_idle),
-    do_close(State#state.socket, State#state.is_ssl),
+    do_close(State),
     {stop, normal, State};
 
-handle_sock_data(Data, #state{status=get_header, socket=Sock}=State) ->
+handle_sock_data(Data, #state{status = get_header}=State) ->
     case parse_response(Data, State) of
 	{error, _Reason} ->
 	    shutting_down(State),
@@ -370,14 +246,15 @@ handle_sock_data(Data, #state{status=get_header, socket=Sock}=State) ->
 	    shutting_down(State),
 	    {stop, normal, State};
 	State_1 ->
-	    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+	    active_once(State_1),
 	    {noreply, State_1, get_inac_timeout(State_1)}
     end;
 
-handle_sock_data(Data, #state{status=get_body, content_length=CL,
+handle_sock_data(Data, #state{status           = get_body,
+			      content_length   = CL,
 			      http_status_code = StatCode,
-			      recvd_headers=Headers,
-			      chunk_size=CSz, socket=Sock}=State) ->
+			      recvd_headers    = Headers,
+			      chunk_size       = CSz} = State) ->
     case (CL == undefined) and (CSz == undefined) of
 	true ->
 	    case accumulate_response(Data, State) of
@@ -387,7 +264,7 @@ handle_sock_data(Data, #state{status=get_body, content_length=CL,
 					    {error, {Reason, {stat_code, StatCode}, Headers}}),
 		    {stop, normal, State};
 		State_1 ->
-		    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+		    active_once(State_1),
 		    {noreply, State_1, get_inac_timeout(State_1)}
 	    end;
 	_ ->
@@ -401,7 +278,7 @@ handle_sock_data(Data, #state{status=get_body, content_length=CL,
 		    shutting_down(State),
 		    {stop, normal, State};
 		State_1 ->
-		    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+		    active_once(State_1),
 		    {noreply, State_1, get_inac_timeout(State_1)}
 	    end
     end.
@@ -452,22 +329,27 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
 				 cur_req = CurReq}=State) ->
     #request{stream_to=StreamTo, req_id=ReqId,
 	     stream_chunk_size = Stream_chunk_size,
-	     response_format = Response_format} = CurReq,
+	     response_format = Response_format,
+	     caller_controls_socket = Caller_controls_socket} = CurReq,
     RepBuf_1 = concat_binary([RepBuf, Data]),
     New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
 	undefined ->
 	    State#state{reply_buffer = RepBuf_1};
-	_ when New_data_size < Stream_chunk_size ->
-	    State#state{reply_buffer = RepBuf_1};
-	_ ->
+	_ when Caller_controls_socket == true ->
+	    do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
+	    State#state{reply_buffer = <<>>, 
+			streamed_size = Streamed_size + size(RepBuf_1)};
+	_ when New_data_size >= Stream_chunk_size ->
 	    {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
 	    do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
 	    accumulate_response(
 	      Rem_data,
 	      State#state{
 		reply_buffer = <<>>,
-		streamed_size = Streamed_size + Stream_chunk_size})
+		streamed_size = Streamed_size + Stream_chunk_size});
+	_ ->
+	    State#state{reply_buffer = RepBuf_1}
     end.
 
 make_tmp_filename() ->
@@ -528,37 +410,45 @@ do_connect(Host, Port, _Options, _State, Timeout) ->
 		    [binary, {nodelay, true}, {active, false}],
 		    Timeout).
 
-do_send(Sock, Req, true)  ->  ssl:send(Sock, Req);
-do_send(Sock, Req, false) ->  gen_tcp:send(Sock, Req).
+do_send(Req, #state{socket = Sock, is_ssl = true})  ->  ssl:send(Sock, Req);
+do_send(Req, #state{socket = Sock, is_ssl = false}) ->  gen_tcp:send(Sock, Req).
 
 %% @spec do_send_body(Sock::socket_descriptor(), Source::source_descriptor(), IsSSL::boolean()) -> ok | error()
 %% source_descriptor() = fun_arity_0           |
 %%                       {fun_arity_0}         |
 %%                       {fun_arity_1, term()}
 %% error() = term()
-do_send_body(Sock, Source, IsSSL) when is_function(Source) ->
-    do_send_body(Sock, {Source}, IsSSL);
-do_send_body(Sock, {Source}, IsSSL) when is_function(Source) ->
-    do_send_body1(Sock, Source, IsSSL, Source());
-do_send_body(Sock, {Source, State}, IsSSL) when is_function(Source) ->
-    do_send_body1(Sock, Source, IsSSL, Source(State));
-do_send_body(Sock, Body, IsSSL) ->
-    do_send(Sock, Body, IsSSL).
-
-do_send_body1(Sock, Source, IsSSL, Resp) ->
+do_send_body(Source, State) when is_function(Source) ->
+    do_send_body({Source}, State);
+do_send_body({Source}, State) when is_function(Source) ->
+    do_send_body1(Source, Source(), State);
+do_send_body({Source, Source_state}, State) when is_function(Source) ->
+    do_send_body1(Source, Source(Source_state), State);
+do_send_body(Body, State) ->
+    do_send(Body, State).
+
+do_send_body1(Source, Resp, State) ->
     case Resp of
 	{ok, Data} ->
-	    do_send(Sock, Data, IsSSL),
-	    do_send_body(Sock, {Source}, IsSSL);
-	{ok, Data, NewState} ->
-	    do_send(Sock, Data, IsSSL),
-	    do_send_body(Sock, {Source, NewState}, IsSSL);
-	eof -> ok;
-	Err -> Err
+	    do_send(Data, State),
+	    do_send_body({Source}, State);
+	{ok, Data, New_source_state} ->
+	    do_send(Data, State),
+	    do_send_body({Source, New_source_state}, State);
+	eof ->
+	    ok;
+	Err ->
+	    Err
     end.
 
-do_close(Sock, true)  ->  ssl:close(Sock);
-do_close(Sock, false) ->  gen_tcp:close(Sock).
+do_close(#state{socket = undefined})            ->  ok;
+do_close(#state{socket = Sock, is_ssl = true})  ->  ssl:close(Sock);
+do_close(#state{socket = Sock, is_ssl = false}) ->  gen_tcp:close(Sock).
+
+active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
+    ok;
+active_once(#state{socket = Socket, is_ssl = Is_ssl}) ->
+    do_setopts(Socket, [{active, once}], Is_ssl).
 
 do_setopts(Sock, Opts, true)  ->  ssl:setopts(Sock, Opts);
 do_setopts(Sock, Opts, false) ->  inet:setopts(Sock, Opts).
@@ -571,11 +461,81 @@ check_ssl_options(Options, State) ->
 	    State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
     end.
 
-send_req_1(#url{abspath = AbsPath,
-		host = Host,
-		port = Port,
-		path = RelPath} = Url,
-	   Headers, Method, Body, Options, Sock, State) ->
+send_req_1(From,
+	   #url{host = Host,
+		port = Port} = Url,
+	   Headers, Method, Body, Options, Timeout,
+	   #state{socket = undefined} = State) ->
+    {Host_1, Port_1, State_1} =
+	case get_value(proxy_host, Options, false) of
+	    false ->
+		{Host, Port, State};
+	    PHost ->
+		ProxyUser     = get_value(proxy_user, Options, []),
+		ProxyPassword = get_value(proxy_password, Options, []),
+		Digest        = http_auth_digest(ProxyUser, ProxyPassword),
+		{PHost, get_value(proxy_port, Options, 80),
+		 State#state{use_proxy = true,
+			     proxy_auth_digest = Digest}}
+	end,
+    State_2 = check_ssl_options(Options, State_1),
+    do_trace("Connecting...~n", []),
+    Start_ts = now(),
+    Conn_timeout = get_value(connect_timeout, Options, Timeout),
+    case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
+	{ok, Sock} ->
+	    do_trace("Connected!~n", []),
+	    End_ts = now(),
+	    Timeout_1 = case Timeout of
+			    infinity ->
+				infinity;
+			    _ ->
+				Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
+			end,
+	    State_3 = State_2#state{socket = Sock},
+	    send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
+	Err ->
+	    shutting_down(State_2),
+	    do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+	    gen_server:reply(From, {error, conn_failed}),
+	    {stop, normal, State_2}
+    end;
+send_req_1(From,
+	   #url{abspath = AbsPath,
+		host    = Host,
+		port    = Port,
+		path    = RelPath} = Url,
+	   Headers, Method, Body, Options, Timeout,
+	   #state{status = Status} = State) ->
+    ReqId = make_req_id(),
+    Resp_format = get_value(response_format, Options, list),
+    {StreamTo, Caller_controls_socket} =
+	case get_value(stream_to, Options, undefined) of
+	    {Caller, once} when is_pid(Caller) or
+				is_atom(Caller) ->
+		Async_pid_rec = {{req_id_pid, ReqId}, self()},
+		true = ets:insert(ibrowse_stream, Async_pid_rec), 
+		{Caller, true};
+	    undefined ->
+		{undefined, false};
+	    Caller when is_pid(Caller) or
+			is_atom(Caller) ->
+		{Caller, false};
+	    Stream_to_inv ->
+		exit({invalid_option, {stream_to, Stream_to_inv}})
+	end,
+    SaveResponseToFile = get_value(save_response_to_file, Options, false),
+    NewReq = #request{url                    = Url,
+		      method                 = Method,
+		      stream_to              = StreamTo,
+		      caller_controls_socket = Caller_controls_socket,
+		      options                = Options,
+		      req_id                 = ReqId,
+		      save_response_to_file  = SaveResponseToFile,
+		      stream_chunk_size      = get_stream_chunk_size(Options),
+		      response_format        = Resp_format,
+		      from                   = From},
+    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     Headers_1 = add_auth_headers(Url, Options, Headers, State),
     HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
 			  false ->
@@ -598,14 +558,45 @@ send_req_1(#url{abspath = AbsPath,
 		     "--- Request End ---~n", [NReq]);
 	_ -> ok
     end,
-    SndRes = case do_send(Sock, Req, State#state.is_ssl) of
-		 ok -> do_send_body(Sock, Body_1, State#state.is_ssl);
-		 Err ->
-		     io:format("Err: ~p~n", [Err]),
-		     Err
-	     end,
-    do_setopts(Sock, [{active, once}], State#state.is_ssl),
-    SndRes.
+    case do_send(Req, State) of
+	ok ->
+	    case do_send_body(Body_1, State) of
+		ok ->
+		    State_2 = inc_pipeline_counter(State_1),
+		    active_once(State_1),
+		    Ref = case Timeout of
+			      infinity ->
+				  undefined;
+			      _ ->
+				  erlang:send_after(Timeout, self(), {req_timedout, From})
+			  end,
+		    State_3 = case Status of
+				  idle ->
+				      State_2#state{status     = get_header,
+						    cur_req    = NewReq,
+						    send_timer = Ref};
+				  _ ->
+				      State_2#state{send_timer = Ref}
+			      end,
+		    case StreamTo of
+			undefined ->
+			    ok;
+			_ ->
+			    gen_server:reply(From, {ibrowse_req_id, ReqId})
+		    end,
+		    {noreply, State_3, get_inac_timeout(State_3)};
+		Err ->
+		    shutting_down(State_1),
+		    do_trace("Send failed... Reason: ~p~n", [Err]),
+		    gen_server:reply(From, {error, send_failed}),
+		    {stop, normal, State_1}
+	    end;
+	Err ->
+	    shutting_down(State_1),
+	    do_trace("Send failed... Reason: ~p~n", [Err]),
+	    gen_server:reply(From, {error, send_failed}),
+	    {stop, normal, State_1}
+    end.
 
 add_auth_headers(#url{username = User,
 		      password = UPw},
@@ -719,9 +710,9 @@ encode_headers(L) ->
     encode_headers(L, []).
 encode_headers([{http_vsn, _Val} | T], Acc) ->
     encode_headers(T, Acc);
-encode_headers([{Name,Val} | T], Acc) when list(Name) ->
+encode_headers([{Name,Val} | T], Acc) when is_list(Name) ->
     encode_headers(T, [[Name, ": ", fmt_val(Val), crnl()] | Acc]);
-encode_headers([{Name,Val} | T], Acc) when atom(Name) ->
+encode_headers([{Name,Val} | T], Acc) when is_atom(Name) ->
     encode_headers(T, [[atom_to_list(Name), ": ", fmt_val(Val), crnl()] | Acc]);
 encode_headers([], Acc) ->
     lists:reverse(Acc).
@@ -732,25 +723,25 @@ chunk_request_body(Body, ChunkSize) ->
 chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when binary(Body),
+chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
                                               size(Body) >= ChunkSize ->
     <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
     Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
 	     ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
-chunk_request_body(Body, _ChunkSize, Acc) when binary(Body) ->
+chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
     BodySize = size(Body),
     Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
 	     Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when list(Body),
+chunk_request_body(Body, ChunkSize, Acc) when is_list(Body),
                                               length(Body) >= ChunkSize ->
     {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
     Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
 	     ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
-chunk_request_body(Body, _ChunkSize, Acc) when list(Body) ->
+chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     BodySize = length(Body),
     Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
 	     Body, "\r\n"],
@@ -840,7 +831,7 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
 		    {error, content_length_undefined};
 		V ->
 		    case catch list_to_integer(V) of
-			V_1 when integer(V_1), V_1 >= 0 ->
+			V_1 when is_integer(V_1), V_1 >= 0 ->
 			    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
 			    do_trace("Recvd Content-Length of ~p~n", [V_1]),
 			    State_2 = State_1#state{rep_buf_size=0,
@@ -1058,17 +1049,20 @@ set_cur_request(#state{reqs = Reqs} = State) ->
 parse_headers(Headers) ->
     case scan_crlf(Headers) of
 	{yes, StatusLine, T} ->
-	    Headers_1 = parse_headers_1(T),
-	    case parse_status_line(StatusLine) of
-		{ok, HttpVsn, StatCode, _Msg} ->
-		    put(http_prot_vsn, HttpVsn),
-		    {HttpVsn, StatCode, Headers_1};
-		_ -> %% A HTTP 0.9 response?
-		    put(http_prot_vsn, "HTTP/0.9"),
-		    {"HTTP/0.9", undefined, Headers}
-	    end;
-	_ ->
-	    {error, no_status_line}
+	    parse_headers(StatusLine, T);
+	{no, StatusLine} ->
+	    parse_headers(StatusLine, <<>>)
+    end.
+
+parse_headers(StatusLine, Headers) ->
+    Headers_1 = parse_headers_1(Headers),
+    case parse_status_line(StatusLine) of
+	{ok, HttpVsn, StatCode, _Msg} ->
+	    put(http_prot_vsn, HttpVsn),
+	    {HttpVsn, StatCode, Headers_1};
+	_ -> %% A HTTP 0.9 response?
+	    put(http_prot_vsn, "HTTP/0.9"),
+	    {"HTTP/0.9", undefined, Headers}
     end.
 
 % From RFC 2616
@@ -1079,10 +1073,10 @@ parse_headers(Headers) ->
 %    SP. A recipient MAY replace any linear white space with a single
 %    SP before interpreting the field value or forwarding the message
 %    downstream.
-parse_headers_1(B) when is_binary(B) ->
-    parse_headers_1(binary_to_list(B));
-parse_headers_1(String) ->
-    parse_headers_1(String, [], []).
+	parse_headers_1(B) when is_binary(B) ->
+					   parse_headers_1(binary_to_list(B));
+	parse_headers_1(String) ->
+					   parse_headers_1(String, [], []).
 
 parse_headers_1([$\n, H |T], [$\r | L], Acc) when H == 32;
 						  H == $\t ->
@@ -1205,10 +1199,10 @@ get_crlf_pos(<<>>, _)                     -> no.
 %% scan_crlf([H|T],  L)                    -> scan_crlf(T, [H|L]);
 %% scan_crlf([], L)                        -> {no, L}.
 
-fmt_val(L) when list(L)    -> L;
-fmt_val(I) when integer(I) -> integer_to_list(I);
-fmt_val(A) when atom(A)    -> atom_to_list(A);
-fmt_val(Term)              -> io_lib:format("~p", [Term]).
+fmt_val(L) when is_list(L)    -> L;
+fmt_val(I) when is_integer(I) -> integer_to_list(I);
+fmt_val(A) when is_atom(A)    -> atom_to_list(A);
+fmt_val(Term)                 -> io_lib:format("~p", [Term]).
 
 crnl() -> "\r\n".
 
@@ -1306,7 +1300,8 @@ do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) -
 do_reply(State, From, undefined, _, _, Msg) ->
     gen_server:reply(From, Msg),
     dec_pipeline_counter(State);
-do_reply(State, _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
+do_reply(#state{prev_req_id = Prev_req_id} = State,
+	 _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
     State_1 = dec_pipeline_counter(State),
     case Body of
 	[] ->
@@ -1316,7 +1311,18 @@ do_reply(State, _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
 	    catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
     end,
     catch StreamTo ! {ibrowse_async_response_end, ReqId},
-    State_1;
+    %% We don't want to delete the Req-id to Pid mapping straightaway
+    %% as the client may send a stream_next message just while we are
+    %% sending back this ibrowse_async_response_end message. If we
+    %% deleted this mapping straightaway, the caller will see a
+    %% {error, unknown_req_id} when it calls ibrowse:stream_next/1. To
+    %% get around this, we store the req id, and clear it after the
+    %% next request. If there are weird combinations of stream,
+    %% stream_once and sync requests on the same connection, it will
+    %% take a while for the req_id-pid mapping to get cleared, but it
+    %% should do no harm.
+    ets:delete(ibrowse_stream, {req_id_pid, Prev_req_id}),
+    State_1#state{prev_req_id = ReqId};
 do_reply(State, _From, StreamTo, ReqId, Resp_format, Msg) ->
     State_1 = dec_pipeline_counter(State),
     Msg_1 = format_response_data(Resp_format, Msg),
@@ -1333,6 +1339,7 @@ do_error_reply(#state{reqs = Reqs} = State, Err) ->
     ReqList = queue:to_list(Reqs),
     lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 			       response_format = Resp_format}) ->
+			  ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
                           do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
 		  end, ReqList).
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50228c10/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index f3559b5..ad3e812 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -18,9 +18,50 @@
 	 ue_test/1,
 	 verify_chunked_streaming/0,
 	 verify_chunked_streaming/1,
-	 i_do_async_req_list/4
+	 i_do_async_req_list/4,
+	 test_stream_once/3,
+	 test_stream_once/4
 	]).
 
+test_stream_once(Url, Method, Options) ->
+    test_stream_once(Url, Method, Options, 5000).
+
+test_stream_once(Url, Method, Options, Timeout) ->
+    case ibrowse:send_req(Url, [], Method, [], [{stream_to, {self(), once}} | Options], Timeout) of
+	{ibrowse_req_id, Req_id} ->
+	    case ibrowse:stream_next(Req_id) of
+		ok ->
+		    test_stream_once(Req_id);
+		Err ->
+		    Err
+	    end;
+	Err ->
+	    Err
+    end.
+
+test_stream_once(Req_id) ->
+    receive
+	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
+	    io:format("Recvd headers~n~p~n", [{ibrowse_async_headers, Req_id, StatCode, Headers}]),
+	    case ibrowse:stream_next(Req_id) of
+		ok ->
+		    test_stream_once(Req_id);
+		Err ->
+		    Err
+	    end;
+	{ibrowse_async_response, Req_id, {error, Err}} ->
+	    io:format("Recvd error: ~p~n", [Err]);
+	{ibrowse_async_response, Req_id, Body_1} ->
+	    io:format("Recvd body part: ~n~p~n", [{ibrowse_async_response, Req_id, Body_1}]),
+	    case ibrowse:stream_next(Req_id) of
+		ok ->
+		    test_stream_once(Req_id);
+		Err ->
+		    Err
+	    end;
+	{ibrowse_async_response_end, Req_id} ->
+	    ok
+    end.
 %% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
 %% tweak settings before running the load test. The defaults are 10 and 10.
 load_test(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
@@ -182,7 +223,8 @@ unit_tests() ->
     unit_tests([]).
 
 unit_tests(Options) ->
-    {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options]),
+    Options_1 = Options ++ [{connect_timeout, 5000}],
+    {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
     receive 
 	{done, Pid} ->
 	    ok;


[28/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Upgrade ibrowse to version 2.1.3

This version fixes several bugs and adds a few minor improvements.
For a list and description of the changes relative to the previous
version, see the README file at:

https://github.com/cmullaparthi/ibrowse



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1061340 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/72320820
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/72320820
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/72320820

Branch: refs/heads/import-master
Commit: 72320820ce8f9d881f58f87162b628a79e3036b8
Parents: cb08a0d
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Thu Jan 20 15:27:41 2011 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Thu Jan 20 15:27:41 2011 +0000

----------------------------------------------------------------------
 Makefile.am             |  2 +-
 ibrowse.app.in          |  2 +-
 ibrowse.erl             | 18 ++++++------
 ibrowse_http_client.erl | 65 ++++++++++++++++++++++++--------------------
 ibrowse_test.erl        | 40 ++++++++++++---------------
 5 files changed, 64 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/72320820/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 4cebe5d..bfd52ba 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.2/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.3/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/72320820/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index c8e4227..875620d 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "2.1.2"},
+         {vsn, "2.1.3"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/72320820/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index e105150..f70f92f 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -6,8 +6,8 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2010 Chandrashekhar Mullaparthi
-%% @version 2.1.2
+%% @copyright 2005-2011 Chandrashekhar Mullaparthi
+%% @version 2.1.3
 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -683,16 +683,16 @@ init(_) ->
     State = #state{},
     put(my_trace_flag, State#state.trace),
     put(ibrowse_trace_token, "ibrowse"),
-    ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
-    ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
-    ets:new(ibrowse_stream, [named_table, public]),
+    ibrowse_lb     = ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
+    ibrowse_conf   = ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
+    ibrowse_stream = ets:new(ibrowse_stream, [named_table, public]),
     import_config(),
     {ok, #state{}}.
 
 import_config() ->
     case code:priv_dir(ibrowse) of
-        {error, _} = Err ->
-            Err;
+        {error, _} ->
+            ok;
         PrivDir ->
             Filename = filename:join(PrivDir, "ibrowse.conf"),
             import_config(Filename)
@@ -723,8 +723,8 @@ import_config(Filename) ->
                           io:format("Skipping unrecognised term: ~p~n", [X])
                   end,
             lists:foreach(Fun, Terms);
-        Err ->
-            Err
+        _Err ->
+            ok
     end.
 
 %% @doc Internal export

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/72320820/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 5dce321..7d606e6 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -188,7 +188,7 @@ handle_info({stream_next, Req_id}, #state{socket = Socket,
                                           cur_req = #request{req_id = Req_id}} = State) ->
     %% io:format("Client process set {active, once}~n", []),
     do_setopts(Socket, [{active, once}], State),
-    {noreply, State};
+    {noreply, set_inac_timer(State)};
 
 handle_info({stream_next, _Req_id}, State) ->
     _Cur_req_id = case State#state.cur_req of
@@ -216,12 +216,14 @@ handle_info({ssl_closed, _Sock}, State) ->
     handle_sock_closed(State),
     {stop, normal, State};
 
-handle_info({tcp_error, _Sock}, State) ->
-    do_trace("Error on connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+handle_info({tcp_error, _Sock, Reason}, State) ->
+    do_trace("Error on connection to ~1000.p:~1000.p -> ~1000.p~n",
+             [State#state.host, State#state.port, Reason]),
     handle_sock_closed(State),
     {stop, normal, State};
-handle_info({ssl_error, _Sock}, State) ->
-    do_trace("Error on SSL connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+handle_info({ssl_error, _Sock, Reason}, State) ->
+    do_trace("Error on SSL connection to ~1000.p:~1000.p -> ~1000.p~n",
+             [State#state.host, State#state.port, Reason]),
     handle_sock_closed(State),
     {stop, normal, State};
 
@@ -334,8 +336,13 @@ handle_sock_data(Data, #state{status           = get_body,
                             active_once(State_1)
                     end,
                     State_2 = State_1#state{interim_reply_sent = false},
-                    State_3 = set_inac_timer(State_2),
-                    {noreply, State_3};
+                    case Ccs of
+                    true ->
+                        cancel_timer(State_2#state.inactivity_timer_ref, {eat_message, timeout}),
+                        {noreply, State_2#state{inactivity_timer_ref = undefined}};
+                    _ ->
+                        {noreply, set_inac_timer(State_2)}
+                    end;
                 State_1 ->
                     active_once(State_1),
                     State_2 = set_inac_timer(State_1),
@@ -461,7 +468,7 @@ handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC
                        undefined ->
                            Buf;
                        _ ->
-                           file:close(Fd),
+                           ok = file:close(Fd),
                            {file, TmpFilename}
                    end,
             Reply = case get_value(give_raw_headers, Options, false) of
@@ -470,11 +477,11 @@ handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC
                         false ->
                             {ok, SC, Headers, Buf}
                     end,
-            do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-            do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
-            State;
+            State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+            ok = do_error_reply(State_1#state{reqs = Reqs_1}, connection_closed),
+            State_1;
         _ ->
-            do_error_reply(State, connection_closed),
+            ok = do_error_reply(State, connection_closed),
             State
     end.
 
@@ -482,17 +489,19 @@ do_connect(Host, Port, Options, #state{is_ssl      = true,
                                        use_proxy   = false,
                                        ssl_options = SSLOptions},
            Timeout) ->
-    Caller_socket_options = get_value(socket_options, Options, []),
-    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options),
-    ssl:connect(Host, Port,
-                [binary, {nodelay, true}, {active, false} | Other_sock_options],
-                Timeout);
+    ssl:connect(Host, Port, get_sock_options(Options, SSLOptions), Timeout);
 do_connect(Host, Port, Options, _State, Timeout) ->
+    gen_tcp:connect(Host, Port, get_sock_options(Options, []), Timeout).
+
+get_sock_options(Options, SSLOptions) ->
     Caller_socket_options = get_value(socket_options, Options, []),
-    Other_sock_options = filter_sock_options(Caller_socket_options),
-    gen_tcp:connect(Host, to_integer(Port),
-                    [binary, {nodelay, true}, {active, false} | Other_sock_options],
-                    Timeout).
+    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options),
+    case lists:keysearch(nodelay, 1, Other_sock_options) of
+        false ->
+            [{nodelay, true}, binary, {active, false} | Other_sock_options];
+        {value, _} ->
+            [binary, {active, false} | Other_sock_options]
+    end.
 
 %% We don't want the caller to specify certain options
 filter_sock_options(Opts) ->
@@ -547,7 +556,7 @@ do_send_body1(Source, Resp, State, TE) ->
 maybe_chunked_encode(Data, false) ->
     Data;
 maybe_chunked_encode(Data, true) ->
-    [?dec2hex(size(to_binary(Data))), "\r\n", Data, "\r\n"].
+    [?dec2hex(iolist_size(Data)), "\r\n", Data, "\r\n"].
 
 do_close(#state{socket = undefined})            ->  ok;
 do_close(#state{socket = Sock,
@@ -1269,7 +1278,7 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        reply_buffer  = RepBuf,
                        recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
-    file:close(Fd),
+    ok = file:close(Fd),
     ResponseBody = case TmpFilename of
                        undefined ->
                            Body;
@@ -1656,8 +1665,8 @@ fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
     {_, Reqs_1} = queue:out(Reqs),
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
              response_format = Resp_format} = CurReq,
-    do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-    do_error_reply(State#state{reqs = Reqs_1}, previous_request_failed).
+    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+    do_error_reply(State_1#state{reqs = Reqs_1}, previous_request_failed).
 
 split_list_at(List, N) ->
     split_list_at(List, N, []).
@@ -1701,7 +1710,8 @@ to_ascii($9) -> 9;
 to_ascii($0) -> 0.
 
 cancel_timer(undefined) -> ok;
-cancel_timer(Ref)       -> erlang:cancel_timer(Ref).
+cancel_timer(Ref)       -> _ = erlang:cancel_timer(Ref),
+                           ok.
 
 cancel_timer(Ref, {eat_message, Msg}) ->
     cancel_timer(Ref),
@@ -1814,8 +1824,5 @@ trace_request_body(Body) ->
             ok
     end.
 
-to_integer(X) when is_list(X)    -> list_to_integer(X); 
-to_integer(X) when is_integer(X) -> X.
-
 to_binary(X) when is_list(X)   -> list_to_binary(X); 
 to_binary(X) when is_binary(X) -> X.
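
As a side note on the connection changes above, a sketch (not ibrowse code) of
the socket-option merging rule that get_sock_options/2 now applies: options the
caller passes via socket_options are combined with any SSL options, a
caller-supplied nodelay value is honoured, nodelay defaults to true otherwise,
and binary mode plus {active, false} are always enforced. The real code
additionally filters out options the connection process must own.

merge_sock_options(Caller_opts, Ssl_opts) ->
    Merged = Ssl_opts ++ Caller_opts,
    case lists:keymember(nodelay, 1, Merged) of
        false ->
            [{nodelay, true}, binary, {active, false} | Merged];
        true ->
            [binary, {active, false} | Merged]
    end.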

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/72320820/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index b8e0a4a..ff3b530 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -11,8 +11,6 @@
 	 unit_tests/0,
 	 unit_tests/1,
 	 unit_tests_1/2,
-	 drv_ue_test/0,
-	 drv_ue_test/1,
 	 ue_test/0,
 	 ue_test/1,
 	 verify_chunked_streaming/0,
@@ -21,7 +19,8 @@
 	 i_do_async_req_list/4,
 	 test_stream_once/3,
 	 test_stream_once/4,
-         test_20122010/0
+         test_20122010/0,
+         test_20122010/1
 	]).
 
 test_stream_once(Url, Method, Options) ->
@@ -413,20 +412,6 @@ execute_req(Url, Method, Options) ->
 	    io:format("~p~n", [Err])
     end.
 
-drv_ue_test() ->
-    drv_ue_test(lists:duplicate(1024, 127)).
-drv_ue_test(Data) ->
-    [{port, Port}| _] = ets:lookup(ibrowse_table, port),
-%     erl_ddll:unload_driver("ibrowse_drv"),
-%     timer:sleep(1000),
-%     erl_ddll:load_driver("../priv", "ibrowse_drv"),
-%     Port = open_port({spawn, "ibrowse_drv"}, []),
-    {Time, Res} = timer:tc(ibrowse_lib, drv_ue, [Data, Port]),
-    io:format("Time -> ~p~n", [Time]),
-    io:format("Data Length -> ~p~n", [length(Data)]),
-    io:format("Res Length -> ~p~n", [length(Res)]).
-%    io:format("Result -> ~s~n", [Res]).
-
 ue_test() ->
     ue_test(lists:duplicate(1024, $?)).
 ue_test(Data) ->
@@ -445,11 +430,14 @@ log_msg(Fmt, Args) ->
 %%------------------------------------------------------------------------------
 
 test_20122010() ->
-    {ok, Pid} = ibrowse:spawn_worker_process("http://localhost:8181"),
+    test_20122010("http://localhost:8181").
+
+test_20122010(Url) ->
+    {ok, Pid} = ibrowse:spawn_worker_process(Url),
     Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>,
     Test_parent = self(),
     Fun = fun() ->
-                  do_test_20122010(Pid, Expected_resp, Test_parent)
+                  do_test_20122010(Url, Pid, Expected_resp, Test_parent)
           end,
     Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)],
     wait_for_workers(Pids).
@@ -458,18 +446,24 @@ wait_for_workers([{Pid, _Ref} | Pids]) ->
     receive
         {Pid, success} ->
             wait_for_workers(Pids)
-    after 5000 ->
+    after 60000 ->
             test_failed
     end;
 wait_for_workers([]) ->
     success.
 
-do_test_20122010(Pid, Expected_resp, Test_parent) ->
+do_test_20122010(Url, Pid, Expected_resp, Test_parent) ->
+    do_test_20122010(10, Url, Pid, Expected_resp, Test_parent).
+
+do_test_20122010(0, _Url, _Pid, _Expected_resp, Test_parent) ->
+    Test_parent ! {self(), success};
+do_test_20122010(Rem_count, Url, Pid, Expected_resp, Test_parent) ->
     {ibrowse_req_id, Req_id} = ibrowse:send_req_direct(
                                  Pid,
-                                 "http://localhost:8181/ibrowse_stream_once_chunk_pipeline_test",
+                                 Url ++ "/ibrowse_stream_once_chunk_pipeline_test",
                                  [], get, [],
                                  [{stream_to, {self(), once}},
+                                  {inactivity_timeout, 10000},
                                   {include_ibrowse_req_id, true}]),
     do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]),
     Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])),
@@ -491,7 +485,7 @@ do_test_20122010(Pid, Expected_resp, Test_parent) ->
     ok = ibrowse:stream_next(Req_id),
     case do_test_20122010_1(Expected_resp, Req_id, []) of
         true ->
-            Test_parent ! {self(), success};
+            do_test_20122010(Rem_count - 1, Url, Pid, Expected_resp, Test_parent);
         false ->
             Test_parent ! {self(), failed}
     end.
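
With test_20122010/1 exported above, the pipelining/stream_once test can be
pointed at any server that serves the expected chunked response; a
hypothetical invocation (the URL is only a placeholder):

    %% run the chunked stream_once pipelining test against a local server
    ibrowse_test:test_20122010("http://127.0.0.1:8181").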


[15/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Backport a warning fix from a future version of ibrowse.

The current ibrowse release makes our test suite fail, and I don't
want to pester the 1.0.0 release cycle with this; yet I'd like
1.0.0 (and 0.11.1) to be future-proof for Erlang R14B.

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@959988 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/c166256d
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/c166256d
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/c166256d

Branch: refs/heads/import-master
Commit: c166256d5d1826d946987d01a148be4136aebd54
Parents: f2a473e
Author: Jan Lehnardt <ja...@apache.org>
Authored: Fri Jul 2 14:01:56 2010 +0000
Committer: Jan Lehnardt <ja...@apache.org>
Committed: Fri Jul 2 14:01:56 2010 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c166256d/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 5f62f70..a767b84 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -352,7 +352,7 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
 	     stream_chunk_size = Stream_chunk_size,
 	     response_format = Response_format,
 	     caller_controls_socket = Caller_controls_socket} = CurReq,
-    RepBuf_1 = concat_binary([RepBuf, Data]),
+    RepBuf_1 = list_to_binary([RepBuf, Data]),
     New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
 	undefined ->
@@ -1201,9 +1201,9 @@ scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
     case get_crlf_pos(Bin3) of
 	{yes, Pos} ->
 	    {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
-	    {yes, concat_binary([Bin1_head, Prefix]), Suffix};
+	    {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
 	no ->
-	    {no, concat_binary([Bin1, Bin2])}
+	    {no, list_to_binary([Bin1, Bin2])}
     end.
 
 get_crlf_pos(Bin) ->
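
The substitution works because list_to_binary/1 accepts any iolist, which
covers what the concat_binary/1 calls (the source of the R14B warning) were
doing with a list of binaries; a quick shell check:

    1> list_to_binary([<<"foo">>, <<"bar">>]).
    <<"foobar">>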


[06/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
upgrade to ibrowse 1.5.0

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@790771 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/7292757f
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/7292757f
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/7292757f

Branch: refs/heads/import-master
Commit: 7292757f2f3a6a6a596be192ab0430e895ad3d76
Parents: b9c2e64
Author: Adam Kocoloski <ko...@apache.org>
Authored: Fri Jul 3 00:58:13 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Fri Jul 3 00:58:13 2009 +0000

----------------------------------------------------------------------
 ibrowse.app             |   2 +-
 ibrowse.erl             |  93 ++++++++--
 ibrowse_http_client.erl | 402 ++++++++++++++++++++++++-------------------
 ibrowse_lb.erl          |   9 +-
 ibrowse_test.erl        |  44 +++--
 5 files changed, 333 insertions(+), 217 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/7292757f/ibrowse.app
----------------------------------------------------------------------
diff --git a/ibrowse.app b/ibrowse.app
index 960c079..5e4621d 100644
--- a/ibrowse.app
+++ b/ibrowse.app
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "1.4.1"},
+         {vsn, "1.5.0"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/7292757f/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 3390e58..1b0daad 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -6,8 +6,8 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2008 Chandrashekhar Mullaparthi
-%% @version 1.4
+%% @copyright 2005-2009 Chandrashekhar Mullaparthi
+%% @version 1.5.0
 %% @doc The ibrowse application implements an HTTP 1.1 client. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -57,7 +57,7 @@
 %% driver isn't actually used.</p>
 
 -module(ibrowse).
--vsn('$Id: ibrowse.erl,v 1.7 2008/05/21 15:28:11 chandrusf Exp $ ').
+-vsn('$Id: ibrowse.erl,v 1.8 2009/07/01 22:43:19 chandrusf Exp $ ').
 
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
@@ -96,6 +96,7 @@
 	 trace_off/0,
 	 trace_on/2,
 	 trace_off/2,
+	 all_trace_off/0,
 	 show_dest_status/2
 	]).
 
@@ -105,8 +106,6 @@
 
 -import(ibrowse_lib, [
 		      parse_url/1,
-		      printable_date/0,
-		      get_value/2,
 		      get_value/3,
 		      do_trace/2
 		     ]).
@@ -114,6 +113,7 @@
 -record(state, {trace = false}).
 
 -include("ibrowse.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
 
 -define(DEF_MAX_SESSIONS,10).
 -define(DEF_MAX_PIPELINE_SIZE,10).
@@ -170,7 +170,7 @@ send_req(Url, Headers, Method, Body) ->
 %% For a description of SSL Options, look in the ssl manpage. If the
 %% HTTP Version to use is not specified, the default is 1.1.
 %% <br/>
-%% <p>The <code>host_header</code> is useful in the case where ibrowse is
+%% <p>The <code>host_header</code> option is useful in the case where ibrowse is
 %% connecting to a component such as <a
 %% href="http://www.stunnel.org">stunnel</a> which then sets up a
 %% secure connection to a webserver. In this case, the URL supplied to
@@ -188,11 +188,39 @@ send_req(Url, Headers, Method, Body) ->
 %% <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
 %% information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
 %% is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
+%%
+%% <li>The <code>inactivity_timeout</code> option is useful when
+%% dealing with large response bodies and/or slow links. In these
+%% cases, it might be hard to estimate how long a request will take to
+%% complete. In such cases, the client might want to timeout if no
+%% data has been received on the link for a certain time interval.</li>
+%%
+%% <li>
+%% The <code>connect_timeout</code> option is to specify how long the
+%% client process should wait for connection establishment. This is
+%% useful in scenarios where connections to servers are usually setup
+%% very fast, but responses might take much longer compared to
+%% connection setup. In such cases, it is better for the calling
+%% process to timeout faster if there is a problem (DNS lookup
+%% delays/failures, network routing issues, etc). The total timeout
+%% value specified for the request will enforced. To illustrate using
+%% an example:
+%% <code>
+%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
+%% </code>
+%% In the above invocation, if the connection isn't established within
+%% 100 milliseconds, the request will fail with 
+%% <code>{error, conn_failed}</code>.<br/>
+%% If connection setup succeeds, the total time allowed for the
+%% request to complete will be 1000 milliseconds minus the time taken
+%% for connection setup.
+%% </li>
 %% </ul>
+%% 
 %% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
 %% optionList() = [option()]
 %% option() = {max_sessions, integer()}        |
-%%          {response_format,response_format()}| 
+%%          {response_format,response_format()}|
 %%          {stream_chunk_size, integer()}     |
 %%          {max_pipeline_size, integer()}     |
 %%          {trace, boolean()}                 | 
@@ -212,8 +240,10 @@ send_req(Url, Headers, Method, Body) ->
 %%          {stream_to, process()}             |
 %%          {http_vsn, {MajorVsn, MinorVsn}}   |
 %%          {host_header, string()}            |
+%%          {inactivity_timeout, integer()}    |
+%%          {connect_timeout, integer()}       |
 %%          {transfer_encoding, {chunked, ChunkSize}}
-%% 
+%%
 %% process() = pid() | atom()
 %% username() = string()
 %% password() = string()
@@ -314,7 +344,7 @@ set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
 
 do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
     case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
-					    Headers, Method, Body,
+					    Headers, Method, ensure_bin(Body),
 					    Options, Timeout) of
 	{'EXIT', {timeout, _}} ->
 	    {error, req_timedout};
@@ -331,6 +361,11 @@ do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
 	    Ret
     end.
 
+ensure_bin(L) when is_list(L) ->
+    list_to_binary(L);
+ensure_bin(B) when is_binary(B) ->
+    B.
+
 %% @doc Creates a HTTP client process to the specified Host:Port which
 %% is not part of the load balancing pool. This is useful in cases
 %% where some requests to a webserver might take a long time whereas
@@ -400,17 +435,25 @@ trace_off() ->
 
 %% @doc Turn tracing on for all connections to the specified HTTP
 %% server. Host is whatever is specified as the domain name in the URL
-%% @spec trace_on(Host, Port) -> term() 
+%% @spec trace_on(Host, Port) -> ok
 %% Host = string() 
 %% Port = integer()
 trace_on(Host, Port) ->
-    ibrowse ! {trace, true, Host, Port}.
+    ibrowse ! {trace, true, Host, Port},
+    ok.
 
 %% @doc Turn tracing OFF for all connections to the specified HTTP
 %% server.
-%% @spec trace_off(Host, Port) -> term()
+%% @spec trace_off(Host, Port) -> ok
 trace_off(Host, Port) ->
-    ibrowse ! {trace, false, Host, Port}.
+    ibrowse ! {trace, false, Host, Port},
+    ok.
+
+%% @doc Turn Off ALL tracing
+%% @spec all_trace_off() -> ok
+all_trace_off() ->
+    ibrowse ! all_trace_off,
+    ok.
 
 %% @doc Shows some internal information about load balancing to a
 %% specified Host:Port. Info about workers spawned using
@@ -588,6 +631,30 @@ handle_cast(_Msg, State) ->
 %%          {noreply, State, Timeout} |
 %%          {stop, Reason, State}            (terminate/2 is called)
 %%--------------------------------------------------------------------
+handle_info(all_trace_off, State) ->
+    Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
+    Trace_on_dests = ets:select(ibrowse_conf, Mspec),
+    Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
+		  case lists:member({H, P}, Trace_on_dests) of
+		      false ->
+			  ok;
+		      true ->
+			  catch Pid ! {trace, false}
+		  end;
+	     (#client_conn{key = {H, P, Pid}}, _) ->
+		  case lists:member({H, P}, Trace_on_dests) of
+		      false ->
+			  ok;
+		      true ->
+			  catch Pid ! {trace, false}
+		  end;
+	     (_, Acc) ->
+		  Acc
+	  end,
+    ets:foldl(Fun, undefined, ibrowse_lb),
+    ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
+    {noreply, State};
+				  
 handle_info({trace, Bool}, State) ->
     put(my_trace_flag, Bool),
     {noreply, State};
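
Taken together, the two new options documented in this file can be combined
in one call; a hypothetical example (URL and values are placeholders): allow
500 ms for connection setup, 30 s for the whole request, and give up early
if the socket stays silent for 10 s:

    ibrowse:send_req("http://www.example.com/some/large/resource", [], get, [],
                     [{connect_timeout, 500}, {inactivity_timeout, 10000}],
                     30000).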

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/7292757f/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 9455bc2..24214ff 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -6,7 +6,7 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 -module(ibrowse_http_client).
--vsn('$Id: ibrowse_http_client.erl,v 1.18 2008/05/21 15:28:11 chandrusf Exp $ ').
+-vsn('$Id: ibrowse_http_client.erl,v 1.19 2009/07/01 22:43:19 chandrusf Exp $ ').
 
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
@@ -42,11 +42,12 @@
 		use_proxy = false, proxy_auth_digest,
 		ssl_options = [], is_ssl = false, socket,
 		reqs=queue:new(), cur_req, status=idle, http_status_code,
-		reply_buffer=[], rep_buf_size=0, streamed_size = 0,
+		reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
 		recvd_headers=[],
 		is_closing, send_timer, content_length,
-		deleted_crlf = false, transfer_encoding, chunk_size,
-		chunks=[], lb_ets_tid, cur_pipeline_size = 0
+		deleted_crlf = false, transfer_encoding,
+		chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
+		lb_ets_tid, cur_pipeline_size = 0
 	       }).
 
 -record(request, {url, method, options, from,
@@ -57,8 +58,6 @@
 		  response_format}).
 
 -import(ibrowse_lib, [
-		      parse_url/1,
-		      printable_date/0,
 		      get_value/2,
 		      get_value/3,
 		      do_trace/2
@@ -83,15 +82,9 @@ stop(Conn_pid) ->
     gen_server:call(Conn_pid, stop).
 
 send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
-    Timeout_1 = case Timeout of
-		    infinity ->
-			infinity;
-		    _ when is_integer(Timeout) ->
-			Timeout + 100
-		end,
     gen_server:call(
       Conn_Pid,
-      {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout_1).
+      {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout).
 
 %%====================================================================
 %% Server functions
@@ -170,23 +163,29 @@ handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
     Reqs = queue:in(NewReq, State#state.reqs),
     State_2 = check_ssl_options(Options, State_1#state{reqs = Reqs}),
     do_trace("Connecting...~n", []),
-    Timeout_1 = case Timeout of
-		    infinity ->
-			infinity;
-		    _ ->
-			round(Timeout*0.9)
-		end,
-    case do_connect(Host_1, Port_1, Options, State_2, Timeout_1) of
+    Start_ts = now(),
+    Conn_timeout = get_value(connect_timeout, Options, Timeout),
+    case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
 	{ok, Sock} ->
+	    do_trace("Connected!~n", []),
+	    End_ts = now(),
 	    Ref = case Timeout of
 		      infinity ->
 			  undefined;
 		      _ ->
-			  erlang:send_after(Timeout, self(), {req_timedout, From})
+			  Rem_time = Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000)),
+			  case Rem_time > 0 of
+			      true ->
+				  erlang:send_after(Rem_time, self(), {req_timedout, From});
+			      false ->
+				  shutting_down(State_2),
+				  do_error_reply(State_2, req_timedout),
+				  exit(normal)
+			  end
 		  end,
-	    do_trace("Connected!~n", []),
 	    case send_req_1(Url, Headers, Method, Body, Options, Sock, State_2) of
 		ok ->
+		    do_setopts(Sock, [{active, once}], State_2#state.is_ssl),
 		    case StreamTo of
 			undefined ->
 			    ok;
@@ -197,7 +196,7 @@ handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
 								 send_timer = Ref,
 								 cur_req = NewReq,
 								 status = get_header}),
-		    {noreply, State_3};
+		    {noreply, State_3, get_inac_timeout(State_3)};
 		Err ->
 		    shutting_down(State_2),
 		    do_trace("Send failed... Reason: ~p~n", [Err]),
@@ -234,7 +233,7 @@ handle_call({send_req, {Url, Headers, Method,
     case send_req_1(Url, Headers, Method, Body, Options, Sock, State_1) of
 	ok ->
 	    State_2 = inc_pipeline_counter(State_1),
-	    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+	    do_setopts(Sock, [{active, once}], State#state.is_ssl),
 	    case Timeout of
 		infinity ->
 		    ok;
@@ -254,7 +253,7 @@ handle_call({send_req, {Url, Headers, Method,
 		_ ->
 		    gen_server:reply(From, {ibrowse_req_id, ReqId})
 	    end,
-	    {noreply, State_3};
+	    {noreply, State_3, get_inac_timeout(State_3)};
 	Err ->
 	    shutting_down(State_1),
 	    do_trace("Send request failed: Reason: ~p~n", [Err]),
@@ -289,7 +288,8 @@ handle_cast(_Msg, State) ->
 %%          {noreply, State, Timeout} |
 %%          {stop, Reason, State}            (terminate/2 is called)
 %%--------------------------------------------------------------------
-handle_info({tcp, _Sock, Data}, State) ->
+handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
+    do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
     handle_sock_data(Data, State);
 handle_info({ssl, _Sock, Data}, State) ->
     handle_sock_data(Data, State);
@@ -305,14 +305,19 @@ handle_info({ssl_closed, _Sock}, State) ->
 
 handle_info({req_timedout, From}, State) ->
     case lists:keysearch(From, #request.from, queue:to_list(State#state.reqs)) of
-       false ->
-          {noreply, State};
-       {value, _} ->
-          shutting_down(State),
-          do_error_reply(State, req_timedout),
-          {stop, normal, State}
+	false ->
+	    {noreply, State};
+	{value, _} ->
+	    shutting_down(State),
+	    do_error_reply(State, req_timedout),
+	    {stop, normal, State}
     end;
 
+handle_info(timeout, State) ->
+    shutting_down(State),
+    do_error_reply(State, req_timedout),
+    {stop, normal, State};
+
 handle_info({trace, Bool}, State) ->
     put(my_trace_flag, Bool),
     {noreply, State};
@@ -365,8 +370,8 @@ handle_sock_data(Data, #state{status=get_header, socket=Sock}=State) ->
 	    shutting_down(State),
 	    {stop, normal, State};
 	State_1 ->
-	    do_setopts(Sock, [{active, true}], State#state.is_ssl),
-	    {noreply, State_1}
+	    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+	    {noreply, State_1, get_inac_timeout(State_1)}
     end;
 
 handle_sock_data(Data, #state{status=get_body, content_length=CL,
@@ -382,8 +387,8 @@ handle_sock_data(Data, #state{status=get_body, content_length=CL,
 					    {error, {Reason, {stat_code, StatCode}, Headers}}),
 		    {stop, normal, State};
 		State_1 ->
-		    do_setopts(Sock, [{active, true}], State#state.is_ssl),
-		    {noreply, State_1}
+		    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+		    {noreply, State_1, get_inac_timeout(State_1)}
 	    end;
 	_ ->
 	    case parse_11_response(Data, State) of
@@ -396,20 +401,17 @@ handle_sock_data(Data, #state{status=get_body, content_length=CL,
 		    shutting_down(State),
 		    {stop, normal, State};
 		State_1 ->
-		    do_setopts(Sock, [{active, true}], State#state.is_ssl),
-		    {noreply, State_1}
+		    do_setopts(Sock, [{active, once}], State#state.is_ssl),
+		    {noreply, State_1, get_inac_timeout(State_1)}
 	    end
     end.
 
 accumulate_response(Data,
 		    #state{
-		      cur_req = #request{save_response_to_file = SaveResponseToFile,
+		      cur_req = #request{save_response_to_file = true,
 					 tmp_file_fd = undefined} = CurReq,
-		      http_status_code=[$2 | _]}=State) when SaveResponseToFile /= false ->
-    TmpFilename = case SaveResponseToFile of
-		      true -> make_tmp_filename();
-		      F -> F
-		  end,
+		      http_status_code=[$2 | _]}=State) ->
+    TmpFilename = make_tmp_filename(),
     case file:open(TmpFilename, [write, delayed_write, raw]) of
 	{ok, Fd} ->
 	    accumulate_response(Data, State#state{
@@ -419,30 +421,30 @@ accumulate_response(Data,
 	{error, Reason} ->
 	    {error, {file_open_error, Reason}}
     end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = SaveResponseToFile,
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
 						    tmp_file_fd = Fd},
 				 transfer_encoding=chunked,
-				 chunks = Chunks,
+				 reply_buffer = Reply_buf,
 				 http_status_code=[$2 | _]
-				} = State) when SaveResponseToFile /= false ->
-    case file:write(Fd, [Chunks | Data]) of
+				} = State) ->
+    case file:write(Fd, [Reply_buf, Data]) of
 	ok ->
-	    State#state{chunks = []};
+	    State#state{reply_buffer = <<>>};
 	{error, Reason} ->
 	    {error, {file_write_error, Reason}}
     end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = SaveResponseToFile,
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
 						    tmp_file_fd = Fd},
 				 reply_buffer = RepBuf,
 				 http_status_code=[$2 | _]
-				} = State) when SaveResponseToFile /= false ->
-    case file:write(Fd, [RepBuf | Data]) of
+				} = State) ->
+    case file:write(Fd, [RepBuf, Data]) of
 	ok ->
-	    State#state{reply_buffer = []};
+	    State#state{reply_buffer = <<>>};
 	{error, Reason} ->
 	    {error, {file_write_error, Reason}}
     end;
-accumulate_response([], State) ->
+accumulate_response(<<>>, State) ->
     State;
 accumulate_response(Data, #state{reply_buffer = RepBuf,
 				 rep_buf_size = RepBufSize,
@@ -451,7 +453,7 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
     #request{stream_to=StreamTo, req_id=ReqId,
 	     stream_chunk_size = Stream_chunk_size,
 	     response_format = Response_format} = CurReq,
-    RepBuf_1 = [Data | RepBuf],
+    RepBuf_1 = concat_binary([RepBuf, Data]),
     New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
 	undefined ->
@@ -459,12 +461,12 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
 	_ when New_data_size < Stream_chunk_size ->
 	    State#state{reply_buffer = RepBuf_1};
 	_ ->
-	    {Stream_chunk, Rem_data} = split_list_at(flatten(lists:reverse(RepBuf_1)), Stream_chunk_size),
+	    {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
 	    do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
 	    accumulate_response(
 	      Rem_data,
 	      State#state{
-		reply_buffer = [],
+		reply_buffer = <<>>,
 		streamed_size = Streamed_size + Stream_chunk_size})
     end.
 
@@ -491,11 +493,11 @@ handle_sock_closed(#state{cur_req=undefined} = State) ->
 %% We check for IsClosing because this the server could have sent a
 %% Connection-Close header and has closed the socket to indicate end
 %% of response. There maybe requests pipelined which need a response.
-handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
-			  is_closing=IsClosing,
-			  cur_req=#request{tmp_file_name=TmpFilename,
-					   tmp_file_fd=Fd} = CurReq,
-			  status=get_body, recvd_headers=Headers}=State) ->
+handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
+			  is_closing = IsClosing,
+			  cur_req = #request{tmp_file_name=TmpFilename,
+					     tmp_file_fd=Fd} = CurReq,
+			  status = get_body, recvd_headers = Headers}=State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
 	     response_format = Resp_format} = CurReq,
     case IsClosing of
@@ -519,11 +521,11 @@ handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
 
 do_connect(Host, Port, _Options, #state{is_ssl=true, ssl_options=SSLOptions}, Timeout) ->
     ssl:connect(Host, Port,
-		[{nodelay, true}, {active, false} | SSLOptions],
+		[binary, {nodelay, true}, {active, false} | SSLOptions],
 		Timeout);
 do_connect(Host, Port, _Options, _State, Timeout) ->
     gen_tcp:connect(Host, Port,
-		    [{nodelay, true}, {active, false}],
+		    [binary, {nodelay, true}, {active, false}],
 		    Timeout).
 
 do_send(Sock, Req, true)  ->  ssl:send(Sock, Req);
@@ -602,7 +604,7 @@ send_req_1(#url{abspath = AbsPath,
 		     io:format("Err: ~p~n", [Err]),
 		     Err
 	     end,
-    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+    do_setopts(Sock, [{active, once}], State#state.is_ssl),
     SndRes.
 
 add_auth_headers(#url{username = User,
@@ -758,12 +760,12 @@ chunk_request_body(Body, _ChunkSize, Acc) when list(Body) ->
 
 parse_response(_Data, #state{cur_req = undefined}=State) ->
     State#state{status = idle};
-parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
-			    cur_req=CurReq}=State) ->
+parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
+			    cur_req = CurReq} = State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
 	     method=Method, response_format = Resp_format} = CurReq,
     MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
-    case scan_header(Data, Acc) of
+    case scan_header(Acc, Data) of
 	{yes, Headers, Data_1}  ->
 	    do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
 	    do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
@@ -779,7 +781,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		    ok
 	    end,
 	    State_1 = State#state{recvd_headers=Headers_1, status=get_body,
-				  reply_buffer = [],
+				  reply_buffer = <<>>,
 				  http_status_code=StatCode, is_closing=IsClosing},
 	    put(conn_close, ConnClose),
 	    TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
@@ -818,7 +820,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
 		    case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
 								 chunk_size=chunk_start,
-								 reply_buffer=[], chunks=[]}) of
+								 reply_buffer = <<>>}) of
 			{error, Reason} ->
 			    fail_pipelined_requests(State_1,
 						    {error, {Reason,
@@ -830,7 +832,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		undefined when HttpVsn == "HTTP/1.0";
 			       ConnClose == "close" ->
 		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1#state{reply_buffer=[Data_1]};
+		    State_1#state{reply_buffer = Data_1};
 		undefined ->
 		    fail_pipelined_requests(State_1,
 					    {error, {content_length_undefined,
@@ -842,7 +844,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 			    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
 			    do_trace("Recvd Content-Length of ~p~n", [V_1]),
 			    State_2 = State_1#state{rep_buf_size=0,
-						    reply_buffer=[],
+						    reply_buffer = <<>>,
 						    content_length=V_1},
 			    case parse_11_response(Data_1, State_2) of
 				{error, Reason} ->
@@ -861,9 +863,9 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		    end
 	    end;
 	{no, Acc_1} when MaxHeaderSize == infinity ->
-	    State#state{reply_buffer=Acc_1};
-	{no, Acc_1} when length(Acc_1) < MaxHeaderSize ->
-	    State#state{reply_buffer=Acc_1};
+	    State#state{reply_buffer = Acc_1};
+	{no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
+	    State#state{reply_buffer = Acc_1};
 	{no, _Acc_1} ->
 	    fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
 	    {error, max_headers_size_exceeded}
@@ -878,122 +880,97 @@ is_connection_closing(_, _)                -> false.
 parse_11_response(DataRecvd,
 		  #state{transfer_encoding=chunked, 
 			 chunk_size=chunk_start,
-			 cur_req=CurReq,
-			 reply_buffer=Buf
-			}=State) ->
-    case scan_crlf(DataRecvd, Buf) of
+			 chunk_size_buffer = Chunk_sz_buf
+			} = State) ->
+    case scan_crlf(Chunk_sz_buf, DataRecvd) of
 	{yes, ChunkHeader, Data_1} ->
 	    case parse_chunk_header(ChunkHeader) of
 		{error, Reason} ->
 		    {error, Reason};
 		ChunkSize ->
-		    #request{stream_to=StreamTo, req_id=ReqId,
-			     response_format = Response_format} = CurReq,
 		    %%
-		    %% Do we have to preserve the chunk encoding when streaming?
+		    %% Do we have to preserve the chunk encoding when
+		    %% streaming? NO. This should be transparent to the client
+		    %% process. Chunked encoding was only introduced to make
+		    %% it efficient for the server.
 		    %%
-		    do_interim_reply(StreamTo, Response_format,
-				     ReqId, {chunk_start, ChunkSize}),
-		    RemLen = length(Data_1),
+		    RemLen = size(Data_1),
 		    do_trace("Determined chunk size: ~p. Already recvd: ~p~n", [ChunkSize, RemLen]),
-		    parse_11_response(Data_1, State#state{rep_buf_size=0,
-							  reply_buffer=[],
-							  deleted_crlf=true,
-							  chunk_size=ChunkSize})
+		    parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
+							  deleted_crlf = true,
+							  recvd_chunk_size = 0,
+							  chunk_size = ChunkSize})
 	    end;
 	{no, Data_1} ->
-	    State#state{reply_buffer=Data_1, rep_buf_size=length(Data_1)}
+	    State#state{chunk_size_buffer = Data_1}
     end;
 
-%% This clause is there to remove the CRLF between two chunks
+%% This clause is to remove the CRLF between two chunks
 %%
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding=chunked, 
-			 chunk_size=tbd,
-			 chunks = Chunks,
-			 cur_req=CurReq,
-			 reply_buffer=Buf}=State) ->
-    case scan_crlf(DataRecvd, Buf) of
+		  #state{transfer_encoding = chunked, 
+			 chunk_size = tbd,
+			 chunk_size_buffer = Buf}=State) ->
+    case scan_crlf(Buf, DataRecvd) of
 	{yes, _, NextChunk} ->
-	    #request{stream_to=StreamTo, req_id=ReqId,
-		     response_format = Response_format} = CurReq,
-	    %%
-	    %% Do we have to preserve the chunk encoding when streaming?
-	    %%
-	    State_1 = State#state{chunk_size=chunk_start,
-				  rep_buf_size=0,
-				  reply_buffer=[],
-				  deleted_crlf=true},
-	    State_2 = case StreamTo of
-			  undefined ->
-			      State_1#state{chunks = [Buf | Chunks]};
-			  _ ->
-			      %% Flush out all buffered data as chunk is ending
-			      do_interim_reply(StreamTo, Response_format, ReqId,
-					       lists:reverse([Buf | Chunks])),
-			      do_interim_reply(StreamTo, Response_format,
-					       ReqId, chunk_end),
-			      State_1#state{chunks = [], streamed_size = 0}
-		      end,
-	    parse_11_response(NextChunk, State_2);
+	    State_1 = State#state{chunk_size = chunk_start,
+				  chunk_size_buffer = <<>>,
+%%				  reply_buffer = Buf_1,
+				  deleted_crlf = true},
+	    parse_11_response(NextChunk, State_1);
 	{no, Data_1} ->
-	    State#state{reply_buffer=Data_1, rep_buf_size=length(Data_1)}
+%%	    State#state{reply_buffer = Data_1, rep_buf_size = size(Data_1)}
+	    State#state{chunk_size_buffer = Data_1}
     end;
 
 %% This clause deals with the end of a chunked transfer
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding=chunked, chunk_size=0, 
-			 cur_req=CurReq,
+		  #state{transfer_encoding = chunked, chunk_size = 0, 
+			 cur_req = CurReq,
 			 deleted_crlf = DelCrlf,
-			 reply_buffer=Trailer, reqs=Reqs}=State) ->
+			 reply_buffer = Trailer, reqs = Reqs}=State) ->
     do_trace("Detected end of chunked transfer...~n", []),
     DataRecvd_1 = case DelCrlf of
 		      false ->
 			  DataRecvd;
 		      true ->
-			  [$\r, $\n | DataRecvd]
-		  end,
-    #request{stream_to=StreamTo, req_id=ReqId,
-	     response_format = Response_format} = CurReq,
-    case scan_header(DataRecvd_1, Trailer) of
+			  <<$\r, $\n, DataRecvd/binary>>
+                  end,
+    case scan_header(Trailer, DataRecvd_1) of
 	{yes, _TEHeaders, Rem} ->
 	    {_, Reqs_1} = queue:out(Reqs),
-	    %%
-	    %% Do we have to preserve the chunk encoding when streaming? Nope.
-	    %%
-	    do_interim_reply(StreamTo, Response_format, ReqId, chunk_end),
-	    State_1 = handle_response(CurReq, State#state{reqs=Reqs_1}),
+	    State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
 	    parse_response(Rem, reset_state(State_1));
 	{no, Rem} ->
-	    State#state{reply_buffer=Rem, rep_buf_size=length(Rem), deleted_crlf=false}
+	    State#state{reply_buffer = Rem, rep_buf_size = size(Rem), deleted_crlf = false}
     end;
 
 %% This clause extracts a chunk, given the size.
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding=chunked, chunk_size=CSz,
-			 rep_buf_size=RepBufSz}=State) ->
-    NeedBytes = CSz - RepBufSz,
-    DataLen = length(DataRecvd),
+		  #state{transfer_encoding = chunked,
+			 chunk_size = CSz,
+			 recvd_chunk_size = Recvd_csz,
+			 rep_buf_size = RepBufSz} = State) ->
+    NeedBytes = CSz - Recvd_csz,
+    DataLen = size(DataRecvd),
     do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
     case DataLen >= NeedBytes of
 	true ->
-	    {RemChunk, RemData} = split_list_at(DataRecvd, NeedBytes),
+	    {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
 	    do_trace("Recvd another chunk...~n", []),
 	    do_trace("RemData -> ~p~n", [RemData]),
 	    case accumulate_response(RemChunk, State) of
 		{error, Reason} ->
 		    do_trace("Error accumulating response --> ~p~n", [Reason]),
 		    {error, Reason};
-		#state{reply_buffer = NewRepBuf,
-		       chunks = NewChunks} = State_1 ->
-		    State_2 = State_1#state{reply_buffer=[],
-					    chunks = [lists:reverse(NewRepBuf) | NewChunks],
-					    rep_buf_size=0,
-					    chunk_size=tbd},
+		#state{} = State_1 ->
+		    State_2 = State_1#state{chunk_size=tbd},
 		    parse_11_response(RemData, State_2)
 	    end;
 	false ->
-	    accumulate_response(DataRecvd, State#state{rep_buf_size=(RepBufSz + DataLen)})
+	    accumulate_response(DataRecvd,
+				State#state{rep_buf_size = RepBufSz + DataLen,
+					    recvd_chunk_size = Recvd_csz + DataLen})
     end;
 
 %% This clause to extract the body when Content-Length is specified
@@ -1001,10 +978,10 @@ parse_11_response(DataRecvd,
 		  #state{content_length=CL, rep_buf_size=RepBufSz,
 			 reqs=Reqs}=State) ->
     NeedBytes = CL - RepBufSz,
-    DataLen = length(DataRecvd),
+    DataLen = size(DataRecvd),
     case DataLen >= NeedBytes of
 	true ->
-	    {RemBody, Rem} = split_list_at(DataRecvd, NeedBytes),
+	    {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
 	    {_, Reqs_1} = queue:out(Reqs),
 	    State_1 = accumulate_response(RemBody, State),
 	    State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
@@ -1023,15 +1000,8 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 		#state{http_status_code = SCode,
 		       send_timer = ReqTimer,
 		       reply_buffer = RepBuf,
-		       transfer_encoding = TEnc,
-		       chunks = Chunks,
 		       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
-    Body = case TEnc of
-	       chunked ->
-		   lists:reverse(Chunks);
-	       _ ->
-		   lists:reverse(RepBuf)
-	   end,
+    Body = RepBuf,
     State_1 = set_cur_request(State),
     file:close(Fd),
     ResponseBody = case TmpFilename of
@@ -1047,14 +1017,9 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 			 response_format = Resp_format},
 		#state{http_status_code=SCode, recvd_headers=RespHeaders,
-		       reply_buffer=RepBuf, transfer_encoding=TEnc,
-		       chunks=Chunks, send_timer=ReqTimer}=State) ->
-    Body = case TEnc of
-	       chunked ->
-		   lists:reverse(Chunks);
-	       _ ->
-		   lists:reverse(RepBuf)
-	   end,
+		       reply_buffer = RepBuf,
+		       send_timer=ReqTimer}=State) ->
+    Body = RepBuf,
 %%    State_1 = set_cur_request(State),
     State_1 = case get(conn_close) of
 	"close" ->
@@ -1070,10 +1035,17 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
     set_cur_request(State_1).
 
 reset_state(State) ->
-    State#state{status=get_header, rep_buf_size=0, streamed_size = 0,
-		content_length=undefined,
-		reply_buffer=[], chunks=[], recvd_headers=[], deleted_crlf=false,
-		http_status_code=undefined, chunk_size=undefined, transfer_encoding=undefined}.
+    State#state{status            = get_header,
+		rep_buf_size      = 0,
+		streamed_size     = 0,
+		content_length    = undefined,
+		reply_buffer      = <<>>,
+		chunk_size_buffer = <<>>,
+		recvd_headers     = [],
+		deleted_crlf      = false,
+		http_status_code  = undefined,
+		chunk_size        = undefined,
+		transfer_encoding = undefined}.
 
 set_cur_request(#state{reqs = Reqs} = State) ->
     case queue:to_list(Reqs) of
@@ -1084,7 +1056,7 @@ set_cur_request(#state{reqs = Reqs} = State) ->
     end.
 
 parse_headers(Headers) ->
-    case scan_crlf(Headers, []) of
+    case scan_crlf(Headers) of
 	{yes, StatusLine, T} ->
 	    Headers_1 = parse_headers_1(T),
 	    case parse_status_line(StatusLine) of
@@ -1107,6 +1079,8 @@ parse_headers(Headers) ->
 %    SP. A recipient MAY replace any linear white space with a single
 %    SP before interpreting the field value or forwarding the message
 %    downstream.
+parse_headers_1(B) when is_binary(B) ->
+    parse_headers_1(binary_to_list(B));
 parse_headers_1(String) ->
     parse_headers_1(String, [], []).
 
@@ -1135,6 +1109,8 @@ parse_headers_1([], L, Acc) ->
 	    end,
     lists:reverse(Acc_1).
 
+parse_status_line(Line) when is_binary(Line) ->
+    parse_status_line(binary_to_list(Line));
 parse_status_line(Line) ->
     parse_status_line(Line, get_prot_vsn, [], []).
 parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
@@ -1148,6 +1124,8 @@ parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
 parse_status_line([], _, _, _) ->
     http_09.
 
+parse_header(B) when is_binary(B) ->
+    parse_header(binary_to_list(B));
 parse_header(L) ->
     parse_header(L, []).
 parse_header([$: | V], Acc) ->
@@ -1157,13 +1135,75 @@ parse_header([H | T], Acc) ->
 parse_header([], _) ->
     invalid.
 
-scan_header([$\n|T], [$\r,$\n,$\r|L]) -> {yes, lists:reverse([$\n,$\r| L]), T};
-scan_header([H|T],  L)                -> scan_header(T, [H|L]);
-scan_header([], L)                    -> {no, L}.
+scan_header(Bin) ->
+    case get_crlf_crlf_pos(Bin, 0) of
+	{yes, Pos} ->
+	    {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
+	    {yes, Headers, Body};
+	no ->
+	    {no, Bin}
+    end.
+
+scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
+    scan_header(<<Bin1/binary, Bin2/binary>>);
+scan_header(Bin1, <<>>) ->
+    scan_header(Bin1);
+scan_header(Bin1, Bin2) ->
+    Bin1_already_scanned_size = size(Bin1) - 4,
+    <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
+    Bin_to_scan = <<Rest/binary, Bin2/binary>>,
+    case get_crlf_crlf_pos(Bin_to_scan, 0) of
+	{yes, Pos} ->
+	    {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+	    {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+	no ->
+	    {no, <<Bin1/binary, Bin2/binary>>}
+    end.
+
+get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_crlf_pos(<<_, Rest/binary>>, Pos)               -> get_crlf_crlf_pos(Rest, Pos + 1);
+get_crlf_crlf_pos(<<>>, _)                               -> no.
+
+scan_crlf(Bin) ->
+    case get_crlf_pos(Bin) of
+	{yes, Pos} ->
+	    {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+	    {yes, Prefix, Suffix};
+	no ->
+	    {no, Bin}
+    end.
+
+scan_crlf(<<>>, Bin2) ->
+    scan_crlf(Bin2);
+scan_crlf(Bin1, Bin2) when size(Bin1) < 2 ->
+    scan_crlf(<<Bin1/binary, Bin2/binary>>);
+scan_crlf(Bin1, Bin2) ->
+    scan_crlf_1(size(Bin1) - 2, Bin1, Bin2).
+
+scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
+    <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
+    Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
+    case get_crlf_pos(Bin3) of
+	{yes, Pos} ->
+	    {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+	    {yes, concat_binary([Bin1_head, Prefix]), Suffix};
+	no ->
+	    {no, concat_binary([Bin1, Bin2])}
+    end.
 
-scan_crlf([$\n|T], [$\r | L]) -> {yes, lists:reverse(L), T};
-scan_crlf([H|T],  L)          -> scan_crlf(T, [H|L]);
-scan_crlf([], L)              -> {no, L}.
+get_crlf_pos(Bin) ->
+    get_crlf_pos(Bin, 0).
+
+get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_pos(<<_, Rest/binary>>, Pos)     -> get_crlf_pos(Rest, Pos + 1);
+get_crlf_pos(<<>>, _)                     -> no.
+
+%% scan_crlf(<<$\n, T/binary>>, [$\r | L]) -> {yes, lists:reverse(L), T};
+%% scan_crlf(<<H, T/binary>>,  L)          -> scan_crlf(T, [H|L]);
+%% scan_crlf(<<>>, L)                      -> {no, L};
+%% scan_crlf([$\n|T], [$\r | L])           -> {yes, lists:reverse(L), T};
+%% scan_crlf([H|T],  L)                    -> scan_crlf(T, [H|L]);
+%% scan_crlf([], L)                        -> {no, L}.
 
 fmt_val(L) when list(L)    -> L;
 fmt_val(I) when integer(I) -> integer_to_list(I);
@@ -1221,16 +1261,16 @@ parse_chunk_header([]) ->
 parse_chunk_header(ChunkHeader) ->
     parse_chunk_header(ChunkHeader, []).
 
-parse_chunk_header([$; | _], Acc) ->
+parse_chunk_header(<<$;, _/binary>>, Acc) ->
     hexlist_to_integer(lists:reverse(Acc));
-parse_chunk_header([H | T], Acc) ->
+parse_chunk_header(<<H, T/binary>>, Acc) ->
     case is_whitespace(H) of
 	true ->
 	    parse_chunk_header(T, Acc);
 	false ->
 	    parse_chunk_header(T, [H | Acc])
     end;
-parse_chunk_header([], Acc) ->
+parse_chunk_header(<<>>, Acc) ->
     hexlist_to_integer(lists:reverse(Acc)).
 
 is_whitespace($\s)  -> true;
@@ -1249,6 +1289,8 @@ format_response_data(Resp_format, Body) ->
     case Resp_format of
 	list when is_list(Body) ->
 	    flatten(Body);
+	list when is_binary(Body) ->
+	    binary_to_list(Body);
 	binary when is_list(Body) ->
 	    list_to_binary(Body);
 	_ ->
@@ -1399,4 +1441,8 @@ get_stream_chunk_size(Options) ->
 	_ ->
 	    ?DEFAULT_STREAM_CHUNK_SIZE
     end.
-	
+
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->	
+    get_value(inactivity_timeout, Opts, infinity);
+get_inac_timeout(#state{cur_req = undefined}) ->
+    infinity.
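
The inactivity timeout introduced above piggybacks on the standard gen_server
idle-timeout mechanism: returning {noreply, State, Timeout} arms a timer, and
if no message arrives within Timeout milliseconds the process receives a bare
'timeout' in handle_info/2, which the code above treats as req_timedout. A
minimal sketch of the pattern in isolation (consume/2 and inactivity_ms/1 are
illustrative names, not part of ibrowse):

    handle_info(timeout, State) ->
        %% nothing arrived within the inactivity window: give up
        {stop, normal, State};
    handle_info({tcp, _Sock, _Data} = Msg, State) ->
        State_1 = consume(Msg, State),
        %% every piece of traffic re-arms the idle timer
        {noreply, State_1, inactivity_ms(State_1)}.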

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/7292757f/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 03dc4e0..9212ccd 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 -module(ibrowse_lb).
 
--vsn('$Id: ibrowse_lb.erl,v 1.1 2008/03/27 01:36:21 chandrusf Exp $ ').
+-vsn('$Id: ibrowse_lb.erl,v 1.2 2009/07/01 22:43:19 chandrusf Exp $ ').
 -author(chandru).
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
@@ -39,13 +39,6 @@
 		max_pipeline_size,
 		num_cur_sessions = 0}).
 
--import(ibrowse_lib, [
-		      parse_url/1,
-		      printable_date/0,
-		      get_value/3
-		     ]).
-		      
-
 -include("ibrowse.hrl").
 
 %%====================================================================

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/7292757f/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index de8865f..f3559b5 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -4,13 +4,14 @@
 %%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 
 -module(ibrowse_test).
--vsn('$Id: ibrowse_test.erl,v 1.3 2008/05/21 15:28:11 chandrusf Exp $ ').
+-vsn('$Id: ibrowse_test.erl,v 1.4 2009/07/01 22:43:19 chandrusf Exp $ ').
 -export([
 	 load_test/3,
 	 send_reqs_1/3,
 	 do_send_req/2,
 	 unit_tests/0,
 	 unit_tests/1,
+	 unit_tests_1/2,
 	 drv_ue_test/0,
 	 drv_ue_test/1,
 	 ue_test/0,
@@ -20,8 +21,6 @@
 	 i_do_async_req_list/4
 	]).
 
--import(ibrowse_lib, [printable_date/0]).
-
 %% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
 %% tweak settings before running the load test. The defaults are 10 and 10.
 load_test(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
@@ -49,7 +48,7 @@ send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
     log_msg("End time  : ~1000.p~n", [calendar:now_to_local_time(End_time)]),
     Elapsed_time_secs = trunc(timer:now_diff(End_time, Start_time) / 1000000),
     log_msg("Elapsed   : ~p~n", [Elapsed_time_secs]),
-    log_msg("Reqs/sec  : ~p~n", [(NumWorkers*NumReqsPerWorker) / Elapsed_time_secs]),
+    log_msg("Reqs/sec  : ~p~n", [round(trunc((NumWorkers*NumReqsPerWorker) / Elapsed_time_secs))]),
     dump_errors().
 
 init_results() ->
@@ -183,11 +182,23 @@ unit_tests() ->
     unit_tests([]).
 
 unit_tests(Options) ->
+    {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options]),
+    receive 
+	{done, Pid} ->
+	    ok;
+	{'DOWN', Ref, _, _, Info} ->
+	    io:format("Test process crashed: ~p~n", [Info])
+    after 60000 ->
+	    io:format("Timed out waiting for tests to complete~n", [])
+    end.
+
+unit_tests_1(Parent, Options) ->
     lists:foreach(fun({Url, Method}) ->
 			  execute_req(Url, Method, Options);
 		     ({Url, Method, X_Opts}) ->
 			  execute_req(Url, Method, X_Opts ++ Options)
-		  end, ?TEST_LIST).
+		  end, ?TEST_LIST),
+    Parent ! {done, self()}.
 
 verify_chunked_streaming() ->
     verify_chunked_streaming([]).
@@ -201,10 +212,10 @@ verify_chunked_streaming(Options) ->
 				 [{response_format, binary} | Options]),
     io:format("Fetching data with streaming as list...~n", []),
     Async_response_list = do_async_req_list(
-			    Url, get, [{response_format, list}]),
+			    Url, get, [{response_format, list} | Options]),
     io:format("Fetching data with streaming as binary...~n", []),
     Async_response_bin = do_async_req_list(
-			   Url, get, [{response_format, binary}]),
+			   Url, get, [{response_format, binary} | Options]),
     compare_responses(Result_without_streaming, Async_response_list, Async_response_bin).
 
 compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
@@ -220,6 +231,9 @@ compare_responses({ok, St_code, _, Body_1}, {ok, St_code, _, Body_2}, {ok, St_co
 	_ ->
 	    io:format("All three bodies are different!~n", [])
     end,
+    io:format("Body_1 -> ~p~n", [Body_1]),
+    io:format("Body_2 -> ~p~n", [Body_2]),
+    io:format("Body_3 -> ~p~n", [Body_3]),
     fail_bodies_mismatch;
 compare_responses(R1, R2, R3) ->
     io:format("R1 -> ~p~n", [R1]),
@@ -227,12 +241,12 @@ compare_responses(R1, R2, R3) ->
     io:format("R3 -> ~p~n", [R3]),
     fail.
 
-do_async_req_list(Url) ->
-    do_async_req_list(Url, get).
+%% do_async_req_list(Url) ->
+%%     do_async_req_list(Url, get).
 
-do_async_req_list(Url, Method) ->
-    do_async_req_list(Url, Method, [{stream_to, self()},
-				    {stream_chunk_size, 1000}]).
+%% do_async_req_list(Url, Method) ->
+%%     do_async_req_list(Url, Method, [{stream_to, self()},
+%% 				    {stream_chunk_size, 1000}]).
 
 do_async_req_list(Url, Method, Options) ->
     {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
@@ -270,10 +284,6 @@ wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body) ->
     receive
 	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
 	    wait_for_async_resp(Req_id, StatCode, Headers, Body);
-	{ibrowse_async_response, Req_id, {chunk_start, _}} ->
-	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body);
-	{ibrowse_async_response, Req_id, chunk_end} ->
-	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body);
 	{ibrowse_async_response_end, Req_id} ->
 	    Body_1 = list_to_binary(lists:reverse(Body)),
 	    {ok, Acc_Stat_code, Acc_Headers, Body_1};
@@ -284,7 +294,7 @@ wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body) ->
     end.
 
 execute_req(Url, Method, Options) ->
-    io:format("~s, ~p: ", [Url, Method]),
+    io:format("~7.7w, ~50.50s: ", [Method, Url]),
     Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
     case Result of
 	{ok, SCode, _H, _B} ->


[12/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
upgrade ibrowse to 1.5.2. Closes COUCHDB-451

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@801657 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/c6b2bb60
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/c6b2bb60
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/c6b2bb60

Branch: refs/heads/import-master
Commit: c6b2bb6068293ee9d3e536c81257927eb51e68a5
Parents: 087f816
Author: Adam Kocoloski <ko...@apache.org>
Authored: Thu Aug 6 14:26:16 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Thu Aug 6 14:26:16 2009 +0000

----------------------------------------------------------------------
 Makefile.am             |  2 +-
 ibrowse.erl             | 41 ++++++++++++++++++++++++++++++++++++++++-
 ibrowse_http_client.erl | 23 +++++++++++++++++------
 ibrowse_lb.erl          | 38 ++++++++++++++++++++++++++++++--------
 ibrowse_test.erl        |  4 ++++
 5 files changed, 92 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c6b2bb60/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 02a3e6e..76262a6 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-1.4.1/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-1.5.2/ebin
 
 ibrowse_file_collection = \
     ibrowse.erl \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c6b2bb60/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 81fc74d..1913ef5 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2009 Chandrashekhar Mullaparthi
-%% @version 1.5.1
+%% @version 1.5.2
 %% @doc The ibrowse application implements an HTTP 1.1 client. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -98,6 +98,7 @@
 	 trace_on/2,
 	 trace_off/2,
 	 all_trace_off/0,
+	 show_dest_status/0,
 	 show_dest_status/2
 	]).
 
@@ -480,6 +481,44 @@ all_trace_off() ->
     ibrowse ! all_trace_off,
     ok.
 
+show_dest_status() ->
+    Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
+							     is_integer(Port) ->
+				 true;
+			    (_) ->
+				 false
+			 end, ets:tab2list(ibrowse_lb)),
+    All_ets = ets:all(),
+    io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+	      ["Server:port", "ETS", "Num conns", "LB Pid"]),
+    io:format("~80.80.=s~n", [""]),
+    lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
+			  case lists:dropwhile(
+				 fun(Tid) ->
+					 ets:info(Tid, owner) /= Lb_pid
+				 end, All_ets) of
+			      [] ->
+				  io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+					    [Host ++ ":" ++ integer_to_list(Port),
+					     "",
+					     "",
+					     io_lib:format("~p", [Lb_pid])]
+					   );
+			      [Tid | _] ->
+				  catch (
+				    begin
+					Size = ets:info(Tid, size),
+					io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+						  [Host ++ ":" ++ integer_to_list(Port),
+						   integer_to_list(Tid),
+						   integer_to_list(Size),
+						   io_lib:format("~p", [Lb_pid])]
+						 )
+				    end
+				   )
+				  end
+		  end, Dests).
+
 %% @doc Shows some internal information about load balancing to a
 %% specified Host:Port. Info about workers spawned using
 %% spawn_worker_process/2 or spawn_link_worker_process/2 is not
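
The zero-arity show_dest_status/0 added above is intended for interactive
inspection: it walks the ibrowse_lb table and prints one row per destination
(server:port, the connection ETS table id, the number of connections, and the
load-balancer pid). A hypothetical shell call:

    1> ibrowse:show_dest_status().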

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c6b2bb60/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 013f31b..dde258e 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -137,7 +137,7 @@ handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
 handle_call(stop, _From, State) ->
     do_close(State),
     do_error_reply(State, closing_on_request),
-    {stop, normal, State};
+    {stop, normal, ok, State};
 
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
@@ -184,6 +184,15 @@ handle_info({ssl_closed, _Sock}, State) ->
     handle_sock_closed(State),
     {stop, normal, State};
 
+handle_info({tcp_error, _Sock}, State) ->
+    io:format("Error on connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+    handle_sock_closed(State),
+    {stop, normal, State};
+handle_info({ssl_error, _Sock}, State) ->
+    io:format("Error on SSL connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+    handle_sock_closed(State),
+    {stop, normal, State};
+
 handle_info({req_timedout, From}, State) ->
     case lists:keysearch(From, #request.from, queue:to_list(State#state.reqs)) of
 	false ->
@@ -204,6 +213,8 @@ handle_info({trace, Bool}, State) ->
     {noreply, State};
 
 handle_info(Info, State) ->
+    io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
+	      [State#state.host, State#state.port, Info]),
     io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
     {noreply, State}.
 
@@ -869,8 +880,8 @@ is_connection_closing(_, _)                -> false.
 
 %% This clause determines the chunk size when given data from the beginning of the chunk
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding=chunked,
-			 chunk_size=chunk_start,
+		  #state{transfer_encoding = chunked,
+			 chunk_size = chunk_start,
 			 chunk_size_buffer = Chunk_sz_buf
 			} = State) ->
     case scan_crlf(Chunk_sz_buf, DataRecvd) of
@@ -906,15 +917,15 @@ parse_11_response(DataRecvd,
 	{yes, _, NextChunk} ->
 	    State_1 = State#state{chunk_size = chunk_start,
 				  chunk_size_buffer = <<>>,
-%%				  reply_buffer = Buf_1,
 				  deleted_crlf = true},
 	    parse_11_response(NextChunk, State_1);
 	{no, Data_1} ->
-%%	    State#state{reply_buffer = Data_1, rep_buf_size = size(Data_1)}
 	    State#state{chunk_size_buffer = Data_1}
     end;
 
-%% This clause deals with the end of a chunked transfer
+%% This clause deals with the end of a chunked transfer. ibrowse does
+%% not support Trailers in the Chunked Transfer encoding. Any trailer
+%% received is silently discarded.
 parse_11_response(DataRecvd,
 		  #state{transfer_encoding = chunked, chunk_size = 0,
 			 cur_req = CurReq,

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c6b2bb60/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 9c2165b..834054a 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -108,18 +108,19 @@ spawn_connection(Lb_pid, Url,
 
 %% Update max_sessions in #state with supplied value
 handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
-	    #state{ets_tid = Tid,
-		   num_cur_sessions = Num} = State)
+	    #state{num_cur_sessions = Num} = State)
     when Num >= Max_sess ->
-    Reply = find_best_connection(Tid, Max_pipe),
-    {reply, Reply, State#state{max_sessions = Max_sess}};
+    State_1 = maybe_create_ets(State),
+    Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
+    {reply, Reply, State_1#state{max_sessions = Max_sess}};
 
 handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
-	    #state{num_cur_sessions = Cur,
-		   ets_tid = Tid} = State) ->
+	    #state{num_cur_sessions = Cur} = State) ->
+    State_1 = maybe_create_ets(State),
+    Tid = State_1#state.ets_tid,
     {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
     ets:insert(Tid, {{1, Pid}, []}),
-    {reply, {ok, Pid}, State#state{num_cur_sessions = Cur + 1}};
+    {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1}};
 
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
@@ -145,11 +146,26 @@ handle_cast(_Msg, State) ->
 handle_info({'EXIT', Parent, _Reason}, #state{parent_pid = Parent} = State) ->
     {stop, normal, State};
 
+handle_info({'EXIT', _Pid, _Reason}, #state{ets_tid = undefined} = State) ->
+    {noreply, State};
+
 handle_info({'EXIT', Pid, _Reason},
 	    #state{num_cur_sessions = Cur,
 		   ets_tid = Tid} = State) ->
     ets:match_delete(Tid, {{'_', Pid}, '_'}),
-    {noreply, State#state{num_cur_sessions = Cur - 1}};
+    Cur_1 = Cur - 1,
+    State_1 = case Cur_1 of
+		  0 ->
+		      ets:delete(Tid),
+		      State#state{ets_tid = undefined};
+		  _ ->
+		      State
+	      end,
+    {noreply, State_1#state{num_cur_sessions = Cur_1}};
+
+handle_info({trace, Bool}, #state{ets_tid = undefined} = State) ->
+    put(my_trace_flag, Bool),
+    {noreply, State};
 
 handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
     ets:foldl(fun({{_, Pid}, _}, Acc) when is_pid(Pid) ->
@@ -192,3 +208,9 @@ find_best_connection(Tid, Max_pipe) ->
 	_ ->
 	    {error, retry_later}
     end.
+
+maybe_create_ets(#state{ets_tid = undefined} = State) ->
+    Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+    State#state{ets_tid = Tid};
+maybe_create_ets(State) ->
+    State.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/c6b2bb60/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index cab1f88..3dc66ec 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -231,6 +231,7 @@ unit_tests(Options) ->
 	{'DOWN', Ref, _, _, Info} ->
 	    io:format("Test process crashed: ~p~n", [Info])
     after 60000 ->
+	    exit(Pid, kill),
 	    io:format("Timed out waiting for tests to complete~n", [])
     end.
 
@@ -301,6 +302,9 @@ wait_for_resp(Pid) ->
     receive
 	{async_result, Pid, Res} ->
 	    Res;
+	{async_result, Other_pid, _} ->
+	    io:format("~p: Waiting for result from ~p: got from ~p~n", [self(), Pid, Other_pid]),
+	    wait_for_resp(Pid);
 	{'DOWN', _, _, Pid, Reason} ->
 	    {'EXIT', Reason};
 	{'DOWN', _, _, _, _} ->


[16/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
More relaxed verification of SSL certificate chains, COUCHDB-840.
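
For illustration only, not part of this patch: in the Erlang ssl
application the {depth, N} option bounds how many intermediate CA
certificates are accepted between the peer certificate and a trusted
root when verification is enabled, so a larger depth lets ibrowse talk
to servers that present longer certificate chains. A minimal sketch of
the same option used directly with ssl:connect/4; the host name is a
placeholder:

    %% Sketch only -- "secure.example.org" is not taken from the patch.
    %% {depth, 9} allows up to nine intermediate CAs in the chain.
    {ok, Sock} = ssl:connect("secure.example.org", 443,
                             [binary, {active, false},
                              {ssl_imp, new}, {depth, 9}],
                             30000),
    ssl:close(Sock).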

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@980269 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/1c4324e4
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/1c4324e4
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/1c4324e4

Branch: refs/heads/import-master
Commit: 1c4324e46d6ad9de8df77bc7215fd8da7fa280e4
Parents: c166256
Author: Adam Kocoloski <ko...@apache.org>
Authored: Thu Jul 29 00:02:08 2010 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Thu Jul 29 00:02:08 2010 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1c4324e4/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index a767b84..65d9cb9 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -119,7 +119,7 @@ init(#url{host=Host, port=Port, protocol=Protocol}) ->
         host = Host,
         port = Port,
         is_ssl = (Protocol == https),
-        ssl_options = [{ssl_imp, new}]
+        ssl_options = [{ssl_imp, new}, {depth, 9}]
     },
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),


[29/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Add support for replication over IPv6 (part 1)

This change upgrades ibrowse to version 2.2.0, which adds support for
IPv6 (https://github.com/cmullaparthi/ibrowse/pull/34).
This is part of COUCHDB-665.
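
For illustration, not part of the commit: with 2.2.0 an IPv6 literal is
written in square brackets as per RFC 2732, ibrowse_lib:parse_url/1
records the address family in the new host_type field, and the client
opens the socket with the inet6 option. A minimal sketch, assuming a
CouchDB instance listening on the IPv6 loopback address (the port and
path are placeholders):

    %% Sketch only -- URL details are illustrative.
    Url = "http://[::1]:5984/_all_dbs",
    %% parse_url/1 returns #url{host = "::1", host_type = ipv6_address,
    %% port = 5984, ...} for this input.
    {ok, _Status, _Headers, _Body} = ibrowse:send_req(Url, [], get).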



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1091709 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/ca1ed965
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/ca1ed965
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/ca1ed965

Branch: refs/heads/import-master
Commit: ca1ed9659ed462917c36954d96734c589fef8193
Parents: 7232082
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Wed Apr 13 08:50:58 2011 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Wed Apr 13 08:50:58 2011 +0000

----------------------------------------------------------------------
 Makefile.am             |  2 +-
 ibrowse.app.in          |  2 +-
 ibrowse.hrl             | 11 +++++++++-
 ibrowse_http_client.erl | 37 +++++++++++++++++++++++++++-----
 ibrowse_lib.erl         | 51 +++++++++++++++++++++++++++++++++++++++++++-
 5 files changed, 94 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/ca1ed965/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index bfd52ba..869bd10 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.3/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.2.0/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/ca1ed965/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index 875620d..af46d8a 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "2.1.3"},
+         {vsn, "2.2.0"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/ca1ed965/ibrowse.hrl
----------------------------------------------------------------------
diff --git a/ibrowse.hrl b/ibrowse.hrl
index ebf3bb3..18dde82 100644
--- a/ibrowse.hrl
+++ b/ibrowse.hrl
@@ -1,7 +1,16 @@
 -ifndef(IBROWSE_HRL).
 -define(IBROWSE_HRL, "ibrowse.hrl").
 
--record(url, {abspath, host, port, username, password, path, protocol}).
+-record(url, {
+          abspath,
+          host,
+          port,
+          username,
+          password,
+          path,
+          protocol,
+          host_type  % 'hostname', 'ipv4_address' or 'ipv6_address'
+}).
 
 -record(lb_pid, {host_port, pid}).
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/ca1ed965/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 7d606e6..eb2bf31 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -35,6 +35,7 @@
         ]).
 
 -include("ibrowse.hrl").
+-include_lib("kernel/include/inet.hrl").
 
 -record(state, {host, port, connect_timeout,
                 inactivity_timer_ref,
@@ -489,13 +490,19 @@ do_connect(Host, Port, Options, #state{is_ssl      = true,
                                        use_proxy   = false,
                                        ssl_options = SSLOptions},
            Timeout) ->
-    ssl:connect(Host, Port, get_sock_options(Options, SSLOptions), Timeout);
+    ssl:connect(Host, Port, get_sock_options(Host, Options, SSLOptions), Timeout);
 do_connect(Host, Port, Options, _State, Timeout) ->
-    gen_tcp:connect(Host, Port, get_sock_options(Options, []), Timeout).
+    gen_tcp:connect(Host, Port, get_sock_options(Host, Options, []), Timeout).
 
-get_sock_options(Options, SSLOptions) ->
+get_sock_options(Host, Options, SSLOptions) ->
     Caller_socket_options = get_value(socket_options, Options, []),
-    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options),
+    Ipv6Options = case is_ipv6_host(Host) of
+        true ->
+            [inet6];
+        false ->
+            []
+    end,
+    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options ++ Ipv6Options),
     case lists:keysearch(nodelay, 1, Other_sock_options) of
         false ->
             [{nodelay, true}, binary, {active, false} | Other_sock_options];
@@ -503,6 +510,21 @@ get_sock_options(Options, SSLOptions) ->
             [binary, {active, false} | Other_sock_options]
     end.
 
+is_ipv6_host(Host) ->
+    case inet_parse:address(Host) of
+        {ok, {_, _, _, _, _, _, _, _}} ->
+            true;
+        {ok, {_, _, _, _}} ->
+            false;
+        _  ->
+            case inet:gethostbyname(Host) of
+                {ok, #hostent{h_addrtype = inet6}} ->
+                    true;
+                _ ->
+                    false
+            end
+    end.
+
 %% We don't want the caller to specify certain options
 filter_sock_options(Opts) ->
     lists:filter(fun({active, _}) ->
@@ -1278,7 +1300,12 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        reply_buffer  = RepBuf,
                        recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
-    ok = file:close(Fd),
+    case Fd of
+        undefined -> 
+            ok;
+        _ -> 
+            ok = file:close(Fd)
+    end,
     ResponseBody = case TmpFilename of
                        undefined ->
                            Body;

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/ca1ed965/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 696d0f6..3cbe3ac 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -180,7 +180,19 @@ get_value(Tag, TVL) ->
     V.
 
 parse_url(Url) ->
-    parse_url(Url, get_protocol, #url{abspath=Url}, []).
+    case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
+        #url{host_type = undefined, host = Host} = UrlRec ->
+            case inet_parse:address(Host) of
+                {ok, {_, _, _, _, _, _, _, _}} ->
+                    UrlRec#url{host_type = ipv6_address};
+                {ok, {_, _, _, _}} ->
+                    UrlRec#url{host_type = ipv4_address};
+                _ ->
+                    UrlRec#url{host_type = hostname}
+            end;
+        Else ->
+            Else
+    end.
 
 parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
     {invalid_uri_1, Url};
@@ -215,6 +227,21 @@ parse_url([$@ | T], get_username, Url, TmpAcc) ->
               Url#url{username = lists:reverse(TmpAcc),
                       password = ""},
               []);
+parse_url([$[ | T], get_username, Url, []) ->
+    % IPv6 address literals are enclosed by square brackets:
+    %     http://www.ietf.org/rfc/rfc2732.txt
+    parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
+parse_url([$[ | T], get_username, _Url, TmpAcc) ->
+    {error, {invalid_username_or_host, lists:reverse(TmpAcc) ++ "[" ++ T}};
+parse_url([$[ | _], get_password, _Url, []) ->
+    {error, missing_password};
+parse_url([$[ | T], get_password, Url, TmpAcc) ->
+    % IPv6 address literals are enclosed by square brackets:
+    %     http://www.ietf.org/rfc/rfc2732.txt
+    parse_url(T, get_ipv6_address,
+              Url#url{host_type = ipv6_address,
+                      password = lists:reverse(TmpAcc)},
+              []);
 parse_url([$@ | T], get_password, Url, TmpAcc) ->
     parse_url(T, get_host, 
               Url#url{password = lists:reverse(TmpAcc)},
@@ -236,6 +263,28 @@ parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
             username = undefined,
             password = undefined,
            path = Path};
+parse_url([$] | T], get_ipv6_address, #url{protocol = Prot} = Url, TmpAcc) ->
+    Addr = lists:reverse(TmpAcc),
+    case inet_parse:address(Addr) of
+        {ok, {_, _, _, _, _, _, _, _}} ->
+            Url2 = Url#url{host = Addr, port = default_port(Prot)},
+            case T of
+                [$: | T2] ->
+                    parse_url(T2, get_port, Url2, []);
+                [$/ | T2] ->
+                    Url2#url{path = [$/ | T2]};
+                [$? | T2] ->
+                    Url2#url{path = [$/, $? | T2]};
+                [] ->
+                    Url2#url{path = "/"};
+                _ ->
+                    {error, {invalid_host, "[" ++ Addr ++ "]" ++ T}}
+            end;
+        _ ->
+            {error, {invalid_ipv6_address, Addr}}
+    end;
+parse_url([$[ | T], get_host, #url{} = Url, []) ->
+    parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
 parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
     parse_url(T, get_port, 
               Url#url{host = lists:reverse(TmpAcc)},


[05/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Added code coverage report generation target.

To generate reports:

    # Assuming etap is installed
    $ cd /path/to/couchdb
    $ ./bootstrap && ERLC_FLAGS=+debug_info ./configure && make cover

You can browse the report by opening ./cover/index.html in your web browser of choice.
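
The cover target itself is not shown in this commit, but the reason
ERLC_FLAGS=+debug_info matters is that OTP's cover tool can only
cover-compile modules that were built with debug information. A rough
sketch of the equivalent manual steps in an Erlang shell (module name
and output path are illustrative, not taken from the Makefile):

    %% Sketch only -- assumes ibrowse_lib.beam was compiled with +debug_info.
    cover:start(),
    {ok, ibrowse_lib} = cover:compile_beam(ibrowse_lib),
    %% ... exercise the code, e.g. run the etap tests ...
    {ok, _OutFile} = cover:analyse_to_file(ibrowse_lib,
                                           "cover/ibrowse_lib.COVER.html",
                                           [html]).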

Shoutout to Nick Gerakines in THANKS for helping with etap and testing in general.



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@780326 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/b9c2e648
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/b9c2e648
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/b9c2e648

Branch: refs/heads/import-master
Commit: b9c2e64805ad6d0abca5b96425f82e01190f46e7
Parents: 208131a
Author: Paul Joseph Davis <da...@apache.org>
Authored: Sat May 30 21:12:06 2009 +0000
Committer: Paul Joseph Davis <da...@apache.org>
Committed: Sat May 30 21:12:06 2009 +0000

----------------------------------------------------------------------
 Makefile.am | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b9c2e648/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 614bdc3..2ba7b31 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -45,4 +45,4 @@ CLEANFILES = \
     $(ibrowseebin_make_generated_file_list)
 
 %.beam: %.erl
-	$(ERLC) $<
+	$(ERLC) $(ERLC_FLAGS) $<


[27/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Applied two more ibrowse fixes already submitted upstream:

https://github.com/cmullaparthi/ibrowse/pull/24
https://github.com/cmullaparthi/ibrowse/pull/25
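
One of these fixes touches the path where the caller controls the
socket, i.e. requests made with {stream_to, {Pid, once}}. For
illustration, using the standard ibrowse streaming API rather than
anything new in this patch (the URL is a placeholder): in "once" mode
the caller pulls each message explicitly with ibrowse:stream_next/1.

    %% Sketch only -- URL is illustrative.
    {ibrowse_req_id, ReqId} =
        ibrowse:send_req("http://localhost:5984/db/doc", [], get, [],
                         [{stream_to, {self(), once}}]),
    receive
        {ibrowse_async_headers, ReqId, _Code, _Headers} ->
            ok = ibrowse:stream_next(ReqId)
    end,
    %% Each body chunk arrives only after another stream_next/1 call.
    receive
        {ibrowse_async_response, ReqId, _Chunk} ->
            ok = ibrowse:stream_next(ReqId)
    end.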



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1056395 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/cb08a0db
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/cb08a0db
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/cb08a0db

Branch: refs/heads/import-master
Commit: cb08a0db501d5478f305b4916d49da77a15e2a71
Parents: 99da0af
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Fri Jan 7 17:15:24 2011 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Fri Jan 7 17:15:24 2011 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/cb08a0db/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index ea75948..5dce321 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -287,10 +287,14 @@ handle_sock_data(Data, #state{status = get_header}=State) ->
         {error, _Reason} ->
             shutting_down(State),
             {stop, normal, State};
-        State_1 ->
-            active_once(State_1),
-            State_2 = set_inac_timer(State_1),
-            {noreply, State_2}
+        #state{socket = Socket, status = Status, cur_req = CurReq} = State_1 ->
+            case {Status, CurReq} of
+                {get_header, #request{caller_controls_socket = true}} ->
+                    do_setopts(Socket, [{active, once}], State_1);
+                _ ->
+                    active_once(State_1)
+            end,
+            {noreply, set_inac_timer(State_1)}
     end;
 
 handle_sock_data(Data, #state{status           = get_body,
@@ -683,6 +687,7 @@ send_req_1(From,
            Headers, Method, Body, Options, Timeout,
            #state{status    = Status,
                   socket    = Socket} = State) ->
+    cancel_timer(State#state.inactivity_timer_ref, {eat_message, timeout}),
     ReqId = make_req_id(),
     Resp_format = get_value(response_format, Options, list),
     Caller_socket_options = get_value(socket_options, Options, []),


[32/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
update ibrowse to 4.0.1


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/50ee48dd
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/50ee48dd
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/50ee48dd

Branch: refs/heads/import-master
Commit: 50ee48dd5df7eca3ec1d94d600bc06e75561b1ca
Parents: 6a89bd6
Author: Jan Lehnardt <ja...@apache.org>
Authored: Wed Nov 14 20:13:52 2012 +0100
Committer: Jan Lehnardt <ja...@apache.org>
Committed: Sat Jan 12 20:10:23 2013 +0100

----------------------------------------------------------------------
 ibrowse.app.in          |  14 +--
 ibrowse.erl             | 250 +++++++++++++++++++++++++---------------
 ibrowse_http_client.erl | 268 ++++++++++++++++++++++++++-----------------
 ibrowse_lb.erl          |  91 +++++++++------
 ibrowse_lib.erl         |  74 ++++++++++--
 ibrowse_test.erl        | 132 +++++++++++++++++++--
 6 files changed, 565 insertions(+), 264 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index af46d8a..1d88084 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,13 +1,7 @@
 {application, ibrowse,
-        [{description, "HTTP client application"},
-         {vsn, "2.2.0"},
-         {modules, [ ibrowse, 
-		     ibrowse_http_client, 
-		     ibrowse_app, 
-		     ibrowse_sup, 
-		     ibrowse_lib,
-		     ibrowse_lb ]},
-         {registered, []},
-         {applications, [kernel,stdlib,sasl]},
+        [{description, "Erlang HTTP client application"},
+         {vsn, "4.0.1"},
+         {registered, [ibrowse_sup, ibrowse]},
+         {applications, [kernel,stdlib]},
 	 {env, []},
 	 {mod, {ibrowse_app, []}}]}.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index f70f92f..80a4282 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -6,8 +6,7 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2011 Chandrashekhar Mullaparthi
-%% @version 2.1.3
+%% @copyright 2005-2012 Chandrashekhar Mullaparthi
 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -71,6 +70,7 @@
 -export([
          rescan_config/0,
          rescan_config/1,
+         add_config/1,
          get_config_value/1,
          get_config_value/2,
          spawn_worker_process/1,
@@ -97,7 +97,10 @@
          trace_off/2,
          all_trace_off/0,
          show_dest_status/0,
-         show_dest_status/2
+         show_dest_status/1,
+         show_dest_status/2,
+         get_metrics/0,
+         get_metrics/2
         ]).
 
 -ifdef(debug).
@@ -136,7 +139,12 @@ start() ->
 
 %% @doc Stop the ibrowse process. Useful when testing using the shell.
 stop() ->
-    catch gen_server:call(ibrowse, stop).
+    case catch gen_server:call(ibrowse, stop) of
+        {'EXIT',{noproc,_}} ->
+            ok;
+        Res ->
+            Res
+    end.
 
 %% @doc This is the basic function to send a HTTP request.
 %% The Status return value indicates the HTTP status code returned by the webserver
@@ -277,7 +285,8 @@ send_req(Url, Headers, Method, Body) ->
 %%          {transfer_encoding, {chunked, ChunkSize}} | 
 %%          {headers_as_is, boolean()}         |
 %%          {give_raw_headers, boolean()}      |
-%%          {preserve_chunked_encoding,boolean()}
+%%          {preserve_chunked_encoding,boolean()}     |
+%%          {workaround, head_response_with_body}
 %%
 %% stream_to() = process() | {process(), once}
 %% process() = pid() | atom()
@@ -287,7 +296,7 @@ send_req(Url, Headers, Method, Body) ->
 %% Sock_opts = [Sock_opt]
 %% Sock_opt = term()
 %% ChunkSize = integer()
-%% srtf() = boolean() | filename()
+%% srtf() = boolean() | filename() | {append, filename()}
 %% filename() = string()
 %% response_format() = list | binary
 send_req(Url, Headers, Method, Body, Options) ->
@@ -354,15 +363,16 @@ try_routing_request(_, _, _, _, _, _, _, _, _, _, _) ->
     {error, retry_later}.
 
 merge_options(Host, Port, Options) ->
-    Config_options = get_config_value({options, Host, Port}, []),
+    Config_options = get_config_value({options, Host, Port}, []) ++
+                     get_config_value({options, global}, []),
     lists:foldl(
       fun({Key, Val}, Acc) ->
-                        case lists:keysearch(Key, 1, Options) of
-                            false ->
-                                [{Key, Val} | Acc];
-                            _ ->
-                                Acc
-                        end
+              case lists:keysearch(Key, 1, Options) of
+                  false ->
+                      [{Key, Val} | Acc];
+                  _ ->
+                      Acc
+              end
       end, Options, Config_options).
 
 get_lb_pid(Url) ->
@@ -426,6 +436,8 @@ do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
             {error, req_timedout};
         {'EXIT', {noproc, {gen_server, call, [Conn_Pid, _, _]}}} ->
             {error, sel_conn_closed};
+        {'EXIT', {normal, _}} ->
+            {error, req_timedout};
         {error, connection_closed} ->
             {error, sel_conn_closed};
         {'EXIT', Reason} ->
@@ -581,74 +593,98 @@ all_trace_off() ->
 %% about workers spawned using spawn_worker_process/2 or
 %% spawn_link_worker_process/2 is not included.
 show_dest_status() ->
-    Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
-                                                             is_integer(Port) ->
-                                 true;
-                            (_) ->
-                                 false
-                         end, ets:tab2list(ibrowse_lb)),
-    All_ets = ets:all(),
     io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
               ["Server:port", "ETS", "Num conns", "LB Pid"]),
     io:format("~80.80.=s~n", [""]),
-    lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
-                          case lists:dropwhile(
-                                 fun(Tid) ->
-                                         ets:info(Tid, owner) /= Lb_pid
-                                 end, All_ets) of
-                              [] ->
-                                  io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
-                                            [Host ++ ":" ++ integer_to_list(Port),
-                                             "",
-                                             "",
-                                             io_lib:format("~p", [Lb_pid])]
-                                           );
-                              [Tid | _] ->
-                                  catch (
-                                    begin
-                                        Size = ets:info(Tid, size),
-                                        io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
-                                                  [Host ++ ":" ++ integer_to_list(Port),
-                                                   io_lib:format("~p", [Tid]),
-                                                   integer_to_list(Size),
-                                                   io_lib:format("~p", [Lb_pid])]
-                                                 )
-                                    end
-                                   )
-                                  end
-                  end, Dests).
-                                          
+    Metrics = get_metrics(),
+    lists:foreach(
+      fun({Host, Port, Lb_pid, Tid, Size}) ->
+              io:format("~40.40s | ~-5.5s | ~-5.5s | ~p~n",
+                        [Host ++ ":" ++ integer_to_list(Port),
+                         integer_to_list(Tid),
+                         integer_to_list(Size), 
+                         Lb_pid])
+      end, Metrics).
+
+show_dest_status(Url) ->                                          
+    #url{host = Host, port = Port} = ibrowse_lib:parse_url(Url),
+    show_dest_status(Host, Port).
+
 %% @doc Shows some internal information about load balancing to a
 %% specified Host:Port. Info about workers spawned using
 %% spawn_worker_process/2 or spawn_link_worker_process/2 is not
 %% included.
 show_dest_status(Host, Port) ->
+    case get_metrics(Host, Port) of
+        {Lb_pid, MsgQueueSize, Tid, Size,
+         {{First_p_sz, First_speculative_sz},
+          {Last_p_sz, Last_speculative_sz}}} ->
+            io:format("Load Balancer Pid     : ~p~n"
+                      "LB process msg q size : ~p~n"
+                      "LB ETS table id       : ~p~n"
+                      "Num Connections       : ~p~n"
+                      "Smallest pipeline     : ~p:~p~n"
+                      "Largest pipeline      : ~p:~p~n",
+                      [Lb_pid, MsgQueueSize, Tid, Size, 
+                       First_p_sz, First_speculative_sz,
+                       Last_p_sz, Last_speculative_sz]);
+        _Err ->
+            io:format("Metrics not available~n", [])
+    end.
+
+get_metrics() ->
+    Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
+                                                             is_integer(Port) ->
+                                 true;
+                            (_) ->
+                                 false
+                         end, ets:tab2list(ibrowse_lb)),
+    All_ets = ets:all(),
+    lists:map(fun({lb_pid, {Host, Port}, Lb_pid}) ->
+                  case lists:dropwhile(
+                         fun(Tid) ->
+                                 ets:info(Tid, owner) /= Lb_pid
+                         end, All_ets) of
+                      [] ->
+                          {Host, Port, Lb_pid, unknown, 0};
+                      [Tid | _] ->
+                          Size = case catch (ets:info(Tid, size)) of
+                                     N when is_integer(N) -> N;
+                                     _ -> 0
+                                 end,
+                          {Host, Port, Lb_pid, Tid, Size}
+                  end
+              end, Dests).
+
+get_metrics(Host, Port) ->
     case ets:lookup(ibrowse_lb, {Host, Port}) of
         [] ->
             no_active_processes;
         [#lb_pid{pid = Lb_pid}] ->
-            io:format("Load Balancer Pid     : ~p~n", [Lb_pid]),
-            io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+            MsgQueueSize = (catch process_info(Lb_pid, message_queue_len)),
+            %% {Lb_pid, MsgQueueSize,
             case lists:dropwhile(
                    fun(Tid) ->
                            ets:info(Tid, owner) /= Lb_pid
                    end, ets:all()) of
                 [] ->
-                    io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+                    {Lb_pid, MsgQueueSize, unknown, 0, unknown};
                 [Tid | _] ->
-                    First = ets:first(Tid),
-                    Last = ets:last(Tid),
-                    Size = ets:info(Tid, size),
-                    io:format("LB ETS table id       : ~p~n", [Tid]),
-                    io:format("Num Connections       : ~p~n", [Size]),
-                    case Size of
-                        0 ->
-                            ok;
-                        _ ->
-                            {First_p_sz, _} = First,
-                            {Last_p_sz, _} = Last,
-                            io:format("Smallest pipeline     : ~1000.p~n", [First_p_sz]),
-                            io:format("Largest pipeline      : ~1000.p~n", [Last_p_sz])
+                    try
+                        Size = ets:info(Tid, size),
+                        case Size of
+                            0 ->
+                                ok;
+                            _ ->
+                                First = ets:first(Tid),
+                                Last = ets:last(Tid),
+                                [{_, First_p_sz, First_speculative_sz}] = ets:lookup(Tid, First),
+                                [{_, Last_p_sz, Last_speculative_sz}] = ets:lookup(Tid, Last),
+                                {Lb_pid, MsgQueueSize, Tid, Size,
+                                 {{First_p_sz, First_speculative_sz}, {Last_p_sz, Last_speculative_sz}}}
+                        end
+                    catch _:_ ->
+                            not_available
                     end
             end
     end.
@@ -663,9 +699,15 @@ rescan_config() ->
 %% Clear current configuration for ibrowse and load from the specified
 %% file. Current configuration is cleared only if the specified
 %% file is readable using file:consult/1
+rescan_config([{_,_}|_]=Terms) ->
+    gen_server:call(?MODULE, {rescan_config_terms, Terms});
 rescan_config(File) when is_list(File) ->
     gen_server:call(?MODULE, {rescan_config, File}).
 
+%% @doc Add additional configuration elements at runtime.
+add_config([{_,_}|_]=Terms) ->
+    gen_server:call(?MODULE, {add_config_terms, Terms}).
+
 %%====================================================================
 %% Server functions
 %%====================================================================
@@ -701,44 +743,60 @@ import_config() ->
 import_config(Filename) ->
     case file:consult(Filename) of
         {ok, Terms} ->
-            ets:delete_all_objects(ibrowse_conf),
-            Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
-                     when is_list(Host), is_integer(Port),
-                          is_integer(MaxSess), MaxSess > 0,
-                          is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
-                          I = [{{max_sessions, Host, Port}, MaxSess},
-                               {{max_pipeline_size, Host, Port}, MaxPipe},
-                               {{options, Host, Port}, Options}],
-                          lists:foreach(
-                            fun({X, Y}) ->
-                                    ets:insert(ibrowse_conf,
-                                               #ibrowse_conf{key = X, 
-                                                             value = Y})
-                            end, I);
-                     ({K, V}) ->
-                          ets:insert(ibrowse_conf,
-                                     #ibrowse_conf{key = K,
-                                                   value = V});
-                     (X) ->
-                          io:format("Skipping unrecognised term: ~p~n", [X])
-                  end,
-            lists:foreach(Fun, Terms);
+            apply_config(Terms);
         _Err ->
             ok
     end.
 
+apply_config(Terms) ->
+    ets:delete_all_objects(ibrowse_conf),
+    insert_config(Terms).
+
+insert_config(Terms) ->
+    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
+             when is_list(Host), is_integer(Port),
+                  is_integer(MaxSess), MaxSess > 0,
+                  is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
+                  I = [{{max_sessions, Host, Port}, MaxSess},
+                       {{max_pipeline_size, Host, Port}, MaxPipe},
+                       {{options, Host, Port}, Options}],
+                  lists:foreach(
+                    fun({X, Y}) ->
+                            ets:insert(ibrowse_conf,
+                                       #ibrowse_conf{key = X, 
+                                                     value = Y})
+                    end, I);
+             ({K, V}) ->
+                  ets:insert(ibrowse_conf,
+                             #ibrowse_conf{key = K,
+                                           value = V});
+             (X) ->
+                  io:format("Skipping unrecognised term: ~p~n", [X])
+          end,
+    lists:foreach(Fun, Terms).
+
 %% @doc Internal export
 get_config_value(Key) ->
-    [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
-    V.
+    try
+        [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
+        V
+    catch
+        error:badarg ->
+            throw({error, ibrowse_not_running})
+    end.
 
 %% @doc Internal export
 get_config_value(Key, DefVal) ->
-    case ets:lookup(ibrowse_conf, Key) of
-        [] ->
-            DefVal;
-        [#ibrowse_conf{value = V}] ->
-            V
+    try
+        case ets:lookup(ibrowse_conf, Key) of
+            [] ->
+                DefVal;
+            [#ibrowse_conf{value = V}] ->
+                V
+        end
+    catch
+        error:badarg ->
+            throw({error, ibrowse_not_running})
     end.
 
 set_config_value(Key, Val) ->
@@ -777,6 +835,14 @@ handle_call({rescan_config, File}, _From, State) ->
     Ret = (catch import_config(File)),
     {reply, Ret, State};
 
+handle_call({rescan_config_terms, Terms}, _From, State) ->
+    Ret = (catch apply_config(Terms)),
+    {reply, Ret, State};
+
+handle_call({add_config_terms, Terms}, _From, State) ->
+    Ret = (catch insert_config(Terms)),
+    {reply, Ret, State};
+
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
     {reply, Reply, State}.
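
For illustration, not part of the diff above: 4.0.1 exposes the
per-destination data programmatically via get_metrics/0 and
get_metrics/2 instead of only printing it, and configuration can now be
added at runtime with add_config/1, including the new {options, global}
key whose options are merged into every request. The
save_response_to_file option also gains an {append, Filename} form. A
small sketch, assuming the ibrowse application is running; the option
values are placeholders:

    %% Sketch only -- one {Host, Port, Lb_pid, Ets_tid, Num_conns} tuple
    %% per destination that has an active load balancer.
    lists:foreach(
      fun({Host, Port, Lb_pid, Ets_tid, Num_conns}) ->
              io:format("~s:~p -> ~p conns (LB ~p, ETS ~p)~n",
                        [Host, Port, Num_conns, Lb_pid, Ets_tid])
      end, ibrowse:get_metrics()).

    %% Sketch only -- set a global default option at runtime.
    ok = ibrowse:add_config([{{options, global}, [{connect_timeout, 5000}]}]).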

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 00e8ed3..c01385a 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -47,7 +47,7 @@
                 reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
                 recvd_headers=[],
                 status_line, raw_headers,
-                is_closing, send_timer, content_length,
+                is_closing, content_length,
                 deleted_crlf = false, transfer_encoding,
                 chunk_size, chunk_size_buffer = <<>>,
                 recvd_chunk_size, interim_reply_sent = false,
@@ -61,7 +61,7 @@
                   stream_chunk_size,
                   save_response_to_file = false,
                   tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
-                  response_format}).
+                  response_format, timer_ref}).
 
 -import(ibrowse_lib, [
                       get_value/2,
@@ -118,7 +118,7 @@ init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
                    lb_ets_tid = Lb_Tid},
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    {ok, State};
+    {ok, set_inac_timer(State)};
 init(Url) when is_list(Url) ->
     case catch ibrowse_lib:parse_url(Url) of
         #url{protocol = Protocol} = Url_rec ->
@@ -131,7 +131,7 @@ init({Host, Port}) ->
                    port = Port},
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    {ok, State}.
+    {ok, set_inac_timer(State)}.
 
 %%--------------------------------------------------------------------
 %% Function: handle_call/3
@@ -179,7 +179,6 @@ handle_cast(_Msg, State) ->
 %%          {stop, Reason, State}            (terminate/2 is called)
 %%--------------------------------------------------------------------
 handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
-%%    io:format("Recvd data: ~p~n", [Data]),
     do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
     handle_sock_data(Data, State);
 handle_info({ssl, _Sock, Data}, State) ->
@@ -187,7 +186,6 @@ handle_info({ssl, _Sock, Data}, State) ->
 
 handle_info({stream_next, Req_id}, #state{socket = Socket,
                                           cur_req = #request{req_id = Req_id}} = State) ->
-    %% io:format("Client process set {active, once}~n", []),
     do_setopts(Socket, [{active, once}], State),
     {noreply, set_inac_timer(State)};
 
@@ -198,8 +196,6 @@ handle_info({stream_next, _Req_id}, State) ->
                      _ ->
                          undefined
                  end,
-%%     io:format("Ignoring stream_next as ~1000.p is not cur req (~1000.p)~n",
-%%               [_Req_id, _Cur_req_id]),
     {noreply, State};
 
 handle_info({stream_close, _Req_id}, State) ->
@@ -234,7 +230,7 @@ handle_info({req_timedout, From}, State) ->
             {noreply, State};
         true ->
             shutting_down(State),
-            do_error_reply(State, req_timedout),
+%%            do_error_reply(State, req_timedout),
             {stop, normal, State}
     end;
 
@@ -357,7 +353,8 @@ accumulate_response(Data,
                                          tmp_file_fd = undefined} = CurReq,
                       http_status_code=[$2 | _]}=State) when Srtf /= false ->
     TmpFilename = make_tmp_filename(Srtf),
-    case file:open(TmpFilename, [write, delayed_write, raw]) of
+    Mode = file_mode(Srtf),
+    case file:open(TmpFilename, [Mode, delayed_write, raw]) of
         {ok, Fd} ->
             accumulate_response(Data, State#state{
                                         cur_req = CurReq#request{
@@ -434,8 +431,13 @@ make_tmp_filename(true) ->
                    integer_to_list(B) ++
                    integer_to_list(C)]);
 make_tmp_filename(File) when is_list(File) ->
+    File;
+make_tmp_filename({append, File}) when is_list(File) ->
     File.
 
+file_mode({append, _File}) -> append;
+file_mode(_Srtf) -> write.
+
 
 %%--------------------------------------------------------------------
 %% Handles the case when the server closes the socket
@@ -560,9 +562,13 @@ do_send_body(Body, State, _TE) ->
 
 do_send_body1(Source, Resp, State, TE) ->
     case Resp of
+                {ok, Data} when Data == []; Data == <<>> ->
+                        do_send_body({Source}, State, TE);
         {ok, Data} ->
             do_send(maybe_chunked_encode(Data, TE), State),
             do_send_body({Source}, State, TE);
+                {ok, Data, New_source_state} when Data == []; Data == <<>> ->
+                        do_send_body({Source, New_source_state}, State, TE);
         {ok, Data, New_source_state} ->
             do_send(maybe_chunked_encode(Data, TE), State),
             do_send_body({Source, New_source_state}, State, TE);
@@ -658,10 +664,17 @@ send_req_1(From,
                   proxy_tunnel_setup = false,
                   use_proxy = true,
                   is_ssl    = true} = State) ->
+    Ref = case Timeout of
+              infinity ->
+                  undefined;
+              _ ->
+                  erlang:send_after(Timeout, self(), {req_timedout, From})
+          end,
     NewReq = #request{
       method                    = connect,
       preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
-      options                   = Options
+      options                   = Options,
+      timer_ref                 = Ref
      },
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
@@ -677,17 +690,11 @@ send_req_1(From,
                 ok ->
                     trace_request_body(Body_1),
                     active_once(State_1),
-                    Ref = case Timeout of
-                              infinity ->
-                                  undefined;
-                              _ ->
-                                  erlang:send_after(Timeout, self(), {req_timedout, From})
-                          end,
-                    State_2 = State_1#state{status     = get_header,
-                                            cur_req    = NewReq,
-                                            send_timer = Ref,
-                                            proxy_tunnel_setup = in_progress,
-                                            tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
+                    State_1_1 = inc_pipeline_counter(State_1),
+                    State_2 = State_1_1#state{status     = get_header,
+                                              cur_req    = NewReq,
+                                              proxy_tunnel_setup = in_progress,
+                                              tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
                     State_3 = set_inac_timer(State_2),
                     {noreply, State_3};
                 Err ->
@@ -738,6 +745,12 @@ send_req_1(From,
                 exit({invalid_option, {stream_to, Stream_to_inv}})
         end,
     SaveResponseToFile = get_value(save_response_to_file, Options, false),
+    Ref = case Timeout of
+              infinity ->
+                  undefined;
+              _ ->
+                  erlang:send_after(Timeout, self(), {req_timedout, From})
+          end,
     NewReq = #request{url                    = Url,
                       method                 = Method,
                       stream_to              = StreamTo,
@@ -749,7 +762,8 @@ send_req_1(From,
                       stream_chunk_size      = get_stream_chunk_size(Options),
                       response_format        = Resp_format,
                       from                   = From,
-                      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false)
+                      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
+                      timer_ref              = Ref
                      },
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
@@ -767,19 +781,12 @@ send_req_1(From,
                     trace_request_body(Body_1),
                     State_2 = inc_pipeline_counter(State_1),
                     active_once(State_2),
-                    Ref = case Timeout of
-                              infinity ->
-                                  undefined;
-                              _ ->
-                                  erlang:send_after(Timeout, self(), {req_timedout, From})
-                          end,
                     State_3 = case Status of
                                   idle ->
                                       State_2#state{status     = get_header,
-                                                    cur_req    = NewReq,
-                                                    send_timer = Ref};
+                                                    cur_req    = NewReq};
                                   _ ->
-                                      State_2#state{send_timer = Ref}
+                                      State_2
                               end,
                     case StreamTo of
                         undefined ->
@@ -987,13 +994,17 @@ chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
 
 
-parse_response(_Data, #state{cur_req = undefined}=State) ->
+parse_response(<<>>, #state{cur_req = undefined}=State) ->
     State#state{status = idle};
+parse_response(Data, #state{cur_req = undefined}) ->
+    do_trace("Data left to process when no pending request. ~1000.p~n", [Data]),
+    {error, data_in_status_idle};
+
 parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
                             cur_req = CurReq} = State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
              method=Method, response_format = Resp_format,
-             options = Options
+             options = Options, timer_ref = T_ref
             } = CurReq,
     MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
     case scan_header(Acc, Data) of
@@ -1005,47 +1016,55 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
             LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
             ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
             IsClosing = is_connection_closing(HttpVsn, ConnClose),
-            case IsClosing of
-                true ->
-                    shutting_down(State);
-                false ->
-                    ok
-            end,
+            State_0 = case IsClosing of
+                          true ->
+                              shutting_down(State),
+                              State#state{is_closing = IsClosing};
+                          false ->
+                              State
+                      end,
             Give_raw_headers = get_value(give_raw_headers, Options, false),
             State_1 = case Give_raw_headers of
                           true ->
-                              State#state{recvd_headers=Headers_1, status=get_body,
-                                          reply_buffer = <<>>,
-                                          status_line = Status_line,
-                                          raw_headers = Raw_headers,
-                                          http_status_code=StatCode, is_closing=IsClosing};
+                              State_0#state{recvd_headers=Headers_1, status=get_body,
+                                            reply_buffer = <<>>,
+                                            status_line = Status_line,
+                                            raw_headers = Raw_headers,
+                                            http_status_code=StatCode};
                           false ->
-                              State#state{recvd_headers=Headers_1, status=get_body,
-                                          reply_buffer = <<>>,
-                                          http_status_code=StatCode, is_closing=IsClosing}
+                              State_0#state{recvd_headers=Headers_1, status=get_body,
+                                            reply_buffer = <<>>,
+                                            http_status_code=StatCode}
                       end,
             put(conn_close, ConnClose),
             TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+            Head_response_with_body = lists:member({workaround, head_response_with_body}, Options),
             case get_value("content-length", LCHeaders, undefined) of
                 _ when Method == connect,
                        hd(StatCode) == $2 ->
-                    cancel_timer(State#state.send_timer),
                     {_, Reqs_1} = queue:out(Reqs),
-                    upgrade_to_ssl(set_cur_request(State#state{reqs = Reqs_1,
-                                                               recvd_headers = [],
-                                                               status = idle
-                                                              }));
+                    cancel_timer(T_ref),
+                    upgrade_to_ssl(set_cur_request(State_0#state{reqs = Reqs_1,
+                                                                 recvd_headers = [],
+                                                                 status = idle
+                                                                }));
                 _ when Method == connect ->
                     {_, Reqs_1} = queue:out(Reqs),
                     do_error_reply(State#state{reqs = Reqs_1},
                                    {error, proxy_tunnel_failed}),
                     {error, proxy_tunnel_failed};
-                _ when Method == head ->
+                _ when Method =:= head,
+                       Head_response_with_body =:= false ->
+                    %% This (HEAD response with body) is not supposed
+                    %% to happen, but it does. An Apache server was
+                    %% observed to send an "empty" body, but in a
+                    %% Chunked-Transfer-Encoding way, which meant
+                    %% there was still a body.  Issue #67 on Github
                     {_, Reqs_1} = queue:out(Reqs),
                     send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
                     State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
                                          {ok, StatCode, Headers_1, []}),
-                    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+                    cancel_timer(T_ref, {eat_message, {req_timedout, From}}),
                     State_2 = reset_state(State_1_1),
                     State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
                     parse_response(Data_1, State_3);
@@ -1065,7 +1084,7 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
                     send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
                     State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
                                          {ok, StatCode, Headers_1, []}),
-                    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+                    cancel_timer(T_ref, {eat_message, {req_timedout, From}}),
                     State_2 = reset_state(State_1_1),
                     State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
                     parse_response(Data_1, State_3);
@@ -1084,7 +1103,7 @@ parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
                             State_2
                     end;
                 undefined when HttpVsn =:= "HTTP/1.0";
-                ConnClose =:= "close" ->
+                               ConnClose =:= "close" ->
                     send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
                     State_1#state{reply_buffer = Data_1};
                 undefined ->
@@ -1291,12 +1310,12 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                          save_response_to_file = SaveResponseToFile,
                          tmp_file_name = TmpFilename,
                          tmp_file_fd = Fd,
-                         options       = Options
+                         options       = Options,
+                         timer_ref     = ReqTimer
                         },
                 #state{http_status_code = SCode,
                        status_line   = Status_line,
                        raw_headers   = Raw_headers,
-                       send_timer    = ReqTimer,
                        reply_buffer  = RepBuf,
                        recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
@@ -1324,13 +1343,13 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
     set_cur_request(State_1);
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                          response_format = Resp_format,
-                         options = Options},
+                         options = Options, timer_ref = ReqTimer},
                 #state{http_status_code = SCode,
                        status_line      = Status_line,
                        raw_headers      = Raw_headers,
                        recvd_headers    = Resp_headers,
-                       reply_buffer     = RepBuf,
-                       send_timer       = ReqTimer} = State) ->
+                       reply_buffer     = RepBuf
+                      } = State) ->
     Body = RepBuf,
     {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
     Reply = case get_value(give_raw_headers, Options, false) of
@@ -1360,10 +1379,10 @@ reset_state(State) ->
                }.
 
 set_cur_request(#state{reqs = Reqs, socket = Socket} = State) ->
-    case queue:to_list(Reqs) of
-        [] ->
+    case queue:peek(Reqs) of
+        empty ->
             State#state{cur_req = undefined};
-        [#request{caller_controls_socket = Ccs} = NextReq | _] ->
+        {value, #request{caller_controls_socket = Ccs} = NextReq} ->
             case Ccs of
                 true ->
                     do_setopts(Socket, [{active, once}], State);
@@ -1410,6 +1429,11 @@ parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
     parse_headers_1(lists:dropwhile(fun(X) ->
                                             is_whitespace(X)
                                     end, T), [32 | L], Acc);
+parse_headers_1([$\n, H |T], L, Acc) when H =:= 32;
+                                          H =:= $\t ->
+    parse_headers_1(lists:dropwhile(fun(X) ->
+                                            is_whitespace(X)
+                                    end, T), [32 | L], Acc);
 parse_headers_1([$\n|T], [$\r | L], Acc) ->
     case parse_header(lists:reverse(L)) of
         invalid ->
@@ -1417,6 +1441,13 @@ parse_headers_1([$\n|T], [$\r | L], Acc) ->
         NewHeader ->
             parse_headers_1(T, [], [NewHeader | Acc])
     end;
+parse_headers_1([$\n|T], L, Acc) ->
+    case parse_header(lists:reverse(L)) of
+        invalid ->
+            parse_headers_1(T, [], Acc);
+        NewHeader ->
+            parse_headers_1(T, [], [NewHeader | Acc])
+    end;
 parse_headers_1([H|T],  L, Acc) ->
     parse_headers_1(T, [H|L], Acc);
 parse_headers_1([], [], Acc) ->
@@ -1458,10 +1489,13 @@ parse_header([], _) ->
     invalid.
 
 scan_header(Bin) ->
-    case get_crlf_crlf_pos(Bin) of
+    case get_crlf_crlf_pos(Bin, 0) of
         {yes, Pos} ->
             {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
             {yes, Headers, Body};
+        {yes_dodgy, Pos} ->
+            {Headers, <<_:2/binary, Body/binary>>} = split_binary(Bin, Pos),
+            {yes, Headers, Body};
         no ->
             {no, Bin}
     end.
@@ -1474,29 +1508,26 @@ scan_header(Bin1, Bin2) ->
     Bin1_already_scanned_size = size(Bin1) - 4,
     <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
     Bin_to_scan = <<Rest/binary, Bin2/binary>>,
-    case get_crlf_crlf_pos(Bin_to_scan) of
+    case get_crlf_crlf_pos(Bin_to_scan, 0) of
         {yes, Pos} ->
             {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
             {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+        {yes_dodgy, Pos} ->
+            {Headers_suffix, <<_:2/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+            {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
         no ->
             {no, <<Bin1/binary, Bin2/binary>>}
     end.
 
-get_crlf_crlf_pos(Data) ->
-    binary_bif_match(Data, <<$\r, $\n, $\r, $\n>>).
-
-binary_bif_match(Data, Binary) ->
-    case binary:match(Data, Binary) of
-    {Pos, _Len} ->
-        {yes, Pos};
-    _ -> no
-    end.
-
+get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_crlf_pos(<<$\n, $\n, _/binary>>, Pos)           -> {yes_dodgy, Pos};
+get_crlf_crlf_pos(<<_, Rest/binary>>, Pos)               -> get_crlf_crlf_pos(Rest, Pos + 1);
+get_crlf_crlf_pos(<<>>, _)                               -> no.
 
 scan_crlf(Bin) ->
     case get_crlf_pos(Bin) of
-        {yes, Pos} ->
-            {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+        {yes, Offset, Pos} ->
+            {Prefix, <<_:Offset/binary, Suffix/binary>>} = split_binary(Bin, Pos),
             {yes, Prefix, Suffix};
         no ->
             {no, Bin}
@@ -1513,16 +1544,20 @@ scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
     <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
     Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
     case get_crlf_pos(Bin3) of
-        {yes, Pos} ->
-            {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+        {yes, Offset, Pos} ->
+            {Prefix, <<_:Offset/binary, Suffix/binary>>} = split_binary(Bin3, Pos),
             {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
         no ->
             {no, list_to_binary([Bin1, Bin2])}
     end.
 
-get_crlf_pos(Data) ->
-    binary_bif_match(Data, <<$\r, $\n>>).
+get_crlf_pos(Bin) ->
+    get_crlf_pos(Bin, 0).
 
+get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, 2, Pos};
+get_crlf_pos(<<$\n, _/binary>>, Pos) ->      {yes, 1, Pos};
+get_crlf_pos(<<_, Rest/binary>>, Pos)     -> get_crlf_pos(Rest, Pos + 1);
+get_crlf_pos(<<>>, _)                     -> no.
 
 fmt_val(L) when is_list(L)    -> L;
 fmt_val(I) when is_integer(I) -> integer_to_list(I);
@@ -1531,21 +1566,36 @@ fmt_val(Term)                 -> io_lib:format("~p", [Term]).
 
 crnl() -> "\r\n".
 
-method(get)       -> "GET";
-method(post)      -> "POST";
-method(head)      -> "HEAD";
-method(options)   -> "OPTIONS";
-method(put)       -> "PUT";
-method(delete)    -> "DELETE";
-method(trace)     -> "TRACE";
-method(mkcol)     -> "MKCOL";
-method(propfind)  -> "PROPFIND";
-method(proppatch) -> "PROPPATCH";
-method(lock)      -> "LOCK";
-method(unlock)    -> "UNLOCK";
-method(move)      -> "MOVE";
-method(copy)      -> "COPY";
-method(connect)   -> "CONNECT".
+method(connect)     -> "CONNECT";
+method(delete)      -> "DELETE";
+method(get)         -> "GET";
+method(head)        -> "HEAD";
+method(options)     -> "OPTIONS";
+method(post)        -> "POST";
+method(put)         -> "PUT";
+method(trace)       -> "TRACE";
+%% webdav
+method(copy)        -> "COPY";
+method(lock)        -> "LOCK";
+method(mkcol)       -> "MKCOL";
+method(move)        -> "MOVE";
+method(propfind)    -> "PROPFIND";
+method(proppatch)   -> "PROPPATCH";
+method(search)      -> "SEARCH";
+method(unlock)      -> "UNLOCK";
+%% subversion %%
+method(report)      -> "REPORT";
+method(mkactivity)  -> "MKACTIVITY";
+method(checkout)    -> "CHECKOUT";
+method(merge)       -> "MERGE";
+%% upnp
+method(msearch)     -> "MSEARCH";
+method(notify)      -> "NOTIFY";
+method(subscribe)   -> "SUBSCRIBE";
+method(unsubscribe) -> "UNSUBSCRIBE";
+%% rfc-5789
+method(patch)       -> "PATCH";
+method(purge)       -> "PURGE".
 
 %% From RFC 2616
 %%
@@ -1768,22 +1818,34 @@ to_lower([], Acc) ->
 shutting_down(#state{lb_ets_tid = undefined}) ->
     ok;
 shutting_down(#state{lb_ets_tid = Tid,
-                     cur_pipeline_size = Sz}) ->
-    catch ets:delete(Tid, {Sz, self()}).
+                     cur_pipeline_size = _Sz}) ->
+    catch ets:delete(Tid, self()).
 
 inc_pipeline_counter(#state{is_closing = true} = State) ->
     State;
-inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz} = State) ->
+inc_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
+    State;
+inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
+                           lb_ets_tid = Tid} = State) ->
+    update_counter(Tid, self(), {2,1,99999,9999}),
     State#state{cur_pipeline_size = Pipe_sz + 1}.
 
+update_counter(Tid, Key, Args) ->
+    ets:update_counter(Tid, Key, Args).
+
 dec_pipeline_counter(#state{is_closing = true} = State) ->
     State;
 dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
     State;
 dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
                             lb_ets_tid = Tid} = State) ->
-    ets:delete(Tid, {Pipe_sz, self()}),
-    ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
+    try
+        update_counter(Tid, self(), {2,-1,0,0}),
+        update_counter(Tid, self(), {3,-1,0,0})
+    catch
+        _:_ ->
+            ok
+    end,
     State#state{cur_pipeline_size = Pipe_sz - 1}.
 
 flatten([H | _] = L) when is_integer(H) ->
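
The inc/dec changes above replace the old per-size ets keys with a single {Pid, CurPipelineSize, SpeculativeSize} row and bounded ets:update_counter/3 calls; the four-tuple update op {Pos, Incr, Threshold, SetValue} resets the counter to SetValue when an increment would cross Threshold, which is what keeps the decrement from ever going negative. A minimal stand-alone sketch, not part of the commit (table and module names are illustrative):

    -module(pipeline_counter_sketch).
    -export([demo/0]).

    demo() ->
        Tid = ets:new(lb_sketch, [set, public]),
        Me  = self(),
        %% one row per connection process: {Pid, CurPipelineSize, SpeculativeSize}
        true = ets:insert(Tid, {Me, 0, 0}),
        1 = ets:update_counter(Tid, Me, {2, 1, 99999, 9999}),  %% as in inc_pipeline_counter/1
        0 = ets:update_counter(Tid, Me, {2, -1, 0, 0}),        %% as in dec_pipeline_counter/1
        0 = ets:update_counter(Tid, Me, {2, -1, 0, 0}),        %% floored at 0 rather than going negative
        true = ets:delete(Tid),
        ok.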

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 0e001d4..d98cf32 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -36,7 +36,9 @@
 		port,
 		max_sessions,
 		max_pipeline_size,
-		num_cur_sessions = 0}).
+		num_cur_sessions = 0,
+                proc_state
+               }).
 
 -include("ibrowse.hrl").
 
@@ -104,14 +106,21 @@ stop(Lb_pid) ->
 %%          {stop, Reason, Reply, State}   | (terminate/2 is called)
 %%          {stop, Reason, State}            (terminate/2 is called)
 %%--------------------------------------------------------------------
-% handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
-% 	    #state{max_sessions = Max_sess,
-% 		   ets_tid = Tid,
-% 		   max_pipeline_size = Max_pipe_sz,
-% 		   num_cur_sessions = Num} = State) 
-%     when Num >= Max ->
-%     Reply = find_best_connection(Tid),
-%     {reply, sorry_dude_reuse, State};
+
+handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
+    gen_server:reply(_From, ok),
+    {stop, normal, State};
+
+handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
+    ets:foldl(fun({Pid, _, _}, Acc) ->
+                      ibrowse_http_client:stop(Pid),
+                      Acc
+              end, [], Tid),
+    gen_server:reply(_From, ok),
+    {stop, normal, State};
+
+handle_call(_, _From, #state{proc_state = shutting_down} = State) ->
+    {reply, {error, shutting_down}, State};
 
 %% Update max_sessions in #state with supplied value
 handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
@@ -119,27 +128,18 @@ handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
     when Num >= Max_sess ->
     State_1 = maybe_create_ets(State),
     Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
-    {reply, Reply, State_1#state{max_sessions = Max_sess}};
+    {reply, Reply, State_1#state{max_sessions = Max_sess,
+                                 max_pipeline_size = Max_pipe}};
 
-handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
+handle_call({spawn_connection, Url, Max_sess, Max_pipe, SSL_options}, _From,
 	    #state{num_cur_sessions = Cur} = State) ->
     State_1 = maybe_create_ets(State),
     Tid = State_1#state.ets_tid,
     {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
-    ets:insert(Tid, {{1, Pid}, []}),
-    {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1}};
-
-handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
-    gen_server:reply(_From, ok),
-    {stop, normal, State};
-
-handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
-    ets:foldl(fun({{_, Pid}, _}, Acc) ->
-                      ibrowse_http_client:stop(Pid),
-                      Acc
-              end, [], Tid),
-    gen_server:reply(_From, ok),
-    {stop, normal, State};
+    ets:insert(Tid, {Pid, 0, 0}),
+    {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1,
+                                     max_sessions = Max_sess,
+                                     max_pipeline_size = Max_pipe}};
 
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
@@ -173,14 +173,13 @@ handle_info({'EXIT', Pid, _Reason},
 		   ets_tid = Tid} = State) ->
     ets:match_delete(Tid, {{'_', Pid}, '_'}),
     Cur_1 = Cur - 1,
-    State_1 = case Cur_1 of
+    case Cur_1 of
 		  0 ->
 		      ets:delete(Tid),
-		      State#state{ets_tid = undefined};
+			  {noreply, State#state{ets_tid = undefined, num_cur_sessions = 0}, 10000};
 		  _ ->
-		      State
-	      end,
-    {noreply, State_1#state{num_cur_sessions = Cur_1}};
+		      {noreply, State#state{num_cur_sessions = Cur_1}}
+	      end;
 
 handle_info({trace, Bool}, #state{ets_tid = undefined} = State) ->
     put(my_trace_flag, Bool),
@@ -196,6 +195,18 @@ handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
     put(my_trace_flag, Bool),
     {noreply, State};
 
+handle_info(timeout, State) ->
+    %% We can't shutdown the process immediately because a request
+    %% might be in flight. So we first remove the entry from the
+    %% ibrowse_lb ets table, and then shutdown a couple of seconds
+    %% later
+    ets:delete(ibrowse_lb, {State#state.host, State#state.port}),
+    erlang:send_after(2000, self(), shutdown),
+    {noreply, State#state{proc_state = shutting_down}};
+
+handle_info(shutdown, State) ->
+    {stop, normal, State};
+
 handle_info(_Info, State) ->
     {noreply, State}.
 
@@ -219,13 +230,19 @@ code_change(_OldVsn, State, _Extra) ->
 %%% Internal functions
 %%--------------------------------------------------------------------
 find_best_connection(Tid, Max_pipe) ->
-    case ets:first(Tid) of
-	{Cur_sz, Pid} when Cur_sz < Max_pipe ->
-	    ets:delete(Tid, {Cur_sz, Pid}),
-	    ets:insert(Tid, {{Cur_sz + 1, Pid}, []}),
-	    {ok, Pid};
-	_ ->
-	    {error, retry_later}
+    Res = find_best_connection(ets:first(Tid), Tid, Max_pipe),
+    Res.
+
+find_best_connection('$end_of_table', _, _) ->
+    {error, retry_later};
+find_best_connection(Pid, Tid, Max_pipe) ->
+    case ets:lookup(Tid, Pid) of
+        [{Pid, Cur_sz, Speculative_sz}] when Cur_sz < Max_pipe,
+                                             Speculative_sz < Max_pipe ->
+            ets:update_counter(Tid, Pid, {3, 1, 9999999, 9999999}),
+            {ok, Pid};
+        _ ->
+            find_best_connection(ets:next(Tid, Pid), Tid, Max_pipe)
     end.
 
 maybe_create_ets(#state{ets_tid = undefined} = State) ->
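
With the new table layout, the load balancer keeps one {Pid, CurSize, SpeculativeSize} row per connection and walks the table with ets:first/ets:next until it finds a connection whose real and speculative pipeline sizes are both under the limit, bumping the speculative column as a reservation. A stand-alone sketch of that walk against a throwaway table (module and helper names are illustrative, not part of the commit):

    -module(lb_pick_sketch).
    -export([demo/0]).

    pick('$end_of_table', _Tid, _MaxPipe) ->
        {error, retry_later};
    pick(Pid, Tid, MaxPipe) ->
        case ets:lookup(Tid, Pid) of
            [{Pid, CurSz, SpecSz}] when CurSz < MaxPipe, SpecSz < MaxPipe ->
                %% reserve a slot speculatively (column 3) before handing out the pid
                ets:update_counter(Tid, Pid, {3, 1, 9999999, 9999999}),
                {ok, Pid};
            _ ->
                pick(ets:next(Tid, Pid), Tid, MaxPipe)
        end.

    demo() ->
        Tid  = ets:new(lb_sketch, [set, public]),
        Busy = spawn(fun() -> receive stop -> ok end end),
        Idle = spawn(fun() -> receive stop -> ok end end),
        true = ets:insert(Tid, [{Busy, 10, 10}, {Idle, 0, 0}]),
        {ok, Idle} = pick(ets:first(Tid), Tid, 10),  %% the saturated connection is skipped
        Busy ! stop, Idle ! stop,
        true = ets:delete(Tid),
        ok.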

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 3cbe3ac..1ce6bd4 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -12,6 +12,10 @@
 
 -include("ibrowse.hrl").
 
+-ifdef(EUNIT).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
 -export([
          get_trace_status/2,
          do_trace/2,
@@ -180,18 +184,24 @@ get_value(Tag, TVL) ->
     V.
 
 parse_url(Url) ->
-    case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
-        #url{host_type = undefined, host = Host} = UrlRec ->
-            case inet_parse:address(Host) of
-                {ok, {_, _, _, _, _, _, _, _}} ->
-                    UrlRec#url{host_type = ipv6_address};
-                {ok, {_, _, _, _}} ->
-                    UrlRec#url{host_type = ipv4_address};
-                _ ->
-                    UrlRec#url{host_type = hostname}
-            end;
-        Else ->
-            Else
+    try
+        case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
+            #url{host_type = undefined, host = Host} = UrlRec ->
+                case inet_parse:address(Host) of
+                    {ok, {_, _, _, _, _, _, _, _}} ->
+                        UrlRec#url{host_type = ipv6_address};
+                    {ok, {_, _, _, _}} ->
+                        UrlRec#url{host_type = ipv4_address};
+                    _ ->
+                        UrlRec#url{host_type = hostname}
+                end;
+            #url{} = UrlRec ->
+                UrlRec;
+            _ ->
+                {error, invalid_uri}
+        end
+    catch _:_ ->
+            {error, invalid_uri}
     end.
 
 parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
@@ -389,3 +399,43 @@ do_trace(true, Fmt, Args) ->
 do_trace(_, _, _) ->
     ok.
 -endif.
+
+-ifdef(EUNIT).
+
+parse_url_test() ->
+    Urls = [{"http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html",
+             #url{abspath = "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html",
+                  host = "FEDC:BA98:7654:3210:FEDC:BA98:7654:3210",
+                  port = 80, protocol = http, path = "/index.html",
+                  host_type = ipv6_address}},
+            {"http://[1080:0:0:0:8:800:200C:417A]/index.html",
+             #url{abspath = "http://[1080:0:0:0:8:800:200C:417A]/index.html",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "1080:0:0:0:8:800:200C:417A", path = "/index.html"}},
+            {"http://[3ffe:2a00:100:7031::1]",
+             #url{abspath = "http://[3ffe:2a00:100:7031::1]",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "3ffe:2a00:100:7031::1", path = "/"}},
+            {"http://[1080::8:800:200C:417A]/foo",
+             #url{abspath = "http://[1080::8:800:200C:417A]/foo",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "1080::8:800:200C:417A", path = "/foo"}},
+            {"http://[::192.9.5.5]/ipng",
+             #url{abspath = "http://[::192.9.5.5]/ipng",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "::192.9.5.5", path = "/ipng"}},
+            {"http://[::FFFF:129.144.52.38]:80/index.html",
+             #url{abspath = "http://[::FFFF:129.144.52.38]:80/index.html",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "::FFFF:129.144.52.38", path = "/index.html"}},
+            {"http://[2010:836B:4179::836B:4179]",
+             #url{abspath = "http://[2010:836B:4179::836B:4179]",
+                  host_type = ipv6_address, port = 80, protocol = http,
+                  host = "2010:836B:4179::836B:4179", path = "/"}}
+           ],
+    lists:foreach(
+      fun({Url, Expected_result}) ->
+              ?assertMatch(Expected_result, parse_url(Url))
+      end, Urls).
+
+-endif.
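
parse_url/1 is now wrapped in try/catch, so malformed input comes back as {error, invalid_uri} instead of an exception, and the IPv6 literal cases are pinned down by the EUnit test above. The test code only exists when the EUNIT macro is defined; a quick way to run it from an Erlang shell, assuming ibrowse_lib.erl and ibrowse.hrl are in the current directory:

    1> c(ibrowse_lib, [{d, 'EUNIT'}]).   %% compile the guarded test code in
    {ok,ibrowse_lib}
    2> eunit:test(ibrowse_lib).          %% runs parse_url_test/0
      Test passed.
    ok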

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/50ee48dd/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index ff3b530..d97f76c 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -20,7 +20,14 @@
 	 test_stream_once/3,
 	 test_stream_once/4,
          test_20122010/0,
-         test_20122010/1
+         test_20122010/1,
+         test_pipeline_head_timeout/0,
+         test_pipeline_head_timeout/1,
+         do_test_pipeline_head_timeout/4,
+         test_head_transfer_encoding/0,
+         test_head_transfer_encoding/1,
+         test_head_response_with_body/0,
+         test_head_response_with_body/1
 	]).
 
 test_stream_once(Url, Method, Options) ->
@@ -81,7 +88,7 @@ send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
     log_msg("Starting spawning of workers...~n", []),
     spawn_workers(Url, NumWorkers, NumReqsPerWorker),
     log_msg("Finished spawning workers...~n", []),
-    do_wait(),
+    do_wait(Url),
     End_time = now(),
     log_msg("All workers are done...~n", []),
     log_msg("ibrowse_test_results table: ~n~p~n", [ets:tab2list(ibrowse_test_results)]),
@@ -111,24 +118,28 @@ spawn_workers(Url, NumWorkers, NumReqsPerWorker) ->
     ets:insert(pid_table, {Pid, []}),
     spawn_workers(Url, NumWorkers - 1, NumReqsPerWorker).
 
-do_wait() ->
+do_wait(Url) ->
     receive
 	{'EXIT', _, normal} ->
-	    do_wait();
+            catch ibrowse:show_dest_status(Url),
+            catch ibrowse:show_dest_status(),
+	    do_wait(Url);
 	{'EXIT', Pid, Reason} ->
 	    ets:delete(pid_table, Pid),
 	    ets:insert(ibrowse_errors, {Pid, Reason}),
 	    ets:update_counter(ibrowse_test_results, crash, 1),
-	    do_wait();
+	    do_wait(Url);
 	Msg ->
 	    io:format("Recvd unknown message...~p~n", [Msg]),
-	    do_wait()
+	    do_wait(Url)
     after 1000 ->
 	    case ets:info(pid_table, size) of
 		0 ->
 		    done;
 		_ ->
-		    do_wait()
+                    catch ibrowse:show_dest_status(Url),
+                    catch ibrowse:show_dest_status(),
+		    do_wait(Url)
 	    end
     end.
 
@@ -219,7 +230,10 @@ dump_errors(Key, Iod) ->
 		    {"http://jigsaw.w3.org/HTTP/CL/", get},
 		    {"http://www.httpwatch.com/httpgallery/chunked/", get},
                     {"https://github.com", get, [{ssl_options, [{depth, 2}]}]},
-                    {local_test_fun, test_20122010, []}
+                    {local_test_fun, test_20122010, []},
+                    {local_test_fun, test_pipeline_head_timeout, []},
+                    {local_test_fun, test_head_transfer_encoding, []},
+                    {local_test_fun, test_head_response_with_body, []}
 		   ]).
 
 unit_tests() ->
@@ -232,16 +246,19 @@ unit_tests(Options) ->
     (catch ibrowse_test_server:start_server(8181, tcp)),
     ibrowse:start(),
     Options_1 = Options ++ [{connect_timeout, 5000}],
+    Test_timeout = proplists:get_value(test_timeout, Options, 60000),
     {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
     receive 
 	{done, Pid} ->
 	    ok;
 	{'DOWN', Ref, _, _, Info} ->
 	    io:format("Test process crashed: ~p~n", [Info])
-    after 60000 ->
+    after Test_timeout ->
 	    exit(Pid, kill),
 	    io:format("Timed out waiting for tests to complete~n", [])
-    end.
+    end,
+    catch ibrowse_test_server:stop_server(8181),
+    ok.
 
 unit_tests_1(Parent, Options) ->
     lists:foreach(fun({local_test_fun, Fun_name, Args}) ->
@@ -426,6 +443,101 @@ log_msg(Fmt, Args) ->
 	      [ibrowse_lib:printable_date() | Args]).
 
 %%------------------------------------------------------------------------------
+%% Test what happens when the response to a HEAD request is a
+%% Chunked-Encoding response with a non-empty body. Issue #67 on
+%% Github
+%% ------------------------------------------------------------------------------
+test_head_transfer_encoding() ->
+    clear_msg_q(),
+    test_head_transfer_encoding("http://localhost:8181/ibrowse_head_test").
+
+test_head_transfer_encoding(Url) ->
+    case ibrowse:send_req(Url, [], head) of
+        {ok, "200", _, _} ->
+            success;
+        Res ->
+            {test_failed, Res}
+    end.
+
+%%------------------------------------------------------------------------------
+%% Test what happens when the response to a HEAD request is a
+%% Chunked-Encoding response with a non-empty body. Issue #67 on
+%% Github
+%% ------------------------------------------------------------------------------
+test_head_response_with_body() ->
+    clear_msg_q(),
+    test_head_response_with_body("http://localhost:8181/ibrowse_head_transfer_enc").
+
+test_head_response_with_body(Url) ->
+    case ibrowse:send_req(Url, [], head, [], [{workaround, head_response_with_body}]) of
+        {ok, "400", _, _} ->
+            success;
+        Res ->
+            {test_failed, Res}
+    end.
+
+%%------------------------------------------------------------------------------
+%% Test what happens when the request at the head of a pipeline times out
+%%------------------------------------------------------------------------------
+test_pipeline_head_timeout() ->
+    clear_msg_q(),
+    test_pipeline_head_timeout("http://localhost:8181/ibrowse_inac_timeout_test").
+
+test_pipeline_head_timeout(Url) ->
+    {ok, Pid} = ibrowse:spawn_worker_process(Url),
+    Test_parent = self(),
+    Fun = fun({fixed, Timeout}) ->
+                  spawn(fun() ->
+                                do_test_pipeline_head_timeout(Url, Pid, Test_parent, Timeout)
+                        end);
+             (Timeout_mult) ->
+                  spawn(fun() ->
+                                Timeout = 1000 + Timeout_mult*1000,
+                                do_test_pipeline_head_timeout(Url, Pid, Test_parent, Timeout)
+                        end)
+          end,
+    Pids = [Fun(X) || X <- [{fixed, 32000} | lists:seq(1,10)]],
+    Result = accumulate_worker_resp(Pids),
+    case lists:all(fun({_, X_res}) ->
+                           X_res == {error,req_timedout}
+                   end, Result) of
+        true ->
+            success;
+        false ->
+            {test_failed, Result}
+    end.
+
+do_test_pipeline_head_timeout(Url, Pid, Test_parent, Req_timeout) ->
+    Resp = ibrowse:send_req_direct(
+                                 Pid,
+                                 Url,
+                                 [], get, [],
+                                 [{socket_options,[{keepalive,true}]},
+                                  {inactivity_timeout,180000},
+                                  {connect_timeout,180000}], Req_timeout),
+    Test_parent ! {self(), Resp}.
+
+accumulate_worker_resp(Pids) ->
+    accumulate_worker_resp(Pids, []).
+
+accumulate_worker_resp([_ | _] = Pids, Acc) ->
+    receive
+        {Pid, Res} when is_pid(Pid) ->
+            accumulate_worker_resp(Pids -- [Pid], [{Pid, Res} | Acc]);
+        Err ->
+            io:format("Received unexpected: ~p~n", [Err])
+    end;
+accumulate_worker_resp([], Acc) ->
+    lists:reverse(Acc).
+
+clear_msg_q() ->
+    receive
+        _ ->
+            clear_msg_q()
+    after 0 ->
+            ok
+    end.
+%%------------------------------------------------------------------------------
 %% 
 %%------------------------------------------------------------------------------
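
The new tests above all pin their requests to a single connection with ibrowse:spawn_worker_process/1 and ibrowse:send_req_direct/7, which is what makes a timeout at the head of the pipeline observable by the requests queued behind it. The pattern, condensed (URL, options and timeout are placeholders; ibrowse must be started):

    {ok, Conn} = ibrowse:spawn_worker_process("http://localhost:8181/"),
    %% every request sent via send_req_direct/7 shares Conn's pipeline
    Reply = ibrowse:send_req_direct(Conn, "http://localhost:8181/some_path",
                                    [], get, [],
                                    [{socket_options, [{keepalive, true}]}], 5000).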
 


[10/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
update ibrowse to 1.5.1+

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@795278 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/42034396
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/42034396
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/42034396

Branch: refs/heads/import-master
Commit: 420343967fec874fb7b980f5ba9de86e80620ecb
Parents: 1eba7c7
Author: Adam Kocoloski <ko...@apache.org>
Authored: Fri Jul 17 23:58:35 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Fri Jul 17 23:58:35 2009 +0000

----------------------------------------------------------------------
 ibrowse.app             |  2 +-
 ibrowse.erl             | 28 +++++++++++++++++++---------
 ibrowse_http_client.erl |  2 +-
 ibrowse_lb.erl          |  8 +++++++-
 ibrowse_lib.erl         | 12 ++++++------
 5 files changed, 34 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/42034396/ibrowse.app
----------------------------------------------------------------------
diff --git a/ibrowse.app b/ibrowse.app
index a3d23ae..4f43dd9 100644
--- a/ibrowse.app
+++ b/ibrowse.app
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "1.5.0"},
+         {vsn, "1.5.1"},
          {modules, [ ibrowse,
 		     ibrowse_http_client,
 		     ibrowse_app,

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/42034396/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 56f0ef4..81fc74d 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2009 Chandrashekhar Mullaparthi
-%% @version 1.5.0
+%% @version 1.5.1
 %% @doc The ibrowse application implements an HTTP 1.1 client. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -152,7 +152,7 @@ stop() ->
 %% headerName() = string()
 %% headerValue() = string()
 %% response() = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, req_id() } | {error, Reason}
-%% req_id = term()
+%% req_id() = term()
 %% ResponseBody = string() | {file, Filename}
 %% Reason = term()
 send_req(Url, Headers, Method) ->
@@ -169,7 +169,7 @@ send_req(Url, Headers, Method, Body) ->
     send_req(Url, Headers, Method, Body, []).
 
 %% @doc Same as send_req/4.
-%% For a description of SSL Options, look in the ssl manpage. If the
+%% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
 %% HTTP Version to use is not specified, the default is 1.1.
 %% <br/>
 %% <p>The <code>host_header</code> option is useful in the case where ibrowse is
@@ -181,7 +181,15 @@ send_req(Url, Headers, Method, Body) ->
 %% used to specify what should go in the <code>Host</code> header in
 %% the request.</p>
 %% <ul>
-%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
+%% <li>The <code>stream_to</code> option can be used to have the HTTP
+%% response streamed to a process as messages as data arrives on the
+%% socket. If the calling process wishes to control the rate at which
+%% data is received from the server, the option <code>{stream_to,
+%% {process(), once}}</code> can be specified. The calling process
+%% will have to invoke <code>ibrowse:stream_next(Request_id)</code> to
+%% receive the next packet.</li>
+%%
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code> 
 %% are specified, the former takes precedence.</li>
 %%
 %% <li>For the <code>save_response_to_file</code> option, the response body is saved to
@@ -239,13 +247,14 @@ send_req(Url, Headers, Method, Body) ->
 %%          {content_length, integer()}        |
 %%          {content_type, string()}           |
 %%          {save_response_to_file, srtf()}    |
-%%          {stream_to, process()}             |
+%%          {stream_to, stream_to()}           |
 %%          {http_vsn, {MajorVsn, MinorVsn}}   |
 %%          {host_header, string()}            |
 %%          {inactivity_timeout, integer()}    |
 %%          {connect_timeout, integer()}       |
 %%          {transfer_encoding, {chunked, ChunkSize}}
 %%
+%% stream_to() = process() | {process(), once}
 %% process() = pid() | atom()
 %% username() = string()
 %% password() = string()
@@ -363,10 +372,11 @@ do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
 	    Ret
     end.
 
-ensure_bin(L) when is_list(L) ->
-    list_to_binary(L);
-ensure_bin(B) when is_binary(B) ->
-    B.
+ensure_bin(L) when is_list(L)                     -> list_to_binary(L);
+ensure_bin(B) when is_binary(B)                   -> B;
+ensure_bin(Fun) when is_function(Fun)             -> Fun;
+ensure_bin({Fun}) when is_function(Fun)           -> Fun;
+ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
 
 %% @doc Creates a HTTP client process to the specified Host:Port which
 %% is not part of the load balancing pool. This is useful in cases
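
The expanded edoc above describes caller-controlled streaming: with {stream_to, {Pid, once}} the connection process delivers one packet and then waits for ibrowse:stream_next(Req_id) before reading more from the socket. A minimal happy-path sketch of a consumer (module name is illustrative; assumes ibrowse is started):

    -module(stream_once_sketch).
    -export([fetch/1]).

    fetch(Url) ->
        {ibrowse_req_id, Req_id} =
            ibrowse:send_req(Url, [], get, [], [{stream_to, {self(), once}}]),
        loop(Req_id).

    loop(Req_id) ->
        receive
            {ibrowse_async_headers, Req_id, Status, Headers} ->
                io:format("status ~s, ~p headers~n", [Status, length(Headers)]),
                ibrowse:stream_next(Req_id),
                loop(Req_id);
            {ibrowse_async_response, Req_id, Body_part} ->
                io:format("got ~p more bytes~n", [iolist_size(Body_part)]),
                ibrowse:stream_next(Req_id),
                loop(Req_id);
            {ibrowse_async_response_end, Req_id} ->
                done
        end.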

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/42034396/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 43aa51f..6a26dd4 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -388,7 +388,7 @@ handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC
 	    case TmpFilename of
 		undefined ->
 		    do_reply(State, From, StreamTo, ReqId, Resp_format,
-			     {ok, SC, Headers, lists:reverse(Buf)});
+			     {ok, SC, Headers, Buf});
 		_ ->
 		    file:close(Fd),
 		    do_reply(State, From, StreamTo, ReqId, Resp_format,

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/42034396/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index b0654b7..9c2165b 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -151,7 +151,13 @@ handle_info({'EXIT', Pid, _Reason},
     ets:match_delete(Tid, {{'_', Pid}, '_'}),
     {noreply, State#state{num_cur_sessions = Cur - 1}};
 
-handle_info({trace, Bool}, State) ->
+handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
+    ets:foldl(fun({{_, Pid}, _}, Acc) when is_pid(Pid) ->
+		      catch Pid ! {trace, Bool},
+		      Acc;
+		 (_, Acc) ->
+		      Acc
+	      end, undefined, Tid),
     put(my_trace_flag, Bool),
     {noreply, State};
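
The foldl above makes the load balancer forward the {trace, Bool} message to each of its live connection processes instead of only recording the flag locally. Usage is unchanged; a sketch assuming the usual ibrowse trace API:

    ibrowse:trace_on("www.example.com", 80),    %% per-destination tracing on
    ibrowse:trace_off("www.example.com", 80).   %% and off again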
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/42034396/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 7567a6a..6c7b154 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -49,7 +49,7 @@ drv_ue(Str, Port) ->
 %% @spec url_encode(Str) -> UrlEncodedStr
 %% Str = string()
 %% UrlEncodedStr = string()
-url_encode(Str) when list(Str) ->
+url_encode(Str) when is_list(Str) ->
     url_encode_char(lists:reverse(Str), []).
 
 url_encode_char([X | T], Acc) when X >= $0, X =< $9 ->
@@ -70,7 +70,7 @@ url_encode_char([], Acc) ->
 d2h(N) when N<10 -> N+$0;
 d2h(N) -> N+$a-10.
 
-decode_rfc822_date(String) when list(String) ->
+decode_rfc822_date(String) when is_list(String) ->
     case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
 	{'EXIT', _} ->
 	    {error, invalid_date};
@@ -177,9 +177,9 @@ dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
 %% @spec encode_base64(In) -> Out
 %% In = string() | binary()
 %% Out = string() | binary()
-encode_base64(List) when list(List) ->
+encode_base64(List) when is_list(List) ->
     encode_base64_1(list_to_binary(List));
-encode_base64(Bin) when binary(Bin) ->
+encode_base64(Bin) when is_binary(Bin) ->
     List = encode_base64_1(Bin),
     list_to_binary(List).
 
@@ -197,9 +197,9 @@ encode_base64_1(<<>>) ->
 %% @spec decode_base64(In) -> Out | exit({error, invalid_input})
 %% In = string() | binary()
 %% Out = string() | binary()
-decode_base64(List) when list(List) ->
+decode_base64(List) when is_list(List) ->
     decode_base64_1(List, []);
-decode_base64(Bin) when binary(Bin) ->
+decode_base64(Bin) when is_binary(Bin) ->
     List = decode_base64_1(binary_to_list(Bin), []),
     list_to_binary(List).
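
The guard changes above are mechanical: the old-style type tests (list/1, binary/1) are deprecated in favour of is_list/1 and is_binary/1, and the helpers behave as before. For example, round-tripping a basic-auth style credential (values shown are what the unchanged encoder and decoder produce for string and binary input respectively):

    1> ibrowse_lib:encode_base64("user:secret").
    "dXNlcjpzZWNyZXQ="
    2> ibrowse_lib:decode_base64(<<"dXNlcjpzZWNyZXQ=">>).
    <<"user:secret">>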
 


[21/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Upgrading ibrowse from version 1.6.2 to 2.0.1.
This version fixes a serious issue regarding streaming of chunked HTTP(S) responses.
The issue is that the client occasionally gets blocked or receives a timeout (if the inactivity_timeout parameter is given to ibrowse).


This fixes part of ticket COUCHDB-491.




git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1000880 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/db7f9033
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/db7f9033
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/db7f9033

Branch: refs/heads/import-master
Commit: db7f9033de9cb2fc54d767b855e358bb422585a8
Parents: cb51bb1
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Fri Sep 24 14:18:56 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Fri Sep 24 14:18:56 2010 +0000

----------------------------------------------------------------------
 Makefile.am             |   2 +-
 ibrowse.app.in          |   2 +-
 ibrowse.erl             |  78 ++++++++++++---
 ibrowse_http_client.erl | 227 ++++++++++++++++++++++++++++---------------
 ibrowse_lb.erl          |  23 ++++-
 ibrowse_test.erl        |  51 ++++++++--
 6 files changed, 279 insertions(+), 104 deletions(-)
----------------------------------------------------------------------
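
The kind of call affected by the bug described in this commit message is a streamed, chunked response consumed with caller flow control and an inactivity timeout, for example (URL and timeout values are placeholders):

    ibrowse:send_req("http://127.0.0.1:5984/db/_changes?feed=continuous",
                     [], get, [],
                     [{stream_to, {self(), once}},
                      {inactivity_timeout, 30000}],
                     infinity).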


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index b517486..39878f0 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-1.6.2/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.0.1/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index 208c311..8fc2066 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "1.6.2"},
+         {vsn, "2.0.1"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 09d36a3..7f8d8bc 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2010 Chandrashekhar Mullaparthi
-%% @version 1.6.0
+%% @version 2.0.1
 %% @doc The ibrowse application implements an HTTP 1.1 client. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -236,6 +236,11 @@ send_req(Url, Headers, Method, Body) ->
 %% caller to get access to the raw status line and raw unparsed
 %% headers. Not quite sure why someone would want this, but one of my
 %% users asked for it, so here it is. </li>
+%%
+%% <li> The <code>preserve_chunked_encoding</code> option enables the caller
+%% to receive the raw data stream when the Transfer-Encoding of the server
+%% response is Chunked.
+%% </li>
 %% </ul>
 %%
 %% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
@@ -266,7 +271,8 @@ send_req(Url, Headers, Method, Body) ->
 %%          {socket_options, Sock_opts}        |
 %%          {transfer_encoding, {chunked, ChunkSize}} | 
 %%          {headers_as_is, boolean()}         |
-%%          {give_raw_headers, boolean()}
+%%          {give_raw_headers, boolean()}      |
+%%          {preserve_chunked_encoding,boolean()}
 %%
 %% stream_to() = process() | {process(), once}
 %% process() = pid() | atom()
@@ -302,23 +308,45 @@ send_req(Url, Headers, Method, Body, Options, Timeout) ->
             Options_1 = merge_options(Host, Port, Options),
             {SSLOptions, IsSSL} =
                 case (Protocol == https) orelse
-                     get_value(is_ssl, Options_1, false) of
+                    get_value(is_ssl, Options_1, false) of
                     false -> {[], false};
                     true -> {get_value(ssl_options, Options_1, []), true}
                 end,
-            case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+            try_routing_request(Lb_pid, Parsed_url,
+                                Max_sessions, 
+                                Max_pipeline_size,
+                                {SSLOptions, IsSSL}, 
+                                Headers, Method, Body, Options_1, Timeout, 0);
+        Err ->
+            {error, {url_parsing_failed, Err}}
+    end.
+
+try_routing_request(Lb_pid, Parsed_url,
+                    Max_sessions, 
+                    Max_pipeline_size,
+                    {SSLOptions, IsSSL}, 
+                    Headers, Method, Body, Options_1, Timeout, Try_count) when Try_count < 3 ->
+    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
                                              Max_sessions, 
                                              Max_pipeline_size,
                                              {SSLOptions, IsSSL}) of
-                {ok, Conn_Pid} ->
-                    do_send_req(Conn_Pid, Parsed_url, Headers,
-                                Method, Body, Options_1, Timeout);
-                Err ->
-                    Err
+        {ok, Conn_Pid} ->
+            case do_send_req(Conn_Pid, Parsed_url, Headers,
+                             Method, Body, Options_1, Timeout) of
+                {error, sel_conn_closed} ->
+                    try_routing_request(Lb_pid, Parsed_url,
+                                        Max_sessions, 
+                                        Max_pipeline_size,
+                                        {SSLOptions, IsSSL}, 
+                                        Headers, Method, Body, Options_1, Timeout, Try_count + 1);
+                Res ->
+                    Res
             end;
         Err ->
-            {error, {url_parsing_failed, Err}}
-    end.
+            Err
+    end;
+try_routing_request(_, _, _, _, _, _, _, _, _, _, _) ->
+    {error, retry_later}.
 
 merge_options(Host, Port, Options) ->
     Config_options = get_config_value({options, Host, Port}, []),
@@ -337,11 +365,27 @@ get_lb_pid(Url) ->
 
 get_max_sessions(Host, Port, Options) ->
     get_value(max_sessions, Options,
-              get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
+              get_config_value({max_sessions, Host, Port},
+                               default_max_sessions())).
 
 get_max_pipeline_size(Host, Port, Options) ->
     get_value(max_pipeline_size, Options,
-              get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
+              get_config_value({max_pipeline_size, Host, Port},
+                               default_max_pipeline_size())).
+
+default_max_sessions() ->
+    safe_get_env(ibrowse, default_max_sessions, ?DEF_MAX_SESSIONS).
+
+default_max_pipeline_size() ->
+    safe_get_env(ibrowse, default_max_pipeline_size, ?DEF_MAX_PIPELINE_SIZE).
+
+safe_get_env(App, Key, Def_val) ->
+    case application:get_env(App, Key) of
+        undefined ->
+            Def_val;
+        {ok, Val} ->
+            Val
+    end.
 
 %% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
 %% for achieving the same effect.
@@ -375,6 +419,10 @@ do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
                                             Options, Timeout) of
         {'EXIT', {timeout, _}} ->
             {error, req_timedout};
+        {'EXIT', {noproc, {gen_server, call, [Conn_Pid, _, _]}}} ->
+            {error, sel_conn_closed};
+        {error, connection_closed} ->
+            {error, sel_conn_closed};
         {'EXIT', Reason} ->
             {error, {'EXIT', Reason}};
         {ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
@@ -684,6 +732,10 @@ handle_call({get_lb_pid, #url{host = Host, port = Port} = Url}, _From, State) ->
 
 handle_call(stop, _From, State) ->
     do_trace("IBROWSE shutting down~n", []),
+    ets:foldl(fun(#lb_pid{pid = Pid}, Acc) ->
+                      ibrowse_lb:stop(Pid),
+                      Acc
+              end, [], ibrowse_lb),
     {stop, normal, ok, State};
 
 handle_call({set_config_value, Key, Val}, _From, State) ->
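
Two behavioural additions in this hunk: try_routing_request/11 retries a request on a fresh connection (up to three attempts) when it gets {error, sel_conn_closed}, and the pool-wide defaults can now come from the ibrowse application environment via safe_get_env/3 instead of being hard-coded. A sketch of setting those defaults (values are illustrative only):

    application:set_env(ibrowse, default_max_sessions, 100),
    application:set_env(ibrowse, default_max_pipeline_size, 1),
    %% a per-destination override still wins over the application defaults
    ibrowse:set_max_sessions("couchdb.example.org", 5984, 20).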

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 1633e5b..16d9b87 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -47,7 +47,8 @@
                 status_line, raw_headers, 
                 is_closing, send_timer, content_length,
                 deleted_crlf = false, transfer_encoding,
-                chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
+                chunk_size, chunk_size_buffer = <<>>,
+                recvd_chunk_size, interim_reply_sent = false,
                 lb_ets_tid, cur_pipeline_size = 0, prev_req_id
                }).
 
@@ -57,7 +58,7 @@
                   req_id,
                   stream_chunk_size,
                   save_response_to_file = false, 
-                  tmp_file_name, tmp_file_fd,
+                  tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
                   response_format}).
 
 -import(ibrowse_lib, [
@@ -82,8 +83,13 @@ start_link(Args) ->
     gen_server:start_link(?MODULE, Args, []).
 
 stop(Conn_pid) ->
-    catch gen_server:call(Conn_pid, stop),
-    ok.
+    case catch gen_server:call(Conn_pid, stop) of
+        {'EXIT', {timeout, _}} ->
+            exit(Conn_pid, kill),
+            ok;
+        _ ->
+            ok
+    end.
 
 send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
     gen_server:call(
@@ -171,6 +177,7 @@ handle_cast(_Msg, State) ->
 %%          {stop, Reason, State}            (terminate/2 is called)
 %%--------------------------------------------------------------------
 handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
+%%    io:format("Recvd data: ~p~n", [Data]),
     do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
     handle_sock_data(Data, State);
 handle_info({ssl, _Sock, Data}, State) ->
@@ -178,13 +185,14 @@ handle_info({ssl, _Sock, Data}, State) ->
 
 handle_info({stream_next, Req_id}, #state{socket = Socket,
                                           cur_req = #request{req_id = Req_id}} = State) ->
+    %% io:format("Client process set {active, once}~n", []),
     do_setopts(Socket, [{active, once}], State),
     {noreply, State};
 
 handle_info({stream_next, _Req_id}, State) ->
     {noreply, State};
 
-handle_info({tcp_closed, _Sock}, State) ->
+handle_info({tcp_closed, _Sock}, State) ->    
     do_trace("TCP connection closed by peer!~n", []),
     handle_sock_closed(State),
     {stop, normal, State};
@@ -194,11 +202,11 @@ handle_info({ssl_closed, _Sock}, State) ->
     {stop, normal, State};
 
 handle_info({tcp_error, _Sock}, State) ->
-    io:format("Error on connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+    do_trace("Error on connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
     handle_sock_closed(State),
     {stop, normal, State};
 handle_info({ssl_error, _Sock}, State) ->
-    io:format("Error on SSL connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
+    do_trace("Error on SSL connection to ~1000.p:~1000.p~n", [State#state.host, State#state.port]),
     handle_sock_closed(State),
     {stop, normal, State};
 
@@ -233,7 +241,8 @@ handle_info(Info, State) ->
 %% Returns: any (ignored by gen_server)
 %%--------------------------------------------------------------------
 terminate(_Reason, State) ->
-    do_close(State).
+    do_close(State),
+    ok.
 
 %%--------------------------------------------------------------------
 %% Func: code_change/3
@@ -269,6 +278,7 @@ handle_sock_data(Data, #state{status = get_header}=State) ->
     end;
 
 handle_sock_data(Data, #state{status           = get_body,
+                              socket           = Socket,
                               content_length   = CL,
                               http_status_code = StatCode,
                               recvd_headers    = Headers,
@@ -293,6 +303,19 @@ handle_sock_data(Data, #state{status           = get_body,
                     fail_pipelined_requests(State,
                                             {error, {Reason, {stat_code, StatCode}, Headers}}),
                     {stop, normal, State};
+                #state{cur_req = #request{caller_controls_socket = Ccs},
+                       interim_reply_sent = Irs} = State_1 ->
+                    case Irs of
+                        true ->
+                            active_once(State_1);
+                        false when Ccs == true ->
+                            do_setopts(Socket, [{active, once}], State);
+                        false ->
+                            active_once(State_1)
+                    end,
+                    State_2 = State_1#state{interim_reply_sent = false},
+                    set_inac_timer(State_2),
+                    {noreply, State_2};
                 State_1 ->
                     active_once(State_1),
                     set_inac_timer(State_1),
@@ -338,17 +361,25 @@ accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf
         {error, Reason} ->
             {error, {file_write_error, Reason}}
     end;
-accumulate_response(<<>>, State) ->
-    State;
-accumulate_response(Data, #state{reply_buffer = RepBuf,
-                                 rep_buf_size = RepBufSize,
-                                 streamed_size = Streamed_size,
-                                 cur_req = CurReq}=State) ->
-    #request{stream_to=StreamTo, req_id=ReqId,
-             stream_chunk_size = Stream_chunk_size,
-             response_format = Response_format,
-             caller_controls_socket = Caller_controls_socket} = CurReq,
-    RepBuf_1 = list_to_binary([RepBuf, Data]),
+%% accumulate_response(<<>>, #state{cur_req = #request{caller_controls_socket = Ccs},
+%%                                  socket = Socket} = State) ->
+%%     case Ccs of
+%%         true ->
+%%             do_setopts(Socket, [{active, once}], State);
+%%         false ->
+%%             ok
+%%     end,
+%%     State;
+accumulate_response(Data, #state{reply_buffer      = RepBuf,
+                                 rep_buf_size      = RepBufSize,
+                                 streamed_size     = Streamed_size,
+                                 cur_req           = CurReq}=State) ->
+    #request{stream_to                 = StreamTo,
+             req_id                    = ReqId,
+             stream_chunk_size         = Stream_chunk_size,
+             response_format           = Response_format,
+             caller_controls_socket    = Caller_controls_socket} = CurReq,
+    RepBuf_1 = <<RepBuf/binary, Data/binary>>,
     New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
         undefined ->
@@ -356,15 +387,21 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
         _ when Caller_controls_socket == true ->
             do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
             State#state{reply_buffer = <<>>, 
+                        interim_reply_sent = true,
                         streamed_size = Streamed_size + size(RepBuf_1)};
         _ when New_data_size >= Stream_chunk_size ->
             {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
             do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
-            accumulate_response(
-              Rem_data,
-              State#state{
-                reply_buffer = <<>>,
-                streamed_size = Streamed_size + Stream_chunk_size});
+            State_1 = State#state{
+                        reply_buffer = <<>>,
+                        interim_reply_sent = true,
+                        streamed_size = Streamed_size + Stream_chunk_size},
+            case Rem_data of
+                <<>> ->
+                    State_1;
+                _ ->
+                    accumulate_response(Rem_data, State_1)
+            end;
         _ ->
             State#state{reply_buffer = RepBuf_1}
     end.
@@ -498,9 +535,9 @@ do_close(#state{socket = Sock,
                 is_ssl = true,
                 use_proxy = true,
                 proxy_tunnel_setup = Pts
-               }) when Pts /= done ->  gen_tcp:close(Sock);
-do_close(#state{socket = Sock, is_ssl = true})  ->  ssl:close(Sock);
-do_close(#state{socket = Sock, is_ssl = false}) ->  gen_tcp:close(Sock).
+               }) when Pts /= done ->  catch gen_tcp:close(Sock);
+do_close(#state{socket = Sock, is_ssl = true})  ->  catch ssl:close(Sock);
+do_close(#state{socket = Sock, is_ssl = false}) ->  catch gen_tcp:close(Sock).
 
 active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
     ok;
@@ -542,25 +579,17 @@ send_req_1(From,
         end,
     State_2 = check_ssl_options(Options, State_1),
     do_trace("Connecting...~n", []),
-    Start_ts = now(),
     Conn_timeout = get_value(connect_timeout, Options, Timeout),
     case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
         {ok, Sock} ->
-            do_trace("Connected!~n", []),
-            End_ts = now(),
-            Timeout_1 = case Timeout of
-                            infinity ->
-                                infinity;
-                            _ ->
-                                Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
-                        end,
+            do_trace("Connected! Socket: ~1000.p~n", [Sock]),
             State_3 = State_2#state{socket = Sock,
                                     connect_timeout = Conn_timeout},
-            send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
+            send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State_3);
         Err ->
             shutting_down(State_2),
             do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
-            gen_server:reply(From, {error, conn_failed}),
+            gen_server:reply(From, {error, {conn_failed, Err}}),
             {stop, normal, State_2}
     end;
 
@@ -580,8 +609,9 @@ send_req_1(From,
                   use_proxy = true,
                   is_ssl    = true} = State) ->
     NewReq = #request{
-      method                 = connect,
-      options                = Options
+      method                    = connect,
+      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
+      options                   = Options
      },
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
@@ -611,13 +641,13 @@ send_req_1(From,
                 Err ->
                     shutting_down(State_1),
                     do_trace("Send failed... Reason: ~p~n", [Err]),
-                    gen_server:reply(From, {error, send_failed}),
+                    gen_server:reply(From, {error, {send_failed, Err}}),
                     {stop, normal, State_1}
             end;
         Err ->
             shutting_down(State_1),
             do_trace("Send failed... Reason: ~p~n", [Err]),
-            gen_server:reply(From, {error, send_failed}),
+            gen_server:reply(From, {error, {send_failed, Err}}),
             {stop, normal, State_1}
     end;
 
@@ -666,7 +696,9 @@ send_req_1(From,
                       save_response_to_file  = SaveResponseToFile,
                       stream_chunk_size      = get_stream_chunk_size(Options),
                       response_format        = Resp_format,
-                      from                   = From},
+                      from                   = From,
+                      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false)
+                     },
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
     {Req, Body_1} = make_request(Method,
@@ -705,13 +737,13 @@ send_req_1(From,
                 Err ->
                     shutting_down(State_1),
                     do_trace("Send failed... Reason: ~p~n", [Err]),
-                    gen_server:reply(From, {error, send_failed}),
+                    gen_server:reply(From, {error, {send_failed, Err}}),
                     {stop, normal, State_1}
             end;
         Err ->
             shutting_down(State_1),
             do_trace("Send failed... Reason: ~p~n", [Err]),
-            gen_server:reply(From, {error, send_failed}),
+            gen_server:reply(From, {error, {send_failed, Err}}),
             {stop, normal, State_1}
     end.
 
@@ -768,14 +800,14 @@ http_auth_digest(Username, Password) ->
     ibrowse_lib:encode_base64(Username ++ [$: | Password]).
 
 make_request(Method, Headers, AbsPath, RelPath, Body, Options,
-             #state{use_proxy = UseProxy}) ->
+             #state{use_proxy = UseProxy, is_ssl = Is_ssl}) ->
     HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
     Headers_1 =
         case get_value(content_length, Headers, false) of
             false when (Body == []) or
-                       (Body == <<>>) or
-                       is_tuple(Body) or
-                       is_function(Body) ->
+            (Body == <<>>) or
+            is_tuple(Body) or
+            is_function(Body) ->
                 Headers;
             false when is_binary(Body) ->
                 [{"content-length", integer_to_list(size(Body))} | Headers];
@@ -799,7 +831,12 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
     Headers_3 = cons_headers(Headers_2),
     Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
               true ->
-                  AbsPath;
+                  case Is_ssl of
+                      true ->
+                          RelPath;
+                      false ->
+                          AbsPath
+                  end;
               false ->
                   RelPath
           end,
@@ -1017,7 +1054,7 @@ upgrade_to_ssl(#state{socket = Socket,
             send_queued_requests(lists:reverse(Q), State_1);
         Err ->
             do_trace("Upgrade to SSL socket failed. Reson: ~p~n", [Err]),
-            do_error_reply(State, {error, send_failed}),
+            do_error_reply(State, {error, {send_failed, Err}}),
             {error, send_failed}
     end.
 
@@ -1029,12 +1066,12 @@ send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
     case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
         {noreply, State_1} ->
             send_queued_requests(Q, State_1);
-        _ ->
+        Err ->
             do_trace("Error sending queued SSL request: ~n"
                      "URL     : ~s~n"
                      "Method  : ~p~n"
                      "Headers : ~p~n", [Url, Method, Headers]),
-            do_error_reply(State, {error, send_failed}),
+            do_error_reply(State, {error, {send_failed, Err}}),
             {error, send_failed}
     end.
 
@@ -1046,11 +1083,12 @@ is_connection_closing(_, _)                -> false.
 %% This clause determines the chunk size when given data from the beginning of the chunk
 parse_11_response(DataRecvd,
                   #state{transfer_encoding = chunked, 
-                         chunk_size = chunk_start,
+                         chunk_size        = chunk_start,
                          chunk_size_buffer = Chunk_sz_buf
                         } = State) ->
     case scan_crlf(Chunk_sz_buf, DataRecvd) of
         {yes, ChunkHeader, Data_1} ->
+            State_1 = maybe_accumulate_ce_data(State, <<ChunkHeader/binary, $\r, $\n>>),
             ChunkSize = parse_chunk_header(ChunkHeader),
             %%
             %% Do we have to preserve the chunk encoding when
@@ -1061,10 +1099,10 @@ parse_11_response(DataRecvd,
             RemLen = size(Data_1),
             do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
                      [ChunkSize, RemLen]),
-            parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
-                                                  deleted_crlf = true,
-                                                  recvd_chunk_size = 0,
-                                                  chunk_size = ChunkSize});
+            parse_11_response(Data_1, State_1#state{chunk_size_buffer = <<>>,
+                                                    deleted_crlf = true,
+                                                    recvd_chunk_size = 0,
+                                                    chunk_size = ChunkSize});
         {no, Data_1} ->
             State#state{chunk_size_buffer = Data_1}
     end;
@@ -1074,13 +1112,15 @@ parse_11_response(DataRecvd,
 parse_11_response(DataRecvd,
                   #state{transfer_encoding = chunked, 
                          chunk_size = tbd,
-                         chunk_size_buffer = Buf}=State) ->
+                         chunk_size_buffer = Buf
+                        } = State) ->
     case scan_crlf(Buf, DataRecvd) of
         {yes, _, NextChunk} ->
-            State_1 = State#state{chunk_size = chunk_start,
-                                  chunk_size_buffer = <<>>,
-                                  deleted_crlf = true},
-            parse_11_response(NextChunk, State_1);
+            State_1 = maybe_accumulate_ce_data(State, <<$\r, $\n>>),
+            State_2 = State_1#state{chunk_size = chunk_start,
+                                    chunk_size_buffer = <<>>,
+                                    deleted_crlf = true},
+            parse_11_response(NextChunk, State_2);
         {no, Data_1} ->
             State#state{chunk_size_buffer = Data_1}
     end;
@@ -1090,9 +1130,10 @@ parse_11_response(DataRecvd,
 %% received is silently discarded.
 parse_11_response(DataRecvd,
                   #state{transfer_encoding = chunked, chunk_size = 0, 
-                         cur_req = CurReq,
-                         deleted_crlf = DelCrlf,
-                         chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
+                         cur_req           = CurReq,
+                         deleted_crlf      = DelCrlf,
+                         chunk_size_buffer = Trailer,
+                         reqs              = Reqs} = State) ->
     do_trace("Detected end of chunked transfer...~n", []),
     DataRecvd_1 = case DelCrlf of
                       false ->
@@ -1101,12 +1142,14 @@ parse_11_response(DataRecvd,
                           <<$\r, $\n, DataRecvd/binary>>
                   end,
     case scan_header(Trailer, DataRecvd_1) of
-        {yes, _TEHeaders, Rem} ->
+        {yes, TEHeaders, Rem} ->
             {_, Reqs_1} = queue:out(Reqs),
-            State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
-            parse_response(Rem, reset_state(State_1));
+            State_1 = maybe_accumulate_ce_data(State, <<TEHeaders/binary, $\r, $\n>>),
+            State_2 = handle_response(CurReq,
+                                      State_1#state{reqs = Reqs_1}),
+            parse_response(Rem, reset_state(State_2));
         {no, Rem} ->
-            State#state{chunk_size_buffer = Rem, deleted_crlf = false}
+            accumulate_response(<<>>, State#state{chunk_size_buffer = Rem, deleted_crlf = false})
     end;
 
 %% This clause extracts a chunk, given the size.
@@ -1121,7 +1164,7 @@ parse_11_response(DataRecvd,
     case DataLen >= NeedBytes of
         true ->
             {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
-            do_trace("Recvd another chunk...~n", []),
+            do_trace("Recvd another chunk...~p~n", [RemChunk]),
             do_trace("RemData -> ~p~n", [RemData]),
             case accumulate_response(RemChunk, State) of
                 {error, Reason} ->
@@ -1155,6 +1198,11 @@ parse_11_response(DataRecvd,
             accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
     end.
 
+maybe_accumulate_ce_data(#state{cur_req = #request{preserve_chunked_encoding = false}} = State, _) ->
+    State;
+maybe_accumulate_ce_data(State, Data) ->
+    accumulate_response(Data, State).
+
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                          response_format = Resp_format,
                          save_response_to_file = SaveResponseToFile,
@@ -1177,11 +1225,12 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        _ ->
                            {file, TmpFilename}
                    end,
+    {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(RespHeaders, Raw_headers, Options),
     Reply = case get_value(give_raw_headers, Options, false) of
                 true ->
-                    {ok, Status_line, Raw_headers, ResponseBody};
+                    {ok, Status_line, Raw_headers_1, ResponseBody};
                 false ->
-                    {ok, SCode, RespHeaders, ResponseBody}
+                    {ok, SCode, Resp_headers_1, ResponseBody}
             end,
     State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format, Reply),
     cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
@@ -1192,16 +1241,17 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                 #state{http_status_code = SCode,
                        status_line      = Status_line,
                        raw_headers      = Raw_headers,
-                       recvd_headers    = RespHeaders,
+                       recvd_headers    = Resp_headers,
                        reply_buffer     = RepBuf,
                        send_timer       = ReqTimer} = State) ->
     Body = RepBuf,
 %%    State_1 = set_cur_request(State),
+    {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
     Reply = case get_value(give_raw_headers, Options, false) of
                 true ->
-                    {ok, Status_line, Raw_headers, Body};
+                    {ok, Status_line, Raw_headers_1, Body};
                 false ->
-                    {ok, SCode, RespHeaders, Body}
+                    {ok, SCode, Resp_headers_1, Body}
             end,
     State_1 = case get(conn_close) of
         "close" ->
@@ -1227,7 +1277,8 @@ reset_state(State) ->
                 deleted_crlf      = false,
                 http_status_code  = undefined,
                 chunk_size        = undefined,
-                transfer_encoding = undefined}.
+                transfer_encoding = undefined
+               }.
 
 set_cur_request(#state{reqs = Reqs} = State) ->
     case queue:to_list(Reqs) of
@@ -1459,15 +1510,29 @@ send_async_headers(_ReqId, undefined, _, _State) ->
     ok;
 send_async_headers(ReqId, StreamTo, Give_raw_headers, 
                    #state{status_line = Status_line, raw_headers = Raw_headers, 
-                          recvd_headers = Headers, http_status_code = StatCode
-                          }) ->
+                          recvd_headers = Headers, http_status_code = StatCode,
+                          cur_req = #request{options = Opts}
+                         }) ->
+    {Headers_1, Raw_headers_1} = maybe_add_custom_headers(Headers, Raw_headers, Opts),
     case Give_raw_headers of
         false ->
-            catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers};
+            catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers_1};
         true ->
-            catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers}
+            catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers_1}
     end.
 
+maybe_add_custom_headers(Headers, Raw_headers, Opts) ->
+    Custom_headers = get_value(add_custom_headers, Opts, []),
+    Headers_1 = Headers ++ Custom_headers,
+    Raw_headers_1 = case Custom_headers of
+                        [_ | _] when is_binary(Raw_headers) ->
+                            Custom_headers_bin = list_to_binary(string:join([[X, $:, Y] || {X, Y} <- Custom_headers], "\r\n")),
+                            <<Raw_headers/binary, "\r\n", Custom_headers_bin/binary>>;
+                        _ ->
+                            Raw_headers
+                    end,
+    {Headers_1, Raw_headers_1}.
+
 format_response_data(Resp_format, Body) ->
     case Resp_format of
         list when is_list(Body) ->
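
Taken together, the ibrowse_http_client.erl hunks above surface two caller-visible options, preserve_chunked_encoding and add_custom_headers, and make send failures carry the underlying error term instead of a bare send_failed. A minimal sketch of how a caller might exercise them; the URL, the custom header name and the timeout are placeholders, not part of the patch:

    %% Sketch only: preserve_chunked_encoding keeps the raw chunk framing
    %% in the returned body; add_custom_headers appends {Name, Value}
    %% pairs to the response headers handed back to the caller.
    Options = [{preserve_chunked_encoding, true},
               {add_custom_headers, [{"X-Fetched-By", "ibrowse"}]}],
    case ibrowse:send_req("http://www.httpwatch.com/httpgallery/chunked/",
                          [], get, [], Options, 30000) of
        {ok, Status, Headers, Body} ->
            {Status, Headers, Body};
        {error, {send_failed, Reason}} ->
            %% With this change the underlying gen_tcp/ssl error term is
            %% no longer swallowed.
            {error, Reason};
        Other ->
            Other
    end.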

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 6bc600b..0e001d4 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -16,7 +16,8 @@
 %% External exports
 -export([
 	 start_link/1,
-	 spawn_connection/5
+	 spawn_connection/5,
+         stop/1
 	]).
 
 %% gen_server callbacks
@@ -85,6 +86,14 @@ spawn_connection(Lb_pid, Url,
        is_integer(Max_sessions) ->
     gen_server:call(Lb_pid,
 		    {spawn_connection, Url, Max_sessions, Max_pipeline_size, SSL_options}).
+
+stop(Lb_pid) ->
+    case catch gen_server:call(Lb_pid, stop) of
+        {'EXIT', {timeout, _}} ->
+            exit(Lb_pid, kill);
+        ok ->
+            ok
+    end.
 %%--------------------------------------------------------------------
 %% Function: handle_call/3
 %% Description: Handling call messages
@@ -120,6 +129,18 @@ handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
     ets:insert(Tid, {{1, Pid}, []}),
     {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1}};
 
+handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
+    gen_server:reply(_From, ok),
+    {stop, normal, State};
+
+handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
+    ets:foldl(fun({{_, Pid}, _}, Acc) ->
+                      ibrowse_http_client:stop(Pid),
+                      Acc
+              end, [], Tid),
+    gen_server:reply(_From, ok),
+    {stop, normal, State};
+
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},
     {reply, Reply, State}.
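
The new stop/1 export gives callers a way to tear down a load balancer together with the connection processes it tracks; a minimal sketch, assuming Lb_pid is the pid obtained when the pool was created:

    %% Sketch only: graceful shutdown of one load-balancer process.
    %% stop/1 asks the gen_server to stop and, if that call times out,
    %% falls back to killing the process outright.
    ibrowse_lb:stop(Lb_pid).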

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/db7f9033/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index 00b0244..e7d6e59 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -17,6 +17,7 @@
 	 ue_test/1,
 	 verify_chunked_streaming/0,
 	 verify_chunked_streaming/1,
+         test_chunked_streaming_once/0,
 	 i_do_async_req_list/4,
 	 test_stream_once/3,
 	 test_stream_once/4
@@ -260,7 +261,20 @@ verify_chunked_streaming(Options) ->
     io:format("Fetching data with streaming as binary...~n", []),
     Async_response_bin = do_async_req_list(
 			   Url, get, [{response_format, binary} | Options]),
-    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin).
+    io:format("Fetching data with streaming as binary, {active, once}...~n", []),
+    Async_response_bin_once = do_async_req_list(
+                                Url, get, [once, {response_format, binary} | Options]),
+    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
+    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once).
+
+test_chunked_streaming_once() ->
+    test_chunked_streaming_once([]).
+
+test_chunked_streaming_once(Options) ->
+    Url = "http://www.httpwatch.com/httpgallery/chunked/",
+    io:format("URL: ~s~n", [Url]),
+    io:format("Fetching data with streaming as binary, {active, once}...~n", []),
+    do_async_req_list(Url, get, [once, {response_format, binary} | Options]).
 
 compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
     success;
@@ -313,31 +327,54 @@ wait_for_resp(Pid) ->
 	Msg ->
 	    io:format("Recvd unknown message: ~p~n", [Msg]),
 	    wait_for_resp(Pid)
-    after 10000 ->
+    after 100000 ->
 	  {error, timeout}
     end.
 
 i_do_async_req_list(Parent, Url, Method, Options) ->
-    Res = ibrowse:send_req(Url, [], Method, [], [{stream_to, self()} | Options]),
+    Options_1 = case lists:member(once, Options) of
+                    true ->
+                        [{stream_to, {self(), once}} | (Options -- [once])];
+                    false ->
+                        [{stream_to, self()} | Options]
+                end,
+    Res = ibrowse:send_req(Url, [], Method, [], Options_1),
     case Res of
 	{ibrowse_req_id, Req_id} ->
-	    Result = wait_for_async_resp(Req_id, undefined, undefined, []),
+	    Result = wait_for_async_resp(Req_id, Options, undefined, undefined, []),
 	    Parent ! {async_result, self(), Result};
 	Err ->
 	    Parent ! {async_result, self(), Err}
     end.
 
-wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body) ->
+wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) ->    
     receive
 	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
-	    wait_for_async_resp(Req_id, StatCode, Headers, Body);
+            %% io:format("Recvd headers...~n", []),
+            maybe_stream_next(Req_id, Options),
+	    wait_for_async_resp(Req_id, Options, StatCode, Headers, Body);
 	{ibrowse_async_response_end, Req_id} ->
+            io:format("Recvd end of response.~n", []),
 	    Body_1 = list_to_binary(lists:reverse(Body)),
 	    {ok, Acc_Stat_code, Acc_Headers, Body_1};
 	{ibrowse_async_response, Req_id, Data} ->
-	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, [Data | Body]);
+            maybe_stream_next(Req_id, Options),
+            %% io:format("Recvd data...~n", []),
+	    wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, [Data | Body]);
+	{ibrowse_async_response, Req_id, {error, _} = Err} ->
+            {ok, Acc_Stat_code, Acc_Headers, Err};
 	Err ->
 	    {ok, Acc_Stat_code, Acc_Headers, Err}
+    after 10000 ->
+            {timeout, Acc_Stat_code, Acc_Headers, Body}
+    end.
+
+maybe_stream_next(Req_id, Options) ->
+    case lists:member(once, Options) of
+        true ->
+            ibrowse:stream_next(Req_id);
+        false ->
+            ok
     end.
 
 execute_req(Url, Method, Options) ->
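
The test changes above exercise the {stream_to, {Pid, once}} mode, where the caller receives one message at a time and must call ibrowse:stream_next/1 to ask for the next one. A self-contained sketch of the same pattern outside the test module; the module and function names are made up for illustration:

    %% Sketch only: caller-controlled streaming with stream_next/1.
    -module(stream_once_example).
    -export([fetch/1]).

    fetch(Url) ->
        {ibrowse_req_id, Req_id} =
            ibrowse:send_req(Url, [], get, [],
                             [{stream_to, {self(), once}},
                              {response_format, binary}]),
        collect(Req_id, []).

    collect(Req_id, Acc) ->
        receive
            {ibrowse_async_headers, Req_id, _Status, _Headers} ->
                ibrowse:stream_next(Req_id),
                collect(Req_id, Acc);
            {ibrowse_async_response, Req_id, {error, _} = Err} ->
                Err;
            {ibrowse_async_response, Req_id, Data} ->
                ibrowse:stream_next(Req_id),
                collect(Req_id, [Data | Acc]);
            {ibrowse_async_response_end, Req_id} ->
                {ok, list_to_binary(lists:reverse(Acc))}
        after 10000 ->
                {error, timeout}
        end.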


[14/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Fixes 'make distcheck' to run the test suite.

Quite a few changes to the build system to handle VPATH builds appropriately, as well as to make the test suite aware of them.



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@833951 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/f2a473e8
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/f2a473e8
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/f2a473e8

Branch: refs/heads/import-master
Commit: f2a473e8c0374e2111a89d716d7d39b47f9efc17
Parents: 96d28d8
Author: Paul Joseph Davis <da...@apache.org>
Authored: Mon Nov 9 00:39:16 2009 +0000
Committer: Paul Joseph Davis <da...@apache.org>
Committed: Mon Nov 9 00:39:16 2009 +0000

----------------------------------------------------------------------
 Makefile.am    |  9 +++++----
 ibrowse.app    | 13 -------------
 ibrowse.app.in | 13 +++++++++++++
 3 files changed, 18 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/f2a473e8/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 76262a6..510f36a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -13,6 +13,7 @@
 ibrowseebindir = $(localerlanglibdir)/ibrowse-1.5.2/ebin
 
 ibrowse_file_collection = \
+	ibrowse.app.in \
     ibrowse.erl \
     ibrowse_app.erl \
     ibrowse_http_client.erl \
@@ -21,9 +22,8 @@ ibrowse_file_collection = \
     ibrowse_sup.erl \
     ibrowse_test.erl
 
-ibrowseebin_static_file = ibrowse.app
-
 ibrowseebin_make_generated_file_list = \
+	ibrowse.app \
     ibrowse.beam \
     ibrowse_app.beam \
     ibrowse_http_client.beam \
@@ -33,16 +33,17 @@ ibrowseebin_make_generated_file_list = \
     ibrowse_test.beam
 
 ibrowseebin_DATA = \
-    $(ibrowseebin_static_file) \
     $(ibrowseebin_make_generated_file_list)
 
 EXTRA_DIST =  \
     $(ibrowse_file_collection) \
-    $(ibrowseebin_static_file) \
     ibrowse.hrl
 
 CLEANFILES = \
     $(ibrowseebin_make_generated_file_list)
 
+%.app: %.app.in
+	cp $< $@
+
 %.beam: %.erl
 	$(ERLC) $(ERLC_FLAGS) $<

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/f2a473e8/ibrowse.app
----------------------------------------------------------------------
diff --git a/ibrowse.app b/ibrowse.app
deleted file mode 100644
index 4f43dd9..0000000
--- a/ibrowse.app
+++ /dev/null
@@ -1,13 +0,0 @@
-{application, ibrowse,
-        [{description, "HTTP client application"},
-         {vsn, "1.5.1"},
-         {modules, [ ibrowse,
-		     ibrowse_http_client,
-		     ibrowse_app,
-		     ibrowse_sup,
-		     ibrowse_lib,
-		     ibrowse_lb ]},
-         {registered, []},
-         {applications, [kernel,stdlib,sasl]},
-	 {env, []},
-	 {mod, {ibrowse_app, []}}]}.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/f2a473e8/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
new file mode 100644
index 0000000..4f43dd9
--- /dev/null
+++ b/ibrowse.app.in
@@ -0,0 +1,13 @@
+{application, ibrowse,
+        [{description, "HTTP client application"},
+         {vsn, "1.5.1"},
+         {modules, [ ibrowse,
+		     ibrowse_http_client,
+		     ibrowse_app,
+		     ibrowse_sup,
+		     ibrowse_lib,
+		     ibrowse_lb ]},
+         {registered, []},
+         {applications, [kernel,stdlib,sasl]},
+	 {env, []},
+	 {mod, {ibrowse_app, []}}]}.


[08/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
trimmed trailing whitespace

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@791350 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/d76a4473
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/d76a4473
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/d76a4473

Branch: refs/heads/import-master
Commit: d76a4473a0c4e2bd208ff2552561436c3adbf0f9
Parents: 50228c1
Author: Noah Slater <ns...@apache.org>
Authored: Mon Jul 6 00:33:50 2009 +0000
Committer: Noah Slater <ns...@apache.org>
Committed: Mon Jul 6 00:33:50 2009 +0000

----------------------------------------------------------------------
 ibrowse.app             |  8 ++++----
 ibrowse.erl             | 42 +++++++++++++++++++++---------------------
 ibrowse_app.erl         |  8 ++++----
 ibrowse_http_client.erl | 16 ++++++++--------
 ibrowse_lb.erl          |  6 +++---
 ibrowse_lib.erl         | 20 ++++++++++----------
 ibrowse_sup.erl         |  4 ++--
 ibrowse_test.erl        |  4 ++--
 8 files changed, 54 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse.app
----------------------------------------------------------------------
diff --git a/ibrowse.app b/ibrowse.app
index 5e4621d..a3d23ae 100644
--- a/ibrowse.app
+++ b/ibrowse.app
@@ -1,10 +1,10 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
          {vsn, "1.5.0"},
-         {modules, [ ibrowse, 
-		     ibrowse_http_client, 
-		     ibrowse_app, 
-		     ibrowse_sup, 
+         {modules, [ ibrowse,
+		     ibrowse_http_client,
+		     ibrowse_app,
+		     ibrowse_sup,
 		     ibrowse_lib,
 		     ibrowse_lb ]},
          {registered, []},

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 0d3478b..56f0ef4 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -21,14 +21,14 @@
 %% <p>Here are a few sample invocations.</p>
 %%
 %% <code>
-%% ibrowse:send_req("http://intranet/messenger/", [], get). 
+%% ibrowse:send_req("http://intranet/messenger/", [], get).
 %% <br/><br/>
-%% 
-%% ibrowse:send_req("http://www.google.com/", [], get, [], 
+%%
+%% ibrowse:send_req("http://www.google.com/", [], get, [],
 %% 		 [{proxy_user, "XXXXX"},
 %% 		  {proxy_password, "XXXXX"},
 %% 		  {proxy_host, "proxy"},
-%% 		  {proxy_port, 8080}], 1000). 
+%% 		  {proxy_port, 8080}], 1000).
 %% <br/><br/>
 %%
 %%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
@@ -48,7 +48,7 @@
 %% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
 %%
 %% <br/><br/>
-%% ibrowse:send_req("http://www.google.com", [], get, [], 
+%% ibrowse:send_req("http://www.google.com", [], get, [],
 %%                   [{stream_to, self()}]).
 %% </code>
 %%
@@ -110,7 +110,7 @@
 		      get_value/3,
 		      do_trace/2
 		     ]).
-		      
+
 -record(state, {trace = false}).
 
 -include("ibrowse.hrl").
@@ -158,7 +158,7 @@ stop() ->
 send_req(Url, Headers, Method) ->
     send_req(Url, Headers, Method, [], []).
 
-%% @doc Same as send_req/3. 
+%% @doc Same as send_req/3.
 %% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
 %% If fun/0, the connection handling process will repeatdely call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
 %% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
@@ -168,7 +168,7 @@ send_req(Url, Headers, Method) ->
 send_req(Url, Headers, Method, Body) ->
     send_req(Url, Headers, Method, Body, []).
 
-%% @doc Same as send_req/4. 
+%% @doc Same as send_req/4.
 %% For a description of SSL Options, look in the ssl manpage. If the
 %% HTTP Version to use is not specified, the default is 1.1.
 %% <br/>
@@ -181,7 +181,7 @@ send_req(Url, Headers, Method, Body) ->
 %% used to specify what should go in the <code>Host</code> header in
 %% the request.</p>
 %% <ul>
-%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code> 
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
 %% are specified, the former takes precedence.</li>
 %%
 %% <li>For the <code>save_response_to_file</code> option, the response body is saved to
@@ -211,21 +211,21 @@ send_req(Url, Headers, Method, Body) ->
 %% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
 %% </code>
 %% In the above invocation, if the connection isn't established within
-%% 100 milliseconds, the request will fail with 
+%% 100 milliseconds, the request will fail with
 %% <code>{error, conn_failed}</code>.<br/>
 %% If connection setup succeeds, the total time allowed for the
 %% request to complete will be 1000 milliseconds minus the time taken
 %% for connection setup.
 %% </li>
 %% </ul>
-%% 
+%%
 %% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
 %% optionList() = [option()]
 %% option() = {max_sessions, integer()}        |
 %%          {response_format,response_format()}|
 %%          {stream_chunk_size, integer()}     |
 %%          {max_pipeline_size, integer()}     |
-%%          {trace, boolean()}                 | 
+%%          {trace, boolean()}                 |
 %%          {is_ssl, boolean()}                |
 %%          {ssl_options, [SSLOpt]}            |
 %%          {pool_name, atom()}                |
@@ -257,7 +257,7 @@ send_req(Url, Headers, Method, Body) ->
 send_req(Url, Headers, Method, Body, Options) ->
     send_req(Url, Headers, Method, Body, Options, 30000).
 
-%% @doc Same as send_req/5. 
+%% @doc Same as send_req/5.
 %% All timeout values are in milliseconds.
 %% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
 %% Timeout = integer() | infinity
@@ -282,7 +282,7 @@ send_req(Url, Headers, Method, Body, Options, Timeout) ->
 		    true -> {get_value(ssl_options, Options_1, []), true}
 		end,
 	    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
-					     Max_sessions, 
+					     Max_sessions,
 					     Max_pipeline_size,
 					     {SSLOptions, IsSSL}) of
 		{ok, Conn_Pid} ->
@@ -333,7 +333,7 @@ set_dest(_Host, _Port, [H | _]) ->
     exit({invalid_option, H});
 set_dest(_, _, []) ->
     ok.
-    
+
 %% @doc Set the maximum number of connections allowed to a specific Host:Port.
 %% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
 set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
@@ -432,7 +432,7 @@ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
 %% caller. Should be used in conjunction with the
 %% <code>stream_to</code> option
 %% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_next(Req_id) ->    
+stream_next(Req_id) ->
     case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
 	[] ->
 	    {error, unknown_req_id};
@@ -451,7 +451,7 @@ trace_off() ->
 %% @doc Turn tracing on for all connections to the specified HTTP
 %% server. Host is whatever is specified as the domain name in the URL
 %% @spec trace_on(Host, Port) -> ok
-%% Host = string() 
+%% Host = string()
 %% Port = integer()
 trace_on(Host, Port) ->
     ibrowse ! {trace, true, Host, Port},
@@ -554,7 +554,7 @@ import_config(Filename) ->
     case file:consult(Filename) of
 	{ok, Terms} ->
 	    ets:delete_all_objects(ibrowse_conf),
-	    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
+	    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
 		     when is_list(Host), is_integer(Port),
 		          is_integer(MaxSess), MaxSess > 0,
 		          is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
@@ -564,7 +564,7 @@ import_config(Filename) ->
 			  lists:foreach(
 			    fun({X, Y}) ->
 				    ets:insert(ibrowse_conf,
-					       #ibrowse_conf{key = X, 
+					       #ibrowse_conf{key = X,
 							     value = Y})
 			    end, I);
 		     ({K, V}) ->
@@ -663,7 +663,7 @@ handle_info(all_trace_off, State) ->
     ets:foldl(Fun, undefined, ibrowse_lb),
     ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
     {noreply, State};
-				  
+
 handle_info({trace, Bool}, State) ->
     put(my_trace_flag, Bool),
     {noreply, State};
@@ -680,7 +680,7 @@ handle_info({trace, Bool, Host, Port}, State) ->
     ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
 					   value = Bool}),
     {noreply, State};
-		     
+
 handle_info(_Info, State) ->
     {noreply, State}.
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_app.erl
----------------------------------------------------------------------
diff --git a/ibrowse_app.erl b/ibrowse_app.erl
index f5e523c..8c83e8f 100644
--- a/ibrowse_app.erl
+++ b/ibrowse_app.erl
@@ -1,7 +1,7 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_app.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : 
+%%% Description :
 %%%
 %%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
@@ -42,11 +42,11 @@
 %% Func: start/2
 %% Returns: {ok, Pid}        |
 %%          {ok, Pid, State} |
-%%          {error, Reason}   
+%%          {error, Reason}
 %%--------------------------------------------------------------------
 start(_Type, _StartArgs) ->
     case ibrowse_sup:start_link() of
-	{ok, Pid} -> 
+	{ok, Pid} ->
 	    {ok, Pid};
 	Error ->
 	    Error
@@ -54,7 +54,7 @@ start(_Type, _StartArgs) ->
 
 %%--------------------------------------------------------------------
 %% Func: stop/1
-%% Returns: any 
+%% Returns: any
 %%--------------------------------------------------------------------
 stop(_State) ->
     ok.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 3cacf39..43aa51f 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -51,10 +51,10 @@
 	       }).
 
 -record(request, {url, method, options, from,
-		  stream_to, caller_controls_socket = false, 
+		  stream_to, caller_controls_socket = false,
 		  req_id,
 		  stream_chunk_size,
-		  save_response_to_file = false, 
+		  save_response_to_file = false,
 		  tmp_file_name, tmp_file_fd,
 		  response_format}).
 
@@ -338,7 +338,7 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
 	    State#state{reply_buffer = RepBuf_1};
 	_ when Caller_controls_socket == true ->
 	    do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
-	    State#state{reply_buffer = <<>>, 
+	    State#state{reply_buffer = <<>>,
 			streamed_size = Streamed_size + size(RepBuf_1)};
 	_ when New_data_size >= Stream_chunk_size ->
 	    {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
@@ -514,7 +514,7 @@ send_req_1(From,
 	    {Caller, once} when is_pid(Caller) or
 				is_atom(Caller) ->
 		Async_pid_rec = {{req_id_pid, ReqId}, self()},
-		true = ets:insert(ibrowse_stream, Async_pid_rec), 
+		true = ets:insert(ibrowse_stream, Async_pid_rec),
 		{Caller, true};
 	    undefined ->
 		{undefined, false};
@@ -869,7 +869,7 @@ is_connection_closing(_, _)                -> false.
 
 %% This clause determines the chunk size when given data from the beginning of the chunk
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding=chunked, 
+		  #state{transfer_encoding=chunked,
 			 chunk_size=chunk_start,
 			 chunk_size_buffer = Chunk_sz_buf
 			} = State) ->
@@ -899,7 +899,7 @@ parse_11_response(DataRecvd,
 %% This clause is to remove the CRLF between two chunks
 %%
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked, 
+		  #state{transfer_encoding = chunked,
 			 chunk_size = tbd,
 			 chunk_size_buffer = Buf}=State) ->
     case scan_crlf(Buf, DataRecvd) of
@@ -916,7 +916,7 @@ parse_11_response(DataRecvd,
 
 %% This clause deals with the end of a chunked transfer
 parse_11_response(DataRecvd,
-		  #state{transfer_encoding = chunked, chunk_size = 0, 
+		  #state{transfer_encoding = chunked, chunk_size = 0,
 			 cur_req = CurReq,
 			 deleted_crlf = DelCrlf,
 			 reply_buffer = Trailer, reqs = Reqs}=State) ->
@@ -1449,7 +1449,7 @@ get_stream_chunk_size(Options) ->
 	    ?DEFAULT_STREAM_CHUNK_SIZE
     end.
 
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->	
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
     get_value(inactivity_timeout, Opts, infinity);
 get_inac_timeout(#state{cur_req = undefined}) ->
     infinity.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
index 9212ccd..b0654b7 100644
--- a/ibrowse_lb.erl
+++ b/ibrowse_lb.erl
@@ -1,7 +1,7 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_lb.erl
 %%% Author  : chandru <ch...@t-mobile.co.uk>
-%%% Description : 
+%%% Description :
 %%%
 %%% Created :  6 Mar 2008 by chandru <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
@@ -101,7 +101,7 @@ spawn_connection(Lb_pid, Url,
 % 	    #state{max_sessions = Max_sess,
 % 		   ets_tid = Tid,
 % 		   max_pipeline_size = Max_pipe_sz,
-% 		   num_cur_sessions = Num} = State) 
+% 		   num_cur_sessions = Num} = State)
 %     when Num >= Max ->
 %     Reply = find_best_connection(Tid),
 %     {reply, sorry_dude_reuse, State};
@@ -109,7 +109,7 @@ spawn_connection(Lb_pid, Url,
 %% Update max_sessions in #state with supplied value
 handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
 	    #state{ets_tid = Tid,
-		   num_cur_sessions = Num} = State) 
+		   num_cur_sessions = Num} = State)
     when Num >= Max_sess ->
     Reply = find_best_connection(Tid, Max_pipe),
     {reply, Reply, State#state{max_sessions = Max_sess}};

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 67c5eee..7567a6a 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -1,6 +1,6 @@
 %%% File    : ibrowse_lib.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : 
+%%% Description :
 %%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %% @doc Module with a few useful functions
 
@@ -110,7 +110,7 @@ month_int("Oct") -> 10;
 month_int("Nov") -> 11;
 month_int("Dec") -> 12.
 
-%% @doc Given a status code, returns an atom describing the status code. 
+%% @doc Given a status code, returns an atom describing the status code.
 %% @spec status_code(StatusCode::status_code()) -> StatusDescription
 %% status_code() = string() | integer()
 %% StatusDescription = atom()
@@ -271,7 +271,7 @@ parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
     {invalid_uri_1, Url};
 parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
     Prot = list_to_atom(lists:reverse(TmpAcc)),
-    parse_url(T, get_username, 
+    parse_url(T, get_username,
 	      Url#url{protocol = Prot},
 	      []);
 parse_url([$/ | T], get_username, Url, TmpAcc) ->
@@ -285,16 +285,16 @@ parse_url([$: | T], get_username, Url, TmpAcc) ->
     %% a username/password. If we encounter a '@' later on, there is a
     %% username/password indeed. If we encounter a '/', it was
     %% actually the hostname
-    parse_url(T, get_password, 
+    parse_url(T, get_password,
 	      Url#url{username = lists:reverse(TmpAcc)},
 	      []);
 parse_url([$@ | T], get_username, Url, TmpAcc) ->
-    parse_url(T, get_host, 
+    parse_url(T, get_host,
 	      Url#url{username = lists:reverse(TmpAcc),
 		      password = ""},
 	      []);
 parse_url([$@ | T], get_password, Url, TmpAcc) ->
-    parse_url(T, get_host, 
+    parse_url(T, get_host,
 	      Url#url{password = lists:reverse(TmpAcc)},
 	      []);
 parse_url([$/ | T], get_password, Url, TmpAcc) ->
@@ -308,7 +308,7 @@ parse_url([$/ | T], get_password, Url, TmpAcc) ->
 	    password = undefined,
 	    path = [$/ | T]};
 parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
-    parse_url(T, get_port, 
+    parse_url(T, get_port,
 	      Url#url{host = lists:reverse(TmpAcc)},
 	      []);
 parse_url([$/ | T], get_host, #url{protocol=Prot} = Url, TmpAcc) ->
@@ -340,7 +340,7 @@ parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
 	       _ ->
 		   list_to_integer(lists:reverse(TmpAcc))
 	   end,
-    Url#url{port = Port, 
+    Url#url{port = Port,
 	    path = "/"};
 parse_url([], get_password, Url, TmpAcc) ->
     %% Ok, what we thought was the username/password was the hostname
@@ -387,12 +387,12 @@ do_trace(Fmt, Args) ->
 -ifdef(DEBUG).
 do_trace(_, Fmt, Args) ->
     io:format("~s -- (~s) - "++Fmt,
-	      [printable_date(), 
+	      [printable_date(),
 	       get(ibrowse_trace_token) | Args]).
 -else.
 do_trace(true, Fmt, Args) ->
     io:format("~s -- (~s) - "++Fmt,
-	      [printable_date(), 
+	      [printable_date(),
 	       get(ibrowse_trace_token) | Args]);
 do_trace(_, _, _) ->
     ok.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_sup.erl
----------------------------------------------------------------------
diff --git a/ibrowse_sup.erl b/ibrowse_sup.erl
index 300435d..1b9b863 100644
--- a/ibrowse_sup.erl
+++ b/ibrowse_sup.erl
@@ -1,7 +1,7 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_sup.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : 
+%%% Description :
 %%%
 %%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
@@ -53,7 +53,7 @@ start_link() ->
 %% Func: init/1
 %% Returns: {ok,  {SupFlags,  [ChildSpec]}} |
 %%          ignore                          |
-%%          {error, Reason}   
+%%          {error, Reason}
 %%--------------------------------------------------------------------
 init([]) ->
     AChild = {ibrowse,{ibrowse,start_link,[]},

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/d76a4473/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index ad3e812..cab1f88 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -225,7 +225,7 @@ unit_tests() ->
 unit_tests(Options) ->
     Options_1 = Options ++ [{connect_timeout, 5000}],
     {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
-    receive 
+    receive
 	{done, Pid} ->
 	    ok;
 	{'DOWN', Ref, _, _, Info} ->
@@ -292,7 +292,7 @@ compare_responses(R1, R2, R3) ->
 
 do_async_req_list(Url, Method, Options) ->
     {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
-				   [self(), Url, Method, 
+				   [self(), Url, Method,
 				    Options ++ [{stream_chunk_size, 1000}]]),
     io:format("Spawned process ~p~n", [Pid]),
     wait_for_resp(Pid).


[24/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Yet another ibrowse fix.
Patch submitted upstream:  https://github.com/cmullaparthi/ibrowse/issues/issue/20



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1034404 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/8f3735f0
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/8f3735f0
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/8f3735f0

Branch: refs/heads/import-master
Commit: 8f3735f01063758ebfdc3c8c6919c87d33f1f27f
Parents: 0db80d3
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Fri Nov 12 14:29:00 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Fri Nov 12 14:29:00 2010 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 10 +++++-----
 ibrowse_lib.erl         | 12 ++++--------
 2 files changed, 9 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/8f3735f0/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 5c3d5c9..5ff323c 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -544,7 +544,7 @@ do_send_body1(Source, Resp, State, TE) ->
 maybe_chunked_encode(Data, false) ->
     Data;
 maybe_chunked_encode(Data, true) ->
-    [ibrowse_lib:dec2hex(4, size(to_binary(Data))), "\r\n", Data, "\r\n"].
+    [ibrowse_lib:dec2hex(byte_size(to_binary(Data))), "\r\n", Data, "\r\n"].
 
 do_close(#state{socket = undefined})            ->  ok;
 do_close(#state{socket = Sock,
@@ -927,23 +927,23 @@ chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
 chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
                                               size(Body) >= ChunkSize ->
     <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
-    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
     BodySize = size(Body),
-    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
 chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
     {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
-    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     BodySize = length(Body),
-    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]).

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/8f3735f0/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index c463c7b..e913adb 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -19,7 +19,7 @@
          url_encode/1,
          decode_rfc822_date/1,
          status_code/1,
-         dec2hex/2,
+         dec2hex/1,
          drv_ue/1,
          drv_ue/2,
          encode_base64/1,
@@ -163,14 +163,10 @@ status_code(507) -> insufficient_storage;
 status_code(X) when is_list(X) -> status_code(list_to_integer(X));
 status_code(_)   -> unknown_status_code.
 
-%% @doc dec2hex taken from gtk.erl in std dist
-%% M = integer() -- number of hex digits required
+%% @doc Returns a string with the hexadecimal representation of a given decimal.
 %% N = integer() -- the number to represent as hex
-%% @spec dec2hex(M::integer(), N::integer()) -> string()
-dec2hex(M,N) -> dec2hex(M,N,[]).
-
-dec2hex(0,_N,Ack) -> Ack;
-dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
+%% @spec dec2hex(N::integer()) -> string()
+dec2hex(N) -> lists:flatten(io_lib:format("~.16B", [N])).
 
 %% @doc Implements the base64 encoding algorithm. The output data type matches in the input data type.
 %% @spec encode_base64(In) -> Out
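
The arity change drops the fixed-width padding: dec2hex/1 now emits only the digits needed, which is what the chunked transfer-encoding framing expects. A quick shell-transcript sketch of the resulting chunk framing; "Wiki" is just an example payload:

    %% Sketch only (shell transcript); ~.16B renders uppercase hex with
    %% no leading zeros, so a 255-byte chunk is framed as "FF\r\n...\r\n".
    1> ibrowse_lib:dec2hex(255).
    "FF"
    2> [ibrowse_lib:dec2hex(length("Wiki")), "\r\n", "Wiki", "\r\n"].
    ["4","\r\n","Wiki","\r\n"]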


[09/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
wheeeeeeee

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@794057 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/1eba7c7a
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/1eba7c7a
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/1eba7c7a

Branch: refs/heads/import-master
Commit: 1eba7c7ab56ebc1ebf8f91046749935371ee2f08
Parents: d76a447
Author: Noah Slater <ns...@apache.org>
Authored: Tue Jul 14 20:21:39 2009 +0000
Committer: Noah Slater <ns...@apache.org>
Committed: Tue Jul 14 20:21:39 2009 +0000

----------------------------------------------------------------------
 Makefile.am | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1eba7c7a/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 2ba7b31..02a3e6e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,12 +1,12 @@
 ## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License.  You may obtain a copy
-## of the License at
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
 ##
 ##   http://www.apache.org/licenses/LICENSE-2.0
 ##
 ## Unless required by applicable law or agreed to in writing, software
 ## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 ## License for the specific language governing permissions and limitations under
 ## the License.
 


[11/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
apply patch for chunking error from ibrowse 1.5.2

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@800950 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/087f8163
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/087f8163
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/087f8163

Branch: refs/heads/import-master
Commit: 087f81636012cfa1721e7db2942e0b3d5a09c5c6
Parents: 4203439
Author: Adam Kocoloski <ko...@apache.org>
Authored: Tue Aug 4 20:29:12 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Tue Aug 4 20:29:12 2009 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/087f8163/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 6a26dd4..013f31b 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -919,7 +919,7 @@ parse_11_response(DataRecvd,
 		  #state{transfer_encoding = chunked, chunk_size = 0,
 			 cur_req = CurReq,
 			 deleted_crlf = DelCrlf,
-			 reply_buffer = Trailer, reqs = Reqs}=State) ->
+			 chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
     do_trace("Detected end of chunked transfer...~n", []),
     DataRecvd_1 = case DelCrlf of
 		      false ->
@@ -933,7 +933,7 @@ parse_11_response(DataRecvd,
 	    State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
 	    parse_response(Rem, reset_state(State_1));
 	{no, Rem} ->
-	    State#state{reply_buffer = Rem, rep_buf_size = size(Rem), deleted_crlf = false}
+	    State#state{chunk_size_buffer = Rem, deleted_crlf = false}
     end;
 
 %% This clause extracts a chunk, given the size.
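
The fix matters when the terminating "0" chunk and its (possibly empty) trailer arrive split across TCP packets: the partial trailer now accumulates in chunk_size_buffer instead of reply_buffer, so trailer bytes are never counted as response body. An illustrative sketch of such a split; the packet contents and trailer header are made up:

    %% Illustrative only: the final "0" chunk and a trailer header split
    %% across two packets. The partial trailer line arriving in Packet1
    %% has to be buffered separately (chunk_size_buffer) so that it is
    %% never appended to the body ("Wiki") held in reply_buffer.
    Packet1 = <<"4\r\nWiki\r\n0\r\nX-Checksum: ab">>,
    Packet2 = <<"cd\r\n\r\n">>.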


[17/33] Bumping ibrowse library to version 1.6.2 (latest). It has a few important bug fixes and new features, for example:

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 6c7b154..fbb9c34 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -1,11 +1,10 @@
 %%% File    : ibrowse_lib.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description :
+%%% Description : 
 %%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %% @doc Module with a few useful functions
 
 -module(ibrowse_lib).
--vsn('$Id: ibrowse_lib.erl,v 1.6 2008/03/27 01:35:50 chandrusf Exp $ ').
 -author('chandru').
 -ifdef(debug).
 -compile(export_all).
@@ -14,22 +13,22 @@
 -include("ibrowse.hrl").
 
 -export([
-	 get_trace_status/2,
-	 do_trace/2,
-	 do_trace/3,
-	 url_encode/1,
-	 decode_rfc822_date/1,
-	 status_code/1,
-	 dec2hex/2,
-	 drv_ue/1,
-	 drv_ue/2,
-	 encode_base64/1,
-	 decode_base64/1,
-	 get_value/2,
-	 get_value/3,
-	 parse_url/1,
-	 printable_date/0
-	]).
+         get_trace_status/2,
+         do_trace/2,
+         do_trace/3,
+         url_encode/1,
+         decode_rfc822_date/1,
+         status_code/1,
+         dec2hex/2,
+         drv_ue/1,
+         drv_ue/2,
+         encode_base64/1,
+         decode_base64/1,
+         get_value/2,
+         get_value/3,
+         parse_url/1,
+         printable_date/0
+        ]).
 
 get_trace_status(Host, Port) ->
     ibrowse:get_config_value({trace, Host, Port}, false).
@@ -39,10 +38,10 @@ drv_ue(Str) ->
     drv_ue(Str, Port).
 drv_ue(Str, Port) ->
     case erlang:port_control(Port, 1, Str) of
-	[] ->
-	    Str;
-	Res ->
-	    Res
+        [] ->
+            Str;
+        Res ->
+            Res
     end.
 
 %% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
@@ -72,10 +71,10 @@ d2h(N) -> N+$a-10.
 
 decode_rfc822_date(String) when is_list(String) ->
     case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
-	{'EXIT', _} ->
-	    {error, invalid_date};
-	Res ->
-	    Res
+        {'EXIT', _} ->
+            {error, invalid_date};
+        Res ->
+            Res
     end.
 
 % TODO: Have to handle the Zone
@@ -86,15 +85,15 @@ decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
     MonthI = month_int(Month),
     YearI = list_to_integer(Year),
     TimeTup = case string:tokens(Time, ":") of
-		  [H,M] ->
-		      {list_to_integer(H),
-		       list_to_integer(M),
-		       0};
-		  [H,M,S] ->
-		      {list_to_integer(H),
-		       list_to_integer(M),
-		       list_to_integer(S)}
-	      end,
+                  [H,M] ->
+                      {list_to_integer(H),
+                       list_to_integer(M),
+                       0};
+                  [H,M,S] ->
+                      {list_to_integer(H),
+                       list_to_integer(M),
+                       list_to_integer(S)}
+              end,
     {{YearI,MonthI,DayI}, TimeTup}.
 
 month_int("Jan") -> 1;
@@ -110,7 +109,7 @@ month_int("Oct") -> 10;
 month_int("Nov") -> 11;
 month_int("Dec") -> 12.
 
-%% @doc Given a status code, returns an atom describing the status code.
+%% @doc Given a status code, returns an atom describing the status code. 
 %% @spec status_code(StatusCode::status_code()) -> StatusDescription
 %% status_code() = string() | integer()
 %% StatusDescription = atom()
@@ -178,86 +177,25 @@ dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
 %% In = string() | binary()
 %% Out = string() | binary()
 encode_base64(List) when is_list(List) ->
-    encode_base64_1(list_to_binary(List));
+    binary_to_list(base64:encode(List));
 encode_base64(Bin) when is_binary(Bin) ->
-    List = encode_base64_1(Bin),
-    list_to_binary(List).
-
-encode_base64_1(<<A:6, B:6, C:6, D:6, Rest/binary>>) ->
-    [int_to_b64(A), int_to_b64(B),
-     int_to_b64(C), int_to_b64(D) | encode_base64_1(Rest)];
-encode_base64_1(<<A:6, B:6, C:4>>) ->
-    [int_to_b64(A), int_to_b64(B), int_to_b64(C bsl 2), $=];
-encode_base64_1(<<A:6, B:2>>) ->
-    [int_to_b64(A), int_to_b64(B bsl 4), $=, $=];
-encode_base64_1(<<>>) ->
-    [].
+    base64:encode(Bin).
 
 %% @doc Implements the base64 decoding algorithm. The output data type matches in the input data type.
 %% @spec decode_base64(In) -> Out | exit({error, invalid_input})
 %% In = string() | binary()
 %% Out = string() | binary()
 decode_base64(List) when is_list(List) ->
-    decode_base64_1(List, []);
+    binary_to_list(base64:decode(List));
 decode_base64(Bin) when is_binary(Bin) ->
-    List = decode_base64_1(binary_to_list(Bin), []),
-    list_to_binary(List).
-
-decode_base64_1([H | T], Acc) when ((H == $\t) or
-				    (H == 32) or
-				    (H == $\r) or
-				    (H == $\n)) ->
-    decode_base64_1(T, Acc);
-
-decode_base64_1([$=, $=], Acc) ->
-    lists:reverse(Acc);
-decode_base64_1([$=, _ | _], _Acc) ->
-    exit({error, invalid_input});
-
-decode_base64_1([A1, B1, $=, $=], Acc) ->
-    A = b64_to_int(A1),
-    B = b64_to_int(B1),
-    Oct1 = (A bsl 2) bor (B bsr 4),
-    decode_base64_1([], [Oct1 | Acc]);
-decode_base64_1([A1, B1, C1, $=], Acc) ->
-    A = b64_to_int(A1),
-    B = b64_to_int(B1),
-    C = b64_to_int(C1),
-    Oct1 = (A bsl 2) bor (B bsr 4),
-    Oct2 = ((B band 16#f) bsl 6) bor (C bsr 2),
-    decode_base64_1([], [Oct2, Oct1 | Acc]);
-decode_base64_1([A1, B1, C1, D1 | T], Acc) ->
-    A = b64_to_int(A1),
-    B = b64_to_int(B1),
-    C = b64_to_int(C1),
-    D = b64_to_int(D1),
-    Oct1 = (A bsl 2) bor (B bsr 4),
-    Oct2 = ((B band 16#f) bsl 4) bor (C bsr 2),
-    Oct3 = ((C band 2#11) bsl 6) bor D,
-    decode_base64_1(T, [Oct3, Oct2, Oct1 | Acc]);
-decode_base64_1([], Acc) ->
-    lists:reverse(Acc).
-
-%% Taken from httpd_util.erl
-int_to_b64(X) when X >= 0, X =< 25 -> X + $A;
-int_to_b64(X) when X >= 26, X =< 51 -> X - 26 + $a;
-int_to_b64(X) when X >= 52, X =< 61 -> X - 52 + $0;
-int_to_b64(62) -> $+;
-int_to_b64(63) -> $/.
-
-%% Taken from httpd_util.erl
-b64_to_int(X) when X >= $A, X =< $Z -> X - $A;
-b64_to_int(X) when X >= $a, X =< $z -> X - $a + 26;
-b64_to_int(X) when X >= $0, X =< $9 -> X - $0 + 52;
-b64_to_int($+) -> 62;
-b64_to_int($/) -> 63.
+    base64:decode(Bin).
 
 get_value(Tag, TVL, DefVal) ->
     case lists:keysearch(Tag, 1, TVL) of
-	false ->
-	    DefVal;
-	{value, {_, Val}} ->
-	    Val
+        false ->
+            DefVal;
+        {value, {_, Val}} ->
+            Val
     end.
 
 get_value(Tag, TVL) ->
@@ -271,92 +209,120 @@ parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
     {invalid_uri_1, Url};
 parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
     Prot = list_to_atom(lists:reverse(TmpAcc)),
-    parse_url(T, get_username,
-	      Url#url{protocol = Prot},
-	      []);
-parse_url([$/ | T], get_username, Url, TmpAcc) ->
+    parse_url(T, get_username, 
+              Url#url{protocol = Prot},
+              []);
+parse_url([H | T], get_username, Url, TmpAcc) when H == $/;
+                                                   H == $? ->
+    Path = case H of
+               $/ ->
+                   [$/ | T];
+               $? ->
+                   [$/, $? | T]
+           end,
     %% No username/password. No  port number
     Url#url{host = lists:reverse(TmpAcc),
-	    port = default_port(Url#url.protocol),
-	    path = [$/ | T]};
+            port = default_port(Url#url.protocol),
+           path = Path};
 parse_url([$: | T], get_username, Url, TmpAcc) ->
     %% It is possible that no username/password has been
     %% specified. But we'll continue with the assumption that there is
     %% a username/password. If we encounter a '@' later on, there is a
     %% username/password indeed. If we encounter a '/', it was
     %% actually the hostname
-    parse_url(T, get_password,
-	      Url#url{username = lists:reverse(TmpAcc)},
-	      []);
+    parse_url(T, get_password, 
+              Url#url{username = lists:reverse(TmpAcc)},
+              []);
 parse_url([$@ | T], get_username, Url, TmpAcc) ->
-    parse_url(T, get_host,
-	      Url#url{username = lists:reverse(TmpAcc),
-		      password = ""},
-	      []);
+    parse_url(T, get_host, 
+              Url#url{username = lists:reverse(TmpAcc),
+                      password = ""},
+              []);
 parse_url([$@ | T], get_password, Url, TmpAcc) ->
-    parse_url(T, get_host,
-	      Url#url{password = lists:reverse(TmpAcc)},
-	      []);
-parse_url([$/ | T], get_password, Url, TmpAcc) ->
+    parse_url(T, get_host, 
+              Url#url{password = lists:reverse(TmpAcc)},
+              []);
+parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
+                                                   H == $? ->
     %% Ok, what we thought was the username/password was the hostname
     %% and portnumber
     #url{username=User} = Url,
     Port = list_to_integer(lists:reverse(TmpAcc)),
+    Path = case H of
+               $/ ->
+                   [$/ | T];
+               $? ->
+                   [$/, $? | T]
+           end,
     Url#url{host = User,
-	    port = Port,
-	    username = undefined,
-	    password = undefined,
-	    path = [$/ | T]};
+            port = Port,
+            username = undefined,
+            password = undefined,
+           path = Path};
 parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
-    parse_url(T, get_port,
-	      Url#url{host = lists:reverse(TmpAcc)},
-	      []);
-parse_url([$/ | T], get_host, #url{protocol=Prot} = Url, TmpAcc) ->
+    parse_url(T, get_port, 
+              Url#url{host = lists:reverse(TmpAcc)},
+              []);
+parse_url([H | T], get_host, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+                                                                     H == $? ->
+    Path = case H of
+               $/ ->
+                   [$/ | T];
+               $? ->
+                   [$/, $? | T]
+           end,
     Url#url{host = lists:reverse(TmpAcc),
-	    port = default_port(Prot),
-	    path = [$/ | T]};
-parse_url([$/ | T], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+            port = default_port(Prot),
+           path = Path};
+parse_url([H | T], get_port, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+                                                                     H == $? ->
+    Path = case H of
+               $/ ->
+                   [$/ | T];
+               $? ->
+                   [$/, $? | T]
+           end,
     Port = case TmpAcc of
-	       [] ->
-		   default_port(Prot);
-	       _ ->
-		   list_to_integer(lists:reverse(TmpAcc))
-	   end,
-    Url#url{port = Port, path = [$/ | T]};
+               [] ->
+                   default_port(Prot);
+               _ ->
+                   list_to_integer(lists:reverse(TmpAcc))
+           end,
+    Url#url{port = Port, path = Path};
 parse_url([H | T], State, Url, TmpAcc) ->
     parse_url(T, State, Url, [H | TmpAcc]);
 parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
     Url#url{host = lists:reverse(TmpAcc),
-	    port = default_port(Url#url.protocol),
-	    path = "/"};
+            port = default_port(Url#url.protocol),
+            path = "/"};
 parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
     Url#url{host = lists:reverse(TmpAcc),
-	    port = default_port(Url#url.protocol),
-	    path = "/"};
+            port = default_port(Url#url.protocol),
+            path = "/"};
 parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
     Port = case TmpAcc of
-	       [] ->
-		   default_port(Prot);
-	       _ ->
-		   list_to_integer(lists:reverse(TmpAcc))
-	   end,
-    Url#url{port = Port,
-	    path = "/"};
+               [] ->
+                   default_port(Prot);
+               _ ->
+                   list_to_integer(lists:reverse(TmpAcc))
+           end,
+    Url#url{port = Port, 
+            path = "/"};
 parse_url([], get_password, Url, TmpAcc) ->
     %% Ok, what we thought was the username/password was the hostname
     %% and portnumber
     #url{username=User} = Url,
     Port = case TmpAcc of
-	       [] ->
-		   default_port(Url#url.protocol);
-	       _ ->
-		   list_to_integer(lists:reverse(TmpAcc))
-	   end,
+               [] ->
+                   default_port(Url#url.protocol);
+               _ ->
+                   list_to_integer(lists:reverse(TmpAcc))
+           end,
     Url#url{host = User,
-	    port = Port,
-	    username = undefined,
-	    password = undefined,
-	    path = "/"};
+            port = Port,
+            username = undefined,
+            password = undefined,
+            path = "/"};
 parse_url([], State, Url, TmpAcc) ->
     {invalid_uri_2, State, Url, TmpAcc}.
 
@@ -387,13 +353,13 @@ do_trace(Fmt, Args) ->
 -ifdef(DEBUG).
 do_trace(_, Fmt, Args) ->
     io:format("~s -- (~s) - "++Fmt,
-	      [printable_date(),
-	       get(ibrowse_trace_token) | Args]).
+              [printable_date(), 
+               get(ibrowse_trace_token) | Args]).
 -else.
 do_trace(true, Fmt, Args) ->
     io:format("~s -- (~s) - "++Fmt,
-	      [printable_date(),
-	       get(ibrowse_trace_token) | Args]);
+              [printable_date(), 
+               get(ibrowse_trace_token) | Args]);
 do_trace(_, _, _) ->
     ok.
 -endif.
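
The parse_url/4 clauses above now treat a '?' that appears right after the host or port the same as a '/', so a URL with a query string but no explicit path still yields a usable path field. A minimal sketch of the intended behaviour, not part of the patch, assuming the patched ibrowse_lib and its ibrowse.hrl record definition are on the code path (module name and URL are placeholders):

    %% parse_url_demo.erl -- illustrative only, not part of the patch.
    %% Exercises the query-string handling added to ibrowse_lib:parse_url/1.
    -module(parse_url_demo).
    -export([run/0]).

    -include("ibrowse.hrl").   %% defines the #url{} record

    run() ->
        %% A URL with a query string but no explicit path component.
        #url{host = Host, port = Port, path = Path} =
            ibrowse_lib:parse_url("http://www.example.com?q=couchdb"),
        %% With the change above, Path should come back as "/?q=couchdb"
        %% instead of the query string being folded into the host name.
        io:format("host=~s port=~p path=~s~n", [Host, Port, Path]).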

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_sup.erl
----------------------------------------------------------------------
diff --git a/ibrowse_sup.erl b/ibrowse_sup.erl
index 1b9b863..ace33d1 100644
--- a/ibrowse_sup.erl
+++ b/ibrowse_sup.erl
@@ -1,13 +1,11 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_sup.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description :
+%%% Description : 
 %%%
 %%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 -module(ibrowse_sup).
--vsn('$Id: ibrowse_sup.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
-
 -behaviour(supervisor).
 %%--------------------------------------------------------------------
 %% Include files
@@ -53,7 +51,7 @@ start_link() ->
 %% Func: init/1
 %% Returns: {ok,  {SupFlags,  [ChildSpec]}} |
 %%          ignore                          |
-%%          {error, Reason}
+%%          {error, Reason}   
 %%--------------------------------------------------------------------
 init([]) ->
     AChild = {ibrowse,{ibrowse,start_link,[]},

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index 3dc66ec..00b0244 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -4,7 +4,6 @@
 %%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 
 -module(ibrowse_test).
--vsn('$Id: ibrowse_test.erl,v 1.4 2009/07/01 22:43:19 chandrusf Exp $ ').
 -export([
 	 load_test/3,
 	 send_reqs_1/3,
@@ -193,6 +192,7 @@ dump_errors(Key, Iod) ->
 		    {"http://www.google.co.uk", get},
 		    {"http://www.google.com", get},
 		    {"http://www.google.com", options},
+                    {"https://mail.google.com", get},
 		    {"http://www.sun.com", get},
 		    {"http://www.oracle.com", get},
 		    {"http://www.bbc.co.uk", get},
@@ -223,9 +223,10 @@ unit_tests() ->
     unit_tests([]).
 
 unit_tests(Options) ->
+    application:start(ssl),
     Options_1 = Options ++ [{connect_timeout, 5000}],
     {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
-    receive
+    receive 
 	{done, Pid} ->
 	    ok;
 	{'DOWN', Ref, _, _, Info} ->
@@ -293,7 +294,7 @@ compare_responses(R1, R2, R3) ->
 
 do_async_req_list(Url, Method, Options) ->
     {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
-				   [self(), Url, Method,
+				   [self(), Url, Method, 
 				    Options ++ [{stream_chunk_size, 1000}]]),
     io:format("Spawned process ~p~n", [Pid]),
     wait_for_resp(Pid).


[03/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Fix `make distcheck` (which builds releases and nightlies)

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@743787 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/39af2b60
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/39af2b60
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/39af2b60

Branch: refs/heads/import-master
Commit: 39af2b605eab6c11877ca87ddad5f80086da11c4
Parents: 4d1d829
Author: Jan Lehnardt <ja...@apache.org>
Authored: Thu Feb 12 15:52:37 2009 +0000
Committer: Jan Lehnardt <ja...@apache.org>
Committed: Thu Feb 12 15:52:37 2009 +0000

----------------------------------------------------------------------
 Makefile.am | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/39af2b60/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index e4feb3d..614bdc3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -38,7 +38,8 @@ ibrowseebin_DATA = \
 
 EXTRA_DIST =  \
     $(ibrowse_file_collection) \
-    $(ibrowseebin_static_file)
+    $(ibrowseebin_static_file) \
+    ibrowse.hrl
 
 CLEANFILES = \
     $(ibrowseebin_make_generated_file_list)


[26/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Upgraded ibrowse to version 2.1.2 (released today)

This version fixes a rarely occurring blocking issue when using the same connection
(with or without pipelining) for multiple requests with the option {stream_to, {pid(), once}}.
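
For context, {stream_to, {pid(), once}} puts the caller in charge of flow control: each part of the response is delivered only after ibrowse:stream_next/1 is called. A minimal sketch of that pattern, loosely based on the test_20122010/0 case added below (the URL is a placeholder and error handling is omitted):

    %% Illustrative only -- caller-controlled streaming over a dedicated worker.
    stream_once_demo(Url) ->
        {ok, Conn} = ibrowse:spawn_worker_process(Url),
        {ibrowse_req_id, ReqId} =
            ibrowse:send_req_direct(Conn, Url, [], get, [],
                                    [{stream_to, {self(), once}},
                                     {include_ibrowse_req_id, true}]),
        collect(ReqId, []).

    collect(ReqId, Acc) ->
        receive
            {ibrowse_async_headers, ReqId, _Status, _Headers} ->
                ok = ibrowse:stream_next(ReqId),          %% ask for the first chunk
                collect(ReqId, Acc);
            {ibrowse_async_response, ReqId, Part} ->
                ok = ibrowse:stream_next(ReqId),          %% ask for the next chunk
                collect(ReqId, [Part | Acc]);
            {ibrowse_async_response_end, ReqId} ->
                {ok, lists:reverse(Acc)}
        after 5000 ->
                {error, timeout}
        end.

The set_cur_request/1 change in ibrowse_http_client.erl below, which re-arms the socket with {active, once} when the next queued request is also caller-controlled, appears to be the part that addresses the blocking case described above.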



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1051082 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/99da0af1
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/99da0af1
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/99da0af1

Branch: refs/heads/import-master
Commit: 99da0af1fe7669bf5eb768422f433e558bf92a88
Parents: 211fbf3
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Mon Dec 20 12:32:49 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Mon Dec 20 12:32:49 2010 +0000

----------------------------------------------------------------------
 Makefile.am             |  2 +-
 ibrowse.app.in          |  2 +-
 ibrowse.erl             |  2 +-
 ibrowse_http_client.erl | 35 +++++++++++++----
 ibrowse_test.erl        | 93 ++++++++++++++++++++++++++++++++++++++++++--
 5 files changed, 121 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/99da0af1/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index deddd5a..4cebe5d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.1/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.2/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/99da0af1/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index aee0f20..c8e4227 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "2.1.1"},
+         {vsn, "2.1.2"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/99da0af1/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 6e20cfb..e105150 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2010 Chandrashekhar Mullaparthi
-%% @version 2.1.1
+%% @version 2.1.2
 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/99da0af1/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 0135a49..ea75948 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -191,6 +191,14 @@ handle_info({stream_next, Req_id}, #state{socket = Socket,
     {noreply, State};
 
 handle_info({stream_next, _Req_id}, State) ->
+    _Cur_req_id = case State#state.cur_req of
+                     #request{req_id = Cur} ->
+                         Cur;
+                     _ ->
+                         undefined
+                 end,
+%%     io:format("Ignoring stream_next as ~1000.p is not cur req (~1000.p)~n",
+%%               [_Req_id, _Cur_req_id]),
     {noreply, State};
 
 handle_info({stream_close, _Req_id}, State) ->
@@ -625,7 +633,7 @@ send_req_1(From,
     Path = [Server_host, $:, integer_to_list(Server_port)],
     {Req, Body_1} = make_request(connect, Pxy_auth_headers,
                                  Path, Path,
-                                 [], Options, State_1),
+                                 [], Options, State_1, undefined),
     TE = is_chunked_encoding_specified(Options),
     trace_request(Req),
     case do_send(Req, State) of
@@ -711,7 +719,8 @@ send_req_1(From,
     Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
     {Req, Body_1} = make_request(Method,
                                  Headers_1,
-                                 AbsPath, RelPath, Body, Options, State_1),
+                                 AbsPath, RelPath, Body, Options, State_1,
+                                 ReqId),
     trace_request(Req),
     do_setopts(Socket, Caller_socket_options, State_1),
     TE = is_chunked_encoding_specified(Options),
@@ -811,7 +820,7 @@ http_auth_digest(Username, Password) ->
     ibrowse_lib:encode_base64(Username ++ [$: | Password]).
 
 make_request(Method, Headers, AbsPath, RelPath, Body, Options,
-             #state{use_proxy = UseProxy, is_ssl = Is_ssl}) ->
+             #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) ->
     HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
     Fun1 = fun({X, Y}) when is_atom(X) ->
                    {to_lower(atom_to_list(X)), X, Y};
@@ -847,7 +856,13 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
                  [{"Transfer-Encoding", "chunked"}],
                  chunk_request_body(Body, Chunk_size_1)}
         end,
-    Headers_3 = cons_headers(Headers_2),
+    Headers_3 = case lists:member({include_ibrowse_req_id, true}, Options) of
+                    true ->
+                        [{"x-ibrowse-request-id", io_lib:format("~1000.p",[ReqId])} | Headers_2];
+                    false ->
+                        Headers_2
+                end,
+    Headers_4 = cons_headers(Headers_3),
     Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
               true ->
                   case Is_ssl of
@@ -859,7 +874,7 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
               false ->
                   RelPath
           end,
-    {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
+    {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_4, crnl()], Body_1}.
 
 is_chunked_encoding_specified(Options) ->
     case get_value(transfer_encoding, Options, false) of
@@ -1303,11 +1318,17 @@ reset_state(State) ->
                 transfer_encoding = undefined
                }.
 
-set_cur_request(#state{reqs = Reqs} = State) ->
+set_cur_request(#state{reqs = Reqs, socket = Socket} = State) ->
     case queue:to_list(Reqs) of
         [] ->
             State#state{cur_req = undefined};
-        [NextReq | _] ->
+        [#request{caller_controls_socket = Ccs} = NextReq | _] ->
+            case Ccs of
+                true ->
+                    do_setopts(Socket, [{active, once}], State);
+                _ ->
+                    ok
+            end,
             State#state{cur_req = NextReq}
     end.
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/99da0af1/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index 3ad7660..b8e0a4a 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -20,7 +20,8 @@
          test_chunked_streaming_once/0,
 	 i_do_async_req_list/4,
 	 test_stream_once/3,
-	 test_stream_once/4
+	 test_stream_once/4,
+         test_20122010/0
 	]).
 
 test_stream_once(Url, Method, Options) ->
@@ -218,7 +219,8 @@ dump_errors(Key, Iod) ->
 		    {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
 		    {"http://jigsaw.w3.org/HTTP/CL/", get},
 		    {"http://www.httpwatch.com/httpgallery/chunked/", get},
-                    {"https://github.com", get, [{ssl_options, [{depth, 2}]}]}
+                    {"https://github.com", get, [{ssl_options, [{depth, 2}]}]},
+                    {local_test_fun, test_20122010, []}
 		   ]).
 
 unit_tests() ->
@@ -228,6 +230,7 @@ unit_tests(Options) ->
     application:start(crypto),
     application:start(public_key),
     application:start(ssl),
+    (catch ibrowse_test_server:start_server(8181, tcp)),
     ibrowse:start(),
     Options_1 = Options ++ [{connect_timeout, 5000}],
     {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
@@ -242,7 +245,9 @@ unit_tests(Options) ->
     end.
 
 unit_tests_1(Parent, Options) ->
-    lists:foreach(fun({Url, Method}) ->
+    lists:foreach(fun({local_test_fun, Fun_name, Args}) ->
+                          execute_req(local_test_fun, Fun_name, Args);
+                     ({Url, Method}) ->
 			  execute_req(Url, Method, Options);
 		     ({Url, Method, X_Opts}) ->
 			  execute_req(Url, Method, X_Opts ++ Options)
@@ -394,6 +399,10 @@ maybe_stream_next(Req_id, Options) ->
             ok
     end.
 
+execute_req(local_test_fun, Method, Args) ->
+    io:format("     ~-54.54w: ", [Method]),
+    Result = (catch apply(?MODULE, Method, Args)),
+    io:format("~p~n", [Result]);
 execute_req(Url, Method, Options) ->
     io:format("~7.7w, ~50.50s: ", [Method, Url]),
     Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
@@ -430,3 +439,81 @@ ue_test(Data) ->
 log_msg(Fmt, Args) ->
     io:format("~s -- " ++ Fmt,
 	      [ibrowse_lib:printable_date() | Args]).
+
+%%------------------------------------------------------------------------------
+%% 
+%%------------------------------------------------------------------------------
+
+test_20122010() ->
+    {ok, Pid} = ibrowse:spawn_worker_process("http://localhost:8181"),
+    Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>,
+    Test_parent = self(),
+    Fun = fun() ->
+                  do_test_20122010(Pid, Expected_resp, Test_parent)
+          end,
+    Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)],
+    wait_for_workers(Pids).
+
+wait_for_workers([{Pid, _Ref} | Pids]) ->
+    receive
+        {Pid, success} ->
+            wait_for_workers(Pids)
+    after 5000 ->
+            test_failed
+    end;
+wait_for_workers([]) ->
+    success.
+
+do_test_20122010(Pid, Expected_resp, Test_parent) ->
+    {ibrowse_req_id, Req_id} = ibrowse:send_req_direct(
+                                 Pid,
+                                 "http://localhost:8181/ibrowse_stream_once_chunk_pipeline_test",
+                                 [], get, [],
+                                 [{stream_to, {self(), once}},
+                                  {include_ibrowse_req_id, true}]),
+    do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]),
+    Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])),
+    receive
+        {ibrowse_async_headers, Req_id, "200", Headers} ->
+            case lists:keysearch("x-ibrowse-request-id", 1, Headers) of
+                {value, {_, Req_id_str}} ->
+                    ok;
+                {value, {_, Req_id_1}} ->
+                    do_trace("~p -- Sent req-id: ~1000.p. Recvd: ~1000.p~n",
+                              [self(), Req_id, Req_id_1]),
+                    exit(req_id_mismatch)
+            end
+    after 5000 ->
+            do_trace("~p -- response headers not received~n", [self()]),
+            exit({timeout, test_failed})
+    end,
+    do_trace("~p -- response headers received~n", [self()]),
+    ok = ibrowse:stream_next(Req_id),
+    case do_test_20122010_1(Expected_resp, Req_id, []) of
+        true ->
+            Test_parent ! {self(), success};
+        false ->
+            Test_parent ! {self(), failed}
+    end.
+
+do_test_20122010_1(Expected_resp, Req_id, Acc) ->
+    receive
+        {ibrowse_async_response, Req_id, Body_part} ->
+            ok = ibrowse:stream_next(Req_id),
+            do_test_20122010_1(Expected_resp, Req_id, [Body_part | Acc]);
+        {ibrowse_async_response_end, Req_id} ->
+            Acc_1 = list_to_binary(lists:reverse(Acc)),
+            Result = Acc_1 == Expected_resp,
+            do_trace("~p -- End of response. Result: ~p~n", [self(), Result]),
+            Result
+    after 1000 ->
+            exit({timeout, test_failed})
+    end.
+
+do_trace(Fmt, Args) ->
+    do_trace(get(my_trace_flag), Fmt, Args).
+
+do_trace(true, Fmt, Args) ->
+    io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]);
+do_trace(_, _, _) ->
+    ok.


[02/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Replacement of inets with ibrowse. Fixes COUCHDB-179 and enhances replication.
Thanks Jason Davies and Adam Kocoloski for the fix, Maximillian Dornseif for reporting.


git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@739047 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/4d1d8294
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/4d1d8294
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/4d1d8294

Branch: refs/heads/import-master
Commit: 4d1d8294dfd6548500bf288010a065995a3fda8b
Parents: 
Author: John Christopher Anderson <jc...@apache.org>
Authored: Thu Jan 29 22:15:48 2009 +0000
Committer: John Christopher Anderson <jc...@apache.org>
Committed: Thu Jan 29 22:15:48 2009 +0000

----------------------------------------------------------------------
 Makefile.am             |   47 ++
 ibrowse.app             |   13 +
 ibrowse.erl             |  628 ++++++++++++++++++++
 ibrowse.hrl             |   12 +
 ibrowse_app.erl         |   64 +++
 ibrowse_http_client.erl | 1312 ++++++++++++++++++++++++++++++++++++++++++
 ibrowse_lb.erl          |  195 +++++++
 ibrowse_lib.erl         |  399 +++++++++++++
 ibrowse_sup.erl         |   65 +++
 ibrowse_test.erl        |  226 ++++++++
 10 files changed, 2961 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..e4feb3d
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,47 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License.  You may obtain a copy
+## of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+ibrowseebindir = $(localerlanglibdir)/ibrowse-1.4.1/ebin
+
+ibrowse_file_collection = \
+    ibrowse.erl \
+    ibrowse_app.erl \
+    ibrowse_http_client.erl \
+    ibrowse_lb.erl \
+    ibrowse_lib.erl \
+    ibrowse_sup.erl \
+    ibrowse_test.erl
+
+ibrowseebin_static_file = ibrowse.app
+
+ibrowseebin_make_generated_file_list = \
+    ibrowse.beam \
+    ibrowse_app.beam \
+    ibrowse_http_client.beam \
+    ibrowse_lb.beam \
+    ibrowse_lib.beam \
+    ibrowse_sup.beam \
+    ibrowse_test.beam
+
+ibrowseebin_DATA = \
+    $(ibrowseebin_static_file) \
+    $(ibrowseebin_make_generated_file_list)
+
+EXTRA_DIST =  \
+    $(ibrowse_file_collection) \
+    $(ibrowseebin_static_file)
+
+CLEANFILES = \
+    $(ibrowseebin_make_generated_file_list)
+
+%.beam: %.erl
+	$(ERLC) $<

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse.app
----------------------------------------------------------------------
diff --git a/ibrowse.app b/ibrowse.app
new file mode 100644
index 0000000..960c079
--- /dev/null
+++ b/ibrowse.app
@@ -0,0 +1,13 @@
+{application, ibrowse,
+        [{description, "HTTP client application"},
+         {vsn, "1.4.1"},
+         {modules, [ ibrowse, 
+		     ibrowse_http_client, 
+		     ibrowse_app, 
+		     ibrowse_sup, 
+		     ibrowse_lib,
+		     ibrowse_lb ]},
+         {registered, []},
+         {applications, [kernel,stdlib,sasl]},
+	 {env, []},
+	 {mod, {ibrowse_app, []}}]}.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
new file mode 100644
index 0000000..4e6404a
--- /dev/null
+++ b/ibrowse.erl
@@ -0,0 +1,628 @@
+%%%-------------------------------------------------------------------
+%%% File    : ibrowse.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : Load balancer process for HTTP client connections.
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+%% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
+%% @copyright 2005-2008 Chandrashekhar Mullaparthi
+%% @version 1.4
+%% @doc The ibrowse application implements an HTTP 1.1 client. This
+%% module implements the API of the HTTP client. There is one named
+%% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
+%% one process to handle one TCP connection to a webserver
+%% (implemented in the module ibrowse_http_client). Multiple connections to a
+%% webserver are setup based on the settings for each webserver. The
+%% ibrowse process also determines which connection to pipeline a
+%% certain request on.  The functions to call are send_req/3,
+%% send_req/4, send_req/5, send_req/6.
+%%
+%% <p>Here are a few sample invocations.</p>
+%%
+%% <code>
+%% ibrowse:send_req("http://intranet/messenger/", [], get). 
+%% <br/><br/>
+%% 
+%% ibrowse:send_req("http://www.google.com/", [], get, [], 
+%% 		 [{proxy_user, "XXXXX"},
+%% 		  {proxy_password, "XXXXX"},
+%% 		  {proxy_host, "proxy"},
+%% 		  {proxy_port, 8080}], 1000). 
+%% <br/><br/>
+%%
+%%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
+%% 		 [{proxy_user, "XXXXX"},
+%% 		  {proxy_password, "XXXXX"},
+%% 		  {proxy_host, "proxy"},
+%% 		  {proxy_port, 8080},
+%% 		  {save_response_to_file, true}], 1000).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.erlang.org", [], head).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.sun.com", [], options).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.google.com", [], get, [], 
+%%                   [{stream_to, self()}]).
+%% </code>
+%%
+%% <p>A driver exists which implements URL encoding in C, but the
+%% speed achieved using only erlang has been good enough, so the
+%% driver isn't actually used.</p>
+
+-module(ibrowse).
+-vsn('$Id: ibrowse.erl,v 1.7 2008/05/21 15:28:11 chandrusf Exp $ ').
+
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([start_link/0, start/0, stop/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+	 terminate/2, code_change/3]).
+
+%% API interface
+-export([
+	 rescan_config/0,
+	 rescan_config/1,
+	 get_config_value/1,
+	 get_config_value/2,
+	 spawn_worker_process/2,
+	 spawn_link_worker_process/2,
+	 stop_worker_process/1,
+	 send_req/3,
+	 send_req/4,
+	 send_req/5,
+	 send_req/6,
+	 send_req_direct/4,
+	 send_req_direct/5,
+	 send_req_direct/6,
+	 send_req_direct/7,
+	 set_max_sessions/3,
+	 set_max_pipeline_size/3,
+	 set_dest/3,
+	 trace_on/0,
+	 trace_off/0,
+	 trace_on/2,
+	 trace_off/2,
+	 show_dest_status/2
+	]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-import(ibrowse_lib, [
+		      parse_url/1,
+		      printable_date/0,
+		      get_value/2,
+		      get_value/3,
+		      do_trace/2
+		     ]).
+		      
+-record(state, {trace = false}).
+
+-include("ibrowse.hrl").
+
+-define(DEF_MAX_SESSIONS,10).
+-define(DEF_MAX_PIPELINE_SIZE,10).
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+%% @doc Starts the ibrowse process linked to the calling process. Usually invoked by the supervisor ibrowse_sup
+%% @spec start_link() -> {ok, pid()}
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @doc Starts the ibrowse process without linking. Useful when testing using the shell
+start() ->
+    gen_server:start({local, ?MODULE}, ?MODULE, [], [{debug, []}]).
+
+%% @doc Stop the ibrowse process. Useful when testing using the shell.
+stop() ->
+    catch gen_server:call(ibrowse, stop).
+
+%% @doc This is the basic function to send a HTTP request.
+%% The Status return value indicates the HTTP status code returned by the webserver
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method()) -> response()
+%% headerList() = [{header(), value()}]
+%% header() = atom() | string()
+%% value() = term()
+%% method() = get | post | head | options | put | delete | trace | mkcol | propfind | proppatch | lock | unlock | move | copy
+%% Status = string()
+%% ResponseHeaders = [respHeader()]
+%% respHeader() = {headerName(), headerValue()}
+%% headerName() = string()
+%% headerValue() = string()
+%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {error, Reason}
+%% ResponseBody = string() | {file, Filename}
+%% Reason = term()
+send_req(Url, Headers, Method) ->
+    send_req(Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/3. 
+%% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
+%% If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
+%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
+%% @spec send_req(Url, Headers, Method::method(), Body::body()) -> response()
+%% body() = [] | string() | binary() | fun_arity_0() | {fun_arity_1(), initial_state()}
+%% initial_state() = term()
+send_req(Url, Headers, Method, Body) ->
+    send_req(Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/4. 
+%% For a description of SSL Options, look in the ssl manpage. If the
+%% HTTP Version to use is not specified, the default is 1.1.
+%% <br/>
+%% <p>The <code>host_header</code> is useful in the case where ibrowse is
+%% connecting to a component such as <a
+%% href="http://www.stunnel.org">stunnel</a> which then sets up a
+%% secure connection to a webserver. In this case, the URL supplied to
+%% ibrowse must have the stunnel host/port details, but that won't
+%% make sense to the destination webserver. This option can then be
+%% used to specify what should go in the <code>Host</code> header in
+%% the request.</p>
+%% <ul>
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code> 
+%% are specified, the former takes precedence.</li>
+%%
+%% <li>For the <code>save_response_to_file</code> option, the response body is saved to
+%% file only if the status code is in the 200-299 range. If not, the response body is returned
+%% as a string.</li>
+%% <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
+%% information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
+%% is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
+%% </ul>
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
+%% optionList() = [option()]
+%% option() = {max_sessions, integer()}        |
+%%          {max_pipeline_size, integer()}     |
+%%          {trace, boolean()}                 | 
+%%          {is_ssl, boolean()}                |
+%%          {ssl_options, [SSLOpt]}            |
+%%          {pool_name, atom()}                |
+%%          {proxy_host, string()}             |
+%%          {proxy_port, integer()}            |
+%%          {proxy_user, string()}             |
+%%          {proxy_password, string()}         |
+%%          {use_absolute_uri, boolean()}      |
+%%          {basic_auth, {username(), password()}} |
+%%          {cookie, string()}                 |
+%%          {content_length, integer()}        |
+%%          {content_type, string()}           |
+%%          {save_response_to_file, srtf()}    |
+%%          {stream_to, process()}             |
+%%          {http_vsn, {MajorVsn, MinorVsn}}   |
+%%          {host_header, string()}            |
+%%          {transfer_encoding, {chunked, ChunkSize}}
+%% 
+%% process() = pid() | atom()
+%% username() = string()
+%% password() = string()
+%% SSLOpt = term()
+%% ChunkSize = integer()
+%% srtf() = boolean() | filename()
+%% filename() = string()
+%% 
+send_req(Url, Headers, Method, Body, Options) ->
+    send_req(Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/5. 
+%% All timeout values are in milliseconds.
+%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
+%% Timeout = integer() | infinity
+send_req(Url, Headers, Method, Body, Options, Timeout) ->
+    case catch parse_url(Url) of
+	#url{host = Host,
+	     port = Port} = Parsed_url ->
+	    Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
+			 [] ->
+			     get_lb_pid(Parsed_url);
+			 [#lb_pid{pid = Lb_pid_1}] ->
+			     Lb_pid_1
+		     end,
+	    Max_sessions = get_max_sessions(Host, Port, Options),
+	    Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
+	    Options_1 = merge_options(Host, Port, Options),
+	    {SSLOptions, IsSSL} =
+		case get_value(is_ssl, Options_1, false) of
+		    false -> {[], false};
+		    true -> {get_value(ssl_options, Options_1), true}
+		end,
+	    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+					     Max_sessions, 
+					     Max_pipeline_size,
+					     {SSLOptions, IsSSL}) of
+		{ok, Conn_Pid} ->
+		    do_send_req(Conn_Pid, Parsed_url, Headers,
+				Method, Body, Options_1, Timeout);
+		Err ->
+		    Err
+	    end;
+	Err ->
+	    {error, {url_parsing_failed, Err}}
+    end.
+
+merge_options(Host, Port, Options) ->
+    Config_options = get_config_value({options, Host, Port}, []),
+    lists:foldl(
+      fun({Key, Val}, Acc) ->
+			case lists:keysearch(Key, 1, Options) of
+			    false ->
+				[{Key, Val} | Acc];
+			    _ ->
+				Acc
+			end
+      end, Options, Config_options).
+
+get_lb_pid(Url) ->
+    gen_server:call(?MODULE, {get_lb_pid, Url}).
+
+get_max_sessions(Host, Port, Options) ->
+    get_value(max_sessions, Options,
+	      get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
+
+get_max_pipeline_size(Host, Port, Options) ->
+    get_value(max_pipeline_size, Options,
+	      get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
+
+%% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
+%% for achieving the same effect.
+set_dest(Host, Port, [{max_sessions, Max} | T]) ->
+    set_max_sessions(Host, Port, Max),
+    set_dest(Host, Port, T);
+set_dest(Host, Port, [{max_pipeline_size, Max} | T]) ->
+    set_max_pipeline_size(Host, Port, Max),
+    set_dest(Host, Port, T);
+set_dest(Host, Port, [{trace, Bool} | T]) when Bool == true; Bool == false ->
+    ibrowse ! {trace, true, Host, Port},
+    set_dest(Host, Port, T);
+set_dest(_Host, _Port, [H | _]) ->
+    exit({invalid_option, H});
+set_dest(_, _, []) ->
+    ok.
+    
+%% @doc Set the maximum number of connections allowed to a specific Host:Port.
+%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
+    gen_server:call(?MODULE, {set_config_value, {max_sessions, Host, Port}, Max}).
+
+%% @doc Set the maximum pipeline size for each connection to a specific Host:Port.
+%% @spec set_max_pipeline_size(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
+    gen_server:call(?MODULE, {set_config_value, {max_pipeline_size, Host, Port}, Max}).
+
+do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
+    case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
+					    Headers, Method, Body,
+					    Options, Timeout) of
+	{'EXIT', {timeout, _}} ->
+	    {error, req_timedout};
+	{'EXIT', Reason} ->
+	    {error, {'EXIT', Reason}};
+	Ret ->
+	    Ret
+    end.
+
+%% @doc Creates a HTTP client process to the specified Host:Port which
+%% is not part of the load balancing pool. This is useful in cases
+%% where some requests to a webserver might take a long time whereas
+%% some might take a very short time. To avoid getting these quick
+%% requests stuck in the pipeline behind time consuming requests, use
+%% this function to get a handle to a connection process. <br/>
+%% <b>Note:</b> Calling this function only creates a worker process. No connection
+%% is setup. The connection attempt is made only when the first
+%% request is sent via any of the send_req_direct/4,5,6,7 functions.<br/>
+%% <b>Note:</b> It is the responsibility of the calling process to control
+%% pipeline size on such connections.
+%%
+%% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
+spawn_worker_process(Host, Port) ->
+    ibrowse_http_client:start({Host, Port}).
+
+%% @doc Same as spawn_worker_process/2 except that the calling process
+%% is linked to the worker process which is spawned.
+spawn_link_worker_process(Host, Port) ->
+    ibrowse_http_client:start_link({Host, Port}).
+
+%% @doc Terminate a worker process spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2. Requests in
+%% progress will get the error response <pre>{error, closing_on_request}</pre>
+%% @spec stop_worker_process(Conn_pid::pid()) -> ok
+stop_worker_process(Conn_pid) ->
+    ibrowse_http_client:stop(Conn_pid).
+
+%% @doc Same as send_req/3 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method) ->
+    send_req_direct(Conn_pid, Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/4 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body) ->
+    send_req_direct(Conn_pid, Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/5 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
+    send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/6 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
+    case catch parse_url(Url) of
+	#url{host = Host,
+	     port = Port} = Parsed_url ->
+	    Options_1 = merge_options(Host, Port, Options),
+	    case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
+		{error, {'EXIT', {noproc, _}}} ->
+		    {error, worker_is_dead};
+		Ret ->
+		    Ret
+	    end;
+	Err ->
+	    {error, {url_parsing_failed, Err}}
+    end.
+    
+%% @doc Turn tracing on for the ibrowse process
+trace_on() ->
+    ibrowse ! {trace, true}.
+%% @doc Turn tracing off for the ibrowse process
+trace_off() ->
+    ibrowse ! {trace, false}.
+
+%% @doc Turn tracing on for all connections to the specified HTTP
+%% server. Host is whatever is specified as the domain name in the URL
+%% @spec trace_on(Host, Port) -> term() 
+%% Host = string() 
+%% Port = integer()
+trace_on(Host, Port) ->
+    ibrowse ! {trace, true, Host, Port}.
+
+%% @doc Turn tracing OFF for all connections to the specified HTTP
+%% server.
+%% @spec trace_off(Host, Port) -> term()
+trace_off(Host, Port) ->
+    ibrowse ! {trace, false, Host, Port}.
+
+%% @doc Shows some internal information about load balancing to a
+%% specified Host:Port. Info about workers spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2 is not
+%% included.
+show_dest_status(Host, Port) ->
+    case ets:lookup(ibrowse_lb, {Host, Port}) of
+	[] ->
+	    no_active_processes;
+	[#lb_pid{pid = Lb_pid}] ->
+	    io:format("Load Balancer Pid     : ~p~n", [Lb_pid]),
+	    io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+	    case lists:dropwhile(
+		   fun(Tid) ->
+			   ets:info(Tid, owner) /= Lb_pid
+		   end, ets:all()) of
+		[] ->
+		    io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+		[Tid | _] ->
+		    First = ets:first(Tid),
+		    Last = ets:last(Tid),
+		    Size = ets:info(Tid, size),
+		    io:format("LB ETS table id       : ~p~n", [Tid]),
+		    io:format("Num Connections       : ~p~n", [Size]),
+		    case Size of
+			0 ->
+			    ok;
+			_ ->
+			    {First_p_sz, _} = First,
+			    {Last_p_sz, _} = Last,
+			    io:format("Smallest pipeline     : ~1000.p~n", [First_p_sz]),
+			    io:format("Largest pipeline      : ~1000.p~n", [Last_p_sz])
+		    end
+	    end
+    end.
+
+%% @doc Clear current configuration for ibrowse and load from the file
+%% ibrowse.conf in the IBROWSE_EBIN/../priv directory. Current
+%% configuration is cleared only if the ibrowse.conf file is readable
+%% using file:consult/1
+rescan_config() ->
+    gen_server:call(?MODULE, rescan_config).
+
+%% Clear current configuration for ibrowse and load from the specified
+%% file. Current configuration is cleared only if the specified
+%% file is readable using file:consult/1
+rescan_config(File) when is_list(File) ->
+    gen_server:call(?MODULE, {rescan_config, File}).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State}          |
+%%          {ok, State, Timeout} |
+%%          ignore               |
+%%          {stop, Reason}
+%%--------------------------------------------------------------------
+init(_) ->
+    process_flag(trap_exit, true),
+    State = #state{},
+    put(my_trace_flag, State#state.trace),
+    put(ibrowse_trace_token, "ibrowse"),
+    ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
+    ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
+    import_config(),
+    {ok, #state{}}.
+
+import_config() ->
+    case code:priv_dir(ibrowse) of
+	{error, _} = Err ->
+	    Err;
+	PrivDir ->
+	    Filename = filename:join(PrivDir, "ibrowse.conf"),
+	    import_config(Filename)
+    end.
+
+import_config(Filename) ->
+    case file:consult(Filename) of
+	{ok, Terms} ->
+	    ets:delete_all_objects(ibrowse_conf),
+	    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
+		     when list(Host), integer(Port),
+		     integer(MaxSess), MaxSess > 0,
+		     integer(MaxPipe), MaxPipe > 0, list(Options) ->
+			  I = [{{max_sessions, Host, Port}, MaxSess},
+			       {{max_pipeline_size, Host, Port}, MaxPipe},
+			       {{options, Host, Port}, Options}],
+			  lists:foreach(
+			    fun({X, Y}) ->
+				    ets:insert(ibrowse_conf,
+					       #ibrowse_conf{key = X, 
+							     value = Y})
+			    end, I);
+		     ({K, V}) ->
+			  ets:insert(ibrowse_conf,
+				     #ibrowse_conf{key = K,
+						   value = V});
+		     (X) ->
+			  io:format("Skipping unrecognised term: ~p~n", [X])
+		  end,
+	    lists:foreach(Fun, Terms);
+	Err ->
+	    Err
+    end.
+
+%% @doc Internal export
+get_config_value(Key) ->
+    [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
+    V.
+
+%% @doc Internal export
+get_config_value(Key, DefVal) ->
+    case ets:lookup(ibrowse_conf, Key) of
+	[] ->
+	    DefVal;
+	[#ibrowse_conf{value = V}] ->
+	    V
+    end.
+
+set_config_value(Key, Val) ->
+    ets:insert(ibrowse_conf, #ibrowse_conf{key = Key, value = Val}).
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State}          |
+%%          {reply, Reply, State, Timeout} |
+%%          {noreply, State}               |
+%%          {noreply, State, Timeout}      |
+%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_call({get_lb_pid, #url{host = Host, port = Port} = Url}, _From, State) ->
+    Pid = do_get_connection(Url, ets:lookup(ibrowse_lb, {Host, Port})),
+    {reply, Pid, State};
+
+handle_call(stop, _From, State) ->
+    do_trace("IBROWSE shutting down~n", []),
+    {stop, normal, ok, State};
+
+handle_call({set_config_value, Key, Val}, _From, State) ->
+    set_config_value(Key, Val),
+    {reply, ok, State};
+
+handle_call(rescan_config, _From, State) ->
+    Ret = (catch import_config()),
+    {reply, Ret, State};
+
+handle_call({rescan_config, File}, _From, State) ->
+    Ret = (catch import_config(File)),
+    {reply, Ret, State};
+
+handle_call(Request, _From, State) ->
+    Reply = {unknown_request, Request},
+    {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({trace, Bool}, State) ->
+    put(my_trace_flag, Bool),
+    {noreply, State};
+
+handle_info({trace, Bool, Host, Port}, State) ->
+    Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
+	     when H == Host,
+		  P == Port ->
+		  catch Pid ! {trace, Bool};
+	     (#client_conn{key = {H, P, Pid}}, _)
+	     when H == Host,
+		  P == Port ->
+		  catch Pid ! {trace, Bool};
+	     (_, Acc) ->
+		  Acc
+	  end,
+    ets:foldl(Fun, undefined, ibrowse_lb),
+    ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
+					   value = Bool}),
+    {noreply, State};
+		     
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+    ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+do_get_connection(#url{host = Host, port = Port}, []) ->
+    {ok, Pid} = ibrowse_lb:start_link([Host, Port]),
+    ets:insert(ibrowse_lb, #lb_pid{host_port = {Host, Port}, pid = Pid}),
+    Pid;
+do_get_connection(_Url, [#lb_pid{pid = Pid}]) ->
+    Pid.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse.hrl
----------------------------------------------------------------------
diff --git a/ibrowse.hrl b/ibrowse.hrl
new file mode 100644
index 0000000..ebf3bb3
--- /dev/null
+++ b/ibrowse.hrl
@@ -0,0 +1,12 @@
+-ifndef(IBROWSE_HRL).
+-define(IBROWSE_HRL, "ibrowse.hrl").
+
+-record(url, {abspath, host, port, username, password, path, protocol}).
+
+-record(lb_pid, {host_port, pid}).
+
+-record(client_conn, {key, cur_pipeline_size = 0, reqs_served = 0}).
+
+-record(ibrowse_conf, {key, value}).
+
+-endif.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_app.erl
----------------------------------------------------------------------
diff --git a/ibrowse_app.erl b/ibrowse_app.erl
new file mode 100644
index 0000000..f5e523c
--- /dev/null
+++ b/ibrowse_app.erl
@@ -0,0 +1,64 @@
+%%%-------------------------------------------------------------------
+%%% File    : ibrowse_app.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : 
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_app).
+-vsn('$Id: ibrowse_app.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
+
+-behaviour(application).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+	 start/2,
+	 stop/1
+        ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+        ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: start/2
+%% Returns: {ok, Pid}        |
+%%          {ok, Pid, State} |
+%%          {error, Reason}   
+%%--------------------------------------------------------------------
+start(_Type, _StartArgs) ->
+    case ibrowse_sup:start_link() of
+	{ok, Pid} -> 
+	    {ok, Pid};
+	Error ->
+	    Error
+    end.
+
+%%--------------------------------------------------------------------
+%% Func: stop/1
+%% Returns: any 
+%%--------------------------------------------------------------------
+stop(_State) ->
+    ok.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
new file mode 100644
index 0000000..9a0e4d3
--- /dev/null
+++ b/ibrowse_http_client.erl
@@ -0,0 +1,1312 @@
+%%%-------------------------------------------------------------------
+%%% File    : ibrowse_http_client.erl
+%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%% Description : The name says it all
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_http_client).
+-vsn('$Id: ibrowse_http_client.erl,v 1.18 2008/05/21 15:28:11 chandrusf Exp $ ').
+
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+	 start_link/1,
+	 start/1,
+	 stop/1,
+	 send_req/7
+	]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+%% gen_server callbacks
+-export([
+	 init/1,
+	 handle_call/3,
+	 handle_cast/2,
+	 handle_info/2,
+	 terminate/2,
+	 code_change/3
+	]).
+
+-include("ibrowse.hrl").
+
+-record(state, {host, port, 
+		use_proxy = false, proxy_auth_digest,
+		ssl_options = [], is_ssl = false, socket, 
+		reqs=queue:new(), cur_req, status=idle, http_status_code, 
+		reply_buffer=[], rep_buf_size=0, recvd_headers=[],
+		is_closing, send_timer, content_length,
+		deleted_crlf = false, transfer_encoding, chunk_size, 
+		chunks=[], lb_ets_tid, cur_pipeline_size = 0}).
+
+-record(request, {url, method, options, from,
+		  stream_to, req_id,
+		  save_response_to_file = false,
+		  tmp_file_name, tmp_file_fd}).
+
+-import(ibrowse_lib, [
+		      parse_url/1,
+		      printable_date/0,
+		      get_value/2,
+		      get_value/3,
+		      do_trace/2
+		     ]).
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start(Args) ->
+    gen_server:start(?MODULE, Args, []).
+
+start_link(Args) ->
+    gen_server:start_link(?MODULE, Args, []).
+
+stop(Conn_pid) ->
+    gen_server:call(Conn_pid, stop).
+
+send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
+    Timeout_1 = case Timeout of
+		    infinity ->
+			infinity;
+		    _ when is_integer(Timeout) ->
+			Timeout + 100
+		end,
+    gen_server:call(
+      Conn_Pid,
+      {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout_1).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State}          |
+%%          {ok, State, Timeout} |
+%%          ignore               |
+%%          {stop, Reason}
+%%--------------------------------------------------------------------
+init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
+    State = #state{host = Host,
+		   port = Port,
+		   ssl_options = SSLOptions,
+		   is_ssl = Is_ssl,
+		   lb_ets_tid = Lb_Tid},
+    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+    {ok, State};
+init({Host, Port}) ->
+    State = #state{host = Host,
+		   port = Port},
+    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State}          |
+%%          {reply, Reply, State, Timeout} |
+%%          {noreply, State}               |
+%%          {noreply, State, Timeout}      |
+%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+%% Received a request when the remote server has already sent us a
+%% Connection: Close header
+handle_call({send_req, _}, 
+	    _From,
+	    #state{is_closing=true}=State) ->
+    {reply, {error, connection_closing}, State};
+
+handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}}, 
+	    From,
+	    #state{socket=undefined,
+		   host=Host, port=Port}=State) ->
+    {Host_1, Port_1, State_1} =
+	case get_value(proxy_host, Options, false) of
+	    false ->
+		{Host, Port, State};
+	    PHost ->
+		ProxyUser = get_value(proxy_user, Options, []),
+		ProxyPassword = get_value(proxy_password, Options, []),
+		Digest = http_auth_digest(ProxyUser, ProxyPassword),
+		{PHost, get_value(proxy_port, Options, 80),
+		 State#state{use_proxy = true,
+			     proxy_auth_digest = Digest}}
+	end,
+    StreamTo = get_value(stream_to, Options, undefined),
+    ReqId = make_req_id(),
+    SaveResponseToFile = get_value(save_response_to_file, Options, false),
+    NewReq = #request{url=Url, 
+		      method=Method,
+		      stream_to=StreamTo,
+		      options=Options, 
+		      req_id=ReqId,
+		      save_response_to_file = SaveResponseToFile,
+		      from=From},
+    Reqs = queue:in(NewReq, State#state.reqs),
+    State_2 = check_ssl_options(Options, State_1#state{reqs = Reqs}),
+    do_trace("Connecting...~n", []),
+    Timeout_1 = case Timeout of
+		    infinity ->
+			infinity;
+		    _ ->
+			round(Timeout*0.9)
+		end,
+    case do_connect(Host_1, Port_1, Options, State_2, Timeout_1) of
+	{ok, Sock} ->
+	    Ref = case Timeout of
+		      infinity ->
+			  undefined;
+		      _ ->
+			  erlang:send_after(Timeout, self(), {req_timedout, From})
+		  end,
+	    do_trace("Connected!~n", []),
+	    case send_req_1(Url, Headers, Method, Body, Options, Sock, State_2) of
+		ok ->
+		    case StreamTo of
+			undefined ->
+			    ok;
+			_ ->
+			    gen_server:reply(From, {ibrowse_req_id, ReqId})
+		    end,
+		    State_3 = inc_pipeline_counter(State_2#state{socket = Sock,
+								 send_timer = Ref,
+								 cur_req = NewReq,
+								 status = get_header}),
+		    {noreply, State_3};
+		Err ->
+		    shutting_down(State_2),
+		    do_trace("Send failed... Reason: ~p~n", [Err]),
+		    gen_server:reply(From, {error, send_failed}),
+		    {stop, normal, State_2}
+	    end;
+	Err ->
+	    shutting_down(State_2),
+	    do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+	    gen_server:reply(From, {error, conn_failed}),
+	    {stop, normal, State_2}
+    end;
+
+%% Request which is to be pipelined
+handle_call({send_req, {Url, Headers, Method,
+			 Body, Options, Timeout}},
+	    From,
+	    #state{socket=Sock, status=Status, reqs=Reqs}=State) ->
+    do_trace("Recvd request in connected state. Status -> ~p NumPending: ~p~n", [Status, length(queue:to_list(Reqs))]),
+    StreamTo = get_value(stream_to, Options, undefined),
+    SaveResponseToFile = get_value(save_response_to_file, Options, false),
+    ReqId = make_req_id(),
+    NewReq = #request{url=Url, 
+		      stream_to=StreamTo,
+		      method=Method,
+		      options=Options, 
+		      req_id=ReqId,
+		      save_response_to_file = SaveResponseToFile,
+		      from=From},
+    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+    case send_req_1(Url, Headers, Method, Body, Options, Sock, State_1) of
+	ok ->
+	    State_2 = inc_pipeline_counter(State_1),
+	    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+	    case Timeout of
+		infinity ->
+		    ok;
+		_ ->
+		    erlang:send_after(Timeout, self(), {req_timedout, From})
+	    end,
+	    State_3 = case Status of
+			  idle ->
+			      State_2#state{status = get_header,
+					    cur_req = NewReq};
+			  _ ->
+			      State_2
+		      end,
+	    case StreamTo of
+		undefined ->
+		    ok;
+		_ ->
+		    gen_server:reply(From, {ibrowse_req_id, ReqId})
+	    end,
+	    {noreply, State_3};
+	Err ->
+	    shutting_down(State_1),
+	    do_trace("Send request failed: Reason: ~p~n", [Err]),
+	    gen_server:reply(From, {error, send_failed}),
+	    do_error_reply(State, send_failed),
+	    {stop, normal, State_1}
+    end;
+
+handle_call(stop, _From, #state{socket = Socket, is_ssl = Is_ssl} = State) ->
+    do_close(Socket, Is_ssl),
+    do_error_reply(State, closing_on_request),
+    {stop, normal, State};
+
+handle_call(Request, _From, State) ->
+    Reply = {unknown_request, Request},
+    {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({tcp, _Sock, Data}, State) ->
+    handle_sock_data(Data, State);
+handle_info({ssl, _Sock, Data}, State) ->
+    handle_sock_data(Data, State);
+
+handle_info({tcp_closed, _Sock}, State) ->
+    do_trace("TCP connection closed by peer!~n", []),
+    handle_sock_closed(State),
+    {stop, normal, State};
+handle_info({ssl_closed, _Sock}, State) ->
+    do_trace("SSL connection closed by peer!~n", []),
+    handle_sock_closed(State),
+    {stop, normal, State};
+
+handle_info({req_timedout, From}, State) ->
+    case lists:keysearch(From, #request.from, queue:to_list(State#state.reqs)) of
+       false ->
+          {noreply, State};
+       {value, _} ->
+          shutting_down(State),
+          do_error_reply(State, req_timedout),
+          {stop, normal, State}
+    end;
+
+handle_info({trace, Bool}, State) ->
+    put(my_trace_flag, Bool),
+    {noreply, State};
+
+handle_info(Info, State) ->
+    io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, State) ->
+    case State#state.socket of
+	undefined ->
+	    ok;
+	Sock ->
+	    do_close(Sock, State#state.is_ssl)
+    end.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Handles data recvd on the socket
+%%--------------------------------------------------------------------
+handle_sock_data(Data, #state{status=idle}=State) ->
+    do_trace("Data recvd on socket in state idle!. ~1000.p~n", [Data]),
+    shutting_down(State),
+    do_error_reply(State, data_in_status_idle),
+    do_close(State#state.socket, State#state.is_ssl),
+    {stop, normal, State};
+
+handle_sock_data(Data, #state{status=get_header, socket=Sock}=State) ->
+    case parse_response(Data, State) of
+	{error, _Reason} ->
+	    shutting_down(State),
+	    {stop, normal, State};
+	stop ->
+	    shutting_down(State),
+	    {stop, normal, State};
+	State_1 ->
+	    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+	    {noreply, State_1}
+    end;
+
+handle_sock_data(Data, #state{status=get_body, content_length=CL,
+			      http_status_code = StatCode,
+			      recvd_headers=Headers, 
+			      chunk_size=CSz, socket=Sock}=State) ->
+    case (CL == undefined) and (CSz == undefined) of
+	true ->
+	    case accumulate_response(Data, State) of
+		{error, Reason} ->
+		    shutting_down(State),
+		    fail_pipelined_requests(State, 
+					    {error, {Reason, {stat_code, StatCode}, Headers}}),
+		    {stop, normal, State};
+		State_1 ->
+		    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+		    {noreply, State_1}
+	    end;
+	_ ->
+	    case parse_11_response(Data, State) of
+		{error, Reason} ->
+		    shutting_down(State),
+		    fail_pipelined_requests(State, 
+					    {error, {Reason, {stat_code, StatCode}, Headers}}),
+		    {stop, normal, State};
+		stop ->
+		    shutting_down(State),
+		    {stop, normal, State};
+		State_1 ->
+		    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+		    {noreply, State_1}
+	    end
+    end.
+
+accumulate_response(Data,
+		    #state{
+		      cur_req = #request{save_response_to_file = SaveResponseToFile,
+					 tmp_file_fd = undefined} = CurReq,
+		      http_status_code=[$2 | _]}=State) when SaveResponseToFile /= false ->
+    TmpFilename = case SaveResponseToFile of
+		      true -> make_tmp_filename();
+		      F -> F
+		  end,
+    case file:open(TmpFilename, [write, delayed_write, raw]) of
+	{ok, Fd} ->
+	    accumulate_response(Data, State#state{
+					cur_req = CurReq#request{
+						    tmp_file_fd = Fd,
+						    tmp_file_name = TmpFilename}});
+	{error, Reason} ->
+	    {error, {file_open_error, Reason}}
+    end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = SaveResponseToFile,
+						    tmp_file_fd = Fd},
+				 transfer_encoding=chunked,
+				 chunks = Chunks,
+				 http_status_code=[$2 | _]
+				} = State) when SaveResponseToFile /= false ->
+    case file:write(Fd, [Chunks | Data]) of
+	ok ->
+	    State#state{chunks = []};
+	{error, Reason} ->
+	    {error, {file_write_error, Reason}}
+    end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = SaveResponseToFile,
+						    tmp_file_fd = Fd},
+				 reply_buffer = RepBuf,
+				 http_status_code=[$2 | _]
+				} = State) when SaveResponseToFile /= false ->
+    case file:write(Fd, [RepBuf | Data]) of
+	ok ->
+	    State#state{reply_buffer = []};
+	{error, Reason} ->
+	    {error, {file_write_error, Reason}}
+    end;
+accumulate_response([], State) ->
+    State;
+accumulate_response(Data, #state{reply_buffer = RepBuf,
+				 cur_req = CurReq}=State) ->
+    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+    case StreamTo of
+	undefined ->
+	    State#state{reply_buffer = [Data | RepBuf]};
+	_ ->
+	    do_interim_reply(StreamTo, ReqId, Data),
+	    State
+    end.
+
+make_tmp_filename() ->
+    DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
+    {A,B,C} = now(),
+    filename:join([DownloadDir,
+		   "ibrowse_tmp_file_"++
+		   integer_to_list(A) ++
+		   integer_to_list(B) ++
+		   integer_to_list(C)]).
+
+
+%%--------------------------------------------------------------------
+%% Handles the case when the server closes the socket
+%%--------------------------------------------------------------------
+handle_sock_closed(#state{status=get_header}=State) ->
+    shutting_down(State),
+    do_error_reply(State, connection_closed);
+
+handle_sock_closed(#state{cur_req=undefined} = State) ->
+    shutting_down(State);
+
+%% We check IsClosing because the server could have sent a
+%% "Connection: close" header and closed the socket to mark the end of
+%% the response; there may still be pipelined requests awaiting replies.
+handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
+			  is_closing=IsClosing,
+			  cur_req=#request{tmp_file_name=TmpFilename,
+					   tmp_file_fd=Fd} = CurReq,
+			  status=get_body, recvd_headers=Headers}=State) ->
+    #request{from=From, stream_to=StreamTo, req_id=ReqId} = CurReq,
+    case IsClosing of
+	true ->
+	    {_, Reqs_1} = queue:out(Reqs),
+	    case TmpFilename of
+		undefined ->
+		    do_reply(State, From, StreamTo, ReqId,
+			     {ok, SC, Headers,
+			      lists:flatten(lists:reverse(Buf))});
+		_ ->
+		    file:close(Fd),
+		    do_reply(State, From, StreamTo, ReqId,
+			     {ok, SC, Headers, {file, TmpFilename}})
+	    end,
+	    do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
+	    State;
+	_ ->
+	    do_error_reply(State, connection_closed),
+	    State
+    end.
+
+do_connect(Host, Port, _Options, #state{is_ssl=true, ssl_options=SSLOptions}, Timeout) ->
+    ssl:connect(Host, Port, [{nodelay, true}, {active, false} | SSLOptions], Timeout);
+do_connect(Host, Port, _Options, _State, Timeout) ->
+    gen_tcp:connect(Host, Port, [{nodelay, true}, {active, false}], Timeout).
+
+do_send(Sock, Req, true)  ->  ssl:send(Sock, Req);
+do_send(Sock, Req, false) ->  gen_tcp:send(Sock, Req).
+
+%% @spec do_send_body(Sock::socket_descriptor(), Source::source_descriptor(), IsSSL::boolean()) -> ok | error()
+%% source_descriptor() = fun_arity_0           |
+%%                       {fun_arity_0}         |
+%%                       {fun_arity_1, term()}
+%% error() = term()
+do_send_body(Sock, Source, IsSSL) when is_function(Source) ->
+    do_send_body(Sock, {Source}, IsSSL);
+do_send_body(Sock, {Source}, IsSSL) when is_function(Source) ->
+    do_send_body1(Sock, Source, IsSSL, Source());
+do_send_body(Sock, {Source, State}, IsSSL) when is_function(Source) ->
+    do_send_body1(Sock, Source, IsSSL, Source(State));
+do_send_body(Sock, Body, IsSSL) ->
+    do_send(Sock, Body, IsSSL).
+
+do_send_body1(Sock, Source, IsSSL, Resp) ->
+    case Resp of
+	{ok, Data} ->
+	    do_send(Sock, Data, IsSSL),
+	    do_send_body(Sock, {Source}, IsSSL);
+	{ok, Data, NewState} ->
+	    do_send(Sock, Data, IsSSL),
+	    do_send_body(Sock, {Source, NewState}, IsSSL);
+	eof -> ok;
+	Err -> Err
+    end.
+
+do_close(Sock, true)  ->  ssl:close(Sock);
+do_close(Sock, false) ->  gen_tcp:close(Sock).
+
+do_setopts(Sock, Opts, true)  ->  ssl:setopts(Sock, Opts);
+do_setopts(Sock, Opts, false) ->  inet:setopts(Sock, Opts).
+
+check_ssl_options(Options, State) ->
+    case get_value(is_ssl, Options, false) of
+	false ->
+	    State;
+	true ->
+	    State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
+    end.
+
+send_req_1(#url{abspath = AbsPath,
+		host = Host,
+		port = Port, 
+		path = RelPath} = Url,
+	   Headers, Method, Body, Options, Sock, State) ->
+    Headers_1 = add_auth_headers(Url, Options, Headers, State),
+    HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
+			  false ->
+			      case Port of
+				  80 -> Host;
+				  _ -> [Host, ":", integer_to_list(Port)]
+			      end;
+			  {value, {_, Host_h_val}} ->
+			      Host_h_val
+		      end,
+    {Req, Body_1} = make_request(Method, 
+				 [{"Host", HostHeaderValue} | Headers_1],
+				 AbsPath, RelPath, Body, Options, State#state.use_proxy),
+    case get(my_trace_flag) of 
+	true ->
+	    %%Avoid the binary operations if trace is not on...
+	    NReq = binary_to_list(list_to_binary(Req)),
+	    do_trace("Sending request: ~n"
+		     "--- Request Begin ---~n~s~n"
+		     "--- Request End ---~n", [NReq]);
+	_ -> ok
+    end,
+    SndRes = case do_send(Sock, Req, State#state.is_ssl) of
+		 ok -> do_send_body(Sock, Body_1, State#state.is_ssl);
+		 Err -> 
+		     io:format("Err: ~p~n", [Err]),
+		     Err
+	     end,
+    do_setopts(Sock, [{active, true}], State#state.is_ssl),
+    SndRes.
+
+add_auth_headers(#url{username = User,
+		      password = UPw}, 
+		 Options,
+		 Headers, 
+		 #state{use_proxy = UseProxy,
+		        proxy_auth_digest = ProxyAuthDigest}) ->
+    Headers_1 = case User of
+		    undefined ->
+			case get_value(basic_auth, Options, undefined) of
+			    undefined ->
+				Headers;
+			    {U,P} ->
+				[{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
+			end;
+		    _ ->
+			[{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
+		end,
+    case UseProxy of
+	false ->
+	    Headers_1;
+	true when ProxyAuthDigest == [] ->
+	    Headers_1;
+	true ->
+	    [{"Proxy-Authorization", ["Basic ", ProxyAuthDigest]} | Headers_1]
+    end.
+			
+http_auth_digest([], []) ->
+    [];
+http_auth_digest(Username, Password) ->
+    encode_base64(Username ++ [$: | Password]).
+
+encode_base64([]) ->
+    [];
+encode_base64([A]) ->
+    [e(A bsr 2), e((A band 3) bsl 4), $=, $=];
+encode_base64([A,B]) ->
+    [e(A bsr 2), e(((A band 3) bsl 4) bor (B bsr 4)), e((B band 15) bsl 2), $=];
+encode_base64([A,B,C|Ls]) ->
+    encode_base64_do(A,B,C, Ls).
+encode_base64_do(A,B,C, Rest) ->
+    BB = (A bsl 16) bor (B bsl 8) bor C,
+    [e(BB bsr 18), e((BB bsr 12) band 63), 
+     e((BB bsr 6) band 63), e(BB band 63)|encode_base64(Rest)].
+
+e(X) when X >= 0, X < 26 -> X+65;
+e(X) when X>25, X<52     -> X+71;
+e(X) when X>51, X<62     -> X-4;
+e(62)                    -> $+;
+e(63)                    -> $/;
+e(X)                     -> exit({bad_encode_base64_token, X}).
+
+make_request(Method, Headers, AbsPath, RelPath, Body, Options, UseProxy) ->
+    HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
+    Headers_1 =
+	case get_value(content_length, Headers, false) of
+	    false when (Body == []) or
+	               (Body == <<>>) or
+	               is_tuple(Body) or
+	               is_function(Body) ->
+		Headers;
+	    false when is_binary(Body) ->
+		[{"content-length", integer_to_list(size(Body))} | Headers];
+	    false ->
+		[{"content-length", integer_to_list(length(Body))} | Headers];
+	    _ ->
+		Headers
+	end,
+    {Headers_2, Body_1} = 
+	case get_value(transfer_encoding, Options, false) of
+	    false ->
+		{Headers_1, Body};
+	    {chunked, ChunkSize} ->
+		{[{X, Y} || {X, Y} <- Headers_1, 
+			    X /= "Content-Length",
+			    X /= "content-length",
+			    X /= content_length] ++
+		 [{"Transfer-Encoding", "chunked"}],
+		 chunk_request_body(Body, ChunkSize)}
+	end,
+    Headers_3 = cons_headers(Headers_2),
+    Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
+	      true ->
+		  AbsPath;
+	      false -> 
+		  RelPath
+	  end,
+    {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
+
+http_vsn_string({0,9}) -> "HTTP/0.9";
+http_vsn_string({1,0}) -> "HTTP/1.0";
+http_vsn_string({1,1}) -> "HTTP/1.1".
+
+cons_headers(Headers) ->
+    cons_headers(Headers, []).
+cons_headers([], Acc) ->
+    encode_headers(Acc);
+cons_headers([{basic_auth, {U,P}} | T], Acc) ->
+    cons_headers(T, [{"Authorization",
+		      ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
+cons_headers([{cookie, Cookie} | T], Acc) ->
+    cons_headers(T, [{"Cookie", Cookie} | Acc]);
+cons_headers([{content_length, L} | T], Acc) ->
+    cons_headers(T, [{"Content-Length", L} | Acc]);
+cons_headers([{content_type, L} | T], Acc) ->
+    cons_headers(T, [{"Content-Type", L} | Acc]);
+cons_headers([H | T], Acc) ->
+    cons_headers(T, [H | Acc]).
+
+encode_headers(L) ->
+    encode_headers(L, []).
+encode_headers([{http_vsn, _Val} | T], Acc) ->
+    encode_headers(T, Acc);
+encode_headers([{Name,Val} | T], Acc) when list(Name) ->
+    encode_headers(T, [[Name, ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([{Name,Val} | T], Acc) when atom(Name) ->
+    encode_headers(T, [[atom_to_list(Name), ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([], Acc) ->
+    lists:reverse(Acc).
+
+chunk_request_body(Body, ChunkSize) ->
+    chunk_request_body(Body, ChunkSize, []).
+
+chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
+    LastChunk = "0\r\n",
+    lists:reverse(["\r\n", LastChunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when binary(Body),
+                                              size(Body) >= ChunkSize ->
+    <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
+    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+	     ChunkBody, "\r\n"],
+    chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when binary(Body) ->
+    BodySize = size(Body),
+    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+	     Body, "\r\n"],
+    LastChunk = "0\r\n",
+    lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when list(Body),
+                                              length(Body) >= ChunkSize ->
+    {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
+    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+	     ChunkBody, "\r\n"],
+    chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when list(Body) ->
+    BodySize = length(Body),
+    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+	     Body, "\r\n"],
+    LastChunk = "0\r\n",
+    lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
+
+
+parse_response(_Data, #state{cur_req = undefined}=State) ->
+    State#state{status = idle};
+parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
+			    cur_req=CurReq}=State) ->
+    #request{from=From, stream_to=StreamTo, req_id=ReqId,
+	     method=Method} = CurReq,
+    MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
+    case scan_header(Data, Acc) of
+	{yes, Headers, Data_1}  ->
+	    do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
+	    do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
+	    {HttpVsn, StatCode, Headers_1} = parse_headers(Headers),
+	    do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
+	    LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
+	    ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
+	    IsClosing = is_connection_closing(HttpVsn, ConnClose),
+	    case IsClosing of
+		true ->
+                    shutting_down(State);
+		false ->
+		    ok
+	    end,
+	    State_1 = State#state{recvd_headers=Headers_1, status=get_body, 
+				  http_status_code=StatCode, is_closing=IsClosing},
+	    put(conn_close, ConnClose),
+	    TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+	    case get_value("content-length", LCHeaders, undefined) of
+		_ when Method == head ->
+		    {_, Reqs_1} = queue:out(Reqs),
+		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, {ok, StatCode, Headers_1, []}),
+		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+		    State_2 = reset_state(State_1_1),
+		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+		    parse_response(Data_1, State_3);
+		_ when hd(StatCode) == $1 ->
+		    %% No message body is expected. Server may send
+		    %% one or more 1XX responses before a proper
+		    %% response.
+		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+		    do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
+		    parse_response(Data_1, State_1#state{recvd_headers = [],
+							 status = get_header});
+		_ when StatCode == "204";
+		       StatCode == "304" ->
+		    %% No message body is expected for these Status Codes.
+		    %% RFC2616 - Sec 4.4
+		    {_, Reqs_1} = queue:out(Reqs),
+		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, {ok, StatCode, Headers_1, []}),
+		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+		    State_2 = reset_state(State_1_1),
+		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+		    parse_response(Data_1, State_3);
+		_ when TransferEncoding == "chunked" ->
+		    do_trace("Chunked encoding detected...~n",[]),
+		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+		    case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
+								 chunk_size=chunk_start,
+								 reply_buffer=[], chunks=[]}) of
+			{error, Reason} ->
+			    fail_pipelined_requests(State_1, 
+						    {error, {Reason,
+							     {stat_code, StatCode}, Headers_1}}),
+			    {error, Reason};
+			State_2 ->
+			    State_2
+		    end;
+		undefined when HttpVsn == "HTTP/1.0";
+			       ConnClose == "close" ->
+		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+		    State_1#state{reply_buffer=[Data_1]};
+		undefined ->
+		    fail_pipelined_requests(State_1, 
+					    {error, {content_length_undefined,
+						     {stat_code, StatCode}, Headers}}),
+		    {error, content_length_undefined};
+		V ->
+		    case catch list_to_integer(V) of
+			V_1 when integer(V_1), V_1 >= 0 ->
+			    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
+			    do_trace("Recvd Content-Length of ~p~n", [V_1]),
+			    State_2 = State_1#state{rep_buf_size=0,
+						    reply_buffer=[],
+						    content_length=V_1},
+			    case parse_11_response(Data_1, State_2) of
+				{error, Reason} ->
+				    fail_pipelined_requests(State_1, 
+							    {error, {Reason,
+								     {stat_code, StatCode}, Headers_1}}),
+				    {error, Reason};
+				State_3 ->
+				    State_3
+			    end;
+			_ ->
+			    fail_pipelined_requests(State_1, 
+					    {error, {content_length_undefined,
+						     {stat_code, StatCode}, Headers}}),
+			    {error, content_length_undefined}
+		    end
+	    end;
+	{no, Acc_1} when MaxHeaderSize == infinity ->
+	    State#state{reply_buffer=Acc_1};
+	{no, Acc_1} when length(Acc_1) < MaxHeaderSize ->
+	    State#state{reply_buffer=Acc_1};
+	{no, _Acc_1} ->
+	    fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
+	    {error, max_headers_size_exceeded}
+    end.
+
+is_connection_closing("HTTP/0.9", _)       -> true;
+is_connection_closing(_, "close")          -> true;
+is_connection_closing("HTTP/1.0", "false") -> true;
+is_connection_closing(_, _)                -> false.
+
+%% This clause determines the chunk size when given data from the beginning of the chunk
+parse_11_response(DataRecvd, 
+		  #state{transfer_encoding=chunked,
+			 chunk_size=chunk_start,
+			 cur_req=CurReq,
+			 reply_buffer=Buf}=State) ->
+    case scan_crlf(DataRecvd, Buf) of
+	{yes, ChunkHeader, Data_1} ->
+	    case parse_chunk_header(ChunkHeader) of
+		{error, Reason} ->
+		    {error, Reason};
+		ChunkSize ->
+		    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+		    %%
+		    %% Do we have to preserve the chunk encoding when streaming?
+		    %%
+		    do_interim_reply(StreamTo, ReqId, {chunk_start, ChunkSize}),
+		    RemLen = length(Data_1),
+		    do_trace("Determined chunk size: ~p. Already recvd: ~p~n", [ChunkSize, RemLen]),
+		    parse_11_response(Data_1, State#state{rep_buf_size=0, 
+							  reply_buffer=[],
+							  deleted_crlf=true,
+							  chunk_size=ChunkSize})
+	    end;
+	{no, Data_1} ->
+	    State#state{reply_buffer=Data_1, rep_buf_size=length(Data_1)}
+    end;
+
+%% This clause is there to remove the CRLF between two chunks
+%% 
+parse_11_response(DataRecvd, 
+		  #state{transfer_encoding=chunked,
+			 chunk_size=tbd,
+			 chunks = Chunks,
+			 cur_req=CurReq,
+			 reply_buffer=Buf}=State) ->
+    case scan_crlf(DataRecvd, Buf) of
+	{yes, _, NextChunk} ->
+	    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+	    %%
+	    %% Do we have to preserve the chunk encoding when streaming?
+	    %%
+	    State_1 = State#state{chunk_size=chunk_start,
+				  rep_buf_size=0, 
+				  reply_buffer=[],
+				  deleted_crlf=true},
+	    State_2 = case StreamTo of
+			  undefined ->
+			      State_1#state{chunks = [Buf | Chunks]};
+			  _ ->
+			      do_interim_reply(StreamTo, ReqId, chunk_end),
+			      State_1
+		      end,
+	    parse_11_response(NextChunk, State_2);
+	{no, Data_1} ->
+	    State#state{reply_buffer=Data_1, rep_buf_size=length(Data_1)}
+    end;
+
+%% This clause deals with the end of a chunked transfer
+parse_11_response(DataRecvd, 
+		  #state{transfer_encoding=chunked, chunk_size=0,
+			 cur_req=CurReq,
+			 deleted_crlf = DelCrlf,
+			 reply_buffer=Trailer, reqs=Reqs}=State) ->
+    do_trace("Detected end of chunked transfer...~n", []),
+    DataRecvd_1 = case DelCrlf of
+		      false -> 
+			  DataRecvd;
+		      true ->
+			  [$\r, $\n | DataRecvd]
+		  end,
+    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+    case scan_header(DataRecvd_1, Trailer) of
+	{yes, _TEHeaders, Rem} ->
+	    {_, Reqs_1} = queue:out(Reqs),
+	    %%
+	    %% Do we have to preserve the chunk encoding when streaming?
+	    %%
+	    do_interim_reply(StreamTo, ReqId, chunk_end),
+	    State_1 = handle_response(CurReq, State#state{reqs=Reqs_1}),
+	    parse_response(Rem, reset_state(State_1));
+	{no, Rem} ->
+	    State#state{reply_buffer=Rem, rep_buf_size=length(Rem), deleted_crlf=false}
+    end;
+
+%% This clause extracts a chunk, given the size.
+parse_11_response(DataRecvd, 
+		  #state{transfer_encoding=chunked, chunk_size=CSz,
+			 rep_buf_size=RepBufSz}=State) ->
+    NeedBytes = CSz - RepBufSz,
+    DataLen = length(DataRecvd),
+    do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
+    case DataLen >= NeedBytes of
+	true ->
+	    {RemChunk, RemData} = split_list_at(DataRecvd, NeedBytes),
+	    do_trace("Recvd another chunk...~n", []),
+	    do_trace("RemData -> ~p~n", [RemData]),
+	    case accumulate_response(RemChunk, State) of
+		{error, Reason} ->
+		    do_trace("Error accumulating response --> ~p~n", [Reason]),
+		    {error, Reason};
+		#state{reply_buffer = NewRepBuf,
+		       chunks = NewChunks} = State_1 ->
+		    State_2 = State_1#state{reply_buffer=[],
+					    chunks = [lists:reverse(NewRepBuf) | NewChunks],
+					    rep_buf_size=0,
+					    chunk_size=tbd},
+		    parse_11_response(RemData, State_2)
+	    end;
+	false ->
+	    accumulate_response(DataRecvd, State#state{rep_buf_size=RepBufSz + DataLen})
+    end;
+
+%% This clause extracts the body when a Content-Length is specified
+parse_11_response(DataRecvd, 
+		  #state{content_length=CL, rep_buf_size=RepBufSz, 
+			 reqs=Reqs}=State) ->
+    NeedBytes = CL - RepBufSz,
+    DataLen = length(DataRecvd),
+    case DataLen >= NeedBytes of
+	true ->
+	    {RemBody, Rem} = split_list_at(DataRecvd, NeedBytes),
+	    {_, Reqs_1} = queue:out(Reqs),
+	    State_1 = accumulate_response(RemBody, State),
+	    State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
+	    State_3 = reset_state(State_2),
+	    parse_response(Rem, State_3);
+	false ->
+	    accumulate_response(DataRecvd, State#state{rep_buf_size=RepBufSz+DataLen})
+    end.
+
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+			 save_response_to_file = SaveResponseToFile, 
+			 tmp_file_name = TmpFilename,
+			 tmp_file_fd = Fd
+			},
+		#state{http_status_code = SCode,
+		       send_timer = ReqTimer,
+		       reply_buffer = RepBuf,
+		       transfer_encoding = TEnc,
+		       chunks = Chunks,
+		       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
+    Body = case TEnc of
+	       chunked ->
+		   lists:flatten(lists:reverse(Chunks));
+	       _ ->
+		   lists:flatten(lists:reverse(RepBuf))
+	   end,
+    State_1 = set_cur_request(State),
+    file:close(Fd),
+    ResponseBody = case TmpFilename of
+		       undefined ->
+			   Body;
+		       _ ->
+			   {file, TmpFilename}
+		   end,
+    State_2 = do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, ResponseBody}),
+    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+    State_2;
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId},
+		#state{http_status_code=SCode, recvd_headers=RespHeaders,
+		       reply_buffer=RepBuf, transfer_encoding=TEnc,
+		       chunks=Chunks, send_timer=ReqTimer}=State) ->
+    Body = case TEnc of
+	       chunked ->
+		   lists:flatten(lists:reverse(Chunks));
+	       _ ->
+		   lists:flatten(lists:reverse(RepBuf))
+	   end,
+    State_1 = set_cur_request(State),
+    case get(conn_close) of
+	"close" ->
+	    do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, Body}),
+	    exit(normal);
+	_ ->
+	    State_2 = do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, Body}),
+	    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+	    State_2
+    end.
+
+reset_state(State) ->
+    State#state{status=get_header, rep_buf_size=0,content_length=undefined,
+		reply_buffer=[], chunks=[], recvd_headers=[], deleted_crlf=false,
+		http_status_code=undefined, chunk_size=undefined, transfer_encoding=undefined}.
+
+set_cur_request(#state{reqs = Reqs} = State) ->
+    case queue:to_list(Reqs) of
+	[] ->
+	    State#state{cur_req = undefined};
+	[NextReq | _] ->
+	    State#state{cur_req = NextReq}
+    end.
+
+parse_headers(Headers) ->
+    case scan_crlf(Headers, []) of
+	{yes, StatusLine, T} ->
+	    Headers_1 = parse_headers_1(T),
+	    case parse_status_line(StatusLine) of
+		{ok, HttpVsn, StatCode, _Msg} ->
+		    put(http_prot_vsn, HttpVsn),
+		    {HttpVsn, StatCode, Headers_1};
+		_ -> %% An HTTP 0.9 response?
+		    put(http_prot_vsn, "HTTP/0.9"),
+		    {"HTTP/0.9", undefined, Headers}
+	    end;
+	_ ->
+	    {error, no_status_line}
+    end.
+
+% From RFC 2616
+%
+%    HTTP/1.1 header field values can be folded onto multiple lines if
+%    the continuation line begins with a space or horizontal tab. All
+%    linear white space, including folding, has the same semantics as
+%    SP. A recipient MAY replace any linear white space with a single
+%    SP before interpreting the field value or forwarding the message
+%    downstream.
+parse_headers_1(String) ->
+    parse_headers_1(String, [], []).
+
+parse_headers_1([$\n, H |T], [$\r | L], Acc) when H == 32;
+						  H == $\t -> 
+    parse_headers_1(lists:dropwhile(fun(X) ->
+					    is_whitespace(X)
+				    end, T), [32 | L], Acc);
+parse_headers_1([$\n|T], [$\r | L], Acc) -> 
+    case parse_header(lists:reverse(L)) of
+	invalid ->
+	    parse_headers_1(T, [], Acc);
+	NewHeader ->
+	    parse_headers_1(T, [], [NewHeader | Acc])
+    end;
+parse_headers_1([H|T],  L, Acc) -> 
+    parse_headers_1(T, [H|L], Acc);
+parse_headers_1([], [], Acc) ->
+    lists:reverse(Acc);
+parse_headers_1([], L, Acc) ->
+    Acc_1 = case parse_header(lists:reverse(L)) of
+		invalid ->
+		    Acc;
+		NewHeader ->
+		    [NewHeader | Acc]
+	    end,
+    lists:reverse(Acc_1).
+
+parse_status_line(Line) ->
+    parse_status_line(Line, get_prot_vsn, [], []).
+parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
+    parse_status_line(T, get_status_code, ProtVsn, StatCode);
+parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) ->
+    {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T};
+parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) ->
+    parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode);
+parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
+    parse_status_line(T, get_status_code, ProtVsn, [H | StatCode]);
+parse_status_line([], _, _, _) ->
+    http_09.
+
+parse_header(L) ->
+    parse_header(L, []).
+parse_header([$: | V], Acc) ->
+    {lists:reverse(Acc), string:strip(V)};
+parse_header([H | T], Acc) ->
+    parse_header(T, [H | Acc]);
+parse_header([], _) ->
+    invalid.
+
+scan_header([$\n|T], [$\r,$\n,$\r|L]) -> {yes, lists:reverse([$\n,$\r| L]), T};
+scan_header([H|T],  L)                -> scan_header(T, [H|L]);
+scan_header([], L)                    -> {no, L}.
+
+scan_crlf([$\n|T], [$\r | L]) -> {yes, lists:reverse(L), T};
+scan_crlf([H|T],  L)          -> scan_crlf(T, [H|L]);
+scan_crlf([], L)              -> {no, L}.
+
+fmt_val(L) when list(L)    -> L;
+fmt_val(I) when integer(I) -> integer_to_list(I);
+fmt_val(A) when atom(A)    -> atom_to_list(A);
+fmt_val(Term)              -> io_lib:format("~p", [Term]).
+
+crnl() -> "\r\n".
+
+method(get)       -> "GET";
+method(post)      -> "POST";
+method(head)      -> "HEAD";
+method(options)   -> "OPTIONS";
+method(put)       -> "PUT";
+method(delete)    -> "DELETE";
+method(trace)     -> "TRACE";
+method(mkcol)     -> "MKCOL";
+method(propfind)  -> "PROPFIND";
+method(proppatch) -> "PROPPATCH";
+method(lock)      -> "LOCK";
+method(unlock)    -> "UNLOCK";
+method(move)      -> "MOVE";
+method(copy)      -> "COPY".
+
+%% From RFC 2616
+%%
+% The chunked encoding modifies the body of a message in order to
+% transfer it as a series of chunks, each with its own size indicator,
+% followed by an OPTIONAL trailer containing entity-header
+% fields. This allows dynamically produced content to be transferred
+% along with the information necessary for the recipient to verify
+% that it has received the full message.
+% 	Chunked-Body = 	*chunk
+% 			last-chunk
+% 			trailer
+% 			CRLF
+% 	chunk = chunk-size [ chunk-extension ] CRLF
+% 		chunk-data CRLF
+% 	chunk-size = 1*HEX
+% 	last-chunk = 1*("0") [ chunk-extension ] CRLF
+% 	chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+% 	chunk-ext-name = token
+% 	chunk-ext-val = token | quoted-string
+% 	chunk-data = chunk-size(OCTET)
+% 	trailer = *(entity-header CRLF)
+% The chunk-size field is a string of hex digits indicating the size
+% of the chunk. The chunked encoding is ended by any chunk whose size
+% is zero, followed by the trailer, which is terminated by an empty
+% line.
+%%
+%% The parsing implemented here discards all chunk extensions. It also
+%% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
+%% sending them.
+parse_chunk_header([]) ->
+    throw({error, invalid_chunk_size});
+parse_chunk_header(ChunkHeader) ->
+    parse_chunk_header(ChunkHeader, []).
+
+parse_chunk_header([$; | _], Acc) ->
+    hexlist_to_integer(lists:reverse(Acc));
+parse_chunk_header([H | T], Acc) ->
+    case is_whitespace(H) of
+	true ->
+	    parse_chunk_header(T, Acc);
+	false ->
+	    parse_chunk_header(T, [H | Acc])
+    end;
+parse_chunk_header([], Acc) ->
+    hexlist_to_integer(lists:reverse(Acc)).
+
+is_whitespace(32)  -> true;
+is_whitespace($\r) -> true;
+is_whitespace($\n) -> true;
+is_whitespace($\t) -> true;
+is_whitespace(_)   -> false.
+
+
+send_async_headers(_ReqId, undefined, _StatCode, _Headers) ->
+    ok;
+send_async_headers(ReqId, StreamTo, StatCode, Headers) ->
+    catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers}.
+
+do_reply(State, From, undefined, _, Msg) ->
+    gen_server:reply(From, Msg),
+    dec_pipeline_counter(State);
+do_reply(State, _From, StreamTo, ReqId, {ok, _, _, _}) ->
+    State_1 = dec_pipeline_counter(State),
+    catch StreamTo ! {ibrowse_async_response_end, ReqId},
+    State_1;
+do_reply(State, _From, StreamTo, ReqId, Msg) ->
+    State_1 = dec_pipeline_counter(State),
+    catch StreamTo ! {ibrowse_async_response, ReqId, Msg},
+    State_1.
+
+do_interim_reply(undefined, _ReqId, _Msg) ->
+    ok;
+do_interim_reply(StreamTo, ReqId, Msg) ->
+    catch StreamTo ! {ibrowse_async_response, ReqId, Msg}.
+
+do_error_reply(#state{reqs = Reqs} = State, Err) ->
+    ReqList = queue:to_list(Reqs),
+    lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId}) ->
+                          do_reply(State, From, StreamTo, ReqId, {error, Err})
+		  end, ReqList).
+
+fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
+    {_, Reqs_1} = queue:out(Reqs),
+    #request{from=From, stream_to=StreamTo, req_id=ReqId} = CurReq,
+    do_reply(State, From, StreamTo, ReqId, Reply),
+    do_error_reply(State#state{reqs = Reqs_1}, previous_request_failed).
+
+
+split_list_at(List, N) ->
+    split_list_at(List, N, []).
+split_list_at([], _, Acc) ->
+    {lists:reverse(Acc), []};
+split_list_at(List2, 0, List1) ->
+    {lists:reverse(List1), List2};
+split_list_at([H | List2], N, List1) ->
+    split_list_at(List2, N-1, [H | List1]).
+
+hexlist_to_integer(List) ->
+    hexlist_to_integer(lists:reverse(List), 1, 0).
+hexlist_to_integer([H | T], Multiplier, Acc) ->
+    hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
+hexlist_to_integer([], _, Acc) ->
+    Acc.
+
+to_ascii($A) -> 10;
+to_ascii($a) -> 10;
+to_ascii($B) -> 11;
+to_ascii($b) -> 11;
+to_ascii($C) -> 12;
+to_ascii($c) -> 12;
+to_ascii($D) -> 13;
+to_ascii($d) -> 13;
+to_ascii($E) -> 14;
+to_ascii($e) -> 14;
+to_ascii($F) -> 15;
+to_ascii($f) -> 15;
+to_ascii($1) -> 1;
+to_ascii($2) -> 2;
+to_ascii($3) -> 3;
+to_ascii($4) -> 4;
+to_ascii($5) -> 5;
+to_ascii($6) -> 6;
+to_ascii($7) -> 7;
+to_ascii($8) -> 8;
+to_ascii($9) -> 9;
+to_ascii($0) -> 0.
+
+cancel_timer(undefined) -> ok;
+cancel_timer(Ref)       -> erlang:cancel_timer(Ref).
+
+cancel_timer(Ref, {eat_message, Msg}) ->
+    cancel_timer(Ref),
+    receive 
+	Msg ->
+	    ok
+    after 0 ->
+	    ok
+    end.
+
+make_req_id() ->
+    now().
+
+to_lower(Str) ->
+    to_lower(Str, []).
+to_lower([H|T], Acc) when H >= $A, H =< $Z ->
+    to_lower(T, [H+32|Acc]);
+to_lower([H|T], Acc) ->
+    to_lower(T, [H|Acc]);
+to_lower([], Acc) ->
+    lists:reverse(Acc).
+
+shutting_down(#state{lb_ets_tid = undefined}) ->
+    ok;
+shutting_down(#state{lb_ets_tid = Tid,
+		     cur_pipeline_size = Sz}) ->
+    catch ets:delete(Tid, {Sz, self()}).
+
+inc_pipeline_counter(#state{is_closing = true} = State) ->
+    State;
+inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz} = State) ->
+    State#state{cur_pipeline_size = Pipe_sz + 1}.
+
+dec_pipeline_counter(#state{is_closing = true} = State) ->
+    State;
+dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
+    State;
+dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
+			    lb_ets_tid = Tid} = State) ->
+    ets:delete(Tid, {Pipe_sz, self()}),
+    ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
+    State#state{cur_pipeline_size = Pipe_sz - 1}.
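
For orientation, a minimal sketch (not part of the patch) of how this connection
process can be driven directly; in normal use ibrowse:send_req/4-6 is the public
entry point and connections are handed out by the load balancer below. The host,
URL and header below are purely illustrative.

    Url = ibrowse_lib:parse_url("http://127.0.0.1:5984/_all_dbs"),
    {ok, Pid} = ibrowse_http_client:start({"127.0.0.1", 5984}),
    %% Synchronous call: the reply is {ok, StatusCode, Headers, Body} or
    %% {error, Reason}, as produced by do_reply/5 above. Passing
    %% {stream_to, self()} in the options instead returns
    %% {ibrowse_req_id, ReqId} and delivers ibrowse_async_* messages.
    ibrowse_http_client:send_req(Pid, Url, [{"Accept", "application/json"}],
                                 get, [], [], 30000).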

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/4d1d8294/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lb.erl b/ibrowse_lb.erl
new file mode 100644
index 0000000..03dc4e0
--- /dev/null
+++ b/ibrowse_lb.erl
@@ -0,0 +1,195 @@
+%%%-------------------------------------------------------------------
+%%% File    : ibrowse_lb.erl
+%%% Author  : chandru <ch...@t-mobile.co.uk>
+%%% Description : 
+%%%
+%%% Created :  6 Mar 2008 by chandru <ch...@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_lb).
+
+-vsn('$Id: ibrowse_lb.erl,v 1.1 2008/03/27 01:36:21 chandrusf Exp $ ').
+-author(chandru).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+	 start_link/1,
+	 spawn_connection/5
+	]).
+
+%% gen_server callbacks
+-export([
+	 init/1,
+	 handle_call/3,
+	 handle_cast/2,
+	 handle_info/2,
+	 terminate/2,
+	 code_change/3
+	]).
+
+-record(state, {parent_pid,
+		ets_tid,
+		host,
+		port,
+		max_sessions,
+		max_pipeline_size,
+		num_cur_sessions = 0}).
+
+-import(ibrowse_lib, [
+		      parse_url/1,
+		      printable_date/0,
+		      get_value/3
+		     ]).
+		      
+
+-include("ibrowse.hrl").
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/1
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start_link(Args) ->
+    gen_server:start_link(?MODULE, Args, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State}          |
+%%          {ok, State, Timeout} |
+%%          ignore               |
+%%          {stop, Reason}
+%%--------------------------------------------------------------------
+init([Host, Port]) ->
+    process_flag(trap_exit, true),
+    Max_sessions = ibrowse:get_config_value({max_sessions, Host, Port}, 10),
+    Max_pipe_sz = ibrowse:get_config_value({max_pipeline_size, Host, Port}, 10),
+    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+    put(ibrowse_trace_token, ["LB: ", Host, $:, integer_to_list(Port)]),
+    Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+    {ok, #state{parent_pid = whereis(ibrowse),
+		host = Host,
+		port = Port,
+		ets_tid = Tid,
+		max_pipeline_size = Max_pipe_sz,
+	        max_sessions = Max_sessions}}.
+
+spawn_connection(Lb_pid, Url,
+		 Max_sessions,
+		 Max_pipeline_size,
+		 SSL_options)
+  when is_pid(Lb_pid),
+       is_record(Url, url),
+       is_integer(Max_pipeline_size),
+       is_integer(Max_sessions) ->
+    gen_server:call(Lb_pid,
+		    {spawn_connection, Url, Max_sessions, Max_pipeline_size, SSL_options}).
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State}          |
+%%          {reply, Reply, State, Timeout} |
+%%          {noreply, State}               |
+%%          {noreply, State, Timeout}      |
+%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+% handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+% 	    #state{max_sessions = Max_sess,
+% 		   ets_tid = Tid,
+% 		   max_pipeline_size = Max_pipe_sz,
+% 		   num_cur_sessions = Num} = State) 
+%     when Num >= Max ->
+%     Reply = find_best_connection(Tid),
+%     {reply, sorry_dude_reuse, State};
+
+%% Update max_sessions in #state with supplied value
+handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+	    #state{ets_tid = Tid,
+		   num_cur_sessions = Num} = State) 
+    when Num >= Max_sess ->
+    Reply = find_best_connection(Tid, Max_pipe),
+    {reply, Reply, State#state{max_sessions = Max_sess}};
+
+handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
+	    #state{num_cur_sessions = Cur,
+		   ets_tid = Tid} = State) ->
+    {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
+    ets:insert(Tid, {{1, Pid}, []}),
+    {reply, {ok, Pid}, State#state{num_cur_sessions = Cur + 1}};
+
+handle_call(Request, _From, State) ->
+    Reply = {unknown_request, Request},
+    {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State}          |
+%%          {noreply, State, Timeout} |
+%%          {stop, Reason, State}            (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({'EXIT', Parent, _Reason}, #state{parent_pid = Parent} = State) ->
+    {stop, normal, State};
+
+handle_info({'EXIT', Pid, _Reason},
+	    #state{num_cur_sessions = Cur,
+		   ets_tid = Tid} = State) ->
+    ets:match_delete(Tid, {{'_', Pid}, '_'}),
+    {noreply, State#state{num_cur_sessions = Cur - 1}};
+
+handle_info({trace, Bool}, State) ->
+    put(my_trace_flag, Bool),
+    {noreply, State};
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+    ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+find_best_connection(Tid, Max_pipe) ->
+    case ets:first(Tid) of
+	{Cur_sz, Pid} when Cur_sz < Max_pipe ->
+	    ets:delete(Tid, {Cur_sz, Pid}),
+	    ets:insert(Tid, {{Cur_sz + 1, Pid}, []}),
+	    {ok, Pid};
+	_ ->
+	    {error, retry_later}
+    end.
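
The load distribution above relies on the ETS table being an ordered_set keyed
on {PipelineSize, Pid}, so ets:first/1 always returns the least-loaded
connection. A standalone illustration with made-up names (not part of the
patch):

    %% pid_a/pid_b/pid_c stand in for real connection pids.
    Tid = ets:new(demo_lb, [public, ordered_set]),
    ets:insert(Tid, [{{3, pid_a}, []}, {{1, pid_b}, []}, {{2, pid_c}, []}]),
    {1, pid_b} = ets:first(Tid),   %% smallest pipeline size sorts first
    ets:delete(Tid).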


[33/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Support SOCKS5 protocol for replication

Using "socks5" as the protocol in the "proxy" parameter of replication
requests will cause DNS resolution and data transfer to happen via a
SOCKS5 proxy server.

COUCHDB-2025
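
At the ibrowse level the proxy is selected through the request options handled
in the ibrowse_http_client.erl hunk below. A hedged sketch with example
addresses and credentials (not part of the patch):

    %% proxy_port defaults to 1080 for SOCKS5 if omitted.
    Options = [{proxy_protocol, socks5},
               {proxy_host, "127.0.0.1"},
               {proxy_port, 1080},
               {proxy_user, "jan"},
               {proxy_password, "secret"}],
    ibrowse:send_req("http://couch.example.com:5984/db/_changes", [], get, [], Options).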


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/1167b0e3
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/1167b0e3
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/1167b0e3

Branch: refs/heads/import-master
Commit: 1167b0e3cdea6dc71c415cb40d96a383c1e8f098
Parents: 50ee48d
Author: Robert Newson <rn...@apache.org>
Authored: Sat Jan 4 17:32:00 2014 +0000
Committer: Robert Newson <rn...@apache.org>
Committed: Mon Jan 6 23:34:53 2014 +0000

----------------------------------------------------------------------
 Makefile.am             |   2 +
 ibrowse_http_client.erl |  62 ++++++++++++++++--------
 ibrowse_lib.erl         |   7 +--
 ibrowse_socks5.erl      | 109 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 158 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1167b0e3/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 869bd10..7c48169 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -19,6 +19,7 @@ ibrowse_file_collection = \
     ibrowse_http_client.erl \
     ibrowse_lb.erl \
     ibrowse_lib.erl \
+    ibrowse_socks5.erl \
     ibrowse_sup.erl \
     ibrowse_test.erl
 
@@ -29,6 +30,7 @@ ibrowseebin_make_generated_file_list = \
     ibrowse_http_client.beam \
     ibrowse_lb.beam \
     ibrowse_lib.beam \
+    ibrowse_socks5.beam \
     ibrowse_sup.beam \
     ibrowse_test.beam
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1167b0e3/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index c01385a..a1cf6eb 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -39,7 +39,8 @@
 
 -record(state, {host, port, connect_timeout,
                 inactivity_timer_ref,
-                use_proxy = false, proxy_auth_digest,
+                use_http_proxy = false, http_proxy_auth_digest,
+                socks5_host, socks5_port, socks5_user, socks5_password,
                 ssl_options = [], is_ssl = false, socket,
                 proxy_tunnel_setup = false,
                 tunnel_setup_queue = [],
@@ -488,9 +489,21 @@ handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC
             State
     end.
 
-do_connect(Host, Port, Options, #state{is_ssl      = true,
-                                       use_proxy   = false,
-                                       ssl_options = SSLOptions},
+do_connect(Host, Port, Options, #state{socks5_host = SocksHost}=State, Timeout)
+  when SocksHost /= undefined ->
+    ProxyOptions = [
+        {user,     State#state.socks5_user},
+        {password, State#state.socks5_password},
+        {host,     SocksHost},
+        {port,     State#state.socks5_port},
+        {is_ssl,   State#state.is_ssl},
+        {ssl_opts, State#state.ssl_options}],
+    ibrowse_socks5:connect(Host, Port, ProxyOptions,
+                           get_sock_options(SocksHost, Options, []),
+                           Timeout);
+do_connect(Host, Port, Options, #state{is_ssl         = true,
+                                       use_http_proxy = false,
+                                       ssl_options    = SSLOptions},
            Timeout) ->
     ssl:connect(Host, Port, get_sock_options(Host, Options, SSLOptions), Timeout);
 do_connect(Host, Port, Options, _State, Timeout) ->
@@ -541,7 +554,7 @@ filter_sock_options(Opts) ->
 
 do_send(Req, #state{socket = Sock,
                     is_ssl = true,
-                    use_proxy = true,
+                    use_http_proxy = true,
                     proxy_tunnel_setup = Pts}) when Pts /= done ->  gen_tcp:send(Sock, Req);
 do_send(Req, #state{socket = Sock, is_ssl = true})  ->  ssl:send(Sock, Req);
 do_send(Req, #state{socket = Sock, is_ssl = false}) ->  gen_tcp:send(Sock, Req).
@@ -589,7 +602,7 @@ maybe_chunked_encode(Data, true) ->
 do_close(#state{socket = undefined})            ->  ok;
 do_close(#state{socket = Sock,
                 is_ssl = true,
-                use_proxy = true,
+                use_http_proxy = true,
                 proxy_tunnel_setup = Pts
                }) when Pts /= done ->  catch gen_tcp:close(Sock);
 do_close(#state{socket = Sock, is_ssl = true})  ->  catch ssl:close(Sock);
@@ -602,7 +615,7 @@ active_once(#state{socket = Socket} = State) ->
 
 do_setopts(_Sock, [],   _)    ->  ok;
 do_setopts(Sock, Opts, #state{is_ssl = true,
-                              use_proxy = true,
+                              use_http_proxy = true,
                               proxy_tunnel_setup = Pts}
                              ) when Pts /= done ->  inet:setopts(Sock, Opts);
 do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
@@ -621,17 +634,28 @@ send_req_1(From,
                 port = Port} = Url,
            Headers, Method, Body, Options, Timeout,
            #state{socket = undefined} = State) ->
+    ProxyHost = get_value(proxy_host, Options, false),
+    ProxyProtocol = get_value(proxy_protocol, Options, http),
     {Host_1, Port_1, State_1} =
-        case get_value(proxy_host, Options, false) of
-            false ->
+        case {ProxyHost, ProxyProtocol} of
+            {false, _} ->
                 {Host, Port, State};
-            PHost ->
+            {_, http} ->
                 ProxyUser     = get_value(proxy_user, Options, []),
                 ProxyPassword = get_value(proxy_password, Options, []),
                 Digest        = http_auth_digest(ProxyUser, ProxyPassword),
-                {PHost, get_value(proxy_port, Options, 80),
-                 State#state{use_proxy = true,
-                             proxy_auth_digest = Digest}}
+                {ProxyHost, get_value(proxy_port, Options, 80),
+                 State#state{use_http_proxy = true,
+                             http_proxy_auth_digest = Digest}};
+            {_, socks5} ->
+                ProxyUser     = list_to_binary(get_value(proxy_user, Options, [])),
+                ProxyPassword = list_to_binary(get_value(proxy_password, Options, [])),
+                ProxyPort = get_value(proxy_port, Options, 1080),
+                {Host, Port,
+                 State#state{socks5_host = ProxyHost,
+                             socks5_port = ProxyPort,
+                             socks5_user = ProxyUser,
+                             socks5_password = ProxyPassword}}
         end,
     State_2 = check_ssl_options(Options, State_1),
     do_trace("Connecting...~n", []),
@@ -662,7 +686,7 @@ send_req_1(From,
            Headers, Method, Body, Options, Timeout,
            #state{
                   proxy_tunnel_setup = false,
-                  use_proxy = true,
+                  use_http_proxy = true,
                   is_ssl    = true} = State) ->
     Ref = case Timeout of
               infinity ->
@@ -850,11 +874,11 @@ add_auth_headers(#url{username = User,
                 end,
     add_proxy_auth_headers(State, Headers_1).
 
-add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
+add_proxy_auth_headers(#state{use_http_proxy = false}, Headers) ->
     Headers;
-add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
+add_proxy_auth_headers(#state{http_proxy_auth_digest = []}, Headers) ->
     Headers;
-add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
+add_proxy_auth_headers(#state{http_proxy_auth_digest = Auth_digest}, Headers) ->
     [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
 
 http_auth_digest([], []) ->
@@ -863,7 +887,7 @@ http_auth_digest(Username, Password) ->
     ibrowse_lib:encode_base64(Username ++ [$: | Password]).
 
 make_request(Method, Headers, AbsPath, RelPath, Body, Options,
-             #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) ->
+             #state{use_http_proxy = UseHttpProxy, is_ssl = Is_ssl}, ReqId) ->
     HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
     Fun1 = fun({X, Y}) when is_atom(X) ->
                    {to_lower(atom_to_list(X)), X, Y};
@@ -906,7 +930,7 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
                         Headers_2
                 end,
     Headers_4 = cons_headers(Headers_3),
-    Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
+    Uri = case get_value(use_absolute_uri, Options, false) or UseHttpProxy of
               true ->
                   case Is_ssl of
                       true ->

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1167b0e3/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index 1ce6bd4..7b12cb3 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -362,9 +362,10 @@ parse_url([], get_password, Url, TmpAcc) ->
 parse_url([], State, Url, TmpAcc) ->
     {invalid_uri_2, State, Url, TmpAcc}.
 
-default_port(http)  -> 80;
-default_port(https) -> 443;
-default_port(ftp)   -> 21.
+default_port(socks5) -> 1080;
+default_port(http)   -> 80;
+default_port(https)  -> 443;
+default_port(ftp)    -> 21.
 
 printable_date() ->
     {{Y,Mo,D},{H, M, S}} = calendar:local_time(),

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/1167b0e3/ibrowse_socks5.erl
----------------------------------------------------------------------
diff --git a/ibrowse_socks5.erl b/ibrowse_socks5.erl
new file mode 100644
index 0000000..d00df44
--- /dev/null
+++ b/ibrowse_socks5.erl
@@ -0,0 +1,109 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ibrowse_socks5).
+
+-define(VERSION, 5).
+-define(CONNECT, 1).
+
+-define(NO_AUTH, 0).
+-define(USERPASS, 2).
+-define(UNACCEPTABLE, 16#FF).
+-define(RESERVED, 0).
+
+-define(ATYP_IPV4, 1).
+-define(ATYP_DOMAINNAME, 3).
+-define(ATYP_IPV6, 4).
+
+-define(SUCCEEDED, 0).
+
+-export([connect/5]).
+
+-import(ibrowse_lib, [get_value/2, get_value/3]).
+
+connect(TargetHost, TargetPort, ProxyOptions, Options, Timeout) ->
+    case gen_tcp:connect(get_value(host, ProxyOptions),
+                         get_value(port, ProxyOptions),
+                         Options, Timeout) of
+        {ok, Socket} ->
+            case handshake(Socket, ProxyOptions) of
+                ok ->
+                    case connect(TargetHost, TargetPort, Socket) of
+                        ok ->
+                            maybe_ssl(Socket, ProxyOptions, Timeout);
+                        Else ->
+                            gen_tcp:close(Socket),
+                            Else
+                    end;
+                Else ->
+                    gen_tcp:close(Socket),
+                    Else
+            end;
+        Else ->
+            Else
+    end.
+
+handshake(Socket, ProxyOptions) when is_port(Socket) ->
+    {Handshake, Success} = case get_value(user, ProxyOptions, <<>>) of
+        <<>> ->
+            {<<?VERSION, 1, ?NO_AUTH>>, ?NO_AUTH};
+        User ->
+            Password = get_value(password, ProxyOptions, <<>>),
+            {<<?VERSION, 1, ?USERPASS, (byte_size(User)), User/binary,
+               (byte_size(Password)), Password/binary>>, ?USERPASS}
+    end,
+    ok = gen_tcp:send(Socket, Handshake),
+    case gen_tcp:recv(Socket, 0) of
+        {ok, <<?VERSION, Success>>} ->
+            ok;
+        {ok, <<?VERSION, ?UNACCEPTABLE>>} ->
+            {error, unacceptable};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+connect(Host, Port, Via) when is_list(Host) ->
+    connect(list_to_binary(Host), Port, Via);
+connect(Host, Port, Via) when is_binary(Host), is_integer(Port),
+                              is_port(Via) ->
+    ok = gen_tcp:send(Via,
+        <<?VERSION, ?CONNECT, ?RESERVED, ?ATYP_DOMAINNAME,
+          (byte_size(Host)), Host/binary,
+          (Port):16>>),
+    case gen_tcp:recv(Via, 0) of
+        {ok, <<?VERSION, ?SUCCEEDED, ?RESERVED, _/binary>>} ->
+            ok;
+        {ok, <<?VERSION, Rep, ?RESERVED, _/binary>>} ->
+            {error, rep(Rep)};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+maybe_ssl(Socket, ProxyOptions, Timeout) ->
+    IsSsl = get_value(is_ssl, ProxyOptions, false),
+    SslOpts = get_value(ssl_opts, ProxyOptions, []),
+    case IsSsl of
+        false ->
+            {ok, Socket};
+        true ->
+            ssl:connect(Socket, SslOpts, Timeout)
+    end.
+
+rep(0) -> succeeded;
+rep(1) -> server_fail;
+rep(2) -> disallowed_by_ruleset;
+rep(3) -> network_unreachable;
+rep(4) -> host_unreachable;
+rep(5) -> connection_refused;
+rep(6) -> ttl_expired;
+rep(7) -> command_not_supported;
+rep(8) -> address_type_not_supported.
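
The ibrowse_http_client and ibrowse_socks5 changes above route a request through a
SOCKS5 proxy whenever the proxy_protocol option is set to socks5. A minimal usage
sketch, assuming only the option names introduced in this diff; the target URL,
proxy address and credentials are made up:

    %% Illustrative only. proxy_user/proxy_password may be omitted for a
    %% no-auth proxy; string credentials are converted to binaries in
    %% send_req_1 above.
    ibrowse:send_req("http://example.com/", [], get, [],
                     [{proxy_protocol, socks5},
                      {proxy_host, "127.0.0.1"},
                      {proxy_port, 1080},
                      {proxy_user, "socks_user"},
                      {proxy_password, "socks_secret"}]).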


[23/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Updated ibrowse to version 2.1.0. It contains fixes for the following important issues:

- https://github.com/cmullaparthi/ibrowse/issues/closed#issue/17
- https://github.com/cmullaparthi/ibrowse/issues/closed#issue/15
- https://github.com/cmullaparthi/ibrowse/issues/closed#issue/19




git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1033456 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/0db80d3e
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/0db80d3e
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/0db80d3e

Branch: refs/heads/import-master
Commit: 0db80d3e8bdd81f451fbeb002ef8bd107f653f31
Parents: a284c87
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Wed Nov 10 13:34:16 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Wed Nov 10 13:34:16 2010 +0000

----------------------------------------------------------------------
 Makefile.am             |   2 +-
 ibrowse.app.in          |   2 +-
 ibrowse.erl             |  33 ++++++-
 ibrowse_http_client.erl | 211 ++++++++++++++++++++++++++++---------------
 ibrowse_lib.erl         |   2 +-
 ibrowse_test.erl        |  45 ++++++---
 6 files changed, 201 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 39878f0..8c5d3f8 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.0.1/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.0/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index 8fc2066..e8580d1 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "2.0.1"},
+         {vsn, "2.1.0"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 7f8d8bc..1a42f4b 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,8 +7,8 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2010 Chandrashekhar Mullaparthi
-%% @version 2.0.1
-%% @doc The ibrowse application implements an HTTP 1.1 client. This
+%% @version 2.1.0
+%% @doc The ibrowse application implements an HTTP 1.1 client in Erlang. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
 %% one process to handle one TCP connection to a webserver
@@ -87,6 +87,7 @@
          send_req_direct/6,
          send_req_direct/7,
          stream_next/1,
+         stream_close/1,
          set_max_sessions/3,
          set_max_pipeline_size/3,
          set_dest/3,
@@ -201,7 +202,11 @@ send_req(Url, Headers, Method, Body) ->
 %% dealing with large response bodies and/or slow links. In these
 %% cases, it might be hard to estimate how long a request will take to
 %% complete. In such cases, the client might want to timeout if no
-%% data has been received on the link for a certain time interval.</li>
+%% data has been received on the link for a certain time interval.
+%% 
+%% This value is also used to close connections that have been idle for
+%% the specified interval.
+%% </li>
 %%
 %% <li>
 %% The <code>connect_timeout</code> option is to specify how long the
@@ -458,6 +463,8 @@ ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
 spawn_worker_process(Url) ->
     ibrowse_http_client:start(Url).
 
+%% @doc Same as spawn_worker_process/1 but takes as input a Host and Port
+%% instead of a URL.
 %% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
 spawn_worker_process(Host, Port) ->
     ibrowse_http_client:start({Host, Port}).
@@ -468,6 +475,8 @@ spawn_worker_process(Host, Port) ->
 spawn_link_worker_process(Url) ->
     ibrowse_http_client:start_link(Url).
 
+%% @doc Same as spawn_worker_process/2 except that the calling process
+%% is linked to the worker process which is spawned.
 %% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
 spawn_link_worker_process(Host, Port) ->
     ibrowse_http_client:start_link({Host, Port}).
@@ -524,6 +533,21 @@ stream_next(Req_id) ->
             ok
     end.
 
+%% @doc Tell ibrowse to close the connection associated with the
+%% specified stream.  Should be used in conjunction with the
+%% <code>stream_to</code> option. Note that all requests in progress on
+%% the connection which is serving this Req_id will be aborted, and an
+%% error returned.
+%% @spec stream_close(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_close(Req_id) ->    
+    case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+        [] ->
+            {error, unknown_req_id};
+        [{_, Pid}] ->
+            catch Pid ! {stream_close, Req_id},
+            ok
+    end.
+
 %% @doc Turn tracing on for the ibrowse process
 trace_on() ->
     ibrowse ! {trace, true}.
@@ -553,6 +577,9 @@ all_trace_off() ->
     ibrowse ! all_trace_off,
     ok.
 
+%% @doc Shows some internal information about load balancing. Info
+%% about workers spawned using spawn_worker_process/2 or
+%% spawn_link_worker_process/2 is not included.
 show_dest_status() ->
     Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
                                                              is_integer(Port) ->
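
Among the API additions above, stream_close/1 lets a caller abort an in-flight
streaming response. A minimal sketch of how it might be used together with the
existing stream_to option; the URL is hypothetical:

    %% Illustrative only: start an async request, then drop it after the
    %% headers arrive. The {ibrowse_req_id, Req_id} return shape and the
    %% {stream_to, {Pid, once}} option appear elsewhere in this commit.
    {ibrowse_req_id, Req_id} =
        ibrowse:send_req("http://example.com/big", [], get, [],
                         [{stream_to, {self(), once}},
                          {response_format, binary}]),
    receive
        {ibrowse_async_headers, Req_id, _Status, _Headers} ->
            ibrowse:stream_close(Req_id)
    end.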

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 2dd209d..5c3d5c9 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -37,6 +37,7 @@
 -include("ibrowse.hrl").
 
 -record(state, {host, port, connect_timeout,
+                inactivity_timer_ref,
                 use_proxy = false, proxy_auth_digest,
                 ssl_options = [], is_ssl = false, socket,
                 proxy_tunnel_setup = false,
@@ -192,6 +193,12 @@ handle_info({stream_next, Req_id}, #state{socket = Socket,
 handle_info({stream_next, _Req_id}, State) ->
     {noreply, State};
 
+handle_info({stream_close, _Req_id}, State) ->
+    shutting_down(State),
+    do_close(State),
+    do_error_reply(State, closing_on_request),
+    {stop, normal, ok, State};
+
 handle_info({tcp_closed, _Sock}, State) ->    
     do_trace("TCP connection closed by peer!~n", []),
     handle_sock_closed(State),
@@ -221,6 +228,7 @@ handle_info({req_timedout, From}, State) ->
     end;
 
 handle_info(timeout, State) ->
+    do_trace("Inactivity timeout triggered. Shutting down connection~n", []),
     shutting_down(State),
     do_error_reply(State, req_timedout),
     {stop, normal, State};
@@ -273,8 +281,8 @@ handle_sock_data(Data, #state{status = get_header}=State) ->
             {stop, normal, State};
         State_1 ->
             active_once(State_1),
-            set_inac_timer(State_1),
-            {noreply, State_1}
+            State_2 = set_inac_timer(State_1),
+            {noreply, State_2}
     end;
 
 handle_sock_data(Data, #state{status           = get_body,
@@ -293,8 +301,8 @@ handle_sock_data(Data, #state{status           = get_body,
                     {stop, normal, State};
                 State_1 ->
                     active_once(State_1),
-                    set_inac_timer(State_1),
-                    {noreply, State_1}
+                    State_2 = set_inac_timer(State_1),
+                    {noreply, State_2}
             end;
         _ ->
             case parse_11_response(Data, State) of
@@ -314,12 +322,12 @@ handle_sock_data(Data, #state{status           = get_body,
                             active_once(State_1)
                     end,
                     State_2 = State_1#state{interim_reply_sent = false},
-                    set_inac_timer(State_2),
-                    {noreply, State_2};
+                    State_3 = set_inac_timer(State_2),
+                    {noreply, State_3};
                 State_1 ->
                     active_once(State_1),
-                    set_inac_timer(State_1),
-                    {noreply, State_1}
+                    State_2 = set_inac_timer(State_1),
+                    {noreply, State_2}
             end
     end.
 
@@ -507,29 +515,37 @@ do_send(Req, #state{socket = Sock, is_ssl = false}) ->  gen_tcp:send(Sock, Req).
 %%                       {fun_arity_0}         |
 %%                       {fun_arity_1, term()}
 %% error() = term()
-do_send_body(Source, State) when is_function(Source) ->
-    do_send_body({Source}, State);
-do_send_body({Source}, State) when is_function(Source) ->
-    do_send_body1(Source, Source(), State);
-do_send_body({Source, Source_state}, State) when is_function(Source) ->
-    do_send_body1(Source, Source(Source_state), State);
-do_send_body(Body, State) ->
+do_send_body(Source, State, TE) when is_function(Source) ->
+    do_send_body({Source}, State, TE);
+do_send_body({Source}, State, TE) when is_function(Source) ->
+    do_send_body1(Source, Source(), State, TE);
+do_send_body({Source, Source_state}, State, TE) when is_function(Source) ->
+    do_send_body1(Source, Source(Source_state), State, TE);
+do_send_body(Body, State, _TE) ->
     do_send(Body, State).
 
-do_send_body1(Source, Resp, State) ->
+do_send_body1(Source, Resp, State, TE) ->
     case Resp of
         {ok, Data} ->
-            do_send(Data, State),
-            do_send_body({Source}, State);
+            do_send(maybe_chunked_encode(Data, TE), State),
+            do_send_body({Source}, State, TE);
         {ok, Data, New_source_state} ->
-            do_send(Data, State),
-            do_send_body({Source, New_source_state}, State);
+            do_send(maybe_chunked_encode(Data, TE), State),
+            do_send_body({Source, New_source_state}, State, TE);
+        eof when TE == true ->
+            do_send(<<"0\r\n\r\n">>, State),
+            ok;
         eof ->
             ok;
         Err ->
             Err
     end.
 
+maybe_chunked_encode(Data, false) ->
+    Data;
+maybe_chunked_encode(Data, true) ->
+    [ibrowse_lib:dec2hex(4, size(to_binary(Data))), "\r\n", Data, "\r\n"].
+
 do_close(#state{socket = undefined})            ->  ok;
 do_close(#state{socket = Sock,
                 is_ssl = true,
@@ -619,11 +635,13 @@ send_req_1(From,
     {Req, Body_1} = make_request(connect, Pxy_auth_headers,
                                  Path, Path,
                                  [], Options, State_1),
+    TE = is_chunked_encoding_specified(Options),
     trace_request(Req),
     case do_send(Req, State) of
         ok ->
-            case do_send_body(Body_1, State_1) of
+            case do_send_body(Body_1, State_1, TE) of
                 ok ->
+                    trace_request_body(Body_1),
                     active_once(State_1),
                     Ref = case Timeout of
                               infinity ->
@@ -636,8 +654,8 @@ send_req_1(From,
                                             send_timer = Ref,
                                             proxy_tunnel_setup = in_progress,
                                             tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
-                    set_inac_timer(State_1),
-                    {noreply, State_2};
+                    State_3 = set_inac_timer(State_2),
+                    {noreply, State_3};
                 Err ->
                     shutting_down(State_1),
                     do_trace("Send failed... Reason: ~p~n", [Err]),
@@ -706,10 +724,12 @@ send_req_1(From,
                                  AbsPath, RelPath, Body, Options, State_1),
     trace_request(Req),
     do_setopts(Socket, Caller_socket_options, Is_ssl),
+    TE = is_chunked_encoding_specified(Options),
     case do_send(Req, State_1) of
         ok ->
-            case do_send_body(Body_1, State_1) of
+            case do_send_body(Body_1, State_1, TE) of
                 ok ->
+                    trace_request_body(Body_1),
                     State_2 = inc_pipeline_counter(State_1),
                     active_once(State_2),
                     Ref = case Timeout of
@@ -732,8 +752,8 @@ send_req_1(From,
                         _ ->
                             gen_server:reply(From, {ibrowse_req_id, ReqId})
                     end,
-                    set_inac_timer(State_1),
-                    {noreply, State_3};
+                    State_4 = set_inac_timer(State_3),
+                    {noreply, State_4};
                 Err ->
                     shutting_down(State_1),
                     do_trace("Send failed... Reason: ~p~n", [Err]),
@@ -759,6 +779,7 @@ maybe_modify_headers(#url{host = Host, port = Port} = Url,
                                   false ->
                                       case Port of
                                           80 -> Host;
+                                          443 -> Host;
                                           _ -> [Host, ":", integer_to_list(Port)]
                                       end;
                                   {value, {_, Host_h_val}} ->
@@ -802,31 +823,42 @@ http_auth_digest(Username, Password) ->
 make_request(Method, Headers, AbsPath, RelPath, Body, Options,
              #state{use_proxy = UseProxy, is_ssl = Is_ssl}) ->
     HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
+    Fun1 = fun({X, Y}) when is_atom(X) ->
+                   {to_lower(atom_to_list(X)), X, Y};
+              ({X, Y}) when is_list(X) ->
+                   {to_lower(X), X, Y}
+           end,
+    Headers_0 = [Fun1(X) || X <- Headers],
     Headers_1 =
-        case get_value(content_length, Headers, false) of
-            false when (Body == []) or
-            (Body == <<>>) or
-            is_tuple(Body) or
-            is_function(Body) ->
-                Headers;
+        case lists:keysearch("content-length", 1, Headers_0) of
+            false when (Body == []) orelse
+                       (Body == <<>>) orelse
+                       is_tuple(Body) orelse
+                       is_function(Body) ->
+                Headers_0;
             false when is_binary(Body) ->
-                [{"content-length", integer_to_list(size(Body))} | Headers];
-            false ->
-                [{"content-length", integer_to_list(length(Body))} | Headers];
+                [{"content-length", "content-length", integer_to_list(size(Body))} | Headers_0];
+            false when is_list(Body) ->
+                [{"content-length", "content-length", integer_to_list(length(Body))} | Headers_0];
             _ ->
-                Headers
+                %% Content-Length is already specified
+                Headers_0
         end,
     {Headers_2, Body_1} =
-        case get_value(transfer_encoding, Options, false) of
+        case is_chunked_encoding_specified(Options) of
             false ->
-                {Headers_1, Body};
-            {chunked, ChunkSize} ->
-                {[{X, Y} || {X, Y} <- Headers_1,
-                            X /= "Content-Length",
-                            X /= "content-length",
-                            X /= content_length] ++
+                {[{Y, Z} || {_, Y, Z} <- Headers_1], Body};
+            true ->
+                Chunk_size_1 = case get_value(transfer_encoding, Options) of
+                                  chunked ->
+                                      5120;
+                                  {chunked, Chunk_size} ->
+                                      Chunk_size
+                              end,
+                {[{Y, Z} || {X, Y, Z} <- Headers_1,
+                            X /= "content-length"] ++
                  [{"Transfer-Encoding", "chunked"}],
-                 chunk_request_body(Body, ChunkSize)}
+                 chunk_request_body(Body, Chunk_size_1)}
         end,
     Headers_3 = cons_headers(Headers_2),
     Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
@@ -842,6 +874,16 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
           end,
     {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
 
+is_chunked_encoding_specified(Options) ->
+    case get_value(transfer_encoding, Options, false) of
+        false ->
+            false;
+        {chunked, _} -> 
+            true;
+        chunked ->
+            true
+    end.
+
 http_vsn_string({0,9}) -> "HTTP/0.9";
 http_vsn_string({1,0}) -> "HTTP/1.0";
 http_vsn_string({1,1}) -> "HTTP/1.1".
@@ -873,6 +915,9 @@ encode_headers([{Name,Val} | T], Acc) when is_atom(Name) ->
 encode_headers([], Acc) ->
     lists:reverse(Acc).
 
+chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse
+                                          is_function(Body) ->
+    Body;
 chunk_request_body(Body, ChunkSize) ->
     chunk_request_body(Body, ChunkSize, []).
 
@@ -1060,7 +1105,7 @@ upgrade_to_ssl(#state{socket = Socket,
 
 send_queued_requests([], State) ->
     do_trace("Sent all queued requests via SSL connection~n", []),
-    State#state{tunnel_setup_queue = done};
+    State#state{tunnel_setup_queue = []};
 send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
                      State) ->
     case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
@@ -1217,7 +1262,6 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        reply_buffer  = RepBuf,
                        recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = RepBuf,
-    State_1 = set_cur_request(State),
     file:close(Fd),
     ResponseBody = case TmpFilename of
                        undefined ->
@@ -1232,9 +1276,9 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                 false ->
                     {ok, SCode, Resp_headers_1, ResponseBody}
             end,
-    State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format, Reply),
+    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
     cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-    State_2;
+    set_cur_request(State_1);
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                          response_format = Resp_format,
                          options = Options},
@@ -1245,7 +1289,6 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                        reply_buffer     = RepBuf,
                        send_timer       = ReqTimer} = State) ->
     Body = RepBuf,
-%%    State_1 = set_cur_request(State),
     {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
     Reply = case get_value(give_raw_headers, Options, false) of
                 true ->
@@ -1253,15 +1296,8 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
                 false ->
                     {ok, SCode, Resp_headers_1, Body}
             end,
-    State_1 = case get(conn_close) of
-        "close" ->
-            do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-            exit(normal);
-        _ ->
-            State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-            cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-            State_1_1
-    end,
+    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
     set_cur_request(State_1).
 
 reset_state(State) ->
@@ -1353,6 +1389,8 @@ parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
     parse_status_line(T, get_status_code, ProtVsn, StatCode);
 parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) ->
     {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T};
+parse_status_line([], get_status_code, ProtVsn, StatCode) ->
+    {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), []};
 parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) ->
     parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode);
 parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
@@ -1710,36 +1748,61 @@ get_stream_chunk_size(Options) ->
     end.
 
 set_inac_timer(State) ->
-    set_inac_timer(State, get_inac_timeout(State)).
-
-set_inac_timer(_State, Timeout) when is_integer(Timeout) ->
-    TimerRef = erlang:send_after(Timeout, self(), timeout),
-    case erlang:put(inac_timer, TimerRef) of
-    OldTimer when is_reference(OldTimer) ->
-        erlang:cancel_timer(OldTimer),
-        receive timeout -> ok after 0 -> ok end;
-    _ ->
-        ok
-    end,
-    TimerRef;
-set_inac_timer(_, _) ->
-    undefined.
+    cancel_timer(State#state.inactivity_timer_ref),
+    set_inac_timer(State#state{inactivity_timer_ref = undefined},
+                   get_inac_timeout(State)).
+
+set_inac_timer(State, Timeout) when is_integer(Timeout) ->
+    Ref = erlang:send_after(Timeout, self(), timeout),
+    State#state{inactivity_timer_ref = Ref};
+set_inac_timer(State, _) ->
+    State.
 
 get_inac_timeout(#state{cur_req = #request{options = Opts}}) -> 
     get_value(inactivity_timeout, Opts, infinity);
 get_inac_timeout(#state{cur_req = undefined}) ->
-    infinity.
+    case ibrowse:get_config_value(inactivity_timeout, undefined) of
+        Val when is_integer(Val) ->
+            Val;
+        _ ->
+            case application:get_env(ibrowse, inactivity_timeout) of
+                {ok, Val} when is_integer(Val), Val > 0 ->
+                    Val;
+                _ ->
+                    10000
+            end
+    end.
 
 trace_request(Req) ->
     case get(my_trace_flag) of
         true ->
             %%Avoid the binary operations if trace is not on...
-            NReq = binary_to_list(list_to_binary(Req)),
+            NReq = to_binary(Req),
             do_trace("Sending request: ~n"
                      "--- Request Begin ---~n~s~n"
                      "--- Request End ---~n", [NReq]);
         _ -> ok
     end.
 
+trace_request_body(Body) ->
+    case get(my_trace_flag) of
+        true ->
+            %%Avoid the binary operations if trace is not on...
+            NBody = to_binary(Body),
+            case size(NBody) > 1024 of
+                true ->
+                    ok;
+                false ->
+                    do_trace("Sending request body: ~n"
+                             "--- Request Body Begin ---~n~s~n"
+                             "--- Request Body End ---~n", [NBody])
+            end;
+        false ->
+            ok
+    end.
+
 to_integer(X) when is_list(X)    -> list_to_integer(X); 
 to_integer(X) when is_integer(X) -> X.
+
+to_binary(X) when is_list(X)   -> list_to_binary(X); 
+to_binary(X) when is_binary(X) -> X.
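
The do_send_body/3 and is_chunked_encoding_specified/1 changes above allow a request
body to be produced incrementally by a fun and framed with chunked transfer-encoding.
A minimal usage sketch, assuming the {Fun, State} body-source contract and the
transfer_encoding option shown in this diff; the URL and payload are made up:

    %% Illustrative only: a one-shot body source that emits its state once
    %% and then signals eof. ibrowse adds the "Transfer-Encoding: chunked"
    %% header and the terminating zero-length chunk.
    Source = fun(<<>>)  -> eof;
                (Chunk) -> {ok, Chunk, <<>>}
             end,
    ibrowse:send_req("http://example.com/upload", [], put,
                     {Source, <<"some request body">>},
                     [{transfer_encoding, chunked}]).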

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index fbb9c34..c463c7b 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -208,7 +208,7 @@ parse_url(Url) ->
 parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
     {invalid_uri_1, Url};
 parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
-    Prot = list_to_atom(lists:reverse(TmpAcc)),
+    Prot = list_to_existing_atom(lists:reverse(TmpAcc)),
     parse_url(T, get_username, 
               Url#url{protocol = Prot},
               []);

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/0db80d3e/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index e7d6e59..3ad7660 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -217,14 +217,18 @@ dump_errors(Key, Iod) ->
 		    {"http://jigsaw.w3.org/HTTP/300/", get},
 		    {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
 		    {"http://jigsaw.w3.org/HTTP/CL/", get},
-		    {"http://www.httpwatch.com/httpgallery/chunked/", get}
+		    {"http://www.httpwatch.com/httpgallery/chunked/", get},
+                    {"https://github.com", get, [{ssl_options, [{depth, 2}]}]}
 		   ]).
 
 unit_tests() ->
     unit_tests([]).
 
 unit_tests(Options) ->
+    application:start(crypto),
+    application:start(public_key),
     application:start(ssl),
+    ibrowse:start(),
     Options_1 = Options ++ [{connect_timeout, 5000}],
     {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
     receive 
@@ -249,32 +253,45 @@ verify_chunked_streaming() ->
     verify_chunked_streaming([]).
 
 verify_chunked_streaming(Options) ->
+    io:format("~nVerifying that chunked streaming is working...~n", []),
     Url = "http://www.httpwatch.com/httpgallery/chunked/",
-    io:format("URL: ~s~n", [Url]),
-    io:format("Fetching data without streaming...~n", []),
+    io:format("  URL: ~s~n", [Url]),
+    io:format("  Fetching data without streaming...~n", []),
     Result_without_streaming = ibrowse:send_req(
 				 Url, [], get, [],
 				 [{response_format, binary} | Options]),
-    io:format("Fetching data with streaming as list...~n", []),
+    io:format("  Fetching data with streaming as list...~n", []),
     Async_response_list = do_async_req_list(
 			    Url, get, [{response_format, list} | Options]),
-    io:format("Fetching data with streaming as binary...~n", []),
+    io:format("  Fetching data with streaming as binary...~n", []),
     Async_response_bin = do_async_req_list(
 			   Url, get, [{response_format, binary} | Options]),
-    io:format("Fetching data with streaming as binary, {active, once}...~n", []),
+    io:format("  Fetching data with streaming as binary, {active, once}...~n", []),
     Async_response_bin_once = do_async_req_list(
                                 Url, get, [once, {response_format, binary} | Options]),
-    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
-    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once).
+    Res1 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
+    Res2 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once),
+    case {Res1, Res2} of
+        {success, success} ->
+            io:format("  Chunked streaming working~n", []);
+        _ ->
+            ok
+    end.
 
 test_chunked_streaming_once() ->
     test_chunked_streaming_once([]).
 
 test_chunked_streaming_once(Options) ->
+    io:format("~nTesting chunked streaming with the {stream_to, {Pid, once}} option...~n", []),
     Url = "http://www.httpwatch.com/httpgallery/chunked/",
-    io:format("URL: ~s~n", [Url]),
-    io:format("Fetching data with streaming as binary, {active, once}...~n", []),
-    do_async_req_list(Url, get, [once, {response_format, binary} | Options]).
+    io:format("  URL: ~s~n", [Url]),
+    io:format("  Fetching data with streaming as binary, {active, once}...~n", []),
+    case do_async_req_list(Url, get, [once, {response_format, binary} | Options]) of
+        {ok, _, _, _} ->
+            io:format("  Success!~n", []);
+        Err ->
+            io:format("  Fail: ~p~n", [Err])
+    end.
 
 compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
     success;
@@ -310,7 +327,7 @@ do_async_req_list(Url, Method, Options) ->
     {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
 				   [self(), Url, Method, 
 				    Options ++ [{stream_chunk_size, 1000}]]),
-    io:format("Spawned process ~p~n", [Pid]),
+%%    io:format("Spawned process ~p~n", [Pid]),
     wait_for_resp(Pid).
 
 wait_for_resp(Pid) ->
@@ -354,7 +371,7 @@ wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) ->
             maybe_stream_next(Req_id, Options),
 	    wait_for_async_resp(Req_id, Options, StatCode, Headers, Body);
 	{ibrowse_async_response_end, Req_id} ->
-            io:format("Recvd end of response.~n", []),
+            %% io:format("Recvd end of response.~n", []),
 	    Body_1 = list_to_binary(lists:reverse(Body)),
 	    {ok, Acc_Stat_code, Acc_Headers, Body_1};
 	{ibrowse_async_response, Req_id, Data} ->
@@ -384,7 +401,7 @@ execute_req(Url, Method, Options) ->
 	{ok, SCode, _H, _B} ->
 	    io:format("Status code: ~p~n", [SCode]);
 	Err ->
-	    io:format("Err -> ~p~n", [Err])
+	    io:format("~p~n", [Err])
     end.
 
 drv_ue_test() ->


[13/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Support for replication over SSL.  Resolves COUCHDB-491

This turned out to be a decent amount of work, since:

1) ibrowse did not use SSL on dedicated connections.  Wrote a simplistic patch,
   will contact Chandru for further discussion.
2) When nginx is used for the SSL wrapper, it wants to buffer the changes feed.
   Setting "proxy_buffering off" in nginx.conf helps, but some buffering still
   occurred. Fixed by making couch_rep_changes_feed smart enough to split
   merged chunks.
3) The Erlang ssl application showed instabilities when used with {active,once}.
   Switched to the "new implementation" using {ssl_imp, new} and instabilities
   disappeared.
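
Following up on points 1) and 3): a dedicated connection to an https URL now picks up
SSL automatically, and SSL behaviour can still be tuned per request. A minimal sketch
using the is_ssl/ssl_options request options shown in the diffs; the CouchDB endpoint
is hypothetical:

    %% Illustrative only. {ssl_imp, new} selects the newer OTP ssl
    %% implementation mentioned in point 3.
    ibrowse:send_req("https://couch.example.com:6984/db/_changes?feed=continuous",
                     [], get, [],
                     [{is_ssl, true},
                      {ssl_options, [{ssl_imp, new}]},
                      {stream_to, self()},
                      {response_format, binary}],
                     infinity).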



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@810350 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/96d28d85
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/96d28d85
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/96d28d85

Branch: refs/heads/import-master
Commit: 96d28d85d235dfff8ead68d603d03edb42f3818f
Parents: c6b2bb6
Author: Adam Kocoloski <ko...@apache.org>
Authored: Wed Sep 2 03:40:44 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Wed Sep 2 03:40:44 2009 +0000

----------------------------------------------------------------------
 ibrowse_http_client.erl | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/96d28d85/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index dde258e..5f62f70 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -113,6 +113,16 @@ init({Host, Port}) ->
 		   port = Port},
     put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
     put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+    {ok, State};
+init(#url{host=Host, port=Port, protocol=Protocol}) ->
+    State = #state{
+        host = Host,
+        port = Port,
+        is_ssl = (Protocol == https),
+        ssl_options = [{ssl_imp, new}]
+    },
+    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
     {ok, State}.
 
 %%--------------------------------------------------------------------
@@ -137,7 +147,7 @@ handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
 handle_call(stop, _From, State) ->
     do_close(State),
     do_error_reply(State, closing_on_request),
-    {stop, normal, ok, State};
+    {stop, normal, ok, State#state{socket=undefined}};
 
 handle_call(Request, _From, State) ->
     Reply = {unknown_request, Request},


[04/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
rewrite replicator using OTP behaviours

- only one instance of a given source->target replication runs at a time (sketched below)
- supervisor restarts replications that terminate abnormally
- pull replication streams attachments directly to disk
- improved memory utilization
- temporarily roll back parallel async doc GETs during pull replication
- replication updates show up in Futon Status window
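
For context on the first two bullets, a minimal sketch of how an OTP supervisor can
enforce "one replication per source->target" and restart abnormal terminations.
Module and function names here are illustrative, not the actual couch_rep API:

    -module(rep_sup_sketch).
    -behaviour(supervisor).
    -export([start_link/0, start_replication/2, init/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    %% Keying each child by {Source, Target} means a duplicate start
    %% returns {error, {already_started, Pid}} (or {error, already_present})
    %% instead of running a second copy of the same replication.
    start_replication(Source, Target) ->
        ChildSpec = {{Source, Target},
                     {rep_worker_sketch, start_link, [Source, Target]}, %% hypothetical worker
                     transient, 5000, worker, [rep_worker_sketch]},
        supervisor:start_child(?MODULE, ChildSpec).

    init([]) ->
        %% transient children are restarted only on abnormal termination
        {ok, {{one_for_one, 10, 3600}, []}}.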


git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@751305 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/208131ad
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/208131ad
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/208131ad

Branch: refs/heads/import-master
Commit: 208131ada0c102c077a29ca224584d3309282e45
Parents: 39af2b6
Author: Adam Kocoloski <ko...@apache.org>
Authored: Sat Mar 7 18:48:47 2009 +0000
Committer: Adam Kocoloski <ko...@apache.org>
Committed: Sat Mar 7 18:48:47 2009 +0000

----------------------------------------------------------------------
 ibrowse.erl             |  19 ++-
 ibrowse_http_client.erl | 298 ++++++++++++++++++++++++++++---------------
 ibrowse_test.erl        | 109 +++++++++++++++-
 3 files changed, 311 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/208131ad/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 4e6404a..3390e58 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -192,6 +192,8 @@ send_req(Url, Headers, Method, Body) ->
 %% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
 %% optionList() = [option()]
 %% option() = {max_sessions, integer()}        |
+%%          {response_format,response_format()}| 
+%%          {stream_chunk_size, integer()}     |
 %%          {max_pipeline_size, integer()}     |
 %%          {trace, boolean()}                 | 
 %%          {is_ssl, boolean()}                |
@@ -219,7 +221,7 @@ send_req(Url, Headers, Method, Body) ->
 %% ChunkSize = integer()
 %% srtf() = boolean() | filename()
 %% filename() = string()
-%% 
+%% response_format() = list | binary
 send_req(Url, Headers, Method, Body, Options) ->
     send_req(Url, Headers, Method, Body, Options, 30000).
 
@@ -230,7 +232,8 @@ send_req(Url, Headers, Method, Body, Options) ->
 send_req(Url, Headers, Method, Body, Options, Timeout) ->
     case catch parse_url(Url) of
 	#url{host = Host,
-	     port = Port} = Parsed_url ->
+	     port = Port,
+	     protocol = Protocol} = Parsed_url ->
 	    Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
 			 [] ->
 			     get_lb_pid(Parsed_url);
@@ -241,9 +244,10 @@ send_req(Url, Headers, Method, Body, Options, Timeout) ->
 	    Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
 	    Options_1 = merge_options(Host, Port, Options),
 	    {SSLOptions, IsSSL} =
-		case get_value(is_ssl, Options_1, false) of
+		case (Protocol == https) orelse
+		     get_value(is_ssl, Options_1, false) of
 		    false -> {[], false};
-		    true -> {get_value(ssl_options, Options_1), true}
+		    true -> {get_value(ssl_options, Options_1, []), true}
 		end,
 	    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
 					     Max_sessions, 
@@ -316,6 +320,13 @@ do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
 	    {error, req_timedout};
 	{'EXIT', Reason} ->
 	    {error, {'EXIT', Reason}};
+	{ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
+	    case get_value(response_format, Options, list) of
+		list ->
+		    {ok, St_code, Headers, binary_to_list(Body)};
+		binary ->
+		    Ret
+	    end;
 	Ret ->
 	    Ret
     end.

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/208131ad/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 9a0e4d3..9455bc2 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -38,19 +38,23 @@
 
 -include("ibrowse.hrl").
 
--record(state, {host, port, 
+-record(state, {host, port,
 		use_proxy = false, proxy_auth_digest,
-		ssl_options = [], is_ssl = false, socket, 
-		reqs=queue:new(), cur_req, status=idle, http_status_code, 
-		reply_buffer=[], rep_buf_size=0, recvd_headers=[],
+		ssl_options = [], is_ssl = false, socket,
+		reqs=queue:new(), cur_req, status=idle, http_status_code,
+		reply_buffer=[], rep_buf_size=0, streamed_size = 0,
+		recvd_headers=[],
 		is_closing, send_timer, content_length,
-		deleted_crlf = false, transfer_encoding, chunk_size, 
-		chunks=[], lb_ets_tid, cur_pipeline_size = 0}).
+		deleted_crlf = false, transfer_encoding, chunk_size,
+		chunks=[], lb_ets_tid, cur_pipeline_size = 0
+	       }).
 
 -record(request, {url, method, options, from,
 		  stream_to, req_id,
-		  save_response_to_file = false,
-		  tmp_file_name, tmp_file_fd}).
+		  stream_chunk_size,
+		  save_response_to_file = false, 
+		  tmp_file_name, tmp_file_fd,
+		  response_format}).
 
 -import(ibrowse_lib, [
 		      parse_url/1,
@@ -60,6 +64,8 @@
 		      do_trace/2
 		     ]).
 
+-define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
+
 %%====================================================================
 %% External functions
 %%====================================================================
@@ -127,15 +133,16 @@ init({Host, Port}) ->
 %%--------------------------------------------------------------------
 %% Received a request when the remote server has already sent us a
 %% Connection: Close header
-handle_call({send_req, _}, 
+handle_call({send_req, _},
 	    _From,
 	    #state{is_closing=true}=State) ->
     {reply, {error, connection_closing}, State};
 
-handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}}, 
+handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
 	    From,
 	    #state{socket=undefined,
 		   host=Host, port=Port}=State) ->
+    Resp_format = get_value(response_format, Options, list),
     {Host_1, Port_1, State_1} =
 	case get_value(proxy_host, Options, false) of
 	    false ->
@@ -151,12 +158,14 @@ handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
     StreamTo = get_value(stream_to, Options, undefined),
     ReqId = make_req_id(),
     SaveResponseToFile = get_value(save_response_to_file, Options, false),
-    NewReq = #request{url=Url, 
+    NewReq = #request{url=Url,
 		      method=Method,
 		      stream_to=StreamTo,
-		      options=Options, 
+		      options=Options,
 		      req_id=ReqId,
 		      save_response_to_file = SaveResponseToFile,
+		      stream_chunk_size = get_stream_chunk_size(Options),
+		      response_format = Resp_format,
 		      from=From},
     Reqs = queue:in(NewReq, State#state.reqs),
     State_2 = check_ssl_options(Options, State_1#state{reqs = Reqs}),
@@ -208,15 +217,18 @@ handle_call({send_req, {Url, Headers, Method,
 	    From,
 	    #state{socket=Sock, status=Status, reqs=Reqs}=State) ->
     do_trace("Recvd request in connected state. Status -> ~p NumPending: ~p~n", [Status, length(queue:to_list(Reqs))]),
+    Resp_format = get_value(response_format, Options, list),
     StreamTo = get_value(stream_to, Options, undefined),
     SaveResponseToFile = get_value(save_response_to_file, Options, false),
     ReqId = make_req_id(),
-    NewReq = #request{url=Url, 
+    NewReq = #request{url=Url,
 		      stream_to=StreamTo,
 		      method=Method,
-		      options=Options, 
+		      options=Options,
 		      req_id=ReqId,
 		      save_response_to_file = SaveResponseToFile,
+		      stream_chunk_size = get_stream_chunk_size(Options),
+		      response_format = Resp_format,
 		      from=From},
     State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
     case send_req_1(Url, Headers, Method, Body, Options, Sock, State_1) of
@@ -359,14 +371,14 @@ handle_sock_data(Data, #state{status=get_header, socket=Sock}=State) ->
 
 handle_sock_data(Data, #state{status=get_body, content_length=CL,
 			      http_status_code = StatCode,
-			      recvd_headers=Headers, 
+			      recvd_headers=Headers,
 			      chunk_size=CSz, socket=Sock}=State) ->
     case (CL == undefined) and (CSz == undefined) of
 	true ->
 	    case accumulate_response(Data, State) of
 		{error, Reason} ->
 		    shutting_down(State),
-		    fail_pipelined_requests(State, 
+		    fail_pipelined_requests(State,
 					    {error, {Reason, {stat_code, StatCode}, Headers}}),
 		    {stop, normal, State};
 		State_1 ->
@@ -377,7 +389,7 @@ handle_sock_data(Data, #state{status=get_body, content_length=CL,
 	    case parse_11_response(Data, State) of
 		{error, Reason} ->
 		    shutting_down(State),
-		    fail_pipelined_requests(State, 
+		    fail_pipelined_requests(State,
 					    {error, {Reason, {stat_code, StatCode}, Headers}}),
 		    {stop, normal, State};
 		stop ->
@@ -433,14 +445,27 @@ accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Save
 accumulate_response([], State) ->
     State;
 accumulate_response(Data, #state{reply_buffer = RepBuf,
+				 rep_buf_size = RepBufSize,
+				 streamed_size = Streamed_size,
 				 cur_req = CurReq}=State) ->
-    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+    #request{stream_to=StreamTo, req_id=ReqId,
+	     stream_chunk_size = Stream_chunk_size,
+	     response_format = Response_format} = CurReq,
+    RepBuf_1 = [Data | RepBuf],
+    New_data_size = RepBufSize - Streamed_size,
     case StreamTo of
 	undefined ->
-	    State#state{reply_buffer = [Data | RepBuf]};
+	    State#state{reply_buffer = RepBuf_1};
+	_ when New_data_size < Stream_chunk_size ->
+	    State#state{reply_buffer = RepBuf_1};
 	_ ->
-	    do_interim_reply(StreamTo, ReqId, Data),
-	    State
+	    {Stream_chunk, Rem_data} = split_list_at(flatten(lists:reverse(RepBuf_1)), Stream_chunk_size),
+	    do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
+	    accumulate_response(
+	      Rem_data,
+	      State#state{
+		reply_buffer = [],
+		streamed_size = Streamed_size + Stream_chunk_size})
     end.
 
 make_tmp_filename() ->
@@ -463,7 +488,7 @@ handle_sock_closed(#state{status=get_header}=State) ->
 handle_sock_closed(#state{cur_req=undefined} = State) ->
     shutting_down(State);
 
-%% We check for IsClosing because this the server could have sent a 
+%% We check for IsClosing because this the server could have sent a
 %% Connection-Close header and has closed the socket to indicate end
 %% of response. There maybe requests pipelined which need a response.
 handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
@@ -471,18 +496,18 @@ handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
 			  cur_req=#request{tmp_file_name=TmpFilename,
 					   tmp_file_fd=Fd} = CurReq,
 			  status=get_body, recvd_headers=Headers}=State) ->
-    #request{from=From, stream_to=StreamTo, req_id=ReqId} = CurReq,
+    #request{from=From, stream_to=StreamTo, req_id=ReqId,
+	     response_format = Resp_format} = CurReq,
     case IsClosing of
 	true ->
 	    {_, Reqs_1} = queue:out(Reqs),
 	    case TmpFilename of
 		undefined ->
-		    do_reply(State, From, StreamTo, ReqId,
-			     {ok, SC, Headers,
-			      lists:flatten(lists:reverse(Buf))});
+		    do_reply(State, From, StreamTo, ReqId, Resp_format,
+			     {ok, SC, Headers, lists:reverse(Buf)});
 		_ ->
 		    file:close(Fd),
-		    do_reply(State, From, StreamTo, ReqId,
+		    do_reply(State, From, StreamTo, ReqId, Resp_format,
 			     {ok, SC, Headers, {file, TmpFilename}})
 	    end,
 	    do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
@@ -493,9 +518,13 @@ handle_sock_closed(#state{reply_buffer=Buf, reqs=Reqs, http_status_code=SC,
     end.
 
 do_connect(Host, Port, _Options, #state{is_ssl=true, ssl_options=SSLOptions}, Timeout) ->
-    ssl:connect(Host, Port, [{nodelay, true}, {active, false} | SSLOptions], Timeout);
+    ssl:connect(Host, Port,
+		[{nodelay, true}, {active, false} | SSLOptions],
+		Timeout);
 do_connect(Host, Port, _Options, _State, Timeout) ->
-    gen_tcp:connect(Host, Port, [{nodelay, true}, {active, false}], Timeout).
+    gen_tcp:connect(Host, Port,
+		    [{nodelay, true}, {active, false}],
+		    Timeout).
 
 do_send(Sock, Req, true)  ->  ssl:send(Sock, Req);
 do_send(Sock, Req, false) ->  gen_tcp:send(Sock, Req).
@@ -542,7 +571,7 @@ check_ssl_options(Options, State) ->
 
 send_req_1(#url{abspath = AbsPath,
 		host = Host,
-		port = Port, 
+		port = Port,
 		path = RelPath} = Url,
 	   Headers, Method, Body, Options, Sock, State) ->
     Headers_1 = add_auth_headers(Url, Options, Headers, State),
@@ -555,10 +584,10 @@ send_req_1(#url{abspath = AbsPath,
 			  {value, {_, Host_h_val}} ->
 			      Host_h_val
 		      end,
-    {Req, Body_1} = make_request(Method, 
+    {Req, Body_1} = make_request(Method,
 				 [{"Host", HostHeaderValue} | Headers_1],
 				 AbsPath, RelPath, Body, Options, State#state.use_proxy),
-    case get(my_trace_flag) of 
+    case get(my_trace_flag) of
 	true ->
 	    %%Avoid the binary operations if trace is not on...
 	    NReq = binary_to_list(list_to_binary(Req)),
@@ -569,7 +598,7 @@ send_req_1(#url{abspath = AbsPath,
     end,
     SndRes = case do_send(Sock, Req, State#state.is_ssl) of
 		 ok -> do_send_body(Sock, Body_1, State#state.is_ssl);
-		 Err -> 
+		 Err ->
 		     io:format("Err: ~p~n", [Err]),
 		     Err
 	     end,
@@ -577,9 +606,9 @@ send_req_1(#url{abspath = AbsPath,
     SndRes.
 
 add_auth_headers(#url{username = User,
-		      password = UPw}, 
+		      password = UPw},
 		 Options,
-		 Headers, 
+		 Headers,
 		 #state{use_proxy = UseProxy,
 		        proxy_auth_digest = ProxyAuthDigest}) ->
     Headers_1 = case User of
@@ -601,7 +630,7 @@ add_auth_headers(#url{username = User,
 	true ->
 	    [{"Proxy-Authorization", ["Basic ", ProxyAuthDigest]} | Headers_1]
     end.
-			
+
 http_auth_digest([], []) ->
     [];
 http_auth_digest(Username, Password) ->
@@ -617,7 +646,7 @@ encode_base64([A,B,C|Ls]) ->
     encode_base64_do(A,B,C, Ls).
 encode_base64_do(A,B,C, Rest) ->
     BB = (A bsl 16) bor (B bsl 8) bor C,
-    [e(BB bsr 18), e((BB bsr 12) band 63), 
+    [e(BB bsr 18), e((BB bsr 12) band 63),
      e((BB bsr 6) band 63), e(BB band 63)|encode_base64(Rest)].
 
 e(X) when X >= 0, X < 26 -> X+65;
@@ -643,12 +672,12 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, UseProxy) ->
 	    _ ->
 		Headers
 	end,
-    {Headers_2, Body_1} = 
+    {Headers_2, Body_1} =
 	case get_value(transfer_encoding, Options, false) of
 	    false ->
 		{Headers_1, Body};
 	    {chunked, ChunkSize} ->
-		{[{X, Y} || {X, Y} <- Headers_1, 
+		{[{X, Y} || {X, Y} <- Headers_1,
 			    X /= "Content-Length",
 			    X /= "content-length",
 			    X /= content_length] ++
@@ -659,7 +688,7 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, UseProxy) ->
     Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
 	      true ->
 		  AbsPath;
-	      false -> 
+	      false ->
 		  RelPath
 	  end,
     {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
@@ -732,7 +761,7 @@ parse_response(_Data, #state{cur_req = undefined}=State) ->
 parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 			    cur_req=CurReq}=State) ->
     #request{from=From, stream_to=StreamTo, req_id=ReqId,
-	     method=Method} = CurReq,
+	     method=Method, response_format = Resp_format} = CurReq,
     MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
     case scan_header(Data, Acc) of
 	{yes, Headers, Data_1}  ->
@@ -749,7 +778,8 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		false ->
 		    ok
 	    end,
-	    State_1 = State#state{recvd_headers=Headers_1, status=get_body, 
+	    State_1 = State#state{recvd_headers=Headers_1, status=get_body,
+				  reply_buffer = [],
 				  http_status_code=StatCode, is_closing=IsClosing},
 	    put(conn_close, ConnClose),
 	    TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
@@ -757,7 +787,8 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		_ when Method == head ->
 		    {_, Reqs_1} = queue:out(Reqs),
 		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, {ok, StatCode, Headers_1, []}),
+		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+					 {ok, StatCode, Headers_1, []}),
 		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
 		    State_2 = reset_state(State_1_1),
 		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
@@ -776,7 +807,8 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		    %% RFC2616 - Sec 4.4
 		    {_, Reqs_1} = queue:out(Reqs),
 		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
-		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, {ok, StatCode, Headers_1, []}),
+		    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+					 {ok, StatCode, Headers_1, []}),
 		    cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
 		    State_2 = reset_state(State_1_1),
 		    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
@@ -788,7 +820,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 								 chunk_size=chunk_start,
 								 reply_buffer=[], chunks=[]}) of
 			{error, Reason} ->
-			    fail_pipelined_requests(State_1, 
+			    fail_pipelined_requests(State_1,
 						    {error, {Reason,
 							     {stat_code, StatCode}, Headers_1}}),
 			    {error, Reason};
@@ -800,7 +832,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 		    send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
 		    State_1#state{reply_buffer=[Data_1]};
 		undefined ->
-		    fail_pipelined_requests(State_1, 
+		    fail_pipelined_requests(State_1,
 					    {error, {content_length_undefined,
 						     {stat_code, StatCode}, Headers}}),
 		    {error, content_length_undefined};
@@ -814,7 +846,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 						    content_length=V_1},
 			    case parse_11_response(Data_1, State_2) of
 				{error, Reason} ->
-				    fail_pipelined_requests(State_1, 
+				    fail_pipelined_requests(State_1,
 							    {error, {Reason,
 								     {stat_code, StatCode}, Headers_1}}),
 				    {error, Reason};
@@ -822,7 +854,7 @@ parse_response(Data, #state{reply_buffer=Acc, reqs=Reqs,
 				    State_3
 			    end;
 			_ ->
-			    fail_pipelined_requests(State_1, 
+			    fail_pipelined_requests(State_1,
 					    {error, {content_length_undefined,
 						     {stat_code, StatCode}, Headers}}),
 			    {error, content_length_undefined}
@@ -843,25 +875,28 @@ is_connection_closing("HTTP/1.0", "false") -> true;
 is_connection_closing(_, _)                -> false.
 
 %% This clause determines the chunk size when given data from the beginning of the chunk
-parse_11_response(DataRecvd, 
-		  #state{transfer_encoding=chunked,
+parse_11_response(DataRecvd,
+		  #state{transfer_encoding=chunked, 
 			 chunk_size=chunk_start,
 			 cur_req=CurReq,
-			 reply_buffer=Buf}=State) ->
+			 reply_buffer=Buf
+			}=State) ->
     case scan_crlf(DataRecvd, Buf) of
 	{yes, ChunkHeader, Data_1} ->
 	    case parse_chunk_header(ChunkHeader) of
 		{error, Reason} ->
 		    {error, Reason};
 		ChunkSize ->
-		    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+		    #request{stream_to=StreamTo, req_id=ReqId,
+			     response_format = Response_format} = CurReq,
 		    %%
 		    %% Do we have to preserve the chunk encoding when streaming?
 		    %%
-		    do_interim_reply(StreamTo, ReqId, {chunk_start, ChunkSize}),
+		    do_interim_reply(StreamTo, Response_format,
+				     ReqId, {chunk_start, ChunkSize}),
 		    RemLen = length(Data_1),
 		    do_trace("Determined chunk size: ~p. Already recvd: ~p~n", [ChunkSize, RemLen]),
-		    parse_11_response(Data_1, State#state{rep_buf_size=0, 
+		    parse_11_response(Data_1, State#state{rep_buf_size=0,
 							  reply_buffer=[],
 							  deleted_crlf=true,
 							  chunk_size=ChunkSize})
@@ -871,29 +906,34 @@ parse_11_response(DataRecvd,
     end;
 
 %% This clause is there to remove the CRLF between two chunks
-%% 
-parse_11_response(DataRecvd, 
-		  #state{transfer_encoding=chunked,
+%%
+parse_11_response(DataRecvd,
+		  #state{transfer_encoding=chunked, 
 			 chunk_size=tbd,
 			 chunks = Chunks,
 			 cur_req=CurReq,
 			 reply_buffer=Buf}=State) ->
     case scan_crlf(DataRecvd, Buf) of
 	{yes, _, NextChunk} ->
-	    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+	    #request{stream_to=StreamTo, req_id=ReqId,
+		     response_format = Response_format} = CurReq,
 	    %%
 	    %% Do we have to preserve the chunk encoding when streaming?
 	    %%
 	    State_1 = State#state{chunk_size=chunk_start,
-				  rep_buf_size=0, 
+				  rep_buf_size=0,
 				  reply_buffer=[],
 				  deleted_crlf=true},
 	    State_2 = case StreamTo of
 			  undefined ->
 			      State_1#state{chunks = [Buf | Chunks]};
-		_ ->
-			      do_interim_reply(StreamTo, ReqId, chunk_end),
-			      State_1
+			  _ ->
+			      %% Flush out all buffered data as chunk is ending
+			      do_interim_reply(StreamTo, Response_format, ReqId,
+					       lists:reverse([Buf | Chunks])),
+			      do_interim_reply(StreamTo, Response_format,
+					       ReqId, chunk_end),
+			      State_1#state{chunks = [], streamed_size = 0}
 		      end,
 	    parse_11_response(NextChunk, State_2);
 	{no, Data_1} ->
@@ -901,26 +941,27 @@ parse_11_response(DataRecvd,
     end;
 
 %% This clause deals with the end of a chunked transfer
-parse_11_response(DataRecvd, 
-		  #state{transfer_encoding=chunked, chunk_size=0,
+parse_11_response(DataRecvd,
+		  #state{transfer_encoding=chunked, chunk_size=0, 
 			 cur_req=CurReq,
 			 deleted_crlf = DelCrlf,
 			 reply_buffer=Trailer, reqs=Reqs}=State) ->
     do_trace("Detected end of chunked transfer...~n", []),
     DataRecvd_1 = case DelCrlf of
-		      false -> 
+		      false ->
 			  DataRecvd;
 		      true ->
 			  [$\r, $\n | DataRecvd]
 		  end,
-    #request{stream_to=StreamTo, req_id=ReqId} = CurReq,
+    #request{stream_to=StreamTo, req_id=ReqId,
+	     response_format = Response_format} = CurReq,
     case scan_header(DataRecvd_1, Trailer) of
 	{yes, _TEHeaders, Rem} ->
 	    {_, Reqs_1} = queue:out(Reqs),
 	    %%
-	    %% Do we have to preserve the chunk encoding when streaming?
+	    %% Do we have to preserve the chunk encoding when streaming? Nope.
 	    %%
-	    do_interim_reply(StreamTo, ReqId, chunk_end),
+	    do_interim_reply(StreamTo, Response_format, ReqId, chunk_end),
 	    State_1 = handle_response(CurReq, State#state{reqs=Reqs_1}),
 	    parse_response(Rem, reset_state(State_1));
 	{no, Rem} ->
@@ -928,7 +969,7 @@ parse_11_response(DataRecvd,
     end;
 
 %% This clause extracts a chunk, given the size.
-parse_11_response(DataRecvd, 
+parse_11_response(DataRecvd,
 		  #state{transfer_encoding=chunked, chunk_size=CSz,
 			 rep_buf_size=RepBufSz}=State) ->
     NeedBytes = CSz - RepBufSz,
@@ -952,12 +993,12 @@ parse_11_response(DataRecvd,
 		    parse_11_response(RemData, State_2)
 	    end;
 	false ->
-	    accumulate_response(DataRecvd, State#state{rep_buf_size=RepBufSz + DataLen})
+	    accumulate_response(DataRecvd, State#state{rep_buf_size=(RepBufSz + DataLen)})
     end;
 
 %% This clause to extract the body when Content-Length is specified
-parse_11_response(DataRecvd, 
-		  #state{content_length=CL, rep_buf_size=RepBufSz, 
+parse_11_response(DataRecvd,
+		  #state{content_length=CL, rep_buf_size=RepBufSz,
 			 reqs=Reqs}=State) ->
     NeedBytes = CL - RepBufSz,
     DataLen = length(DataRecvd),
@@ -970,11 +1011,12 @@ parse_11_response(DataRecvd,
 	    State_3 = reset_state(State_2),
 	    parse_response(Rem, State_3);
 	false ->
-	    accumulate_response(DataRecvd, State#state{rep_buf_size=RepBufSz+DataLen})
+	    accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
     end.
 
 handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-			 save_response_to_file = SaveResponseToFile, 
+			 response_format = Resp_format,
+			 save_response_to_file = SaveResponseToFile,
 			 tmp_file_name = TmpFilename,
 			 tmp_file_fd = Fd
 			},
@@ -986,9 +1028,9 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 		       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
     Body = case TEnc of
 	       chunked ->
-		   lists:flatten(lists:reverse(Chunks));
+		   lists:reverse(Chunks);
 	       _ ->
-		   lists:flatten(lists:reverse(RepBuf))
+		   lists:reverse(RepBuf)
 	   end,
     State_1 = set_cur_request(State),
     file:close(Fd),
@@ -998,32 +1040,38 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
 		       _ ->
 			   {file, TmpFilename}
 		   end,
-    State_2 = do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, ResponseBody}),
+    State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+		       {ok, SCode, RespHeaders, ResponseBody}),
     cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
     State_2;
-handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId},
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+			 response_format = Resp_format},
 		#state{http_status_code=SCode, recvd_headers=RespHeaders,
 		       reply_buffer=RepBuf, transfer_encoding=TEnc,
 		       chunks=Chunks, send_timer=ReqTimer}=State) ->
     Body = case TEnc of
 	       chunked ->
-		   lists:flatten(lists:reverse(Chunks));
+		   lists:reverse(Chunks);
 	       _ ->
-		   lists:flatten(lists:reverse(RepBuf))
+		   lists:reverse(RepBuf)
 	   end,
-    State_1 = set_cur_request(State),
-    case get(conn_close) of
+%%    State_1 = set_cur_request(State),
+    State_1 = case get(conn_close) of
 	"close" ->
-	    do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, Body}),
+	    do_reply(State, From, StreamTo, ReqId, Resp_format,
+		     {ok, SCode, RespHeaders, Body}),
 	    exit(normal);
 	_ ->
-	    State_2 = do_reply(State_1, From, StreamTo, ReqId, {ok, SCode, RespHeaders, Body}),
+	    State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format,
+				 {ok, SCode, RespHeaders, Body}),
 	    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-	    State_2
-    end.
+	    State_1_1
+    end,
+    set_cur_request(State_1).
 
 reset_state(State) ->
-    State#state{status=get_header, rep_buf_size=0,content_length=undefined,
+    State#state{status=get_header, rep_buf_size=0, streamed_size = 0,
+		content_length=undefined,
 		reply_buffer=[], chunks=[], recvd_headers=[], deleted_crlf=false,
 		http_status_code=undefined, chunk_size=undefined, transfer_encoding=undefined}.
 
@@ -1063,18 +1111,18 @@ parse_headers_1(String) ->
     parse_headers_1(String, [], []).
 
 parse_headers_1([$\n, H |T], [$\r | L], Acc) when H == 32;
-						  H == $\t -> 
+						  H == $\t ->
     parse_headers_1(lists:dropwhile(fun(X) ->
 					    is_whitespace(X)
 				    end, T), [32 | L], Acc);
-parse_headers_1([$\n|T], [$\r | L], Acc) -> 
+parse_headers_1([$\n|T], [$\r | L], Acc) ->
     case parse_header(lists:reverse(L)) of
 	invalid ->
 	    parse_headers_1(T, [], Acc);
 	NewHeader ->
 	    parse_headers_1(T, [], [NewHeader | Acc])
     end;
-parse_headers_1([H|T],  L, Acc) -> 
+parse_headers_1([H|T],  L, Acc) ->
     parse_headers_1(T, [H|L], Acc);
 parse_headers_1([], [], Acc) ->
     lists:reverse(Acc);
@@ -1185,7 +1233,7 @@ parse_chunk_header([H | T], Acc) ->
 parse_chunk_header([], Acc) ->
     hexlist_to_integer(lists:reverse(Acc)).
 
-is_whitespace(32)  -> true;
+is_whitespace($\s)  -> true;
 is_whitespace($\r) -> true;
 is_whitespace($\n) -> true;
 is_whitespace($\t) -> true;
@@ -1197,36 +1245,62 @@ send_async_headers(_ReqId, undefined, _StatCode, _Headers) ->
 send_async_headers(ReqId, StreamTo, StatCode, Headers) ->
     catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers}.
 
-do_reply(State, From, undefined, _, Msg) ->
+format_response_data(Resp_format, Body) ->
+    case Resp_format of
+	list when is_list(Body) ->
+	    flatten(Body);
+	binary when is_list(Body) ->
+	    list_to_binary(Body);
+	_ ->
+	    %% This is to cater for sending messages such as
+	    %% {chunk_start, _}, chunk_end etc
+	    Body
+    end.
+
+do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
+    Msg_1 = {ok, St_code, Headers, format_response_data(Resp_format, Body)},
+    gen_server:reply(From, Msg_1),
+    dec_pipeline_counter(State);
+do_reply(State, From, undefined, _, _, Msg) ->
     gen_server:reply(From, Msg),
     dec_pipeline_counter(State);
-do_reply(State, _From, StreamTo, ReqId, {ok, _, _, _}) ->
+do_reply(State, _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
     State_1 = dec_pipeline_counter(State),
+    case Body of
+	[] ->
+	    ok;
+	_ ->
+	    Body_1 = format_response_data(Resp_format, Body),
+	    catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
+    end,
     catch StreamTo ! {ibrowse_async_response_end, ReqId},
     State_1;
-do_reply(State, _From, StreamTo, ReqId, Msg) ->
+do_reply(State, _From, StreamTo, ReqId, Resp_format, Msg) ->
     State_1 = dec_pipeline_counter(State),
-    catch StreamTo ! {ibrowse_async_response, ReqId, Msg},
+    Msg_1 = format_response_data(Resp_format, Msg),
+    catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1},
     State_1.
 
-do_interim_reply(undefined, _ReqId, _Msg) ->
+do_interim_reply(undefined, _, _ReqId, _Msg) ->
     ok;
-do_interim_reply(StreamTo, ReqId, Msg) ->
-    catch StreamTo ! {ibrowse_async_response, ReqId, Msg}.
+do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
+    Msg_1 = format_response_data(Response_format, Msg),
+    catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
 
 do_error_reply(#state{reqs = Reqs} = State, Err) ->
     ReqList = queue:to_list(Reqs),
-    lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId}) ->
-                          do_reply(State, From, StreamTo, ReqId, {error, Err})
+    lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+			       response_format = Resp_format}) ->
+                          do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
 		  end, ReqList).
 
 fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
     {_, Reqs_1} = queue:out(Reqs),
-    #request{from=From, stream_to=StreamTo, req_id=ReqId} = CurReq,
-    do_reply(State, From, StreamTo, ReqId, Reply),
+    #request{from=From, stream_to=StreamTo, req_id=ReqId,
+	     response_format = Resp_format} = CurReq,
+    do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
     do_error_reply(State#state{reqs = Reqs_1}, previous_request_failed).
 
-
 split_list_at(List, N) ->
     split_list_at(List, N, []).
 split_list_at([], _, Acc) ->
@@ -1271,7 +1345,7 @@ cancel_timer(Ref)       -> erlang:cancel_timer(Ref).
 
 cancel_timer(Ref, {eat_message, Msg}) ->
     cancel_timer(Ref),
-    receive 
+    receive
 	Msg ->
 	    ok
     after 0 ->
@@ -1310,3 +1384,19 @@ dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
     ets:delete(Tid, {Pipe_sz, self()}),
     ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
     State#state{cur_pipeline_size = Pipe_sz - 1}.
+
+flatten([H | _] = L) when is_integer(H) ->
+    L;
+flatten([H | _] = L) when is_list(H) ->
+    lists:flatten(L);
+flatten([]) ->
+    [].
+
+get_stream_chunk_size(Options) ->
+    case lists:keysearch(stream_chunk_size, 1, Options) of
+	{value, {_, V}} when V > 0 ->
+	    V;
+	_ ->
+	    ?DEFAULT_STREAM_CHUNK_SIZE
+    end.
+	

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/208131ad/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/ibrowse_test.erl b/ibrowse_test.erl
index b4429c9..de8865f 100644
--- a/ibrowse_test.erl
+++ b/ibrowse_test.erl
@@ -14,7 +14,10 @@
 	 drv_ue_test/0,
 	 drv_ue_test/1,
 	 ue_test/0,
-	 ue_test/1
+	 ue_test/1,
+	 verify_chunked_streaming/0,
+	 verify_chunked_streaming/1,
+	 i_do_async_req_list/4
 	]).
 
 -import(ibrowse_lib, [printable_date/0]).
@@ -88,7 +91,7 @@ do_wait() ->
 		    do_wait()
 	    end
     end.
-		     
+
 do_send_req(Url, NumReqs) ->
     do_send_req_1(Url, NumReqs).
 
@@ -149,7 +152,7 @@ dump_errors(Key, Iod) ->
 -define(TEST_LIST, [{"http://intranet/messenger", get},
 		    {"http://www.google.co.uk", get},
 		    {"http://www.google.com", get},
-		    {"http://www.google.com", options}, 
+		    {"http://www.google.com", options},
 		    {"http://www.sun.com", get},
 		    {"http://www.oracle.com", get},
 		    {"http://www.bbc.co.uk", get},
@@ -172,7 +175,8 @@ dump_errors(Key, Iod) ->
 		    {"http://jigsaw.w3.org/HTTP/400/toolong/", get},
 		    {"http://jigsaw.w3.org/HTTP/300/", get},
 		    {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
-		    {"http://jigsaw.w3.org/HTTP/CL/", get}
+		    {"http://jigsaw.w3.org/HTTP/CL/", get},
+		    {"http://www.httpwatch.com/httpgallery/chunked/", get}
 		   ]).
 
 unit_tests() ->
@@ -185,13 +189,104 @@ unit_tests(Options) ->
 			  execute_req(Url, Method, X_Opts ++ Options)
 		  end, ?TEST_LIST).
 
-execute_req(Url, Method) ->
-    execute_req(Url, Method, []).
+verify_chunked_streaming() ->
+    verify_chunked_streaming([]).
+
+verify_chunked_streaming(Options) ->
+    Url = "http://www.httpwatch.com/httpgallery/chunked/",
+    io:format("URL: ~s~n", [Url]),
+    io:format("Fetching data without streaming...~n", []),
+    Result_without_streaming = ibrowse:send_req(
+				 Url, [], get, [],
+				 [{response_format, binary} | Options]),
+    io:format("Fetching data with streaming as list...~n", []),
+    Async_response_list = do_async_req_list(
+			    Url, get, [{response_format, list}]),
+    io:format("Fetching data with streaming as binary...~n", []),
+    Async_response_bin = do_async_req_list(
+			   Url, get, [{response_format, binary}]),
+    compare_responses(Result_without_streaming, Async_response_list, Async_response_bin).
+
+compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
+    success;
+compare_responses({ok, St_code, _, Body_1}, {ok, St_code, _, Body_2}, {ok, St_code, _, Body_3}) ->
+    case Body_1 of
+	Body_2 ->
+	    io:format("Body_1 and Body_2 match~n", []);
+	Body_3 ->
+	    io:format("Body_1 and Body_3 match~n", []);
+	_ when Body_2 == Body_3 ->
+	    io:format("Body_2 and Body_3 match~n", []);
+	_ ->
+	    io:format("All three bodies are different!~n", [])
+    end,
+    fail_bodies_mismatch;
+compare_responses(R1, R2, R3) ->
+    io:format("R1 -> ~p~n", [R1]),
+    io:format("R2 -> ~p~n", [R2]),
+    io:format("R3 -> ~p~n", [R3]),
+    fail.
+
+do_async_req_list(Url) ->
+    do_async_req_list(Url, get).
+
+do_async_req_list(Url, Method) ->
+    do_async_req_list(Url, Method, [{stream_to, self()},
+				    {stream_chunk_size, 1000}]).
+
+do_async_req_list(Url, Method, Options) ->
+    {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
+				   [self(), Url, Method, 
+				    Options ++ [{stream_chunk_size, 1000}]]),
+    io:format("Spawned process ~p~n", [Pid]),
+    wait_for_resp(Pid).
+
+wait_for_resp(Pid) ->
+    receive
+	{async_result, Pid, Res} ->
+	    Res;
+	{'DOWN', _, _, Pid, Reason} ->
+	    {'EXIT', Reason};
+	{'DOWN', _, _, _, _} ->
+	    wait_for_resp(Pid);
+	Msg ->
+	    io:format("Recvd unknown message: ~p~n", [Msg]),
+	    wait_for_resp(Pid)
+    after 10000 ->
+	  {error, timeout}
+    end.
+
+i_do_async_req_list(Parent, Url, Method, Options) ->
+    Res = ibrowse:send_req(Url, [], Method, [], [{stream_to, self()} | Options]),
+    case Res of
+	{ibrowse_req_id, Req_id} ->
+	    Result = wait_for_async_resp(Req_id, undefined, undefined, []),
+	    Parent ! {async_result, self(), Result};
+	Err ->
+	    Parent ! {async_result, self(), Err}
+    end.
+
+wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body) ->
+    receive
+	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
+	    wait_for_async_resp(Req_id, StatCode, Headers, Body);
+	{ibrowse_async_response, Req_id, {chunk_start, _}} ->
+	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body);
+	{ibrowse_async_response, Req_id, chunk_end} ->
+	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, Body);
+	{ibrowse_async_response_end, Req_id} ->
+	    Body_1 = list_to_binary(lists:reverse(Body)),
+	    {ok, Acc_Stat_code, Acc_Headers, Body_1};
+	{ibrowse_async_response, Req_id, Data} ->
+	    wait_for_async_resp(Req_id, Acc_Stat_code, Acc_Headers, [Data | Body]);
+	Err ->
+	    {ok, Acc_Stat_code, Acc_Headers, Err}
+    end.
 
 execute_req(Url, Method, Options) ->
     io:format("~s, ~p: ", [Url, Method]),
     Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
-    case Result of 
+    case Result of
 	{ok, SCode, _H, _B} ->
 	    io:format("Status code: ~p~n", [SCode]);
 	Err ->


[25/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Upgrade ibrowse to version 2.1.1

This ibrowse release includes a few important fixes.
See https://github.com/cmullaparthi/ibrowse for the list of fixes.



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@1050633 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/211fbf3a
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/211fbf3a
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/211fbf3a

Branch: refs/heads/import-master
Commit: 211fbf3a6e63e83af032805b0bc4cf9eb98edf06
Parents: 8f3735f
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Sat Dec 18 13:10:15 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Sat Dec 18 13:10:15 2010 +0000

----------------------------------------------------------------------
 Makefile.am             |  2 +-
 ibrowse.app.in          |  2 +-
 ibrowse.erl             |  2 +-
 ibrowse_http_client.erl | 45 ++++++++++++++++----------------------------
 ibrowse_lib.erl         | 19 -------------------
 5 files changed, 19 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/211fbf3a/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index 8c5d3f8..deddd5a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@
 ## License for the specific language governing permissions and limitations under
 ## the License.
 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.0/ebin
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.1/ebin
 
 ibrowse_file_collection = \
 	ibrowse.app.in \

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/211fbf3a/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index e8580d1..aee0f20 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,6 +1,6 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "2.1.0"},
+         {vsn, "2.1.1"},
          {modules, [ ibrowse, 
 		     ibrowse_http_client, 
 		     ibrowse_app, 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/211fbf3a/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 1a42f4b..6e20cfb 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -7,7 +7,7 @@
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
 %% @copyright 2005-2010 Chandrashekhar Mullaparthi
-%% @version 2.1.0
+%% @version 2.1.1
 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/211fbf3a/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index 5ff323c..0135a49 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -69,7 +69,7 @@
                      ]).
 
 -define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
-
+-define(dec2hex(X), erlang:integer_to_list(X, 16)).
 %%====================================================================
 %% External functions
 %%====================================================================
@@ -197,7 +197,7 @@ handle_info({stream_close, _Req_id}, State) ->
     shutting_down(State),
     do_close(State),
     do_error_reply(State, closing_on_request),
-    {stop, normal, ok, State};
+    {stop, normal, State};
 
 handle_info({tcp_closed, _Sock}, State) ->    
     do_trace("TCP connection closed by peer!~n", []),
@@ -369,15 +369,6 @@ accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf
         {error, Reason} ->
             {error, {file_write_error, Reason}}
     end;
-%% accumulate_response(<<>>, #state{cur_req = #request{caller_controls_socket = Ccs},
-%%                                  socket = Socket} = State) ->
-%%     case Ccs of
-%%         true ->
-%%             do_setopts(Socket, [{active, once}], State);
-%%         false ->
-%%             ok
-%%     end,
-%%     State;
 accumulate_response(Data, #state{reply_buffer      = RepBuf,
                                  rep_buf_size      = RepBufSize,
                                  streamed_size     = Streamed_size,
@@ -544,7 +535,7 @@ do_send_body1(Source, Resp, State, TE) ->
 maybe_chunked_encode(Data, false) ->
     Data;
 maybe_chunked_encode(Data, true) ->
-    [ibrowse_lib:dec2hex(byte_size(to_binary(Data))), "\r\n", Data, "\r\n"].
+    [?dec2hex(size(to_binary(Data))), "\r\n", Data, "\r\n"].
 
 do_close(#state{socket = undefined})            ->  ok;
 do_close(#state{socket = Sock,
@@ -683,8 +674,7 @@ send_req_1(From,
                 path    = RelPath} = Url,
            Headers, Method, Body, Options, Timeout,
            #state{status    = Status,
-                  socket    = Socket,
-                  is_ssl    = Is_ssl} = State) ->
+                  socket    = Socket} = State) ->
     ReqId = make_req_id(),
     Resp_format = get_value(response_format, Options, list),
     Caller_socket_options = get_value(socket_options, Options, []),
@@ -723,7 +713,7 @@ send_req_1(From,
                                  Headers_1,
                                  AbsPath, RelPath, Body, Options, State_1),
     trace_request(Req),
-    do_setopts(Socket, Caller_socket_options, Is_ssl),
+    do_setopts(Socket, Caller_socket_options, State_1),
     TE = is_chunked_encoding_specified(Options),
     case do_send(Req, State_1) of
         ok ->
@@ -831,17 +821,14 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options,
     Headers_0 = [Fun1(X) || X <- Headers],
     Headers_1 =
         case lists:keysearch("content-length", 1, Headers_0) of
-            false when (Body == []) orelse
-                       (Body == <<>>) orelse
-                       is_tuple(Body) orelse
-                       is_function(Body) ->
-                Headers_0;
-            false when is_binary(Body) ->
-                [{"content-length", "content-length", integer_to_list(size(Body))} | Headers_0];
-            false when is_list(Body) ->
-                [{"content-length", "content-length", integer_to_list(length(Body))} | Headers_0];
+            false when (Body =:= [] orelse Body =:= <<>>) andalso
+                       (Method =:= post orelse Method =:= put) ->
+                [{"content-length", "Content-Length", "0"} | Headers_0];
+            false when is_binary(Body) orelse is_list(Body) ->
+                [{"content-length", "Content-Length", integer_to_list(iolist_size(Body))} | Headers_0];
             _ ->
-                %% Content-Length is already specified
+                %% Content-Length is already specified or Body is a
+                %% function or function/state pair
                 Headers_0
         end,
     {Headers_2, Body_1} =
@@ -927,23 +914,23 @@ chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
 chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
                                               size(Body) >= ChunkSize ->
     <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
-    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
+    Chunk = [?dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
     BodySize = size(Body),
-    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
+    Chunk = [?dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
 chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
     {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
-    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
+    Chunk = [?dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     BodySize = length(Body),
-    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
+    Chunk = [?dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]).

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/211fbf3a/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/ibrowse_lib.erl b/ibrowse_lib.erl
index e913adb..696d0f6 100644
--- a/ibrowse_lib.erl
+++ b/ibrowse_lib.erl
@@ -19,9 +19,6 @@
          url_encode/1,
          decode_rfc822_date/1,
          status_code/1,
-         dec2hex/1,
-         drv_ue/1,
-         drv_ue/2,
          encode_base64/1,
          decode_base64/1,
          get_value/2,
@@ -33,17 +30,6 @@
 get_trace_status(Host, Port) ->
     ibrowse:get_config_value({trace, Host, Port}, false).
 
-drv_ue(Str) ->
-    [{port, Port}| _] = ets:lookup(ibrowse_table, port),
-    drv_ue(Str, Port).
-drv_ue(Str, Port) ->
-    case erlang:port_control(Port, 1, Str) of
-        [] ->
-            Str;
-        Res ->
-            Res
-    end.
-
 %% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
 %% @spec url_encode(Str) -> UrlEncodedStr
 %% Str = string()
@@ -163,11 +149,6 @@ status_code(507) -> insufficient_storage;
 status_code(X) when is_list(X) -> status_code(list_to_integer(X));
 status_code(_)   -> unknown_status_code.
 
-%% @doc Returns a string with the hexadecimal representation of a given decimal.
-%% N = integer() -- the number to represent as hex
-%% @spec dec2hex(N::integer()) -> string()
-dec2hex(N) -> lists:flatten(io_lib:format("~.16B", [N])).
-
 %% @doc Implements the base64 encoding algorithm. The output data type matches in the input data type.
 %% @spec encode_base64(In) -> Out
 %% In = string() | binary()


[19/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Bumping the ibrowse library to version 1.6.2 (latest).
It has a few important bug fixes and new features, for example:

1) fixes https requests not going via the proxy;
2) added SSL support for direct connections;
3) fixes to URL parsing;
4) added option headers_as_is (see the usage sketch below)
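
The sketch below is for illustration only and is not part of the commit; the
URL and header values are placeholders. It shows how a caller might pass the
new headers_as_is option so that ibrowse sends exactly the headers supplied
and adds none of its own:

    %% Illustrative only: "example.org" and "X-Custom-Header" are placeholders.
    ibrowse:send_req("http://example.org/",
                     [{"Host", "example.org"}, {"X-Custom-Header", "1"}],
                     get, [],
                     [{headers_as_is, true}]).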



git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@985730 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/b7fafdcc
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/b7fafdcc
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/b7fafdcc

Branch: refs/heads/import-master
Commit: b7fafdccc6ad27c3f97e8ebf759030778bf55542
Parents: 1c4324e
Author: Filipe David Borba Manana <fd...@apache.org>
Authored: Sun Aug 15 18:37:45 2010 +0000
Committer: Filipe David Borba Manana <fd...@apache.org>
Committed: Sun Aug 15 18:37:45 2010 +0000

----------------------------------------------------------------------
 ibrowse.app.in          |   10 +-
 ibrowse.erl             |  516 +++++++------
 ibrowse_app.erl         |    9 +-
 ibrowse_http_client.erl | 1686 +++++++++++++++++++++++-------------------
 ibrowse_lb.erl          |    8 +-
 ibrowse_lib.erl         |  292 ++++----
 ibrowse_sup.erl         |    6 +-
 ibrowse_test.erl        |    7 +-
 8 files changed, 1358 insertions(+), 1176 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse.app.in
----------------------------------------------------------------------
diff --git a/ibrowse.app.in b/ibrowse.app.in
index 4f43dd9..208c311 100644
--- a/ibrowse.app.in
+++ b/ibrowse.app.in
@@ -1,10 +1,10 @@
 {application, ibrowse,
         [{description, "HTTP client application"},
-         {vsn, "1.5.1"},
-         {modules, [ ibrowse,
-		     ibrowse_http_client,
-		     ibrowse_app,
-		     ibrowse_sup,
+         {vsn, "1.6.2"},
+         {modules, [ ibrowse, 
+		     ibrowse_http_client, 
+		     ibrowse_app, 
+		     ibrowse_sup, 
 		     ibrowse_lib,
 		     ibrowse_lb ]},
          {registered, []},

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse.erl
----------------------------------------------------------------------
diff --git a/ibrowse.erl b/ibrowse.erl
index 1913ef5..09d36a3 100644
--- a/ibrowse.erl
+++ b/ibrowse.erl
@@ -6,8 +6,8 @@
 %%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 %% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2009 Chandrashekhar Mullaparthi
-%% @version 1.5.2
+%% @copyright 2005-2010 Chandrashekhar Mullaparthi
+%% @version 1.6.0
 %% @doc The ibrowse application implements an HTTP 1.1 client. This
 %% module implements the API of the HTTP client. There is one named
 %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -21,22 +21,22 @@
 %% <p>Here are a few sample invocations.</p>
 %%
 %% <code>
-%% ibrowse:send_req("http://intranet/messenger/", [], get).
+%% ibrowse:send_req("http://intranet/messenger/", [], get). 
 %% <br/><br/>
-%%
-%% ibrowse:send_req("http://www.google.com/", [], get, [],
-%% 		 [{proxy_user, "XXXXX"},
-%% 		  {proxy_password, "XXXXX"},
-%% 		  {proxy_host, "proxy"},
-%% 		  {proxy_port, 8080}], 1000).
+%% 
+%% ibrowse:send_req("http://www.google.com/", [], get, [], 
+%%               [{proxy_user, "XXXXX"},
+%%                {proxy_password, "XXXXX"},
+%%                {proxy_host, "proxy"},
+%%                {proxy_port, 8080}], 1000). 
 %% <br/><br/>
 %%
 %%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
-%% 		 [{proxy_user, "XXXXX"},
-%% 		  {proxy_password, "XXXXX"},
-%% 		  {proxy_host, "proxy"},
-%% 		  {proxy_port, 8080},
-%% 		  {save_response_to_file, true}], 1000).
+%%               [{proxy_user, "XXXXX"},
+%%                {proxy_password, "XXXXX"},
+%%                {proxy_host, "proxy"},
+%%                {proxy_port, 8080},
+%%                {save_response_to_file, true}], 1000).
 %% <br/><br/>
 %%
 %% ibrowse:send_req("http://www.erlang.org", [], head).
@@ -48,17 +48,12 @@
 %% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
 %%
 %% <br/><br/>
-%% ibrowse:send_req("http://www.google.com", [], get, [],
+%% ibrowse:send_req("http://www.google.com", [], get, [], 
 %%                   [{stream_to, self()}]).
 %% </code>
 %%
-%% <p>A driver exists which implements URL encoding in C, but the
-%% speed achieved using only erlang has been good enough, so the
-%% driver isn't actually used.</p>
 
 -module(ibrowse).
--vsn('$Id: ibrowse.erl,v 1.8 2009/07/01 22:43:19 chandrusf Exp $ ').
-
 -behaviour(gen_server).
 %%--------------------------------------------------------------------
 %% Include files
@@ -70,48 +65,50 @@
 
 %% gen_server callbacks
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-	 terminate/2, code_change/3]).
+         terminate/2, code_change/3]).
 
 %% API interface
 -export([
-	 rescan_config/0,
-	 rescan_config/1,
-	 get_config_value/1,
-	 get_config_value/2,
-	 spawn_worker_process/2,
-	 spawn_link_worker_process/2,
-	 stop_worker_process/1,
-	 send_req/3,
-	 send_req/4,
-	 send_req/5,
-	 send_req/6,
-	 send_req_direct/4,
-	 send_req_direct/5,
-	 send_req_direct/6,
-	 send_req_direct/7,
-	 stream_next/1,
-	 set_max_sessions/3,
-	 set_max_pipeline_size/3,
-	 set_dest/3,
-	 trace_on/0,
-	 trace_off/0,
-	 trace_on/2,
-	 trace_off/2,
-	 all_trace_off/0,
-	 show_dest_status/0,
-	 show_dest_status/2
-	]).
+         rescan_config/0,
+         rescan_config/1,
+         get_config_value/1,
+         get_config_value/2,
+         spawn_worker_process/1,
+         spawn_worker_process/2,
+         spawn_link_worker_process/1,
+         spawn_link_worker_process/2,
+         stop_worker_process/1,
+         send_req/3,
+         send_req/4,
+         send_req/5,
+         send_req/6,
+         send_req_direct/4,
+         send_req_direct/5,
+         send_req_direct/6,
+         send_req_direct/7,
+         stream_next/1,
+         set_max_sessions/3,
+         set_max_pipeline_size/3,
+         set_dest/3,
+         trace_on/0,
+         trace_off/0,
+         trace_on/2,
+         trace_off/2,
+         all_trace_off/0,
+         show_dest_status/0,
+         show_dest_status/2
+        ]).
 
 -ifdef(debug).
 -compile(export_all).
 -endif.
 
 -import(ibrowse_lib, [
-		      parse_url/1,
-		      get_value/3,
-		      do_trace/2
-		     ]).
-
+                      parse_url/1,
+                      get_value/3,
+                      do_trace/2
+                     ]).
+                      
 -record(state, {trace = false}).
 
 -include("ibrowse.hrl").
@@ -159,7 +156,7 @@ stop() ->
 send_req(Url, Headers, Method) ->
     send_req(Url, Headers, Method, [], []).
 
-%% @doc Same as send_req/3.
+%% @doc Same as send_req/3. 
 %% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
 %% If fun/0, the connection handling process will repeatdely call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
 %% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
@@ -169,19 +166,19 @@ send_req(Url, Headers, Method) ->
 send_req(Url, Headers, Method, Body) ->
     send_req(Url, Headers, Method, Body, []).
 
-%% @doc Same as send_req/4.
+%% @doc Same as send_req/4. 
 %% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
 %% HTTP Version to use is not specified, the default is 1.1.
 %% <br/>
-%% <p>The <code>host_header</code> option is useful in the case where ibrowse is
+%% <ul>
+%% <li>The <code>host_header</code> option is useful in the case where ibrowse is
 %% connecting to a component such as <a
 %% href="http://www.stunnel.org">stunnel</a> which then sets up a
 %% secure connection to a webserver. In this case, the URL supplied to
 %% ibrowse must have the stunnel host/port details, but that won't
 %% make sense to the destination webserver. This option can then be
 %% used to specify what should go in the <code>Host</code> header in
-%% the request.</p>
-%% <ul>
+%% the request.</li>
 %% <li>The <code>stream_to</code> option can be used to have the HTTP
 %% response streamed to a process as messages as data arrives on the
 %% socket. If the calling process wishes to control the rate at which
@@ -220,12 +217,25 @@ send_req(Url, Headers, Method, Body) ->
 %% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
 %% </code>
 %% In the above invocation, if the connection isn't established within
-%% 100 milliseconds, the request will fail with
+%% 100 milliseconds, the request will fail with 
 %% <code>{error, conn_failed}</code>.<br/>
 %% If connection setup succeeds, the total time allowed for the
 %% request to complete will be 1000 milliseconds minus the time taken
 %% for connection setup.
 %% </li>
+%% 
+%% <li> The <code>socket_options</code> option can be used to set
+%% specific options on the socket. The <code>{active, true | false | once}</code> 
+%% and <code>{packet_type, Packet_type}</code> will be filtered out by ibrowse.  </li>
+%%
+%% <li> The <code>headers_as_is</code> option is to enable the caller
+%% to send headers exactly as specified in the request without ibrowse
+%% adding some of its own. Required for some picky servers apparently.  </li>
+%%
+%% <li>The <code>give_raw_headers</code> option is to enable the
+%% caller to get access to the raw status line and raw unparsed
+%% headers. Not quite sure why someone would want this, but one of my
+%% users asked for it, so here it is. </li>
 %% </ul>
 %%
 %% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
@@ -234,7 +244,7 @@ send_req(Url, Headers, Method, Body) ->
 %%          {response_format,response_format()}|
 %%          {stream_chunk_size, integer()}     |
 %%          {max_pipeline_size, integer()}     |
-%%          {trace, boolean()}                 |
+%%          {trace, boolean()}                 | 
 %%          {is_ssl, boolean()}                |
 %%          {ssl_options, [SSLOpt]}            |
 %%          {pool_name, atom()}                |
@@ -253,13 +263,18 @@ send_req(Url, Headers, Method, Body) ->
 %%          {host_header, string()}            |
 %%          {inactivity_timeout, integer()}    |
 %%          {connect_timeout, integer()}       |
-%%          {transfer_encoding, {chunked, ChunkSize}}
+%%          {socket_options, Sock_opts}        |
+%%          {transfer_encoding, {chunked, ChunkSize}} | 
+%%          {headers_as_is, boolean()}         |
+%%          {give_raw_headers, boolean()}
 %%
 %% stream_to() = process() | {process(), once}
 %% process() = pid() | atom()
 %% username() = string()
 %% password() = string()
 %% SSLOpt = term()
+%% Sock_opts = [Sock_opt]
+%% Sock_opt = term()
 %% ChunkSize = integer()
 %% srtf() = boolean() | filename()
 %% filename() = string()
@@ -267,54 +282,54 @@ send_req(Url, Headers, Method, Body) ->
 send_req(Url, Headers, Method, Body, Options) ->
     send_req(Url, Headers, Method, Body, Options, 30000).
 
-%% @doc Same as send_req/5.
+%% @doc Same as send_req/5. 
 %% All timeout values are in milliseconds.
 %% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
 %% Timeout = integer() | infinity
 send_req(Url, Headers, Method, Body, Options, Timeout) ->
     case catch parse_url(Url) of
-	#url{host = Host,
-	     port = Port,
-	     protocol = Protocol} = Parsed_url ->
-	    Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
-			 [] ->
-			     get_lb_pid(Parsed_url);
-			 [#lb_pid{pid = Lb_pid_1}] ->
-			     Lb_pid_1
-		     end,
-	    Max_sessions = get_max_sessions(Host, Port, Options),
-	    Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
-	    Options_1 = merge_options(Host, Port, Options),
-	    {SSLOptions, IsSSL} =
-		case (Protocol == https) orelse
-		     get_value(is_ssl, Options_1, false) of
-		    false -> {[], false};
-		    true -> {get_value(ssl_options, Options_1, []), true}
-		end,
-	    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
-					     Max_sessions,
-					     Max_pipeline_size,
-					     {SSLOptions, IsSSL}) of
-		{ok, Conn_Pid} ->
-		    do_send_req(Conn_Pid, Parsed_url, Headers,
-				Method, Body, Options_1, Timeout);
-		Err ->
-		    Err
-	    end;
-	Err ->
-	    {error, {url_parsing_failed, Err}}
+        #url{host = Host,
+             port = Port,
+             protocol = Protocol} = Parsed_url ->
+            Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
+                         [] ->
+                             get_lb_pid(Parsed_url);
+                         [#lb_pid{pid = Lb_pid_1}] ->
+                             Lb_pid_1
+                     end,
+            Max_sessions = get_max_sessions(Host, Port, Options),
+            Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
+            Options_1 = merge_options(Host, Port, Options),
+            {SSLOptions, IsSSL} =
+                case (Protocol == https) orelse
+                     get_value(is_ssl, Options_1, false) of
+                    false -> {[], false};
+                    true -> {get_value(ssl_options, Options_1, []), true}
+                end,
+            case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+                                             Max_sessions, 
+                                             Max_pipeline_size,
+                                             {SSLOptions, IsSSL}) of
+                {ok, Conn_Pid} ->
+                    do_send_req(Conn_Pid, Parsed_url, Headers,
+                                Method, Body, Options_1, Timeout);
+                Err ->
+                    Err
+            end;
+        Err ->
+            {error, {url_parsing_failed, Err}}
     end.
 
 merge_options(Host, Port, Options) ->
     Config_options = get_config_value({options, Host, Port}, []),
     lists:foldl(
       fun({Key, Val}, Acc) ->
-			case lists:keysearch(Key, 1, Options) of
-			    false ->
-				[{Key, Val} | Acc];
-			    _ ->
-				Acc
-			end
+                        case lists:keysearch(Key, 1, Options) of
+                            false ->
+                                [{Key, Val} | Acc];
+                            _ ->
+                                Acc
+                        end
       end, Options, Config_options).
 
 get_lb_pid(Url) ->
@@ -322,11 +337,11 @@ get_lb_pid(Url) ->
 
 get_max_sessions(Host, Port, Options) ->
     get_value(max_sessions, Options,
-	      get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
+              get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
 
 get_max_pipeline_size(Host, Port, Options) ->
     get_value(max_pipeline_size, Options,
-	      get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
+              get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
 
 %% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
 %% for achieving the same effect.
@@ -343,7 +358,7 @@ set_dest(_Host, _Port, [H | _]) ->
     exit({invalid_option, H});
 set_dest(_, _, []) ->
     ok.
-
+    
 %% @doc Set the maximum number of connections allowed to a specific Host:Port.
 %% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
 set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
@@ -356,21 +371,21 @@ set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
 
 do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
     case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
-					    Headers, Method, ensure_bin(Body),
-					    Options, Timeout) of
-	{'EXIT', {timeout, _}} ->
-	    {error, req_timedout};
-	{'EXIT', Reason} ->
-	    {error, {'EXIT', Reason}};
-	{ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
-	    case get_value(response_format, Options, list) of
-		list ->
-		    {ok, St_code, Headers, binary_to_list(Body)};
-		binary ->
-		    Ret
-	    end;
-	Ret ->
-	    Ret
+                                            Headers, Method, ensure_bin(Body),
+                                            Options, Timeout) of
+        {'EXIT', {timeout, _}} ->
+            {error, req_timedout};
+        {'EXIT', Reason} ->
+            {error, {'EXIT', Reason}};
+        {ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
+            case get_value(response_format, Options, list) of
+                list ->
+                    {ok, St_code, Headers, binary_to_list(Body)};
+                binary ->
+                    Ret
+            end;
+        Ret ->
+            Ret
     end.
 
 ensure_bin(L) when is_list(L)                     -> list_to_binary(L);
@@ -391,12 +406,21 @@ ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
 %% <b>Note:</b> It is the responsibility of the calling process to control
 %% pipeline size on such connections.
 %%
+%% @spec spawn_worker_process(Url::string()) -> {ok, pid()}
+spawn_worker_process(Url) ->
+    ibrowse_http_client:start(Url).
+
 %% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
 spawn_worker_process(Host, Port) ->
     ibrowse_http_client:start({Host, Port}).
 
-%% @doc Same as spawn_worker_process/2 except the the calling process
+%% @doc Same as spawn_worker_process/1 except the the calling process
 %% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Url::string()) -> {ok, pid()}
+spawn_link_worker_process(Url) ->
+    ibrowse_http_client:start_link(Url).
+
+%% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
 spawn_link_worker_process(Host, Port) ->
     ibrowse_http_client:start_link({Host, Port}).
 
@@ -426,30 +450,30 @@ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
 %% returned by spawn_worker_process/2 or spawn_link_worker_process/2
 send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
     case catch parse_url(Url) of
-	#url{host = Host,
-	     port = Port} = Parsed_url ->
-	    Options_1 = merge_options(Host, Port, Options),
-	    case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
-		{error, {'EXIT', {noproc, _}}} ->
-		    {error, worker_is_dead};
-		Ret ->
-		    Ret
-	    end;
-	Err ->
-	    {error, {url_parsing_failed, Err}}
+        #url{host = Host,
+             port = Port} = Parsed_url ->
+            Options_1 = merge_options(Host, Port, Options),
+            case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
+                {error, {'EXIT', {noproc, _}}} ->
+                    {error, worker_is_dead};
+                Ret ->
+                    Ret
+            end;
+        Err ->
+            {error, {url_parsing_failed, Err}}
     end.
 
 %% @doc Tell ibrowse to stream the next chunk of data to the
 %% caller. Should be used in conjunction with the
 %% <code>stream_to</code> option
 %% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_next(Req_id) ->
+stream_next(Req_id) ->    
     case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
-	[] ->
-	    {error, unknown_req_id};
-	[{_, Pid}] ->
-	    catch Pid ! {stream_next, Req_id},
-	    ok
+        [] ->
+            {error, unknown_req_id};
+        [{_, Pid}] ->
+            catch Pid ! {stream_next, Req_id},
+            ok
     end.
 
 %% @doc Turn tracing on for the ibrowse process
@@ -462,7 +486,7 @@ trace_off() ->
 %% @doc Turn tracing on for all connections to the specified HTTP
 %% server. Host is whatever is specified as the domain name in the URL
 %% @spec trace_on(Host, Port) -> ok
-%% Host = string()
+%% Host = string() 
 %% Port = integer()
 trace_on(Host, Port) ->
     ibrowse ! {trace, true, Host, Port},
@@ -483,75 +507,75 @@ all_trace_off() ->
 
 show_dest_status() ->
     Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
-							     is_integer(Port) ->
-				 true;
-			    (_) ->
-				 false
-			 end, ets:tab2list(ibrowse_lb)),
+                                                             is_integer(Port) ->
+                                 true;
+                            (_) ->
+                                 false
+                         end, ets:tab2list(ibrowse_lb)),
     All_ets = ets:all(),
     io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
-	      ["Server:port", "ETS", "Num conns", "LB Pid"]),
+              ["Server:port", "ETS", "Num conns", "LB Pid"]),
     io:format("~80.80.=s~n", [""]),
     lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
-			  case lists:dropwhile(
-				 fun(Tid) ->
-					 ets:info(Tid, owner) /= Lb_pid
-				 end, All_ets) of
-			      [] ->
-				  io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
-					    [Host ++ ":" ++ integer_to_list(Port),
-					     "",
-					     "",
-					     io_lib:format("~p", [Lb_pid])]
-					   );
-			      [Tid | _] ->
-				  catch (
-				    begin
-					Size = ets:info(Tid, size),
-					io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
-						  [Host ++ ":" ++ integer_to_list(Port),
-						   integer_to_list(Tid),
-						   integer_to_list(Size),
-						   io_lib:format("~p", [Lb_pid])]
-						 )
-				    end
-				   )
-				  end
-		  end, Dests).
-
+                          case lists:dropwhile(
+                                 fun(Tid) ->
+                                         ets:info(Tid, owner) /= Lb_pid
+                                 end, All_ets) of
+                              [] ->
+                                  io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+                                            [Host ++ ":" ++ integer_to_list(Port),
+                                             "",
+                                             "",
+                                             io_lib:format("~p", [Lb_pid])]
+                                           );
+                              [Tid | _] ->
+                                  catch (
+                                    begin
+                                        Size = ets:info(Tid, size),
+                                        io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+                                                  [Host ++ ":" ++ integer_to_list(Port),
+                                                   io_lib:format("~p", [Tid]),
+                                                   integer_to_list(Size),
+                                                   io_lib:format("~p", [Lb_pid])]
+                                                 )
+                                    end
+                                   )
+                                  end
+                  end, Dests).
+                                          
 %% @doc Shows some internal information about load balancing to a
 %% specified Host:Port. Info about workers spawned using
 %% spawn_worker_process/2 or spawn_link_worker_process/2 is not
 %% included.
 show_dest_status(Host, Port) ->
     case ets:lookup(ibrowse_lb, {Host, Port}) of
-	[] ->
-	    no_active_processes;
-	[#lb_pid{pid = Lb_pid}] ->
-	    io:format("Load Balancer Pid     : ~p~n", [Lb_pid]),
-	    io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
-	    case lists:dropwhile(
-		   fun(Tid) ->
-			   ets:info(Tid, owner) /= Lb_pid
-		   end, ets:all()) of
-		[] ->
-		    io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
-		[Tid | _] ->
-		    First = ets:first(Tid),
-		    Last = ets:last(Tid),
-		    Size = ets:info(Tid, size),
-		    io:format("LB ETS table id       : ~p~n", [Tid]),
-		    io:format("Num Connections       : ~p~n", [Size]),
-		    case Size of
-			0 ->
-			    ok;
-			_ ->
-			    {First_p_sz, _} = First,
-			    {Last_p_sz, _} = Last,
-			    io:format("Smallest pipeline     : ~1000.p~n", [First_p_sz]),
-			    io:format("Largest pipeline      : ~1000.p~n", [Last_p_sz])
-		    end
-	    end
+        [] ->
+            no_active_processes;
+        [#lb_pid{pid = Lb_pid}] ->
+            io:format("Load Balancer Pid     : ~p~n", [Lb_pid]),
+            io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+            case lists:dropwhile(
+                   fun(Tid) ->
+                           ets:info(Tid, owner) /= Lb_pid
+                   end, ets:all()) of
+                [] ->
+                    io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+                [Tid | _] ->
+                    First = ets:first(Tid),
+                    Last = ets:last(Tid),
+                    Size = ets:info(Tid, size),
+                    io:format("LB ETS table id       : ~p~n", [Tid]),
+                    io:format("Num Connections       : ~p~n", [Size]),
+                    case Size of
+                        0 ->
+                            ok;
+                        _ ->
+                            {First_p_sz, _} = First,
+                            {Last_p_sz, _} = Last,
+                            io:format("Smallest pipeline     : ~1000.p~n", [First_p_sz]),
+                            io:format("Largest pipeline      : ~1000.p~n", [Last_p_sz])
+                    end
+            end
     end.
 
 %% @doc Clear current configuration for ibrowse and load from the file
@@ -592,40 +616,40 @@ init(_) ->
 
 import_config() ->
     case code:priv_dir(ibrowse) of
-	{error, _} = Err ->
-	    Err;
-	PrivDir ->
-	    Filename = filename:join(PrivDir, "ibrowse.conf"),
-	    import_config(Filename)
+        {error, _} = Err ->
+            Err;
+        PrivDir ->
+            Filename = filename:join(PrivDir, "ibrowse.conf"),
+            import_config(Filename)
     end.
 
 import_config(Filename) ->
     case file:consult(Filename) of
-	{ok, Terms} ->
-	    ets:delete_all_objects(ibrowse_conf),
-	    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
-		     when is_list(Host), is_integer(Port),
-		          is_integer(MaxSess), MaxSess > 0,
-		          is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
-			  I = [{{max_sessions, Host, Port}, MaxSess},
-			       {{max_pipeline_size, Host, Port}, MaxPipe},
-			       {{options, Host, Port}, Options}],
-			  lists:foreach(
-			    fun({X, Y}) ->
-				    ets:insert(ibrowse_conf,
-					       #ibrowse_conf{key = X,
-							     value = Y})
-			    end, I);
-		     ({K, V}) ->
-			  ets:insert(ibrowse_conf,
-				     #ibrowse_conf{key = K,
-						   value = V});
-		     (X) ->
-			  io:format("Skipping unrecognised term: ~p~n", [X])
-		  end,
-	    lists:foreach(Fun, Terms);
-	Err ->
-	    Err
+        {ok, Terms} ->
+            ets:delete_all_objects(ibrowse_conf),
+            Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
+                     when is_list(Host), is_integer(Port),
+                          is_integer(MaxSess), MaxSess > 0,
+                          is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
+                          I = [{{max_sessions, Host, Port}, MaxSess},
+                               {{max_pipeline_size, Host, Port}, MaxPipe},
+                               {{options, Host, Port}, Options}],
+                          lists:foreach(
+                            fun({X, Y}) ->
+                                    ets:insert(ibrowse_conf,
+                                               #ibrowse_conf{key = X, 
+                                                             value = Y})
+                            end, I);
+                     ({K, V}) ->
+                          ets:insert(ibrowse_conf,
+                                     #ibrowse_conf{key = K,
+                                                   value = V});
+                     (X) ->
+                          io:format("Skipping unrecognised term: ~p~n", [X])
+                  end,
+            lists:foreach(Fun, Terms);
+        Err ->
+            Err
     end.
 
 %% @doc Internal export
@@ -636,10 +660,10 @@ get_config_value(Key) ->
 %% @doc Internal export
 get_config_value(Key, DefVal) ->
     case ets:lookup(ibrowse_conf, Key) of
-	[] ->
-	    DefVal;
-	[#ibrowse_conf{value = V}] ->
-	    V
+        [] ->
+            DefVal;
+        [#ibrowse_conf{value = V}] ->
+            V
     end.
 
 set_config_value(Key, Val) ->
@@ -700,36 +724,36 @@ handle_info(all_trace_off, State) ->
     Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
     Trace_on_dests = ets:select(ibrowse_conf, Mspec),
     Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
-		  case lists:member({H, P}, Trace_on_dests) of
-		      false ->
-			  ok;
-		      true ->
-			  catch Pid ! {trace, false}
-		  end;
-	     (_, Acc) ->
-		  Acc
-	  end,
+                  case lists:member({H, P}, Trace_on_dests) of
+                      false ->
+                          ok;
+                      true ->
+                          catch Pid ! {trace, false}
+                  end;
+             (_, Acc) ->
+                  Acc
+          end,
     ets:foldl(Fun, undefined, ibrowse_lb),
     ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
     {noreply, State};
-
+                                  
 handle_info({trace, Bool}, State) ->
     put(my_trace_flag, Bool),
     {noreply, State};
 
 handle_info({trace, Bool, Host, Port}, State) ->
     Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
-	     when H == Host,
-		  P == Port ->
-		  catch Pid ! {trace, Bool};
-	     (_, Acc) ->
-		  Acc
-	  end,
+             when H == Host,
+                  P == Port ->
+                  catch Pid ! {trace, Bool};
+             (_, Acc) ->
+                  Acc
+          end,
     ets:foldl(Fun, undefined, ibrowse_lb),
     ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
-					   value = Bool}),
+                                           value = Bool}),
     {noreply, State};
-
+                     
 handle_info(_Info, State) ->
     {noreply, State}.
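
For reference, the import_config/1 clauses in the hunk above read the configuration with file:consult/1, so priv/ibrowse.conf is simply a list of Erlang terms. A minimal sketch of such a file, with an illustrative host and made-up limits (not shipped defaults):

    %% priv/ibrowse.conf -- consulted at startup by ibrowse:import_config/1
    %% Per-destination limits: {dest, Host, Port, MaxSessions, MaxPipelineSize, Options}
    {dest, "www.example.com", 80, 10, 10, []}.
    %% Any other {Key, Value} tuple is inserted into the ibrowse_conf table verbatim,
    %% e.g. a per-destination trace flag matching the {trace, Host, Port} keys used above.
    {{trace, "www.example.com", 80}, true}.

Each {dest, ...} term expands into the three {max_sessions, Host, Port}, {max_pipeline_size, Host, Port} and {options, Host, Port} entries that the fun above inserts; any term that matches neither clause is reported as unrecognised and skipped.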
 

http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/b7fafdcc/ibrowse_app.erl
----------------------------------------------------------------------
diff --git a/ibrowse_app.erl b/ibrowse_app.erl
index 8c83e8f..d3a0f7b 100644
--- a/ibrowse_app.erl
+++ b/ibrowse_app.erl
@@ -1,12 +1,11 @@
 %%%-------------------------------------------------------------------
 %%% File    : ibrowse_app.erl
 %%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description :
+%%% Description : 
 %%%
 %%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
 %%%-------------------------------------------------------------------
 -module(ibrowse_app).
--vsn('$Id: ibrowse_app.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
 
 -behaviour(application).
 %%--------------------------------------------------------------------
@@ -42,11 +41,11 @@
 %% Func: start/2
 %% Returns: {ok, Pid}        |
 %%          {ok, Pid, State} |
-%%          {error, Reason}
+%%          {error, Reason}   
 %%--------------------------------------------------------------------
 start(_Type, _StartArgs) ->
     case ibrowse_sup:start_link() of
-	{ok, Pid} ->
+	{ok, Pid} -> 
 	    {ok, Pid};
 	Error ->
 	    Error
@@ -54,7 +53,7 @@ start(_Type, _StartArgs) ->
 
 %%--------------------------------------------------------------------
 %% Func: stop/1
-%% Returns: any
+%% Returns: any 
 %%--------------------------------------------------------------------
 stop(_State) ->
     ok.
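
For context, neither ibrowse_app:start/2 nor stop/1 above is called directly: OTP invokes them when the application is started and stopped, and start/2 just delegates to ibrowse_sup:start_link/0. A minimal usage sketch, assuming an illustrative URL (send_req/3 is the usual synchronous request entry point):

    %% Starting the application runs ibrowse_app:start/2, which brings up
    %% the supervision tree; after that, requests can be issued.
    ok = application:start(ibrowse),
    case ibrowse:send_req("http://www.example.com/", [], get) of
        {ok, Status, _Headers, _Body} -> io:format("response status: ~p~n", [Status]);
        {error, Reason}               -> io:format("request failed: ~p~n", [Reason])
    end.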


[31/33] ibrowse commit: updated refs/heads/import-master to 1167b0e

Posted by da...@apache.org.
Upgrade get_crlf funs to use binary BIF


Project: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/commit/6a89bd65
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/tree/6a89bd65
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/diff/6a89bd65

Branch: refs/heads/import-master
Commit: 6a89bd65be3dc575d0b1a165778823fe908345e1
Parents: 64f1b8c
Author: Bob Dionne <bi...@apache.org>
Authored: Tue Oct 23 20:01:32 2012 -0400
Committer: Bob Dionne <bo...@cloudant.com>
Committed: Tue Oct 23 20:01:32 2012 -0400

----------------------------------------------------------------------
 ibrowse_http_client.erl | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
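
The change below drops the hand-rolled, byte-at-a-time recursion in get_crlf_pos/2 and get_crlf_crlf_pos/2 in favour of binary:match/2, which returns {Pos, Len} for the first occurrence of the pattern or nomatch, letting the runtime do the scan natively. A small standalone sketch of the same idea, with illustrative module and function names that are not part of ibrowse:

    -module(crlf_scan_demo).
    -export([crlf_pos/1, crlf_crlf_pos/1]).

    %% Offset of the first CRLF in Bin, or no.
    crlf_pos(Bin) ->
        match_pos(Bin, <<"\r\n">>).

    %% Offset of the first CRLFCRLF (the blank line ending HTTP headers), or no.
    crlf_crlf_pos(Bin) ->
        match_pos(Bin, <<"\r\n\r\n">>).

    match_pos(Data, Pattern) ->
        case binary:match(Data, Pattern) of
            {Pos, _Len} -> {yes, Pos};
            nomatch     -> no
        end.

For example, crlf_crlf_pos(<<"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n">>) evaluates to {yes, 34}, the offset that scan_header/1 then uses to split headers from body, mirroring the new binary_bif_match/2 helper in the diff.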


http://git-wip-us.apache.org/repos/asf/couchdb-ibrowse/blob/6a89bd65/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/ibrowse_http_client.erl b/ibrowse_http_client.erl
index fd91d40..00e8ed3 100644
--- a/ibrowse_http_client.erl
+++ b/ibrowse_http_client.erl
@@ -1458,7 +1458,7 @@ parse_header([], _) ->
     invalid.
 
 scan_header(Bin) ->
-    case get_crlf_crlf_pos(Bin, 0) of
+    case get_crlf_crlf_pos(Bin) of
         {yes, Pos} ->
             {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
             {yes, Headers, Body};
@@ -1474,7 +1474,7 @@ scan_header(Bin1, Bin2) ->
     Bin1_already_scanned_size = size(Bin1) - 4,
     <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
     Bin_to_scan = <<Rest/binary, Bin2/binary>>,
-    case get_crlf_crlf_pos(Bin_to_scan, 0) of
+    case get_crlf_crlf_pos(Bin_to_scan) of
         {yes, Pos} ->
             {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
             {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
@@ -1482,9 +1482,16 @@ scan_header(Bin1, Bin2) ->
             {no, <<Bin1/binary, Bin2/binary>>}
     end.
 
-get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
-get_crlf_crlf_pos(<<_, Rest/binary>>, Pos)               -> get_crlf_crlf_pos(Rest, Pos + 1);
-get_crlf_crlf_pos(<<>>, _)                               -> no.
+get_crlf_crlf_pos(Data) ->
+    binary_bif_match(Data, <<$\r, $\n, $\r, $\n>>).
+
+binary_bif_match(Data, Binary) ->
+    case binary:match(Data, Binary) of
+    {Pos, _Len} ->
+        {yes, Pos};
+    _ -> no
+    end.
+
 
 scan_crlf(Bin) ->
     case get_crlf_pos(Bin) of
@@ -1513,12 +1520,9 @@ scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
             {no, list_to_binary([Bin1, Bin2])}
     end.
 
-get_crlf_pos(Bin) ->
-    get_crlf_pos(Bin, 0).
+get_crlf_pos(Data) ->
+    binary_bif_match(Data, <<$\r, $\n>>).
 
-get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
-get_crlf_pos(<<_, Rest/binary>>, Pos)     -> get_crlf_pos(Rest, Pos + 1);
-get_crlf_pos(<<>>, _)                     -> no.
 
 fmt_val(L) when is_list(L)    -> L;
 fmt_val(I) when is_integer(I) -> integer_to_list(I);