Posted to commits@couchdb.apache.org by da...@apache.org on 2014/02/05 15:50:23 UTC

[01/49] Remove src/chttpd

Updated Branches:
  refs/heads/1843-feature-bigcouch bb2fa4466 -> 3069c0134 (forced update)


http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_show.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
deleted file mode 100644
index b028f4c..0000000
--- a/src/chttpd/src/chttpd_show.erl
+++ /dev/null
@@ -1,322 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_show).
-
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(lacc, {
-    req,
-    resp = nil,
-    qserver,
-    lname,
-    db,
-    etag
-}).
-
-% /db/_design/foo/_show/bar/docid
-% show converts a JSON doc to a response of any content-type.
-% it looks up the doc and then passes it to the query server;
-% the query server's response is then sent to the HTTP client.
-
-maybe_open_doc(Db, DocId) ->
-    case fabric:open_doc(Db, DocId, [conflicts]) of
-    {ok, Doc} ->
-        Doc;
-    {not_found, _} ->
-        nil
-    end.
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId]
-    }=Req, Db, DDoc) ->
-
-    % open the doc
-    Doc = maybe_open_doc(Db, DocId),
-
-    % we don't handle revs here b/c they are an internal api
-    % a missing doc is passed to the show function as null
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId|Rest]
-    }=Req, Db, DDoc) ->
-    
-    DocParts = [DocId|Rest],
-    DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
-    % open the doc
-    Doc = maybe_open_doc(Db, DocId1),
-
-    % we don't handle revs here b/c they are an internal api
-    % pass 404 docs to the show function
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName]
-    }=Req, Db, DDoc) ->
-    % with no docid the doc is nil
-    handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
-    %% Will throw an exception if the _show handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"shows">>, ShowName]),
-    % get responder for ddoc/showname
-    CurrentEtag = show_etag(Req, Doc, DDoc, []),
-    chttpd:etag_respond(Req, CurrentEtag, fun() ->
-        JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
-        JsonDoc = couch_query_servers:json_doc(Doc),
-        [<<"resp">>, ExternalResp] = 
-            couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
-                [JsonDoc, JsonReq]),
-        JsonResp = apply_etag(ExternalResp, CurrentEtag),
-        chttpd_external:send_external_response(Req, JsonResp)
-    end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
-    Accept = chttpd:header_value(Req, "Accept"),
-    DocPart = case Doc of
-        nil -> nil;
-        Doc -> chttpd:doc_etag(Doc)
-    end,
-    couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
-        UserCtx#user_ctx.roles, More}).
-
-% /db/_design/foo/_update/bar/docid
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-%     % anything but GET
-%     send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-handle_doc_update_req(#httpd{
-        path_parts=[_, _, _, _, UpdateName, DocId]
-    }=Req, Db, DDoc) ->
-    Doc = maybe_open_doc(Db, DocId),
-    send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
-handle_doc_update_req(#httpd{
-        path_parts=[_, _, _, _, UpdateName]
-    }=Req, Db, DDoc) ->
-    send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-handle_doc_update_req(Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
-    %% Will throw an exception if the _update handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"updates">>, UpdateName]),
-    JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
-    JsonDoc = couch_query_servers:json_doc(Doc),
-    Cmd = [<<"updates">>, UpdateName],
-    case couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]) of
-    [<<"up">>, {NewJsonDoc}, JsonResp] ->
-        case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
-        "true" ->
-            Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
-        _ ->
-            Options = [{user_ctx, Req#httpd.user_ctx}]
-        end,
-        NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
-        case fabric:update_doc(Db, NewDoc, Options) of
-        {ok, _} ->
-            Code = 201;
-        {accepted, _} ->
-            Code = 202
-        end;
-    [<<"up">>, _Other, JsonResp] ->
-        Code = 200
-    end,
-    JsonResp2 = json_apply_field({<<"code">>, Code}, JsonResp),
-    % todo set location field
-    chttpd_external:send_external_response(Req, JsonResp2).
-
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method='GET',
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
-    Keys = chttpd:qs_json_value(Req, "keys", nil),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method='GET',
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
-    Keys = chttpd:qs_json_value(Req, "keys", nil),
-    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, nil),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
-        path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
-    ReqBody = chttpd:body(Req),
-    {Props2} = ?JSON_DECODE(ReqBody),
-    Keys = proplists:get_value(<<"keys">>, Props2, nil),
-    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
-        {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
-    chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(Req, _Db, _DDoc) ->
-    chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
-    %% Will throw an exception if the _list handler is missing
-    couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]),
-    {ok, VDoc} = fabric:open_doc(Db, <<"_design/", ViewDesignName/binary>>, []),
-    Group = couch_view_group:design_doc_to_view_group(VDoc),
-    IsReduce = chttpd_view:get_reduce_type(Req),
-    ViewType = chttpd_view:extract_view_type(ViewName,
-        couch_view_group:get_views(Group), IsReduce),
-    QueryArgs = chttpd_view:parse_view_params(Req, Keys, ViewType),
-    CB = fun list_callback/2,
-    Etag = couch_uuids:new(),
-    chttpd:etag_respond(Req, Etag, fun() ->
-        couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
-            Acc0 = #lacc{
-                lname = LName,
-                req = Req,
-                qserver = QServer,
-                db = Db,
-                etag = Etag
-            },
-            fabric:query_view(Db, VDoc, ViewName, CB, Acc0, QueryArgs)
-        end)
-    end).
-
-list_callback({total_and_offset, Total, Offset}, #lacc{resp=nil} = Acc) ->
-    start_list_resp({[{<<"total_rows">>, Total}, {<<"offset">>, Offset}]}, Acc);
-list_callback({total_and_offset, _, _}, Acc) ->
-    % a sorted=false view where the message came in late.  Ignore.
-    {ok, Acc};
-list_callback({row, Row}, #lacc{resp=nil} = Acc) ->
-    % first row of a reduce view, or a sorted=false view
-    {ok, NewAcc} = start_list_resp({[]}, Acc),
-    send_list_row(Row, NewAcc);
-list_callback({row, Row}, Acc) ->
-    send_list_row(Row, Acc);
-list_callback(complete, Acc) ->
-    #lacc{qserver = {Proc, _}, resp = Resp0} = Acc,
-    if Resp0 =:= nil ->
-        {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
-    true ->
-        Resp = Resp0
-    end,
-    try couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
-    [<<"end">>, Chunk] ->
-        {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
-        chttpd:send_delayed_last_chunk(Resp1)
-    catch Error ->
-        {ok, Resp1} = chttpd:send_delayed_error(Resp, Error),
-        {stop, Resp1}
-    end;
-list_callback({error, Reason}, #lacc{resp=Resp}) ->
-    chttpd:send_delayed_error(Resp, Reason).
-
-start_list_resp(Head, Acc) ->
-    #lacc{
-        req = Req,
-        db = Db,
-        qserver = QServer,
-        lname = LName,
-        etag = Etag
-    } = Acc,
-
-    % use a separate process because we're already in a receive loop, and
-    % json_req_obj calls fabric:get_db_info()
-    spawn_monitor(fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end),
-    receive {'DOWN', _, _, _, JsonReq} -> ok end,
-
-    [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
-        [<<"lists">>, LName], [Head, JsonReq]),
-    JsonResp2 = apply_etag(JsonResp, Etag),
-    #extern_resp_args{
-        code = Code,
-        ctype = CType,
-        headers = ExtHeaders
-    } = couch_httpd_external:parse_external_response(JsonResp2),
-    JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
-    {ok, Resp} = chttpd:start_delayed_chunked_response(Req, Code,
-        JsonHeaders, Chunk),
-    {ok, Acc#lacc{resp=Resp}}.
-
-send_list_row(Row, #lacc{qserver = {Proc, _}, resp = Resp} = Acc) ->
-    try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, Row]) of
-    [<<"chunks">>, Chunk] ->
-        {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
-        {ok, Acc#lacc{resp=Resp1}};
-    [<<"end">>, Chunk] ->
-        {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
-        {ok, Resp2} = chttpd:send_delayed_last_chunk(Resp1),
-        {stop, Resp2}
-    catch Error ->
-        {ok, Resp1} = chttpd:send_delayed_error(Resp, Error),
-        {stop, Resp1}
-    end.
-
-send_non_empty_chunk(Resp, []) ->
-    {ok, Resp};
-send_non_empty_chunk(Resp, Chunk) ->
-    chttpd:send_delayed_chunk(Resp, Chunk).
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
-    json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
-    % drop matching keys
-    json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
-    % something else is next, leave it alone.
-    json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
-    % end of list, add ours
-    {[{Key, NewValue}|Acc]}.
-
-apply_etag({ExternalResponse}, CurrentEtag) ->
-    % Here we embark on the delicate task of replacing or creating the
-    % headers on the JsonResponse object. We need to control the Etag and
-    % Vary headers. If the external function controls the Etag, we'd have to
-    % run it to check for a match, which sort of defeats the purpose.
-    case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
-    nil ->
-        % no JSON headers
-        % add our Etag and Vary headers to the response
-        {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
-    JsonHeaders ->
-        {[case Field of
-        {<<"headers">>, JsonHeaders} -> % add our headers
-            JsonHeadersEtagged = json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
-            JsonHeadersVaried = json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
-            {<<"headers">>, JsonHeadersVaried};
-        _ -> % skip non-header fields
-            Field
-        end || Field <- ExternalResponse]}
-    end.
-
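
A note on the spawn_monitor trick in start_list_resp above: the fabric call
runs in a throwaway process and its return value is harvested from the exit
reason carried by the monitor's 'DOWN' message, so the caller's own receive
loop never handles fabric messages directly. A minimal standalone sketch of
the same pattern (the function name is illustrative, not part of the removed
module):

    %% Run Fun in a fresh process and collect its return value from the
    %% 'DOWN' message; exit(Result) makes Result the exit reason.
    call_in_fresh_process(Fun) ->
        {Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
        receive
            {'DOWN', Ref, process, Pid, Result} -> Result
        end.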

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_sup.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
deleted file mode 100644
index 69283a9..0000000
--- a/src/chttpd/src/chttpd_sup.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
-
-start_link(Args) ->
-    supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
-    {ok, {{one_for_one, 3, 10}, [
-        ?CHILD(chttpd, worker),
-        ?CHILD(chttpd_config_listener, worker)
-    ]}}.
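
For reference, the ?CHILD macro above expands each entry into a standard
supervisor child spec; ?CHILD(chttpd, worker), for example, becomes:

    {chttpd, {chttpd, start_link, []}, permanent, 100, worker, [chttpd]}

so the {one_for_one, 3, 10} strategy restarts crashed children individually,
allowing at most 3 restarts within 10 seconds before the supervisor itself
exits.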

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_view.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
deleted file mode 100644
index 85b88a3..0000000
--- a/src/chttpd/src/chttpd_view.erl
+++ /dev/null
@@ -1,405 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_view).
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([handle_view_req/3, handle_temp_view_req/2, get_reduce_type/1,
-    parse_view_params/3, view_group_etag/2, view_group_etag/3,
-    parse_bool_param/1, extract_view_type/3]).
-
-
-multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
-    Group = couch_view_group:design_doc_to_view_group(DDoc),
-    IsReduce = get_reduce_type(Req),
-    ViewType = extract_view_type(ViewName, couch_view_group:get_views(Group),
-        IsReduce),
-    % TODO proper calculation of etag
-    % Etag = view_group_etag(ViewGroup, Db, Queries),
-    Etag = couch_uuids:new(),
-    DefaultParams = lists:flatmap(fun({K,V}) -> parse_view_param(K,V) end,
-        chttpd:qs(Req)),
-    [couch_stats_collector:increment({httpd, view_reads}) || _I <- Queries],
-    chttpd:etag_respond(Req, Etag, fun() ->
-        FirstChunk = "{\"results\":[",
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}], FirstChunk),
-        {_, Resp1} = lists:foldl(fun({QueryProps}, {Chunk, RespAcc}) ->
-            if Chunk =/= nil -> chttpd:send_delayed_chunk(Resp, Chunk); true -> ok end,
-            ThisQuery = lists:flatmap(fun parse_json_view_param/1, QueryProps),
-            FullParams = lists:ukeymerge(1, ThisQuery, DefaultParams),
-            {ok, RespAcc1} = fabric:query_view(
-                Db,
-                DDoc,
-                ViewName,
-                fun view_callback/2,
-                {nil, RespAcc},
-                parse_view_params(FullParams, nil, ViewType)
-            ),
-            {",\n", RespAcc1}
-        end, {nil,Resp}, Queries),
-        chttpd:send_delayed_chunk(Resp1, "]}"),
-        chttpd:end_delayed_json_response(Resp1)
-    end).
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
-    Group = couch_view_group:design_doc_to_view_group(DDoc),
-    IsReduce = get_reduce_type(Req),
-    ViewType = extract_view_type(ViewName, couch_view_group:get_views(Group),
-        IsReduce),
-    QueryArgs = parse_view_params(Req, Keys, ViewType),
-    % TODO proper calculation of etag
-    % Etag = view_group_etag(ViewGroup, Db, Keys),
-    Etag = couch_uuids:new(),
-    couch_stats_collector:increment({httpd, view_reads}),
-    chttpd:etag_respond(Req, Etag, fun() ->
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
-        CB = fun view_callback/2,
-        {ok, Resp1} = fabric:query_view(Db, DDoc, ViewName, CB, {nil, Resp}, QueryArgs),
-        chttpd:end_delayed_json_response(Resp1)
-    end).
-
-view_callback({total_and_offset, Total, Offset}, {nil, Resp}) ->
-    Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
-    {ok, {"", Resp1}};
-view_callback({total_and_offset, _, _}, Acc) ->
-    % a sorted=false view where the message came in late.  Ignore.
-    {ok, Acc};
-view_callback({row, Row}, {nil, Resp}) ->
-    % first row of a reduce view, or a sorted=false view
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, ["{\"rows\":[\r\n", ?JSON_ENCODE(Row)]),
-    {ok, {",\r\n", Resp1}};
-view_callback({row, Row}, {Prepend, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
-    {ok, {",\r\n", Resp1}};
-view_callback(complete, {nil, Resp}) ->
-    chttpd:send_delayed_chunk(Resp, "{\"rows\":[]}");
-view_callback(complete, {_, Resp}) ->
-    chttpd:send_delayed_chunk(Resp, "\r\n]}");
-view_callback({error, Reason}, {_, Resp}) ->
-    chttpd:send_delayed_error(Resp, Reason).
-
-extract_view_type(_ViewName, [], _IsReduce) ->
-    throw({not_found, missing_named_view});
-extract_view_type(ViewName, [View|Rest], IsReduce) ->
-    case lists:member(ViewName, [Name || {Name, _} <- View#mrview.reduce_funs]) of
-    true ->
-        if IsReduce -> reduce; true -> red_map end;
-    false ->
-        case lists:member(ViewName, View#mrview.map_names) of
-        true -> map;
-        false -> extract_view_type(ViewName, Rest, IsReduce)
-        end
-    end.
-
-handle_view_req(#httpd{method='GET',
-        path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
-    Keys = chttpd:qs_json_value(Req, "keys", nil),
-    design_doc_view(Req, Db, DDoc, ViewName, Keys);
-
-handle_view_req(#httpd{method='POST',
-        path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
-    {Fields} = chttpd:json_body_obj(Req),
-    Queries = couch_util:get_value(<<"queries">>, Fields),
-    Keys = couch_util:get_value(<<"keys">>, Fields),
-    case {Queries, Keys} of
-    {Queries, undefined} when is_list(Queries) ->
-        multi_query_view(Req, Db, DDoc, ViewName, Queries);
-    {undefined, Keys} when is_list(Keys) ->
-        design_doc_view(Req, Db, DDoc, ViewName, Keys);
-    {undefined, undefined} ->
-        throw({bad_request, "POST body must contain `keys` or `queries` field"});
-    {undefined, _} ->
-        throw({bad_request, "`keys` body member must be an array"});
-    {_, undefined} ->
-        throw({bad_request, "`queries` body member must be an array"});
-    {_, _} ->
-        throw({bad_request, "`keys` and `queries` are mutually exclusive"})
-    end;
-
-handle_view_req(Req, _Db, _DDoc) ->
-    chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_temp_view_req(Req, _Db) ->
-    Msg = <<"Temporary views are not supported in CouchDB">>,
-    chttpd:send_error(Req, 403, forbidden, Msg).
-
-reverse_key_default(?MIN_STR) -> ?MAX_STR;
-reverse_key_default(?MAX_STR) -> ?MIN_STR;
-reverse_key_default(Key) -> Key.
-
-get_reduce_type(Req) ->
-    case chttpd:qs_value(Req, "reduce", "true") of
-    "true" ->
-        true;
-    "false" ->
-        false;
-    _Error ->
-        throw({bad_request, "`reduce` qs param must be `true` or `false`"})
-    end.
-
-parse_view_params(Req, Keys, ViewType) when not is_list(Req) ->
-    QueryParams = lists:flatmap(fun({K,V}) -> parse_view_param(K,V) end,
-        chttpd:qs(Req)),
-    parse_view_params(QueryParams, Keys, ViewType);
-parse_view_params(QueryParams, Keys, ViewType) ->
-    IsMultiGet = (Keys =/= nil),
-    Args = #mrargs{
-        view_type=ViewType,
-        multi_get=IsMultiGet,
-        keys=Keys
-    },
-    QueryArgs = lists:foldl(fun({K, V}, Args2) ->
-        validate_view_query(K, V, Args2)
-    end, Args, QueryParams),
-
-    GroupLevel = QueryArgs#mrargs.group_level,
-    case {ViewType, GroupLevel, IsMultiGet} of
-        {reduce, exact, true} ->
-            QueryArgs;
-        {reduce, _, false} ->
-            QueryArgs;
-        {reduce, _, _} ->
-            Msg = <<"Multi-key fetchs for reduce "
-                    "view must include `group=true`">>,
-            throw({query_parse_error, Msg});
-        _ ->
-            QueryArgs
-    end,
-    QueryArgs.
-
-parse_json_view_param({<<"key">>, V}) ->
-    [{start_key, V}, {end_key, V}];
-parse_json_view_param({<<"startkey_docid">>, V}) ->
-    [{start_key_docid, V}];
-parse_json_view_param({<<"endkey_docid">>, V}) ->
-    [{end_key_docid, V}];
-parse_json_view_param({<<"startkey">>, V}) ->
-    [{start_key, V}];
-parse_json_view_param({<<"endkey">>, V}) ->
-    [{end_key, V}];
-parse_json_view_param({<<"limit">>, V}) when is_integer(V), V > 0 ->
-    [{limit, V}];
-parse_json_view_param({<<"stale">>, <<"ok">>}) ->
-    [{stale, ok}];
-parse_json_view_param({<<"stale">>, <<"update_after">>}) ->
-    [{stale, update_after}];
-parse_json_view_param({<<"descending">>, V}) when is_boolean(V) ->
-    [{descending, V}];
-parse_json_view_param({<<"skip">>, V}) when is_integer(V) ->
-    [{skip, V}];
-parse_json_view_param({<<"group">>, true}) ->
-    [{group_level, exact}];
-parse_json_view_param({<<"group">>, false}) ->
-    [{group_level, 0}];
-parse_json_view_param({<<"group_level">>, V}) when is_integer(V), V > 0 ->
-    [{group_level, V}];
-parse_json_view_param({<<"inclusive_end">>, V}) when is_boolean(V) ->
-    [{inclusive_end, V}];
-parse_json_view_param({<<"reduce">>, V}) when is_boolean(V) ->
-    [{reduce, V}];
-parse_json_view_param({<<"include_docs">>, V}) when is_boolean(V) ->
-    [{include_docs, V}];
-parse_json_view_param({<<"conflicts">>, V}) when is_boolean(V) ->
-    [{conflicts, V}];
-parse_json_view_param({<<"list">>, V}) ->
-    [{list, couch_util:to_binary(V)}];
-parse_json_view_param({<<"sorted">>, V}) when is_boolean(V) ->
-    [{sorted, V}];
-parse_json_view_param({K, V}) ->
-    [{extra, {K, V}}].
-
-parse_view_param("", _) ->
-    [];
-parse_view_param("key", Value) ->
-    JsonKey = ?JSON_DECODE(Value),
-    [{start_key, JsonKey}, {end_key, JsonKey}];
-parse_view_param("startkey_docid", Value) ->
-    [{start_key_docid, ?l2b(Value)}];
-parse_view_param("endkey_docid", Value) ->
-    [{end_key_docid, ?l2b(Value)}];
-parse_view_param("startkey", Value) ->
-    [{start_key, ?JSON_DECODE(Value)}];
-parse_view_param("endkey", Value) ->
-    [{end_key, ?JSON_DECODE(Value)}];
-parse_view_param("limit", Value) ->
-    [{limit, parse_positive_int_param(Value)}];
-parse_view_param("count", _Value) ->
-    throw({query_parse_error, <<"Query parameter 'count' is now 'limit'.">>});
-parse_view_param("stale", "ok") ->
-    [{stale, ok}];
-parse_view_param("stale", "update_after") ->
-    [{stale, update_after}];
-parse_view_param("stale", _Value) ->
-    throw({query_parse_error,
-            <<"stale only available as stale=ok or as stale=update_after">>});
-parse_view_param("update", _Value) ->
-    throw({query_parse_error, <<"update=false is now stale=ok">>});
-parse_view_param("descending", Value) ->
-    [{descending, parse_bool_param(Value)}];
-parse_view_param("skip", Value) ->
-    [{skip, parse_int_param(Value)}];
-parse_view_param("group", Value) ->
-    case parse_bool_param(Value) of
-        true -> [{group_level, exact}];
-        false -> [{group_level, 0}]
-    end;
-parse_view_param("group_level", Value) ->
-    [{group_level, parse_positive_int_param(Value)}];
-parse_view_param("inclusive_end", Value) ->
-    [{inclusive_end, parse_bool_param(Value)}];
-parse_view_param("reduce", Value) ->
-    [{reduce, parse_bool_param(Value)}];
-parse_view_param("include_docs", Value) ->
-    [{include_docs, parse_bool_param(Value)}];
-parse_view_param("conflicts", Value) ->
-    [{conflicts, parse_bool_param(Value)}];
-parse_view_param("list", Value) ->
-    [{list, ?l2b(Value)}];
-parse_view_param("callback", _) ->
-    []; % Verified in the JSON response functions
-parse_view_param("sorted", Value) ->
-    [{sorted, parse_bool_param(Value)}];
-parse_view_param(Key, Value) ->
-    [{extra, {Key, Value}}].
-
-validate_view_query(start_key, Value, Args) ->
-    case Args#mrargs.multi_get of
-        true ->
-            Msg = <<"Query parameter `start_key` is "
-                    "not compatiible with multi-get">>,
-            throw({query_parse_error, Msg});
-        _ ->
-            Args#mrargs{start_key=Value}
-    end;
-validate_view_query(start_key_docid, Value, Args) ->
-    Args#mrargs{start_key_docid=Value};
-validate_view_query(end_key, Value, Args) ->
-    case Args#mrargs.multi_get of
-        true->
-            Msg = <<"Query paramter `end_key` is "
-                    "not compatibile with multi-get">>,
-            throw({query_parse_error, Msg});
-        _ ->
-            Args#mrargs{end_key=Value}
-    end;
-validate_view_query(end_key_docid, Value, Args) ->
-    Args#mrargs{end_key_docid=Value};
-validate_view_query(limit, Value, Args) ->
-    Args#mrargs{limit=Value};
-validate_view_query(list, Value, Args) ->
-    Args#mrargs{list=Value};
-validate_view_query(stale, Value, Args) ->
-    Args#mrargs{stale=Value};
-validate_view_query(descending, true, Args) ->
-    case Args#mrargs.direction of
-        rev -> Args; % Already reversed
-        fwd ->
-            Args#mrargs{
-                direction = rev,
-                start_key_docid =
-                    reverse_key_default(Args#mrargs.start_key_docid),
-                end_key_docid =
-                    reverse_key_default(Args#mrargs.end_key_docid)
-            }
-    end;
-validate_view_query(descending, false, Args) ->
-    Args; % Ignore default condition
-validate_view_query(skip, Value, Args) ->
-    Args#mrargs{skip=Value};
-validate_view_query(group_level, Value, Args) ->
-    case Args#mrargs.view_type of
-        reduce ->
-            Args#mrargs{group_level=Value};
-        _ ->
-            Msg = <<"Invalid URL parameter 'group' or "
-                    " 'group_level' for non-reduce view.">>,
-            throw({query_parse_error, Msg})
-    end;
-validate_view_query(inclusive_end, Value, Args) ->
-    Args#mrargs{inclusive_end=Value};
-validate_view_query(reduce, false, Args) ->
-    Args;
-validate_view_query(reduce, _, Args) ->
-    case Args#mrargs.view_type of
-        map ->
-            Msg = <<"Invalid URL parameter `reduce` for map view.">>,
-            throw({query_parse_error, Msg});
-        _ ->
-            Args
-    end;
-validate_view_query(include_docs, true, Args) ->
-    case Args#mrargs.view_type of
-        reduce ->
-            Msg = <<"Query paramter `include_docs` "
-                    "is invalid for reduce views.">>,
-            throw({query_parse_error, Msg});
-        _ ->
-            Args#mrargs{include_docs=true}
-    end;
-validate_view_query(include_docs, _Value, Args) ->
-    Args;
-validate_view_query(conflicts, true, Args) ->
-    case Args#mrargs.view_type of
-    reduce ->
-        Msg = <<"Query parameter `conflicts` "
-                "is invalid for reduce views.">>,
-        throw({query_parse_error, Msg});
-    _ ->
-        Args#mrargs{extra = [conflicts|Args#mrargs.extra]}
-    end;
-validate_view_query(conflicts, _Value, Args) ->
-    Args;
-validate_view_query(sorted, false, Args) ->
-    Args#mrargs{sorted=false};
-validate_view_query(sorted, _Value, Args) ->
-    Args;
-validate_view_query(extra, _Value, Args) ->
-    Args.
-
-view_group_etag(Group, Db) ->
-    view_group_etag(Group, Db, nil).
-
-view_group_etag(Group, _Db, Extra) ->
-    Sig = couch_view_group:get_signature(Group),
-    CurrentSeq = couch_view_group:get_current_seq(Group),
-    % This is not as granular as it could be.
-    % If there are updates to the db that do not affect the view index,
-    % they will change the Etag. For more granular Etags we'd need to keep
-    % track of the last Db seq that caused an index change.
-    chttpd:make_etag({Sig, CurrentSeq, Extra}).
-
-parse_bool_param("true") -> true;
-parse_bool_param("false") -> false;
-parse_bool_param(Val) ->
-    Msg = io_lib:format("Invalid value for boolean paramter: ~p", [Val]),
-    throw({query_parse_error, ?l2b(Msg)}).
-
-parse_int_param(Val) ->
-    case (catch list_to_integer(Val)) of
-    IntVal when is_integer(IntVal) ->
-        IntVal;
-    _ ->
-        Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]),
-        throw({query_parse_error, ?l2b(Msg)})
-    end.
-
-parse_positive_int_param(Val) ->
-    case parse_int_param(Val) of
-    IntVal when IntVal >= 0 ->
-        IntVal;
-    _ ->
-        Fmt = "Invalid value for positive integer parameter: ~p",
-        Msg = io_lib:format(Fmt, [Val]),
-        throw({query_parse_error, ?l2b(Msg)})
-    end.
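
The query-string handling above splits into two stages: parse_view_param
turns raw string pairs into typed options, and validate_view_query folds
those options into the #mrargs record, throwing {query_parse_error, Msg}
(reported to the client as a 400) for invalid values or combinations such
as `reduce` on a map view. A minimal standalone sketch of the same fold,
using an illustrative record in place of #mrargs:

    -module(view_params_sketch).
    -export([parse/1]).

    -record(args, {limit, descending = false}).

    %% Fold validated query-string pairs into the record; throw on bad
    %% values so the caller can translate the error into an HTTP response.
    parse(QsPairs) ->
        lists:foldl(fun apply_param/2, #args{}, QsPairs).

    apply_param({"limit", V}, Args) ->
        case catch list_to_integer(V) of
            I when is_integer(I), I >= 0 -> Args#args{limit = I};
            _ -> throw({query_parse_error, <<"limit must be a non-negative integer">>})
        end;
    apply_param({"descending", V}, Args) ->
        Args#args{descending = V =:= "true"};
    apply_param(_Other, Args) ->
        Args.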

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/test/chttpd_delayed_response_test.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/test/chttpd_delayed_response_test.erl b/src/chttpd/test/chttpd_delayed_response_test.erl
deleted file mode 100644
index 911ef5b..0000000
--- a/src/chttpd/test/chttpd_delayed_response_test.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
--module(chttpd_delayed_response_test).
--include_lib("eunit/include/eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-all_test_() ->
-    {foreach,
-     fun() -> application:load(couch) end,
-     fun(_) -> application:unload(couch) end,
-     [
-      fun delayed_chunked_response/1,
-      fun delayed_chunked_response_after_error/1
-     ]}.
-
-delayed_chunked_response(_) ->
-    {"sending an error first should be ok",
-    fun() ->
-        Req = #httpd{mochi_req=mock_request:new(nil, get, "/", {1, 1}, [])},
-        {ok, Resp} = chttpd:start_delayed_chunked_response(Req, 200, []),
-        ?assertMatch({ok, _}, chttpd:send_delayed_error(Resp, bad_request))
-    end}.
-
-delayed_chunked_response_after_error(_) ->
-    {"sending an error midstream should throw http_abort",
-    fun() ->
-        Req = #httpd{mochi_req=mock_request:new(nil, get, "/", {1, 1}, [])},
-        {ok, Resp} = chttpd:start_delayed_chunked_response(Req, 200, []),
-        {ok, Resp1} = chttpd:send_delayed_chunk(Resp, <<>>),
-        ?assertThrow({http_abort, _, _}, chttpd:send_delayed_error(Resp1, bad_request))
-    end}.
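
Both tests drive chttpd's delayed-response API against the mock_request
module below instead of a live socket. Assuming a built tree with the
chttpd, couch, and test beams on the code path, they can be run directly:

    1> eunit:test(chttpd_delayed_response_test).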

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/test/mock_request.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/test/mock_request.erl b/src/chttpd/test/mock_request.erl
deleted file mode 100644
index 3edfd29..0000000
--- a/src/chttpd/test/mock_request.erl
+++ /dev/null
@@ -1,37 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
--module(mock_request, [Socket, Method, RawPath, Version, Headers]).
-
--compile(export_all).
-
-get_header_value(_) ->
-    undefined.
-
-parse_qs() ->
-    [].
-
-get(method) ->
-    Method;
-get(raw_path) ->
-    RawPath;
-get(version) ->
-    Version.
-
-should_close() ->
-    false.
-
-respond({Code, ResponseHeaders, _}) ->
-    mochiweb:new_response({THIS, Code, ResponseHeaders}).
-
-send(_) ->
-    ok.
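
mock_request relies on Erlang's old parameterized-module feature (the extra
argument list on the -module line), which modern OTP releases no longer
support: mock_request:new(nil, get, "/", {1, 1}, []) returns an instance
whose calls close over those five values, implementing just enough of the
mochiweb request API (get/1, get_header_value/1, respond/1, ...) for the
tests above.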


[28/49] Remove src/fabric

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_doc_open_revs.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
deleted file mode 100644
index 398a675..0000000
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ /dev/null
@@ -1,305 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open_revs).
-
--export([go/4]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--record(state, {
-    dbname,
-    worker_count,
-    workers,
-    reply_count = 0,
-    r,
-    revs,
-    latest,
-    replies = []
-}).
-
-go(DbName, Id, Revs, Options) ->
-    Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_revs,
-        [Id, Revs, Options]),
-    R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
-    State = #state{
-        dbname = DbName,
-        worker_count = length(Workers),
-        workers = Workers,
-        r = list_to_integer(R),
-        revs = Revs,
-        latest = lists:member(latest, Options),
-        replies = case Revs of all -> []; Revs -> [{Rev,[]} || Rev <- Revs] end
-    },
-    RexiMon = fabric_util:create_monitors(Workers),
-    try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
-    {ok, {ok, Reply}} ->
-        {ok, Reply};
-    Else ->
-        Else
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, #state{workers=Workers}=State) ->
-    NewWorkers = lists:keydelete(NodeRef, #shard.node, Workers),
-    skip(State#state{workers=NewWorkers});
-handle_message({rexi_EXIT, _}, Worker, #state{workers=Workers}=State) ->
-    skip(State#state{workers=lists:delete(Worker,Workers)});
-handle_message({ok, RawReplies}, Worker, #state{revs = all} = State) ->
-    #state{
-        dbname = DbName,
-        reply_count = ReplyCount,
-        worker_count = WorkerCount,
-        workers = Workers,
-        replies = All0,
-        r = R
-    } = State,
-    All = lists:foldl(fun(Reply,D) -> fabric_util:update_counter(Reply,1,D) end,
-        All0, RawReplies),
-    Reduced = fabric_util:remove_ancestors(All, []),
-    Complete = (ReplyCount =:= (WorkerCount - 1)),
-    QuorumMet = lists:all(fun({_,{_, C}}) -> C >= R end, Reduced),
-    case Reduced of All when QuorumMet andalso ReplyCount =:= (R-1) ->
-        Repair = false;
-    _ ->
-        Repair = [D || {_,{{ok,D}, _}} <- Reduced]
-    end,
-    case maybe_reply(DbName, Reduced, Complete, Repair, R) of
-    noreply ->
-        {ok, State#state{replies = All, reply_count = ReplyCount+1,
-                        workers = lists:delete(Worker,Workers)}};
-    {reply, FinalReply} ->
-        fabric_util:cleanup(lists:delete(Worker,Workers)),
-        {stop, FinalReply}
-    end;
-handle_message({ok, RawReplies0}, Worker, State) ->
-    % we've got an explicit revision list, but if latest=true the workers may
-    % return a descendant of the requested revision.  Take advantage of the
-    % fact that revisions are returned in order to keep track.
-    RawReplies = strip_not_found_missing(RawReplies0),
-    #state{
-        dbname = DbName,
-        reply_count = ReplyCount,
-        worker_count = WorkerCount,
-        workers = Workers,
-        replies = All0,
-        r = R
-    } = State,
-    All = lists:zipwith(fun({Rev, D}, Reply) ->
-        if Reply =:= error -> {Rev, D}; true ->
-            {Rev, fabric_util:update_counter(Reply, 1, D)}
-        end
-    end, All0, RawReplies),
-    Reduced = [fabric_util:remove_ancestors(X, []) || {_, X} <- All],
-    FinalReplies = [choose_winner(X, R) || X <- Reduced, X =/= []],
-    Complete = (ReplyCount =:= (WorkerCount - 1)),
-    case is_repair_needed(All, FinalReplies) of
-    true ->
-        Repair = [D || {_,{{ok,D}, _}} <- lists:flatten(Reduced)];
-    false ->
-        Repair = false
-    end,
-    case maybe_reply(DbName, FinalReplies, Complete, Repair, R) of
-    noreply ->
-        {ok, State#state{replies = All, reply_count = ReplyCount+1,
-                        workers=lists:delete(Worker,Workers)}};
-    {reply, FinalReply} ->
-        fabric_util:cleanup(lists:delete(Worker,Workers)),
-        {stop, FinalReply}
-    end.
-
-skip(#state{revs=all} = State) ->
-    handle_message({ok, []}, nil, State);
-skip(#state{revs=Revs} = State) ->
-    handle_message({ok, [error || _Rev <- Revs]}, nil, State).
-
-maybe_reply(_, [], false, _, _) ->
-    noreply;
-maybe_reply(DbName, ReplyDict, Complete, RepairDocs, R) ->
-    case Complete orelse lists:all(fun({_,{_, C}}) -> C >= R end, ReplyDict) of
-    true ->
-        maybe_execute_read_repair(DbName, RepairDocs),
-        {reply, unstrip_not_found_missing(extract_replies(ReplyDict))};
-    false ->
-        noreply
-    end.
-
-extract_replies(Replies) ->
-    lists:map(fun({_,{Reply,_}}) -> Reply end, Replies).
-
-choose_winner(Options, R) ->
-    case lists:dropwhile(fun({_,{_Reply, C}}) -> C < R end, Options) of
-    [] ->
-        case [Elem || {_,{{ok, #doc{}}, _}} = Elem <- Options] of
-        [] ->
-            hd(Options);
-        Docs ->
-            lists:last(lists:sort(Docs))
-        end;
-    [QuorumMet | _] ->
-        QuorumMet
-    end.
-
-% repair needed if any reply other than the winner has been received for a rev
-is_repair_needed([], []) ->
-    false;
-is_repair_needed([{_Rev, [Reply]} | Tail1], [Reply | Tail2]) ->
-    is_repair_needed(Tail1, Tail2);
-is_repair_needed(_, _) ->
-    true.
-
-maybe_execute_read_repair(_Db, false) ->
-    ok;
-maybe_execute_read_repair(Db, Docs) ->
-    [#doc{id=Id} | _] = Docs,
-    Ctx = #user_ctx{roles=[<<"_admin">>]},
-    Res = fabric:update_docs(Db, Docs, [replicated_changes, {user_ctx,Ctx}]),
-    twig:log(notice, "read_repair ~s ~s ~p", [Db, Id, Res]).
-
-% hackery required so that not_found sorts first
-strip_not_found_missing([]) ->
-    [];
-strip_not_found_missing([{{not_found, missing}, Rev} | Rest]) ->
-    [{not_found, Rev} | strip_not_found_missing(Rest)];
-strip_not_found_missing([Else | Rest]) ->
-    [Else | strip_not_found_missing(Rest)].
-
-unstrip_not_found_missing([]) ->
-    [];
-unstrip_not_found_missing([{not_found, Rev} | Rest]) ->
-    [{{not_found, missing}, Rev} | unstrip_not_found_missing(Rest)];
-unstrip_not_found_missing([Else | Rest]) ->
-    [Else | unstrip_not_found_missing(Rest)].
-
-all_revs_test() ->
-    config:start_link([]),
-    meck:new(fabric),
-    meck:expect(fabric, dbname, fun(Name) -> Name end),
-    meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
-    State0 = #state{worker_count = 3, workers=[nil,nil,nil], r = 2, revs = all},
-    Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-
-    % an empty worker response does not count as meeting quorum
-    ?assertMatch(
-        {ok, #state{workers=[nil,nil]}},
-        handle_message({ok, []}, nil, State0)
-    ),
-
-    ?assertMatch(
-        {ok, #state{workers=[nil, nil]}},
-        handle_message({ok, [Foo1, Bar1]}, nil, State0)
-    ),
-    {ok, State1} = handle_message({ok, [Foo1, Bar1]}, nil, State0),
-
-    % the normal case - workers agree
-    ?assertEqual(
-        {stop, [Bar1, Foo1]},
-        handle_message({ok, [Foo1, Bar1]}, nil, State1)
-    ),
-
-    % a case where the 2nd worker has a newer Foo - currently we're considering
-    % Foo to have reached quorum and execute_read_repair()
-    ?assertEqual(
-        {stop, [Bar1, Foo2]},
-        handle_message({ok, [Foo2, Bar1]}, nil, State1)
-    ),
-
-    % a case where quorum has not yet been reached for Foo
-    ?assertMatch(
-        {ok, #state{}},
-        handle_message({ok, [Bar1]}, nil, State1)
-    ),
-    {ok, State2} = handle_message({ok, [Bar1]}, nil, State1),
-
-    % still no quorum, but all workers have responded.  We include Foo1 in the
-    % response and execute_read_repair()
-    ?assertEqual(
-        {stop, [Bar1, Foo1]},
-        handle_message({ok, [Bar1]}, nil, State2)
-      ),
-    meck:unload(fabric),
-    config:stop().
-
-specific_revs_test() ->
-    config:start_link([]),
-    meck:new(fabric),
-    meck:expect(fabric, dbname, fun(Name) -> Name end),
-    meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
-    Revs = [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}],
-    State0 = #state{
-        worker_count = 3,
-        workers = [nil, nil, nil],
-        r = 2,
-        revs = Revs,
-        latest = false,
-        replies = [{Rev,[]} || Rev <- Revs]
-    },
-    Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-    Baz1 = {{not_found, missing}, {1,<<"baz">>}},
-    Baz2 = {ok, #doc{revs = {1, [<<"baz">>]}}},
-
-    ?assertMatch(
-        {ok, #state{}},
-        handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0)
-    ),
-    {ok, State1} = handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0),
-
-    % the normal case - workers agree
-    ?assertEqual(
-        {stop, [Foo1, Bar1, Baz1]},
-        handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1)
-    ),
-
-    % latest=true, worker responds with Foo2 and we return it
-    State0L = State0#state{latest = true},
-    ?assertMatch(
-        {ok, #state{}},
-        handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L)
-    ),
-    {ok, State1L} = handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L),
-    ?assertEqual(
-        {stop, [Foo2, Bar1, Baz1]},
-        handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State1L)
-    ),
-
-    % Foo1 is included in the read quorum for Foo2
-    ?assertEqual(
-        {stop, [Foo2, Bar1, Baz1]},
-        handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1L)
-    ),
-
-    % {not_found, missing} is included in the quorum for any found revision
-    ?assertEqual(
-        {stop, [Foo2, Bar1, Baz2]},
-        handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State1L)
-    ),
-
-    % a worker failure is skipped
-    ?assertMatch(
-        {ok, #state{}},
-        handle_message({rexi_EXIT, foo}, nil, State1L)
-    ),
-    {ok, State2L} = handle_message({rexi_EXIT, foo}, nil, State1L),
-    ?assertEqual(
-        {stop, [Foo2, Bar1, Baz2]},
-        handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State2L)
-      ),
-    meck:unload(fabric),
-    config:stop().
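
The strip/unstrip pair above leans on Erlang's term order: an atom sorts
before any tuple, so rewriting {{not_found, missing}, Rev} as
{not_found, Rev} lets not_found replies sort ahead of {ok, #doc{}} replies
wherever the reply lists are ordered (the "hackery required so that
not_found sorts first" noted above). For example, in a shell:

    1> lists:sort([{ok, doc}, {not_found, rev}]).
    [{not_found,rev},{ok,doc}]
    2> lists:sort([{ok, doc}, {{not_found, missing}, rev}]).
    [{ok,doc},{{not_found,missing},rev}]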

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_doc_update.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
deleted file mode 100644
index 50c244c..0000000
--- a/src/fabric/src/fabric_doc_update.erl
+++ /dev/null
@@ -1,306 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_update).
-
--export([go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(_, [], _) ->
-    {ok, []};
-go(DbName, AllDocs, Opts) ->
-    validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
-    Options = lists:delete(all_or_nothing, Opts),
-    GroupedDocs = lists:map(fun({#shard{name=Name, node=Node} = Shard, Docs}) ->
-        Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
-        {Shard#shard{ref=Ref}, Docs}
-    end, group_docs_by_shard(DbName, AllDocs)),
-    {Workers, _} = lists:unzip(GroupedDocs),
-    RexiMon = fabric_util:create_monitors(Workers),
-    W = couch_util:get_value(w, Options, integer_to_list(mem3:quorum(DbName))),
-    Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- AllDocs])},
-    Timeout = fabric_util:request_timeout(),
-    try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, Acc0, infinity, Timeout) of
-    {ok, {Health, Results}} when Health =:= ok; Health =:= accepted ->
-        {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
-    {timeout, Acc} ->
-        {_, _, W1, _, DocReplDict} = Acc,
-        {Health, _, Resp} = dict:fold(fun force_reply/3, {ok, W1, []},
-            DocReplDict),
-        {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
-    Else ->
-        Else
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, Acc0) ->
-    {_, LenDocs, W, GroupedDocs, DocReplyDict} = Acc0,
-    NewGrpDocs = [X || {#shard{node=N}, _} = X <- GroupedDocs, N =/= NodeRef],
-    skip_message({length(NewGrpDocs), LenDocs, W, NewGrpDocs, DocReplyDict});
-
-handle_message({rexi_EXIT, _}, Worker, Acc0) ->
-    {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
-    NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
-    skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
-handle_message(internal_server_error, Worker, Acc0) ->
-    % happens when we fail to load validation functions in an RPC worker
-    {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
-    NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
-    skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
-handle_message(attachment_chunk_received, _Worker, Acc0) ->
-    {ok, Acc0};
-handle_message({ok, Replies}, Worker, Acc0) ->
-    {WaitingCount, DocCount, W, GroupedDocs, DocReplyDict0} = Acc0,
-    {value, {_, Docs}, NewGrpDocs} = lists:keytake(Worker, 1, GroupedDocs),
-    DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
-    case {WaitingCount, dict:size(DocReplyDict)} of
-    {1, _} ->
-        % last message has arrived, we need to conclude things
-        {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []},
-           DocReplyDict),
-        {stop, {Health, Reply}};
-    {_, DocCount} ->
-        % we've got at least one reply for each document, let's take a look
-        case dict:fold(fun maybe_reply/3, {stop,W,[]}, DocReplyDict) of
-        continue ->
-            {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
-        {stop, W, FinalReplies} ->
-            {stop, {ok, FinalReplies}}
-        end
-    end;
-handle_message({missing_stub, Stub}, _, _) ->
-    throw({missing_stub, Stub});
-handle_message({not_found, no_db_file} = X, Worker, Acc0) ->
-    {_, _, _, GroupedDocs, _} = Acc0,
-    Docs = couch_util:get_value(Worker, GroupedDocs),
-    handle_message({ok, [X || _D <- Docs]}, Worker, Acc0).
-
-force_reply(Doc, [], {_, W, Acc}) ->
-    {error, W, [{Doc, {error, internal_server_error}} | Acc]};
-force_reply(Doc, [FirstReply|_] = Replies, {Health, W, Acc}) ->
-    case update_quorum_met(W, Replies) of
-    {true, Reply} ->
-        {Health, W, [{Doc,Reply} | Acc]};
-    false ->
-        twig:log(warn, "write quorum (~p) failed for ~s", [W, Doc#doc.id]),
-        case [Reply || {ok, Reply} <- Replies] of
-        [] ->
-            % check if all errors are identical, if so inherit health
-            case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
-            true ->
-                {Health, W, [{Doc, FirstReply} | Acc]};
-            false ->
-                {error, W, [{Doc, FirstReply} | Acc]}
-            end;
-        [AcceptedRev | _] ->
-            NewHealth = case Health of ok -> accepted; _ -> Health end,
-            {NewHealth, W, [{Doc, {accepted,AcceptedRev}} | Acc]}
-        end
-    end.
-
-maybe_reply(_, _, continue) ->
-    % we didn't meet quorum for all docs, so we're fast-forwarding the fold
-    continue;
-maybe_reply(Doc, Replies, {stop, W, Acc}) ->
-    case update_quorum_met(W, Replies) of
-    {true, Reply} ->
-        {stop, W, [{Doc, Reply} | Acc]};
-    false ->
-        continue
-    end.
-
-update_quorum_met(W, Replies) ->
-    Counters = lists:foldl(fun(R,D) -> orddict:update_counter(R,1,D) end,
-        orddict:new(), Replies),
-    GoodReplies = lists:filter(fun good_reply/1, Counters),
-    case lists:dropwhile(fun({_, Count}) -> Count < W end, GoodReplies) of
-    [] ->
-        false;
-    [{FinalReply, _} | _] ->
-        {true, FinalReply}
-    end.
-
-good_reply({{ok, _}, _}) ->
-    true;
-good_reply({noreply, _}) ->
-    true;
-good_reply(_) ->
-    false.
-
--spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
-group_docs_by_shard(DbName, Docs) ->
-    dict:to_list(lists:foldl(fun(#doc{id=Id} = Doc, D0) ->
-        lists:foldl(fun(Shard, D1) ->
-            dict:append(Shard, Doc, D1)
-        end, D0, mem3:shards(DbName,Id))
-    end, dict:new(), Docs)).
-
-append_update_replies([], [], DocReplyDict) ->
-    DocReplyDict;
-append_update_replies([Doc|Rest], [], Dict0) ->
-    % icky: with replicated_changes, only errors show up in the result
-    append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
-append_update_replies([Doc|Rest1], [Reply|Rest2], Dict0) ->
-    % TODO what if the same document shows up twice in one update_docs call?
-    append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
-
-skip_message({0, _, W, _, DocReplyDict}) ->
-    {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []}, DocReplyDict),
-    {stop, {Health, Reply}};
-skip_message(Acc0) ->
-    {ok, Acc0}.
-
-validate_atomic_update(_, _, false) ->
-    ok;
-validate_atomic_update(_DbName, AllDocs, true) ->
-    % TODO actually perform the validation.  This requires some hackery: we need
-    % to basically extract the prep_and_validate_updates function from couch_db
-    % and only run that, without actually writing, in case of success.
-    Error = {not_implemented, <<"all_or_nothing is not supported yet">>},
-    PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
-        case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
-        {{Id, {Pos, RevId}}, Error}
-    end, AllDocs),
-    throw({aborted, PreCommitFailures}).
-
-% eunits
-doc_update1_test() ->
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc1],
-    Docs2 = [Doc2, Doc1],
-    Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
-    Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
-
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-
-
-    % test for W = 2
-    AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
-    ?assertEqual(WaitingCountW2_1,2),
-    {stop, FinalReplyW2 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
-
-    % test for W = 3
-    AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
-    ?assertEqual(WaitingCountW3_1,2),
-
-    {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
-    ?assertEqual(WaitingCountW3_2,1),
-
-    {stop, FinalReplyW3 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
-
-    % test w quorum > # shards, which should fail immediately
-
-    Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
-    GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
-
-    AccW4 =
-        {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
-    Bool =
-    case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
-        {stop, _Reply} ->
-            true;
-        _ -> false
-    end,
-    ?assertEqual(Bool,true),
-
-    % Docs with no replies should end up as {error, internal_server_error}
-    SA1 = #shard{node=a, range=1},
-    SB1 = #shard{node=b, range=1},
-    SA2 = #shard{node=a, range=2},
-    SB2 = #shard{node=b, range=2},
-    GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
-    StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
-    {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
-    {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
-    {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
-    {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
-    ?assertEqual(
-        {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
-        ReplyW5
-    ).
-
-
-doc_update2_test() ->
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
-        Reply).
-
-doc_update3_test() ->
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
-
-% needed for testing to avoid having to start the mem3 application
-group_docs_by_shard_hack(_DbName, Shards, Docs) ->
-    dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
-        lists:foldl(fun(Shard, D1) ->
-            dict:append(Shard, Doc, D1)
-        end, D0, Shards)
-    end, dict:new(), Docs)).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_group_info.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_group_info.erl b/src/fabric/src/fabric_group_info.erl
deleted file mode 100644
index 135090f..0000000
--- a/src/fabric/src/fabric_group_info.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_group_info).
-
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId) when is_binary(GroupId) ->
-    {ok, DDoc} = fabric:open_doc(DbName, GroupId, []),
-    go(DbName, DDoc);
-
-go(DbName, #doc{} = DDoc) ->
-    Group = couch_view_group:design_doc_to_view_group(DDoc),
-    Shards = mem3:shards(DbName),
-    Workers = fabric_util:submit_jobs(Shards, group_info, [Group]),
-    RexiMon = fabric_util:create_monitors(Shards),
-    Acc0 = {fabric_dict:init(Workers, nil), []},
-    try
-        fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
-    case fabric_util:remove_down_workers(Counters, NodeRef) of
-    {ok, NewCounters} ->
-        {ok, {NewCounters, Acc}};
-    error ->
-        {error, {nodedown, <<"progress not possible">>}}
-    end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
-    NewCounters = lists:keydelete(Shard, #shard.ref, Counters),
-    case fabric_view:is_progress_possible(NewCounters) of
-    true ->
-        {ok, {NewCounters, Acc}};
-    false ->
-        {error, Reason}
-    end;
-
-handle_message({ok, Info}, Shard, {Counters, Acc}) ->
-    case fabric_dict:lookup_element(Shard, Counters) of
-    undefined ->
-        % already heard from someone else in this range
-        {ok, {Counters, Acc}};
-    nil ->
-        C1 = fabric_dict:store(Shard, ok, Counters),
-        C2 = fabric_view:remove_overlapping_shards(Shard, C1),
-        case fabric_dict:any(nil, C2) of
-        true ->
-            {ok, {C2, [Info|Acc]}};
-        false ->
-            {stop, merge_results(lists:flatten([Info|Acc]))}
-        end
-    end;
-handle_message(_, _, Acc) ->
-    {ok, Acc}.
-
-merge_results(Info) ->
-    Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
-        orddict:new(), Info),
-    orddict:fold(fun
-        (signature, [X|_], Acc) ->
-            [{signature, X} | Acc];
-        (language, [X|_], Acc) ->
-            [{language, X} | Acc];
-        (disk_size, X, Acc) ->
-            [{disk_size, lists:sum(X)} | Acc];
-        (data_size, X, Acc) ->
-            [{data_size, lists:sum(X)} | Acc];
-        (compact_running, X, Acc) ->
-            [{compact_running, lists:member(true, X)} | Acc];
-        (updater_running, X, Acc) ->
-            [{updater_running, lists:member(true, X)} | Acc];
-        (waiting_commit, X, Acc) ->
-            [{waiting_commit, lists:member(true, X)} | Acc];
-        (waiting_clients, X, Acc) ->
-            [{waiting_clients, lists:sum(X)} | Acc];
-        (update_seq, X, Acc) ->
-            [{update_seq, lists:sum(X)} | Acc];
-        (purge_seq, X, Acc) ->
-            [{purge_seq, lists:sum(X)} | Acc];
-        (_, _, Acc) ->
-            Acc
-    end, [], Dict).
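
A minimal sketch of the aggregation pattern merge_results/1 applies above
(the input proplist here is hypothetical; real entries come from each
shard's group_info reply): bucket the {Key, Value} pairs with
orddict:append/3, then fold the buckets, summing counters and OR-ing
booleans.

    merge_example() ->
        Info = [{disk_size, 10}, {compact_running, false},
                {disk_size, 32}, {compact_running, true}],
        Dict = lists:foldl(fun({K, V}, D) -> orddict:append(K, V, D) end,
            orddict:new(), Info),
        orddict:fold(fun
            (disk_size, Sizes, Acc) ->
                [{disk_size, lists:sum(Sizes)} | Acc];
            (compact_running, Flags, Acc) ->
                [{compact_running, lists:member(true, Flags)} | Acc]
        end, [], Dict).
        %% => [{disk_size,42},{compact_running,true}]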

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_rpc.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
deleted file mode 100644
index afcf0e5..0000000
--- a/src/fabric/src/fabric_rpc.erl
+++ /dev/null
@@ -1,516 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc).
-
--export([get_db_info/1, get_doc_count/1, get_update_seq/1]).
--export([open_doc/3, open_revs/4, get_missing_revs/2, get_missing_revs/3,
-    update_docs/3]).
--export([all_docs/2, changes/3, map_view/4, reduce_view/4, group_info/2]).
--export([create_db/1, delete_db/1, reset_validation_funs/1, set_security/3,
-    set_revs_limit/3, create_shard_db_doc/2, delete_shard_db_doc/2]).
--export([get_all_security/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--record (view_acc, {
-    db,
-    limit,
-    include_docs,
-    conflicts,
-    doc_info = nil,
-    offset = nil,
-    total_rows,
-    reduce_fun = fun couch_db:enum_docs_reduce_to_count/1,
-    group_level = 0
-}).
-
-%% rpc endpoints
-%%  a call to with_db/3 will supply your M:F with a #db{} followed by the
-%%  remaining args (see the sketch after this diff)
-
-all_docs(DbName, #mrargs{keys=undefined} = QueryArgs) ->
-    {ok, Db} = get_or_create_db(DbName, []),
-    #mrargs{
-        start_key = StartKey,
-        start_key_docid = StartDocId,
-        end_key = EndKey,
-        end_key_docid = EndDocId,
-        limit = Limit,
-        skip = Skip,
-        include_docs = IncludeDocs,
-        direction = Dir,
-        inclusive_end = Inclusive,
-        extra = Extra
-    } = QueryArgs,
-    set_io_priority(DbName, Extra),
-    {ok, Total} = couch_db:get_doc_count(Db),
-    Acc0 = #view_acc{
-        db = Db,
-        include_docs = IncludeDocs,
-        conflicts = proplists:get_value(conflicts, Extra, false),
-        limit = Limit+Skip,
-        total_rows = Total
-    },
-    EndKeyType = if Inclusive -> end_key; true -> end_key_gt end,
-    Options = [
-        {dir, Dir},
-        {start_key, if is_binary(StartKey) -> StartKey; true -> StartDocId end},
-        {EndKeyType, if is_binary(EndKey) -> EndKey; true -> EndDocId end}
-    ],
-    {ok, _, Acc} = couch_db:enum_docs(Db, fun view_fold/3, Acc0, Options),
-    final_response(Total, Acc#view_acc.offset).
-
-changes(DbName, #changes_args{} = Args, StartSeq) ->
-    changes(DbName, [Args], StartSeq);
-changes(DbName, Options, StartSeq) ->
-    erlang:put(io_priority, {interactive, DbName}),
-    #changes_args{dir=Dir} = Args = lists:keyfind(changes_args, 1, Options),
-    case get_or_create_db(DbName, []) of
-    {ok, Db} ->
-        Enum = fun changes_enumerator/2,
-        Opts = [{dir,Dir}],
-        Acc0 = {Db, StartSeq, Args, Options},
-        try
-            {ok, {_, LastSeq, _, _}} =
-                couch_db:changes_since(Db, StartSeq, Enum, Opts, Acc0),
-            rexi:reply({complete, LastSeq})
-        after
-            couch_db:close(Db)
-        end;
-    Error ->
-        rexi:reply(Error)
-    end.
-
-map_view(DbName, DDoc, ViewName, QueryArgs) ->
-    {ok, Db} = get_or_create_db(DbName, []),
-    #mrargs{
-        limit = Limit,
-        skip = Skip,
-        keys = Keys,
-        include_docs = IncludeDocs,
-        stale = Stale,
-        view_type = ViewType,
-        extra = Extra
-    } = QueryArgs,
-    set_io_priority(DbName, Extra),
-    {LastSeq, MinSeq} = calculate_seqs(Db, Stale),
-    Group0 = couch_view_group:design_doc_to_view_group(DDoc),
-    {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
-    {ok, Group} = couch_view_group:request_group(Pid, MinSeq),
-    maybe_update_view_group(Pid, LastSeq, Stale),
-    erlang:monitor(process, couch_view_group:get_fd(Group)),
-    Views = couch_view_group:get_views(Group),
-    View = fabric_view:extract_view(Pid, ViewName, Views, ViewType),
-    {ok, Total} = couch_view:get_row_count(View),
-    Acc0 = #view_acc{
-        db = Db,
-        include_docs = IncludeDocs,
-        conflicts = proplists:get_value(conflicts, Extra, false),
-        limit = Limit+Skip,
-        total_rows = Total,
-        reduce_fun = fun couch_view:reduce_to_count/1
-    },
-    case Keys of
-    undefined ->
-        Options = couch_httpd_view:make_key_options(QueryArgs),
-        {ok, _, Acc} = couch_view:fold(View, fun view_fold/3, Acc0, Options);
-    _ ->
-        Acc = lists:foldl(fun(Key, AccIn) ->
-            KeyArgs = QueryArgs#mrargs{start_key=Key, end_key=Key},
-            Options = couch_httpd_view:make_key_options(KeyArgs),
-            {_Go, _, Out} = couch_view:fold(View, fun view_fold/3, AccIn,
-                Options),
-            Out
-        end, Acc0, Keys)
-    end,
-    final_response(Total, Acc#view_acc.offset).
-
-reduce_view(DbName, #doc{} = DDoc, ViewName, QueryArgs) ->
-    Group = couch_view_group:design_doc_to_view_group(DDoc),
-    reduce_view(DbName, Group, ViewName, QueryArgs);
-reduce_view(DbName, Group0, ViewName, QueryArgs) ->
-    erlang:put(io_priority, {interactive, DbName}),
-    {ok, Db} = get_or_create_db(DbName, []),
-    #mrargs{
-        group_level = GroupLevel,
-        limit = Limit,
-        skip = Skip,
-        keys = Keys,
-        stale = Stale,
-        extra = Extra
-    } = QueryArgs,
-    set_io_priority(DbName, Extra),
-    GroupFun = group_rows_fun(GroupLevel),
-    {LastSeq, MinSeq} = calculate_seqs(Db, Stale),
-    {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
-    {ok, Group} = couch_view_group:request_group(Pid, MinSeq),
-    maybe_update_view_group(Pid, LastSeq, Stale),
-    Lang = couch_view_group:get_language(Group),
-    Views = couch_view_group:get_views(Group),
-    erlang:monitor(process, couch_view_group:get_fd(Group)),
-    {NthRed, View} = fabric_view:extract_view(Pid, ViewName, Views, reduce),
-    ReduceView = {reduce, NthRed, Lang, View},
-    Acc0 = #view_acc{group_level = GroupLevel, limit = Limit+Skip},
-    case Keys of
-    undefined ->
-        Options0 = couch_httpd_view:make_key_options(QueryArgs),
-        Options = [{key_group_fun, GroupFun} | Options0],
-        couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options);
-    _ ->
-        lists:map(fun(Key) ->
-            KeyArgs = QueryArgs#mrargs{start_key=Key, end_key=Key},
-            Options0 = couch_httpd_view:make_key_options(KeyArgs),
-            Options = [{key_group_fun, GroupFun} | Options0],
-            couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options)
-        end, Keys)
-    end,
-    rexi:reply(complete).
-
-calculate_seqs(Db, Stale) ->
-    LastSeq = couch_db:get_update_seq(Db),
-    if
-        Stale == ok orelse Stale == update_after ->
-            {LastSeq, 0};
-        true ->
-            {LastSeq, LastSeq}
-    end.
-
-maybe_update_view_group(GroupPid, LastSeq, update_after) ->
-    couch_view_group:trigger_group_update(GroupPid, LastSeq);
-maybe_update_view_group(_, _, _) ->
-    ok.
-
-create_db(DbName) ->
-    rexi:reply(case couch_server:create(DbName, []) of
-    {ok, _} ->
-        ok;
-    Error ->
-        Error
-    end).
-
-create_shard_db_doc(_, Doc) ->
-    rexi:reply(mem3_util:write_db_doc(Doc)).
-
-delete_db(DbName) ->
-    couch_server:delete(DbName, []).
-
-delete_shard_db_doc(_, DocId) ->
-    rexi:reply(mem3_util:delete_db_doc(DocId)).
-
-get_db_info(DbName) ->
-    with_db(DbName, [], {couch_db, get_db_info, []}).
-
-get_doc_count(DbName) ->
-    with_db(DbName, [], {couch_db, get_doc_count, []}).
-
-get_update_seq(DbName) ->
-    with_db(DbName, [], {couch_db, get_update_seq, []}).
-
-set_security(DbName, SecObj, Options) ->
-    with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
-
-get_all_security(DbName, Options) ->
-    with_db(DbName, Options, {couch_db, get_security, []}).
-
-set_revs_limit(DbName, Limit, Options) ->
-    with_db(DbName, Options, {couch_db, set_revs_limit, [Limit]}).
-
-open_doc(DbName, DocId, Options) ->
-    with_db(DbName, Options, {couch_db, open_doc, [DocId, Options]}).
-
-open_revs(DbName, Id, Revs, Options) ->
-    with_db(DbName, Options, {couch_db, open_doc_revs, [Id, Revs, Options]}).
-
-get_missing_revs(DbName, IdRevsList) ->
-    get_missing_revs(DbName, IdRevsList, []).
-
-get_missing_revs(DbName, IdRevsList, Options) ->
-    % reimplement here so we get [] for Ids with no missing revs in response
-    set_io_priority(DbName, Options),
-    rexi:reply(case get_or_create_db(DbName, Options) of
-    {ok, Db} ->
-        Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
-        {ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
-            case FullDocInfoResult of
-            {ok, #full_doc_info{rev_tree=RevisionTree} = FullInfo} ->
-                MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
-                {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
-            not_found ->
-                {Id, Revs, []}
-            end
-        end, IdRevsList, couch_btree:lookup(Db#db.id_tree, Ids))};
-    Error ->
-        Error
-    end).
-
-update_docs(DbName, Docs0, Options) ->
-    case proplists:get_value(replicated_changes, Options) of
-    true ->
-        X = replicated_changes;
-    _ ->
-        X = interactive_edit
-    end,
-    Docs = make_att_readers(Docs0),
-    with_db(DbName, Options, {couch_db, update_docs, [Docs, Options, X]}).
-
-group_info(DbName, Group0) ->
-    {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
-    rexi:reply(couch_view_group:request_group_info(Pid)).
-
-reset_validation_funs(DbName) ->
-    case get_or_create_db(DbName, []) of
-    {ok, #db{main_pid = Pid}} ->
-        gen_server:cast(Pid, {load_validation_funs, undefined});
-    _ ->
-        ok
-    end.
-
-%%
-%% internal
-%%
-
-with_db(DbName, Options, {M,F,A}) ->
-    set_io_priority(DbName, Options),
-    case get_or_create_db(DbName, Options) of
-    {ok, Db} ->
-        rexi:reply(try
-            apply(M, F, [Db | A])
-        catch Exception ->
-            Exception;
-        error:Reason ->
-            twig:log(error, "rpc ~p:~p/~p ~p ~p", [M, F, length(A)+1, Reason,
-                clean_stack()]),
-            {error, Reason}
-        end);
-    Error ->
-        rexi:reply(Error)
-    end.
-
-get_or_create_db(DbName, Options) ->
-    case couch_db:open_int(DbName, Options) of
-    {not_found, no_db_file} ->
-        twig:log(warn, "~p creating ~s", [?MODULE, DbName]),
-        couch_server:create(DbName, Options);
-    Else ->
-        Else
-    end.
-
-view_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
-    % matches for _all_docs and translates #full_doc_info{} -> KV pair
-    case couch_doc:to_doc_info(FullDocInfo) of
-    #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]} = DI ->
-        Value = {[{rev,couch_doc:rev_to_str(Rev)}]},
-        view_fold({{Id,Id}, Value}, OffsetReds, Acc#view_acc{doc_info=DI});
-    #doc_info{revs=[#rev_info{deleted=true}|_]} ->
-        {ok, Acc}
-    end;
-view_fold(KV, OffsetReds, #view_acc{offset=nil, total_rows=Total} = Acc) ->
-    % calculates the offset for this shard
-    #view_acc{reduce_fun=Reduce} = Acc,
-    Offset = Reduce(OffsetReds),
-    case rexi:sync_reply({total_and_offset, Total, Offset}) of
-    ok ->
-        view_fold(KV, OffsetReds, Acc#view_acc{offset=Offset});
-    stop ->
-        exit(normal);
-    timeout ->
-        exit(timeout)
-    end;
-view_fold(_KV, _Offset, #view_acc{limit=0} = Acc) ->
-    % we scanned through limit+skip local rows
-    {stop, Acc};
-view_fold({{Key,Id}, Value}, _Offset, Acc) ->
-    % the normal case
-    #view_acc{
-        db = Db,
-        doc_info = DocInfo,
-        limit = Limit,
-        conflicts = Conflicts,
-        include_docs = IncludeDocs
-    } = Acc,
-    case Value of {Props} ->
-        LinkedDocs = (couch_util:get_value(<<"_id">>, Props) =/= undefined);
-    _ ->
-        LinkedDocs = false
-    end,
-    if LinkedDocs ->
-        % we'll embed this at a higher level b/c the doc may be non-local
-        Doc = undefined;
-    IncludeDocs ->
-        IdOrInfo = if DocInfo =/= nil -> DocInfo; true -> Id end,
-        Options = if Conflicts -> [conflicts]; true -> [] end,
-        case couch_db:open_doc(Db, IdOrInfo, Options) of
-        {not_found, deleted} ->
-            Doc = null;
-        {not_found, missing} ->
-            Doc = undefined;
-        {ok, Doc0} ->
-            Doc = couch_doc:to_json_obj(Doc0, [])
-        end;
-    true ->
-        Doc = undefined
-    end,
-    case rexi:stream(#view_row{key=Key, id=Id, value=Value, doc=Doc}) of
-        ok ->
-            {ok, Acc#view_acc{limit=Limit-1}};
-        timeout ->
-            exit(timeout)
-    end.
-
-final_response(Total, nil) ->
-    case rexi:sync_reply({total_and_offset, Total, Total}) of ok ->
-        rexi:reply(complete);
-    stop ->
-        ok;
-    timeout ->
-        exit(timeout)
-    end;
-final_response(_Total, _Offset) ->
-    rexi:reply(complete).
-
-%% TODO: handle case of bogus group level
-group_rows_fun(exact) ->
-    fun({Key1,_}, {Key2,_}) -> Key1 == Key2 end;
-group_rows_fun(0) ->
-    fun(_A, _B) -> true end;
-group_rows_fun(GroupLevel) when is_integer(GroupLevel) ->
-    fun({[_|_] = Key1,_}, {[_|_] = Key2,_}) ->
-        lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
-    ({Key1,_}, {Key2,_}) ->
-        Key1 == Key2
-    end.
-
-reduce_fold(_Key, _Red, #view_acc{limit=0} = Acc) ->
-    {stop, Acc};
-reduce_fold(_Key, Red, #view_acc{group_level=0} = Acc) ->
-    send(null, Red, Acc);
-reduce_fold(Key, Red, #view_acc{group_level=exact} = Acc) ->
-    send(Key, Red, Acc);
-reduce_fold(K, Red, #view_acc{group_level=I} = Acc) when I > 0, is_list(K) ->
-    send(lists:sublist(K, I), Red, Acc);
-reduce_fold(K, Red, #view_acc{group_level=I} = Acc) when I > 0 ->
-    send(K, Red, Acc).
-
-
-send(Key, Value, #view_acc{limit=Limit} = Acc) ->
-    case put(fabric_sent_first_row, true) of
-    undefined ->
-        case rexi:sync_reply(#view_row{key=Key, value=Value}) of
-        ok ->
-            {ok, Acc#view_acc{limit=Limit-1}};
-        stop ->
-            exit(normal);
-        timeout ->
-            exit(timeout)
-        end;
-    true ->
-        case rexi:stream(#view_row{key=Key, value=Value}) of
-        ok ->
-            {ok, Acc#view_acc{limit=Limit-1}};
-        timeout ->
-            exit(timeout)
-        end
-    end.
-
-changes_enumerator(DocInfo, {Db, _Seq, Args, Options}) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        filter = Acc
-    } = Args,
-    Conflicts = proplists:get_value(conflicts, Options, false),
-    #doc_info{high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DocInfo,
-    case [X || X <- couch_changes:filter(Db, DocInfo, Acc), X /= null] of
-    [] ->
-        {ok, {Db, Seq, Args, Options}};
-    Results ->
-        Opts = if Conflicts -> [conflicts]; true -> [] end,
-        ChangesRow = changes_row(Db, DocInfo, Results, Del, IncludeDocs, Opts),
-        Go = rexi:sync_reply(ChangesRow),
-        {Go, {Db, Seq, Args, Options}}
-    end.
-
-changes_row(Db, #doc_info{id=Id, high_seq=Seq}=DI, Results, Del, true, Opts) ->
-    Doc = doc_member(Db, DI, Opts),
-    #change{key=Seq, id=Id, value=Results, doc=Doc, deleted=Del};
-changes_row(_, #doc_info{id=Id, high_seq=Seq}, Results, true, _, _) ->
-    #change{key=Seq, id=Id, value=Results, deleted=true};
-changes_row(_, #doc_info{id=Id, high_seq=Seq}, Results, _, _, _) ->
-    #change{key=Seq, id=Id, value=Results}.
-
-doc_member(Shard, DocInfo, Opts) ->
-    case couch_db:open_doc(Shard, DocInfo, [deleted | Opts]) of
-    {ok, Doc} ->
-        couch_doc:to_json_obj(Doc, []);
-    Error ->
-        Error
-    end.
-
-possible_ancestors(_FullInfo, []) ->
-    [];
-possible_ancestors(FullInfo, MissingRevs) ->
-    #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-    LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-    % Find the revs that are possible parents of this rev
-    lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-        % this leaf is a "possible ancenstor" of the missing
-        % revs if this LeafPos lessthan any of the missing revs
-        case lists:any(fun({MissingPos, _}) ->
-                LeafPos < MissingPos end, MissingRevs) of
-        true ->
-            [{LeafPos, LeafRevId} | Acc];
-        false ->
-            Acc
-        end
-    end, [], LeafRevs).
-
-make_att_readers([]) ->
-    [];
-make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
-    %% go through the attachments looking for 'follows' in the data,
-    %% replace with a function that reads the data from the MIME stream.
-    Atts = [Att#att{data=make_att_reader(D)} || #att{data=D} = Att <- Atts0],
-    [Doc#doc{atts = Atts} | make_att_readers(Rest)].
-
-make_att_reader({follows, Parser, Ref}) ->
-    fun() ->
-        ParserRef = case get(mp_parser_ref) of
-            undefined ->
-                PRef = erlang:monitor(process, Parser),
-                put(mp_parser_ref, PRef),
-                PRef;
-            Else ->
-                Else
-        end,
-        Parser ! {get_bytes, Ref, self()},
-        receive
-            {bytes, Ref, Bytes} ->
-                Bytes;
-            {'DOWN', ParserRef, _, _, Reason} ->
-                throw({mp_parser_died, Reason})
-        end
-    end;
-make_att_reader(Else) ->
-    Else.
-
-clean_stack() ->
-    lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
-        erlang:get_stacktrace()).
-
-set_io_priority(DbName, Options) ->
-    case lists:keyfind(io_priority, 1, Options) of
-    {io_priority, Pri} ->
-        erlang:put(io_priority, Pri);
-    false ->
-        erlang:put(io_priority, {interactive, DbName})
-    end.
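
The with_db/3 helper above captures this module's common RPC shape: open
(or create) the shard, prepend the #db{} handle to the caller's argument
list, and send the result back over rexi. A minimal sketch of the calling
convention, with a hypothetical shard name:

    revs_limit_example() ->
        %% applied as couch_db:set_revs_limit(Db, 1000) on the opened
        %% shard, with the result delivered via rexi:reply/1
        with_db(<<"shards/00000000-1fffffff/db.1388685177">>, [],
            {couch_db, set_revs_limit, [1000]}).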

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_util.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
deleted file mode 100644
index a6982f3..0000000
--- a/src/fabric/src/fabric_util.erl
+++ /dev/null
@@ -1,171 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_util).
-
--export([submit_jobs/3, submit_jobs/4, cleanup/1, recv/4, get_db/1, get_db/2, error_info/1,
-        update_counter/3, remove_ancestors/2, create_monitors/1, kv/2,
-        remove_down_workers/2]).
--export([request_timeout/0]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-remove_down_workers(Workers, BadNode) ->
-    Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end,
-    NewWorkers = fabric_dict:filter(Filter, Workers),
-    case fabric_view:is_progress_possible(NewWorkers) of
-    true ->
-        {ok, NewWorkers};
-    false ->
-        error
-    end.
-
-submit_jobs(Shards, EndPoint, ExtraArgs) ->
-    submit_jobs(Shards, fabric_rpc, EndPoint, ExtraArgs).
-
-submit_jobs(Shards, Module, EndPoint, ExtraArgs) ->
-    lists:map(fun(#shard{node=Node, name=ShardName} = Shard) ->
-        Ref = rexi:cast(Node, {Module, EndPoint, [ShardName | ExtraArgs]}),
-        Shard#shard{ref = Ref}
-    end, Shards).
-
-cleanup(Workers) ->
-    [rexi:kill(Node, Ref) || #shard{node=Node, ref=Ref} <- Workers].
-
-recv(Workers, Keypos, Fun, Acc0) ->
-    rexi_utils:recv(Workers, Keypos, Fun, Acc0, request_timeout(), infinity).
-
-request_timeout() ->
-    case config:get("fabric", "request_timeout", "60000") of
-        "infinity" -> infinity;
-        N -> list_to_integer(N)
-    end.
-
-get_db(DbName) ->
-    get_db(DbName, []).
-
-get_db(DbName, Options) ->
-    {Local, SameZone, DifferentZone} = mem3:group_by_proximity(mem3:shards(DbName)),
-    % Prefer shards on the same node over other nodes, prefer shards in the
-    % same zone over other zones, and sort each remote list by name so that
-    % we don't repeatedly try the same node.
-    Shards = Local ++ lists:keysort(#shard.name, SameZone) ++ lists:keysort(#shard.name, DifferentZone),
-    % suppress shards from down nodes
-    Nodes = [node()|erlang:nodes()],
-    Live = [S || #shard{node = N} = S <- Shards, lists:member(N, Nodes)],
-    get_shard(Live, Options, 100).
-
-get_shard([], _Opts, _Timeout) ->
-    erlang:error({internal_server_error, "No DB shards could be opened."});
-get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout) ->
-    case rpc:call(Node, couch_db, open, [Name, [{timeout, Timeout} | Opts]]) of
-    {ok, Db} ->
-        {ok, Db};
-    {unauthorized, _} = Error ->
-        throw(Error);
-    {badrpc, {'EXIT', {timeout, _}}} ->
-        get_shard(Rest, Opts, 2*Timeout);
-    _Else ->
-        get_shard(Rest, Opts, Timeout)
-    end.
-
-error_info({{<<"reduce_overflow_error">>, _} = Error, _Stack}) ->
-    Error;
-error_info({{timeout, _} = Error, _Stack}) ->
-    Error;
-error_info({{Error, Reason}, Stack}) ->
-    {Error, Reason, Stack};
-error_info({Error, Stack}) ->
-    {Error, nil, Stack}.
-
-update_counter(Item, Incr, D) ->
-    UpdateFun = fun ({Old, Count}) -> {Old, Count + Incr} end,
-    orddict:update(make_key(Item), UpdateFun, {Item, Incr}, D).
-
-make_key({ok, L}) when is_list(L) ->
-    make_key(L);
-make_key([]) ->
-    [];
-make_key([{ok, #doc{revs= {Pos,[RevId | _]}}} | Rest]) ->
-    [{ok, {Pos, RevId}} | make_key(Rest)];
-make_key([{{not_found, missing}, Rev} | Rest]) ->
-    [{not_found, Rev} | make_key(Rest)];
-make_key({ok, #doc{id=Id,revs=Revs}}) ->
-    {Id, Revs};
-make_key(Else) ->
-    Else.
-
-% this presumes the incoming list is sorted, i.e. shorter revlists come first
-remove_ancestors([], Acc) ->
-    lists:reverse(Acc);
-remove_ancestors([{_, {{not_found, _}, Count}} = Head | Tail], Acc) ->
-    % any document is a descendant
-    case lists:filter(fun({_,{{ok, #doc{}}, _}}) -> true; (_) -> false end, Tail) of
-    [{_,{{ok, #doc{}} = Descendant, _}} | _] ->
-        remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
-    [] ->
-        remove_ancestors(Tail, [Head | Acc])
-    end;
-remove_ancestors([{_,{{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
-    Descendants = lists:dropwhile(fun
-    ({_,{{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
-        case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
-        [] ->
-            % impossible to tell if Revs2 is a descendant - assume no
-            true;
-        History ->
-            % if Revs2 is a descendant, History is a prefix of Revs
-            not lists:prefix(History, Revs)
-        end
-    end, Tail),
-    case Descendants of [] ->
-        remove_ancestors(Tail, [Head | Acc]);
-    [{Descendant, _} | _] ->
-        remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
-    end;
-remove_ancestors([Error | Tail], Acc) ->
-    remove_ancestors(Tail, [Error | Acc]).
-
-create_monitors(Shards) ->
-    MonRefs = lists:usort([{rexi_server, N} || #shard{node=N} <- Shards]),
-    rexi_monitor:start(MonRefs).
-
-%% verify only id and rev are used in key.
-update_counter_test() ->
-    Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
-                    body = <<"body">>, atts = <<"atts">>}},
-    ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
-        update_counter(Reply, 1, [])).
-
-remove_ancestors_test() ->
-    Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-    Bar2 = {not_found, {1,<<"bar">>}},
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo1,1)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo2,2)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,2)],
-        remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
-    ).
-
-%% test function
-kv(Item, Count) ->
-    {make_key(Item), {Item,Count}}.
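
A small sketch of the quorum-counting helpers above (the document is
hypothetical): make_key/1 reduces a reply to its {Id, Revs} essence, so
identical replies from different shards collapse onto a single orddict key,
and update_counter/3 tallies how many workers produced each reply.

    count_example() ->
        Reply = {ok, #doc{id = <<"d">>, revs = {1, [<<"r">>]}}},
        D1 = update_counter(Reply, 1, orddict:new()),
        update_counter(Reply, 1, D1).
        %% => [{{<<"d">>, {1, [<<"r">>]}}, {Reply, 2}}]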

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_view.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
deleted file mode 100644
index 942231f..0000000
--- a/src/fabric/src/fabric_view.erl
+++ /dev/null
@@ -1,344 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view).
-
--export([is_progress_possible/1, remove_overlapping_shards/2, maybe_send_row/1,
-    transform_row/1, keydict/1, extract_view/4, get_shards/2,
-    remove_down_shards/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--spec remove_down_shards(#collector{}, node()) ->
-    {ok, #collector{}} | {error, any()}.
-remove_down_shards(Collector, BadNode) ->
-    #collector{callback=Callback, counters=Counters, user_acc=Acc} = Collector,
-    case fabric_util:remove_down_workers(Counters, BadNode) of
-    {ok, NewCounters} ->
-        {ok, Collector#collector{counters = NewCounters}};
-    error ->
-        Reason = {nodedown, <<"progress not possible">>},
-        Callback({error, Reason}, Acc),
-        {stop, Collector}
-    end.
-
-%% @doc looks for a fully covered keyrange in the list of counters
--spec is_progress_possible([{#shard{}, term()}]) -> boolean().
-is_progress_possible([]) ->
-    false;
-is_progress_possible(Counters) ->
-    Ranges = fabric_dict:fold(fun(#shard{range=[X,Y]}, _, A) -> [{X,Y}|A] end,
-        [], Counters),
-    [{Start, Tail0} | Rest] = lists:ukeysort(1, Ranges),
-    Result = lists:foldl(fun
-    (_, fail) ->
-        % we've already declared failure
-        fail;
-    (_, complete) ->
-        % this is the success condition, we can fast-forward
-        complete;
-    ({X,_}, Tail) when X > (Tail+1) ->
-        % gap in the keyrange, we're dead
-        fail;
-    ({_,Y}, Tail) ->
-        case erlang:max(Tail, Y) of
-        End when (End+1) =:= (2 bsl 31) ->
-            complete;
-        Else ->
-            % the normal condition, adding to the tail
-            Else
-        end
-    end, if (Tail0+1) =:= (2 bsl 31) -> complete; true -> Tail0 end, Rest),
-    (Start =:= 0) andalso (Result =:= complete).
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
-    [{#shard{}, any()}].
-remove_overlapping_shards(#shard{range=[A,B]} = Shard0, Shards) ->
-    fabric_dict:filter(fun(#shard{range=[X,Y], node=Node, ref=Ref} = Shard, _) ->
-        if Shard =:= Shard0 ->
-            % we can't remove ourselves
-            true;
-        A < B, X >= A, X < B ->
-            % lower bound is inside our range
-            rexi:kill(Node, Ref),
-            false;
-        A < B, Y > A, Y =< B ->
-            % upper bound is inside our range
-            rexi:kill(Node, Ref),
-            false;
-        B < A, X >= A orelse B < A, X < B ->
-            % target shard wraps the key range, lower bound is inside
-            rexi:kill(Node, Ref),
-            false;
-        B < A, Y > A orelse B < A, Y =< B ->
-            % target shard wraps the key range, upper bound is inside
-            rexi:kill(Node, Ref),
-            false;
-        true ->
-            true
-        end
-    end, Shards).
-
-maybe_send_row(#collector{limit=0} = State) ->
-    #collector{counters=Counters, user_acc=AccIn, callback=Callback} = State,
-    case fabric_dict:any(0, Counters) of
-    true ->
-        % we still need to send the total/offset header
-        {ok, State};
-    false ->
-        {_, Acc} = Callback(complete, AccIn),
-        {stop, State#collector{user_acc=Acc}}
-    end;
-maybe_send_row(State) ->
-    #collector{
-        callback = Callback,
-        counters = Counters,
-        skip = Skip,
-        limit = Limit,
-        user_acc = AccIn
-    } = State,
-    case fabric_dict:any(0, Counters) of
-    true ->
-        {ok, State};
-    false ->
-        try get_next_row(State) of
-        {_, NewState} when Skip > 0 ->
-            maybe_send_row(NewState#collector{skip=Skip-1});
-        {Row, NewState} ->
-            case Callback(transform_row(possibly_embed_doc(NewState,Row)), AccIn) of
-            {stop, Acc} ->
-                {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
-            {ok, Acc} ->
-                maybe_send_row(NewState#collector{user_acc=Acc, limit=Limit-1})
-            end
-        catch complete ->
-            {_, Acc} = Callback(complete, AccIn),
-            {stop, State#collector{user_acc=Acc}}
-        end
-    end.
-
-%% if include_docs=true is used with keys and the row values contain an
-%% "_id" field, then use those "_id"s to retrieve the documents and embed
-%% them in the result
-possibly_embed_doc(_State,
-              #view_row{id=reduced}=Row) ->
-    Row;
-possibly_embed_doc(_State,
-                   #view_row{value=undefined}=Row) ->
-    Row;
-possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
-              #view_row{key=_Key, id=_Id, value=Value, doc=_Doc}=Row) ->
-    #mrargs{include_docs=IncludeDocs} = Args,
-    case IncludeDocs andalso is_tuple(Value) of
-    true ->
-        {Props} = Value,
-        Rev0 = couch_util:get_value(<<"_rev">>, Props),
-        case couch_util:get_value(<<"_id">>,Props) of
-        null -> Row#view_row{doc=null};
-        undefined -> Row;
-        IncId ->
-            % use a separate process to call fabric:open_doc
-            % so we don't interfere with the current call
-            {Pid, Ref} = spawn_monitor(fun() ->
-                exit(
-                case Rev0 of
-                undefined ->
-                    case fabric:open_doc(DbName, IncId, []) of
-                    {ok, NewDoc} ->
-                        Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
-                    {not_found, _} ->
-                        Row#view_row{doc=null}
-                    end;
-                Rev0 ->
-                    Rev = couch_doc:parse_rev(Rev0),
-                    case fabric:open_revs(DbName, IncId, [Rev], []) of
-                    {ok, [{ok, NewDoc}]} ->
-                        Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
-                    {ok, [{{not_found, _}, Rev}]} ->
-                        Row#view_row{doc=null}
-                    end
-                end) end),
-            receive {'DOWN',Ref,process,Pid, Resp} ->
-                        Resp
-            end
-        end;
-        _ -> Row
-    end.
-
-
-keydict(undefined) ->
-    undefined;
-keydict(Keys) ->
-    {Dict,_} = lists:foldl(fun(K, {D,I}) -> {dict:store(K,I,D), I+1} end,
-        {dict:new(),0}, Keys),
-    Dict.
-
-%% internal %%
-
-get_next_row(#collector{rows = []}) ->
-    throw(complete);
-get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
-    #collector{
-        query_args = #mrargs{direction=Dir},
-        keys = Keys,
-        rows = RowDict,
-        os_proc = Proc,
-        counters = Counters0
-    } = St,
-    {Key, RestKeys} = find_next_key(Keys, Dir, RowDict),
-    case dict:find(Key, RowDict) of
-    {ok, Records} ->
-        NewRowDict = dict:erase(Key, RowDict),
-        Counters = lists:foldl(fun(#view_row{worker={Worker,From}}, CntrsAcc) ->
-            case From of
-                {Pid, _} when is_pid(Pid) ->
-                    gen_server:reply(From, ok);
-                Pid when is_pid(Pid) ->
-                    rexi:stream_ack(From)
-            end,
-            fabric_dict:update_counter(Worker, -1, CntrsAcc)
-        end, Counters0, Records),
-        Wrapped = [[V] || #view_row{value=V} <- Records],
-        {ok, [Reduced]} = couch_query_servers:rereduce(Proc, [RedSrc], Wrapped),
-        NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
-        {#view_row{key=Key, id=reduced, value=Reduced}, NewSt};
-    error ->
-        get_next_row(St#collector{keys=RestKeys})
-    end;
-get_next_row(State) ->
-    #collector{rows = [Row|Rest], counters = Counters0} = State,
-    {Worker, From} = Row#view_row.worker,
-    rexi:stream_ack(From),
-    Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
-    {Row, State#collector{rows = Rest, counters=Counters1}}.
-
-find_next_key(nil, Dir, RowDict) ->
-    case lists:sort(sort_fun(Dir), dict:fetch_keys(RowDict)) of
-    [] ->
-        throw(complete);
-    [Key|_] ->
-        {Key, nil}
-    end;
-find_next_key([], _, _) ->
-    throw(complete);
-find_next_key([Key|Rest], _, _) ->
-    {Key, Rest}.
-
-transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
-    {row, {[{key,Key}, {value,Value}]}};
-transform_row(#view_row{key=Key, id=undefined}) ->
-    {row, {[{key,Key}, {error,not_found}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
-    {row, {[{id,Id}, {key,Key}, {value,Value}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc={error,Reason}}) ->
-    {row, {[{id,Id}, {key,Key}, {value,Value}, {error,Reason}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
-    {row, {[{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}}.
-
-
-sort_fun(fwd) ->
-    fun(A,A) -> true; (A,B) -> couch_view:less_json(A,B) end;
-sort_fun(rev) ->
-    fun(A,A) -> true; (A,B) -> couch_view:less_json(B,A) end.
-
-extract_view(Pid, ViewName, [], _ViewType) ->
-    twig:log(error, "missing_named_view ~p", [ViewName]),
-    exit(Pid, kill),
-    exit(missing_named_view);
-extract_view(Pid, ViewName, [View|Rest], ViewType) ->
-    case lists:member(ViewName, view_names(View, ViewType)) of
-    true ->
-        if ViewType == reduce ->
-            {index_of(ViewName, view_names(View, reduce)), View};
-        true ->
-            View
-        end;
-    false ->
-        extract_view(Pid, ViewName, Rest, ViewType)
-    end.
-
-view_names(View, Type) when Type == red_map; Type == reduce ->
-    [Name || {Name, _} <- View#mrview.reduce_funs];
-view_names(View, map) ->
-    View#mrview.map_names.
-
-index_of(X, List) ->
-    index_of(X, List, 1).
-
-index_of(_X, [], _I) ->
-    not_found;
-index_of(X, [X|_Rest], I) ->
-    I;
-index_of(X, [_|Rest], I) ->
-    index_of(X, Rest, I+1).
-
-get_shards(DbName, #mrargs{stale=Stale})
-  when Stale == ok orelse Stale == update_after ->
-    mem3:ushards(DbName);
-get_shards(DbName, #mrargs{stale=false}) ->
-    mem3:shards(DbName).
-
-% unit tests
-is_progress_possible_test() ->
-    EndPoint = 2 bsl 31,
-    T1 = [[0, EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
-    T2 = [[0,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
-    % gap
-    T3 = [[0,10],[12,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
-    % outside range
-    T4 = [[1,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
-    % outside range
-    T5 = [[0,10],[11,20],[21,EndPoint]],
-    ?assertEqual(is_progress_possible(mk_cnts(T5)),false).
-
-remove_overlapping_shards_test() ->
-    meck:new(rexi),
-    meck:expect(rexi, kill, fun(_, _) -> ok end),
-    EndPoint = 2 bsl 31,
-    T1 = [[0,10],[11,20],[21,EndPoint-1]],
-    Shards = mk_cnts(T1,3),
-    ?assertEqual(orddict:size(
-              remove_overlapping_shards(#shard{name=list_to_atom("node-3"),
-                                               node=list_to_atom("node-3"),
-                                               range=[11,20]},
-                                        Shards)),7),
-    meck:unload(rexi).
-
-mk_cnts(Ranges) ->
-    Shards = lists:map(fun(Range) ->
-                               #shard{range=Range}
-                                    end,
-                        Ranges),
-    orddict:from_list([{Shard,nil} || Shard <- Shards]).
-
-mk_cnts(Ranges, NoNodes) ->
-    orddict:from_list([{Shard,nil}
-                       || Shard <-
-                              lists:flatten(lists:map(
-                                 fun(Range) ->
-                                         mk_shards(NoNodes,Range,[])
-                                 end, Ranges))]
-                     ).
-
-mk_shards(0,_Range,Shards) ->
-    Shards;
-mk_shards(NoNodes,Range,Shards) ->
-    NodeName = list_to_atom("node-" ++ integer_to_list(NoNodes)),
-    mk_shards(NoNodes-1,Range,
-              [#shard{name=NodeName, node=NodeName, range=Range} | Shards]).
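
A worked illustration of the ring check above, reusing the mk_cnts/1 test
helper: shard ranges are treated as intervals over [0, 2^32 - 1], and
progress is possible only when the sorted ranges chain from 0 to the end of
the keyspace without a gap.

    ring_example() ->
        End = (2 bsl 31) - 1,
        true = is_progress_possible(mk_cnts([[0, 10], [11, End]])),
        false = is_progress_possible(mk_cnts([[0, 10], [12, End]])).
        %% the second ring has a hole at 11, so progress is impossible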

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_view_all_docs.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
deleted file mode 100644
index 1415c82..0000000
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ /dev/null
@@ -1,212 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_all_docs).
-
--export([go/4]).
--export([open_doc/3]). % exported for spawn
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, #mrargs{keys=undefined} = QueryArgs, Callback, Acc0) ->
-    Workers = fabric_util:submit_jobs(mem3:shards(DbName),all_docs,[QueryArgs]),
-    #mrargs{limit = Limit, skip = Skip} = QueryArgs,
-    State = #collector{
-        query_args = QueryArgs,
-        callback = Callback,
-        counters = fabric_dict:init(Workers, 0),
-        skip = Skip,
-        limit = Limit,
-        user_acc = Acc0
-    },
-    RexiMon = fabric_util:create_monitors(Workers),
-    try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
-        State, infinity, 5000) of
-    {ok, NewState} ->
-        {ok, NewState#collector.user_acc};
-    {timeout, NewState} ->
-        Callback({error, timeout}, NewState#collector.user_acc);
-    {error, Resp} ->
-        {ok, Resp}
-    after
-        rexi_monitor:stop(RexiMon),
-        fabric_util:cleanup(Workers)
-    end;
-
-
-go(DbName, QueryArgs, Callback, Acc0) ->
-    #mrargs{
-        direction = Dir,
-        include_docs = IncludeDocs,
-        limit = Limit,
-        skip = Skip,
-        keys = Keys0
-    } = QueryArgs,
-    {_, Ref0} = spawn_monitor(fun() -> exit(fabric:get_doc_count(DbName)) end),
-    SpawnFun = fun(Key) ->
-        spawn_monitor(?MODULE, open_doc, [DbName, Key, IncludeDocs])
-    end,
-    MaxJobs = all_docs_concurrency(),
-    Keys1 = case Dir of
-        fwd -> Keys0;
-        _ -> lists:reverse(Keys0)
-    end,
-    Keys2 = case Skip < length(Keys1) of
-        true -> lists:nthtail(Skip, Keys1);
-        false -> []
-    end,
-    Keys3 = case Limit < length(Keys2) of
-        true -> lists:sublist(Keys2, Limit);
-        false -> Keys2
-    end,
-    receive {'DOWN', Ref0, _, _, {ok, TotalRows}} ->
-        {ok, Acc1} = Callback({total_and_offset, TotalRows, 0}, Acc0),
-        {ok, Acc2} = doc_receive_loop(
-            Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
-        ),
-        Callback(complete, Acc2)
-    after 10000 ->
-        Callback(timeout, Acc0)
-    end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
-    fabric_view:remove_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
-    #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
-    Counters = fabric_dict:erase(Worker, Counters0),
-    case fabric_view:is_progress_possible(Counters) of
-    true ->
-        {ok, State#collector{counters = Counters}};
-    false ->
-        {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
-        {error, Resp}
-    end;
-
-handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
-    #collector{
-        callback = Callback,
-        counters = Counters0,
-        total_rows = Total0,
-        offset = Offset0,
-        user_acc = AccIn
-    } = State,
-    case fabric_dict:lookup_element(Worker, Counters0) of
-    undefined ->
-        % this worker lost the race with other partition copies, terminate
-        gen_server:reply(From, stop),
-        {ok, State};
-    0 ->
-        gen_server:reply(From, ok),
-        Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
-        Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
-        Total = Total0 + Tot,
-        Offset = Offset0 + Off,
-        case fabric_dict:any(0, Counters2) of
-        true ->
-            {ok, State#collector{
-                counters = Counters2,
-                total_rows = Total,
-                offset = Offset
-            }};
-        false ->
-            FinalOffset = erlang:min(Total, Offset+State#collector.skip),
-            {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
-            {Go, State#collector{
-                counters = fabric_dict:decrement_all(Counters2),
-                total_rows = Total,
-                offset = FinalOffset,
-                user_acc = Acc
-            }}
-        end
-    end;
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
-    #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
-    Dir = Args#mrargs.direction,
-    Rows = merge_row(Dir, Row#view_row{worker={Worker, From}}, Rows0),
-    Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
-    State1 = State#collector{rows=Rows, counters=Counters1},
-    fabric_view:maybe_send_row(State1);
-
-handle_message(complete, Worker, State) ->
-    Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
-    fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
-
-merge_row(fwd, Row, Rows) ->
-    lists:keymerge(#view_row.id, [Row], Rows);
-merge_row(rev, Row, Rows) ->
-    lists:rkeymerge(#view_row.id, [Row], Rows).
-
-all_docs_concurrency() ->
-    Value = config:get("fabric", "all_docs_concurrency", "10"),
-    try
-        list_to_integer(Value)
-    catch _:_ ->
-        10
-    end.
-
-doc_receive_loop(Keys, Pids, SpawnFun, MaxJobs, Callback, AccIn) ->
-    case {Keys, queue:len(Pids)} of
-    {[], 0} ->
-        {ok, AccIn};
-    {[K | RKeys], Len} when Len < MaxJobs ->
-        Pids1 = queue:in(SpawnFun(K), Pids),
-        doc_receive_loop(RKeys, Pids1, SpawnFun, MaxJobs, Callback, AccIn);
-    _ ->
-        {{value, {Pid, Ref}}, RestPids} = queue:out(Pids),
-        receive {'DOWN', Ref, process, Pid, #view_row{} = Row} ->
-            case Callback(fabric_view:transform_row(Row), AccIn) of
-            {ok, Acc} ->
-                doc_receive_loop(
-                    Keys, RestPids, SpawnFun, MaxJobs, Callback, Acc
-                );
-            {stop, Acc} ->
-                cancel_read_pids(RestPids),
-                {ok, Acc}
-            end
-        after 10000 ->
-            timeout
-        end
-    end.
-
-open_doc(DbName, Id, IncludeDocs) ->
-    Row = case fabric:open_doc(DbName, Id, [deleted]) of
-    {not_found, missing} ->
-        Doc = undefined,
-        #view_row{key=Id};
-    {ok, #doc{deleted=true, revs=Revs}} ->
-        Doc = null,
-        {RevPos, [RevId|_]} = Revs,
-        Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}, {deleted,true}]},
-        #view_row{key=Id, id=Id, value=Value};
-    {ok, #doc{revs=Revs} = Doc0} ->
-        Doc = couch_doc:to_json_obj(Doc0, []),
-        {RevPos, [RevId|_]} = Revs,
-        Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}]},
-        #view_row{key=Id, id=Id, value=Value}
-    end,
-    exit(if IncludeDocs -> Row#view_row{doc=Doc}; true -> Row end).
-
-cancel_read_pids(Pids) ->
-    case queue:out(Pids) of
-        {{value, {Pid, Ref}}, RestPids} ->
-            exit(Pid, kill),
-            erlang:demonitor(Ref, [flush]),
-            cancel_read_pids(RestPids);
-        {empty, _} ->
-            ok
-    end.
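
doc_receive_loop/6 above is a small bounded-concurrency pump: it keeps at
most MaxJobs spawn_monitor workers in flight and harvests results in
submission order from their 'DOWN' messages. A self-contained sketch of the
same pattern (the function names are illustrative, not part of the module):

    run(Items, Max, Fun) ->
        pump(Items, queue:new(), Max, Fun, []).

    pump(Items, Pending, Max, Fun, Acc) ->
        case {Items, queue:len(Pending)} of
        {[], 0} ->
            lists:reverse(Acc);
        {[Item | Rest], Len} when Len < Max ->
            %% the worker exits with its result, which arrives in the
            %% Reason field of the 'DOWN' message
            Job = spawn_monitor(fun() -> exit(Fun(Item)) end),
            pump(Rest, queue:in(Job, Pending), Max, Fun, Acc);
        _ ->
            {{value, {Pid, Ref}}, RestPending} = queue:out(Pending),
            receive {'DOWN', Ref, process, Pid, Result} ->
                pump(Items, RestPending, Max, Fun, [Result | Acc])
            end
        end.

For example, run([1, 2, 3], 2, fun(X) -> X * X end) evaluates the three
jobs at most two at a time and returns [1, 4, 9].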


[07/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_key_tree.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
deleted file mode 100644
index a7f6bb2..0000000
--- a/src/couch/src/couch_key_tree.erl
+++ /dev/null
@@ -1,422 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Data structure used to represent document edit histories.
-
-%% A key tree is used to represent the edit history of a document. Each node of
-%% the tree represents a particular version. Relations between nodes represent
-%% the order that these edits were applied. For instance, a set of three edits
-%% would produce a tree of versions A->B->C indicating that edit C was based on
-%% version B which was in turn based on A. In a world without replication (and
-%% no ability to disable MVCC checks), all histories would be forced to be
-%% linear lists of edits due to constraints imposed by MVCC (i.e., new edits
-%% must be based on the current version). However, we have replication, so we
-%% must deal with the not-so-easy cases, which lead to trees.
-%%
-%% Consider a document in state A. This doc is replicated to a second node. We
-%% then edit the document on each node leaving it in two different states, B
-%% and C. We now have two key trees, A->B and A->C. When we go to replicate a
-%% second time, the key tree must combine these two trees which gives us
-%% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we
-%% say that we have two leaves (B and C) that are not deleted. The presence of
-%% multiple leaves indicates a conflict. To remove a conflict, one of the
-%% edits (B or C) can be deleted, which results in A->(B|C->D) where D is an
-%% edit that is specially marked with a deleted=true flag.
-%%
-%% What makes this a bit more complicated is that there is a limit to the
-%% number of revisions kept, specified in couch_db.hrl (default is 1000). When
-%% this limit is exceeded, only the last 1000 are kept. This comes into play
-%% when branches are merged. The comparison has to begin at the same place in
-%% the branches. A revision id is of the form N-XXXXXXX where N is the current
-%% revision. So each path will have a start number, calculated in
-%% couch_doc:to_path using the formula N - length(RevIds) + 1. So, e.g., if a
-%% doc was edited 1003 times, this start number would be 4, indicating that 3
-%% revisions were truncated.
-%%
-%% This comes into play in @see merge_at/3 which recursively walks down one
-%% tree or the other until they begin at the same revision.
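-%%
-%% As a purely hypothetical illustration of merge/3 (atoms stand in for
-%% real revision ids and leaf values), merging the path for edit C into a
-%% tree that already contains A->B branches the tree and reports the new
-%% conflict:
-%%
-%%   PathAB = {1, {a, x, [{b, x, []}]}},
-%%   PathAC = {1, {a, x, [{c, x, []}]}},
-%%   {[{1, {a, x, [{b, x, []}, {c, x, []}]}}], conflicts} =
-%%       couch_key_tree:merge([PathAB], PathAC, 1000).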
-
--module(couch_key_tree).
-
--export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
--export([get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1, stem/2]).
--export([map/2, mapfold/3, map_leafs/2, fold/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% @doc Merge a path with a list of paths and stem to the given length.
--spec merge([path()], path(), pos_integer()) -> {[path()],
-    conflicts | no_conflicts}.
-merge(Paths, Path, Depth) ->
-    {Merged, Conflicts} = merge(Paths, Path),
-    {stem(Merged, Depth), Conflicts}.
-
-%% @doc Merge a path with an existing list of paths, returning a new list of
-%% paths. A return of conflicts indicates a new conflict was discovered in this
-%% merge. Conflicts may already exist in the original list of paths.
--spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}.
-merge(Paths, Path) ->
-    {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false),
-    if HasConflicts ->
-        Conflicts = conflicts;
-    (length(Merged) =/= length(Paths)) and (length(Merged) =/= 1) ->
-        Conflicts = conflicts;
-    true ->
-        Conflicts = no_conflicts
-    end,
-    {lists:sort(Merged), Conflicts}.
-
--spec merge_one(Original::[path()], Inserted::path(), [path()], boolean()) ->
-    {ok, Merged::[path()], NewConflicts::boolean()}.
-merge_one([], Insert, OutAcc, ConflictsAcc) ->
-    {ok, [Insert | OutAcc], ConflictsAcc};
-merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) ->
-    case merge_at([Tree], StartInsert - Start, [TreeInsert]) of
-    {ok, [Merged], Conflicts} ->
-        MergedStart = lists:min([Start, StartInsert]),
-        {ok, Rest ++ [{MergedStart, Merged} | Acc], Conflicts or HasConflicts};
-    no ->
-        AccOut = [{Start, Tree} | Acc],
-        merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts)
-    end.
-
--spec merge_at(tree(), Place::integer(), tree()) ->
-    {ok, Merged::tree(), HasConflicts::boolean()} | no.
-merge_at(_Ours, _Place, []) ->
-    no;
-merge_at([], _Place, _Insert) ->
-    no;
-merge_at([{Key, Value, SubTree}|Sibs], Place, InsertTree) when Place > 0 ->
-    % inserted starts later than committed, need to drill into committed subtree
-    case merge_at(SubTree, Place - 1, InsertTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [{Key, Value, Merged} | Sibs], Conflicts};
-    no ->
-        % first branch didn't merge, move to next branch
-        case merge_at(Sibs, Place, InsertTree) of
-        {ok, Merged, Conflicts} ->
-            {ok, [{Key, Value, SubTree} | Merged], Conflicts};
-        no ->
-            no
-        end
-    end;
-merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 ->
-    % inserted starts earlier than committed, need to drill into insert subtree
-    case merge_at(OurTree, Place + 1, SubTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [{Key, Value, Merged}], Conflicts};
-    no ->
-        no
-    end;
-merge_at([{Key, V1, SubTree}|Sibs], 0, [{Key, V2, InsertSubTree}]) ->
-    {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree),
-    {ok, [{Key, value_pref(V1, V2), Merged} | Sibs], Conflicts};
-merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key ->
-    % sibling keys are ordered, no point in continuing
-    no;
-merge_at([Tree | Sibs], 0, InsertTree) ->
-    case merge_at(Sibs, 0, InsertTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [Tree | Merged], Conflicts};
-    no ->
-        no
-    end.
-
-% key tree functions
-
--spec merge_simple(tree(), tree()) -> {Merged::tree(), NewConflicts::boolean()}.
-merge_simple([], B) ->
-    {B, false};
-merge_simple(A, []) ->
-    {A, false};
-merge_simple([{Key, V1, SubA} | NextA], [{Key, V2, SubB} | NextB]) ->
-    {MergedSubTree, Conflict1} = merge_simple(SubA, SubB),
-    {MergedNextTree, Conflict2} = merge_simple(NextA, NextB),
-    Value = value_pref(V1, V2),
-    {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2};
-merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B ->
-    {Merged, Conflict} = merge_simple(Next, Insert),
-    % if Merged has more branches than the input we added a new conflict
-    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))};
-merge_simple(Ours, [Tree | Next]) ->
-    {Merged, Conflict} = merge_simple(Ours, Next),
-    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))}.
-
-find_missing(_Tree, []) ->
-    [];
-find_missing([], SearchKeys) ->
-    SearchKeys;
-find_missing([{Start, {Key, Value, SubTree}} | RestTree], SearchKeys) ->
-    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Start],
-    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Start],
-    Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
-    find_missing(RestTree, ImpossibleKeys ++ Missing).
-
-find_missing_simple(_Pos, _Tree, []) ->
-    [];
-find_missing_simple(_Pos, [], SearchKeys) ->
-    SearchKeys;
-find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SearchKeys) ->
-    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Pos],
-    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Pos],
-
-    SrcKeys2 = PossibleKeys -- [{Pos, Key}],
-    SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
-    ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
-
-
-filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
-    {FilteredAcc, RemovedKeysAcc};
-filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
-    FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
-    if FilteredKeys == Keys ->
-        % this leaf is not a key we are looking to remove
-        filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
-    true ->
-        % this did match a key, remove both the node and the input key
-        filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
-    end.
-
-% Removes any branches from the tree whose leaf node(s) are in the Keys
-remove_leafs(Trees, Keys) ->
-    % flatten each branch in a tree into a tree path
-    Paths = get_all_leafs_full(Trees),
-
-    % filter out any that are in the keys list.
-    {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
-
-    SortedPaths = lists:sort(
-        [{Pos + 1 - length(Path), Path} || {Pos, Path} <- FilteredPaths]
-    ),
-
-    % convert paths back to trees
-    NewTree = lists:foldl(
-        fun({StartPos, Path},TreeAcc) ->
-            [SingleTree] = lists:foldl(
-                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
-            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
-            NewTrees
-        end, [], SortedPaths),
-    {NewTree, RemovedKeys}.
-
-
-% get the leafs in the tree matching the keys. The matching key nodes can be
-% leafs or inner nodes. If an inner node, then the leafs for that node
-% are returned.
-get_key_leafs(Tree, Keys) ->
-    get_key_leafs(Tree, Keys, []).
-
-get_key_leafs(_, [], Acc) ->
-    {Acc, []};
-get_key_leafs([], Keys, Acc) ->
-    {Acc, Keys};
-get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
-    {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
-    get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
-
-get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
-    {[], []};
-get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
-    {[], KeysToGet};
-get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
-    case lists:delete({Pos, Key}, KeysToGet) of
-    KeysToGet -> % same list, key not found
-        {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
-        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
-        {LeafsFound ++ RestLeafsFound, KeysRemaining};
-    KeysToGet2 ->
-        LeafsFound = get_all_leafs_simple(Pos, [Tree], KeyPathAcc),
-        LeafKeysFound = [{LeafPos, LeafRev} || {_, {LeafPos, [LeafRev|_]}}
-            <- LeafsFound],
-        KeysToGet3 = KeysToGet2 -- LeafKeysFound,
-        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet3, KeyPathAcc),
-        {LeafsFound ++ RestLeafsFound, KeysRemaining}
-    end.
-
-get(Tree, KeysToGet) ->
-    {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
-    FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
-    {FixedResults, KeysNotFound}.
-
-get_full_key_paths(Tree, Keys) ->
-    get_full_key_paths(Tree, Keys, []).
-
-get_full_key_paths(_, [], Acc) ->
-    {Acc, []};
-get_full_key_paths([], Keys, Acc) ->
-    {Acc, Keys};
-get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
-    {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
-    get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
-
-get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
-    {[], []};
-get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
-    {[], KeysToGet};
-get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
-    KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
-    CurrentNodeResult =
-    case length(KeysToGet2) =:= length(KeysToGet) of
-    true -> % not in the key list.
-        [];
-    false -> % this node is in the key list; return it
-        [{Pos, [{KeyId, Value} | KeyPathAcc]}]
-    end,
-    {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
-    {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
-    {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
-
-get_all_leafs_full(Tree) ->
-    get_all_leafs_full(Tree, []).
-
-get_all_leafs_full([], Acc) ->
-    Acc;
-get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
-    get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
-    [];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
-    [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
-    get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
-
-get_all_leafs(Trees) ->
-    get_all_leafs(Trees, []).
-
-get_all_leafs([], Acc) ->
-    Acc;
-get_all_leafs([{Pos, Tree}|Rest], Acc) ->
-    get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
-    [];
-get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
-    [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
-    get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
-
-
-count_leafs([]) ->
-    0;
-count_leafs([{_Pos,Tree}|Rest]) ->
-    count_leafs_simple([Tree]) + count_leafs(Rest).
-
-count_leafs_simple([]) ->
-    0;
-count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
-    1 + count_leafs_simple(RestTree);
-count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
-    count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
-
-fold(_Fun, Acc, []) ->
-    Acc;
-fold(Fun, Acc0, [{Pos, Tree}|Rest]) ->
-    Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
-    fold(Fun, Acc1, Rest).
-
-fold_simple(_Fun, Acc, _Pos, []) ->
-    Acc;
-fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    Type = if SubTree == [] -> leaf; true -> branch end,
-    Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
-    Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree),
-    fold_simple(Fun, Acc2, Pos, RestTree).
-
-
-map(_Fun, []) ->
-    [];
-map(Fun, [{Pos, Tree}|Rest]) ->
-    case erlang:fun_info(Fun, arity) of
-    {arity, 2} ->
-        [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
-        [{Pos, NewTree} | map(Fun, Rest)];
-    {arity, 3} ->
-        [NewTree] = map_simple(Fun, Pos, [Tree]),
-        [{Pos, NewTree} | map(Fun, Rest)]
-    end.
-
-map_simple(_Fun, _Pos, []) ->
-    [];
-map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    Value2 = Fun({Pos, Key}, Value,
-            if SubTree == [] -> leaf; true -> branch end),
-    [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
-
-mapfold(_Fun, Acc, []) ->
-    {[], Acc};
-mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
-    {[NewTree], Acc2} = mapfold_simple(Fun, Acc, Pos, [Tree]),
-    {Rest2, Acc3} = mapfold(Fun, Acc2, Rest),
-    {[{Pos, NewTree} | Rest2], Acc3}.
-
-mapfold_simple(_Fun, Acc, _Pos, []) ->
-    {[], Acc};
-mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    {Value2, Acc2} = Fun({Pos, Key}, Value,
-            if SubTree == [] -> leaf; true -> branch end, Acc),
-    {SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
-    {RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
-    {[{Key, Value2, SubTree2} | RestTree2], Acc4}.
-
-
-map_leafs(_Fun, []) ->
-    [];
-map_leafs(Fun, [{Pos, Tree}|Rest]) ->
-    [NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
-    [{Pos, NewTree} | map_leafs(Fun, Rest)].
-
-map_leafs_simple(_Fun, _Pos, []) ->
-    [];
-map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
-    Value2 = Fun({Pos, Key}, Value),
-    [{Key, Value2, []} | map_leafs_simple(Fun, Pos, RestTree)];
-map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
-
-
-stem(Trees, Limit) ->
-    % flatten each branch in a tree into a tree path, sort by starting rev #
-    Paths = lists:sort(lists:map(fun({Pos, Path}) ->
-        StemmedPath = lists:sublist(Path, Limit),
-        {Pos + 1 - length(StemmedPath), StemmedPath}
-    end, get_all_leafs_full(Trees))),
-
-    % convert paths back to trees
-    lists:foldl(
-        fun({StartPos, Path},TreeAcc) ->
-            [SingleTree] = lists:foldl(
-                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
-            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
-            NewTrees
-        end, [], Paths).
-
-
-value_pref(Tuple, _) when is_tuple(Tuple),
-        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
-    Tuple;
-value_pref(_, Tuple) when is_tuple(Tuple),
-        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
-    Tuple;
-value_pref(?REV_MISSING, Other) ->
-    Other;
-value_pref(Other, ?REV_MISSING) ->
-    Other;
-value_pref(Last, _) ->
-    Last.
-
-
-% Tests moved to test/etap/06?-*.t
-

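The conflict semantics described at the top of couch_key_tree.erl are easy to
exercise from a shell. A minimal sketch, assuming the merge/3 and count_leafs/1
shown in the diff above; the revision ids are made up and the values are the
placeholder atom x:

    %% Merge two edits that share first revision "1-a" but diverge after it.
    {T1, no_conflicts} =
        couch_key_tree:merge([], {1, {"1-a", x, [{"2-b", x, []}]}}, 1000),
    {T2, conflicts} =
        couch_key_tree:merge(T1, {1, {"1-a", x, [{"2-c", x, []}]}}, 1000),
    %% The tree is now 1-a -> (2-b | 2-c): two non-deleted leaves.
    2 = couch_key_tree:count_leafs(T2).

The second merge reports conflicts because it adds a sibling leaf; stemming to
depth 1000 leaves such short paths untouched.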
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_log.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_log.erl b/src/couch/src/couch_log.erl
deleted file mode 100644
index d1aa701..0000000
--- a/src/couch/src/couch_log.erl
+++ /dev/null
@@ -1,263 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log).
--behaviour(gen_event).
--behaviour(config_listener).
-
-% public API
--export([start_link/0, stop/0]).
--export([debug/2, info/2, warn/2, error/2]).
--export([debug_on/0, info_on/0, warn_on/0, get_level/0, get_level_integer/0, set_level/1]).
--export([debug_on/1, info_on/1, warn_on/1, get_level/1, get_level_integer/1, set_level/2]).
--export([read/2]).
-
-% gen_event callbacks
--export([init/1, handle_event/2, terminate/2, code_change/3]).
--export([handle_info/2, handle_call/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--define(LEVEL_ERROR, 4).
--define(LEVEL_WARN, 3).
--define(LEVEL_INFO, 2).
--define(LEVEL_DEBUG, 1).
-
--record(state, {
-    fd,
-    level,
-    sasl
-}).
-
-debug(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg}).
-
-info(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg}).
-
-warn(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), warn, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_warn, ConsoleMsg, FileMsg}).
-
-error(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), error, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_error, ConsoleMsg, FileMsg}).
-
-
-level_integer(error)    -> ?LEVEL_ERROR;
-level_integer(warn)     -> ?LEVEL_WARN;
-level_integer(info)     -> ?LEVEL_INFO;
-level_integer(debug)    -> ?LEVEL_DEBUG;
-level_integer(_Else)    -> ?LEVEL_ERROR. % anything else defaults to ERROR level
-
-level_atom(?LEVEL_ERROR) -> error;
-level_atom(?LEVEL_WARN) -> warn;
-level_atom(?LEVEL_INFO) -> info;
-level_atom(?LEVEL_DEBUG) -> debug.
-
-
-start_link() ->
-    couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, []).
-
-stop() ->
-    couch_event_sup:stop(couch_log).
-
-init([]) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-
-    Filename = config:get("log", "file", "couchdb.log"),
-    Level = level_integer(list_to_atom(config:get("log", "level", "info"))),
-    Sasl = config:get("log", "include_sasl", "true") =:= "true",
-    LevelByModule = config:get("log_level_by_module"),
-
-    case ets:info(?MODULE) of
-    undefined -> ets:new(?MODULE, [named_table]);
-    _ -> ok
-    end,
-    ets:insert(?MODULE, {level, Level}),
-    lists:foreach(fun({Module, ModuleLevel}) ->
-        ModuleLevelInteger = level_integer(list_to_atom(ModuleLevel)),
-        ets:insert(?MODULE, {Module, ModuleLevelInteger})
-    end, LevelByModule),
-
-
-    case file:open(Filename, [append]) of
-    {ok, Fd} ->
-        {ok, #state{fd = Fd, level = Level, sasl = Sasl}};
-    {error, Reason} ->
-        ReasonStr = file:format_error(Reason),
-        io:format("Error opening log file ~s: ~s", [Filename, ReasonStr]),
-        {stop, {error, ReasonStr, Filename}}
-    end.
-
-handle_config_change("log", "file", _, _, _) ->
-    ?MODULE:stop(),
-    remove_handler;
-handle_config_change("log", "level", _, _, _) ->
-    ?MODULE:stop(),
-    remove_handler;
-handle_config_change("log", "include_sasl", _, _, _) ->
-    ?MODULE:stop(),
-    remove_handler;
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-debug_on() ->
-    get_level_integer() =< ?LEVEL_DEBUG.
-
-info_on() ->
-    get_level_integer() =< ?LEVEL_INFO.
-
-warn_on() ->
-    get_level_integer() =< ?LEVEL_WARN.
-
-debug_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_DEBUG.
-
-info_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_INFO.
-
-warn_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_WARN.
-
-set_level(LevelAtom) ->
-    set_level_integer(level_integer(LevelAtom)).
-
-set_level(Module, LevelAtom) ->
-    set_level_integer(Module, level_integer(LevelAtom)).
-
-get_level() ->
-    level_atom(get_level_integer()).
-
-get_level(Module) ->
-    level_atom(get_level_integer(Module)).
-
-get_level_integer() ->
-    try
-        ets:lookup_element(?MODULE, level, 2)
-    catch error:badarg ->
-        ?LEVEL_ERROR
-    end.
-
-get_level_integer(Module0) ->
-    Module = atom_to_list(Module0),
-    try
-        [{_Module, Level}] = ets:lookup(?MODULE, Module),
-        Level
-    catch error:_ ->
-        get_level_integer()
-    end.
-
-set_level_integer(Int) ->
-    gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
-
-set_level_integer(Module, Int) ->
-    gen_event:call(error_logger, couch_log, {set_level_integer, Module, Int}).
-
-handle_event({couch_error, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_warn, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_info, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_debug, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({error_report, _, {Pid, _, _}}=Event, #state{sasl = true} = St) ->
-    {ConMsg, FileMsg} = get_log_messages(Pid, error, "~p", [Event]),
-    log(St, ConMsg, FileMsg),
-    {ok, St};
-handle_event({error, _, {Pid, Format, Args}}, #state{sasl = true} = State) ->
-    {ConMsg, FileMsg} = get_log_messages(Pid, error, Format, Args),
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event(_Event, State) ->
-    {ok, State}.
-
-handle_call({set_level_integer, NewLevel}, State) ->
-    ets:insert(?MODULE, {level, NewLevel}),
-    {ok, ok, State#state{level = NewLevel}};
-
-handle_call({set_level_integer, Module, NewLevel}, State) ->
-    ets:insert(?MODULE, {Module, NewLevel}),
-    {ok, ok, State#state{level = NewLevel}}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {ok, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {ok, State};
-handle_info(_Info, State) ->
-    {ok, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-terminate(_Arg, #state{fd = Fd}) ->
-    file:close(Fd).
-
-log(#state{fd = Fd}, ConsoleMsg, FileMsg) ->
-    ok = io:put_chars(ConsoleMsg),
-    ok = io:put_chars(Fd, FileMsg).
-
-get_log_messages(Pid, Level, Format, Args) ->
-    ConsoleMsg = unicode:characters_to_binary(io_lib:format(
-        "[~s] [~p] " ++ Format ++ "~n", [Level, Pid | Args])),
-    FileMsg = ["[", couch_util:rfc1123_date(), "] ", ConsoleMsg],
-    {ConsoleMsg, iolist_to_binary(FileMsg)}.
-
-
-% Read Bytes bytes from the end of the log file, jumping Offset bytes towards
-% the beginning of the file first.
-%
-%  Log File    FilePos
-%  ----------
-% |          |  10
-% |          |  20
-% |          |  30
-% |          |  40
-% |          |  50
-% |          |  60
-% |          |  70 -- Bytes = 20  --
-% |          |  80                 | Chunk
-% |          |  90 -- Offset = 10 --
-% |__________| 100
-
-read(Bytes, Offset) ->
-    LogFileName = config:get("log", "file"),
-    LogFileSize = filelib:file_size(LogFileName),
-    MaxChunkSize = list_to_integer(
-        config:get("httpd", "log_max_chunk_size", "1000000")),
-    case Bytes > MaxChunkSize of
-    true ->
-        throw({bad_request, "'bytes' cannot exceed " ++
-            integer_to_list(MaxChunkSize)});
-    false ->
-        ok
-    end,
-
-    {ok, Fd} = file:open(LogFileName, [read]),
-    Start = lists:max([LogFileSize - Bytes - Offset, 0]),
-
-    % TODO: truncate chopped first line
-    % TODO: make streaming
-
-    {ok, Chunk} = file:pread(Fd, Start, Bytes),
-    ok = file:close(Fd),
-    Chunk.

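To make the read/2 diagram above concrete, here is a worked example with the
same numbers as the figure (a 100-byte log file, Bytes = 20, Offset = 10):

    LogFileSize = 100, Bytes = 20, Offset = 10,
    Start = lists:max([LogFileSize - Bytes - Offset, 0]),  % 70
    %% file:pread(Fd, 70, 20) then returns file positions 70..89,
    %% i.e. the 20-byte chunk that ends 10 bytes before end-of-file.

Note the chunk may begin mid-line; the TODO in the code acknowledges that the
chopped first line is not yet truncated.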
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_lru.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
deleted file mode 100644
index cc751b0..0000000
--- a/src/couch/src/couch_lru.erl
+++ /dev/null
@@ -1,48 +0,0 @@
--module(couch_lru).
--export([new/0, insert/2, update/2, close/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-new() ->
-    {gb_trees:empty(), dict:new()}.
-
-insert(DbName, {Tree0, Dict0}) ->
-    Lru = now(),
-    {gb_trees:insert(Lru, DbName, Tree0), dict:store(DbName, Lru, Dict0)}.
-
-update(DbName, {Tree0, Dict0}) ->
-    case dict:find(DbName, Dict0) of
-    {ok, Old} ->
-        New = now(),
-        Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
-        Dict = dict:store(DbName, New, Dict0),
-        {Tree, Dict};
-    error ->
-        % We closed this database before processing the update.  Ignore
-        {Tree0, Dict0}
-    end.
-
-close({Tree, _} = Cache) ->
-    close_int(gb_trees:next(gb_trees:iterator(Tree)), Cache).
-
-%% internals
-
-close_int(none, _) ->
-    erlang:error(all_dbs_active);
-close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) ->
-    case ets:update_element(couch_dbs, DbName, {#db.fd_monitor, locked}) of
-    true ->
-        [#db{main_pid = Pid} = Db] = ets:lookup(couch_dbs, DbName),
-        case couch_db:is_idle(Db) of true ->
-            true = ets:delete(couch_dbs, DbName),
-            exit(Pid, kill),
-            {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)};
-        false ->
-            true = ets:update_element(couch_dbs, DbName, {#db.fd_monitor, nil}),
-            close_int(gb_trees:next(Iter), update(DbName, Cache))
-        end;
-    false ->
-        NewTree = gb_trees:delete(Lru, Tree),
-        NewIter = gb_trees:iterator(NewTree),
-        close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)})
-    end.

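A hedged usage sketch of the couch_lru API above; the database names are
hypothetical, and since close/1 consults the couch_dbs ets table it is only
meaningful inside couch_server:

    Lru0 = couch_lru:new(),
    Lru1 = couch_lru:insert(<<"db/one">>, Lru0),
    Lru2 = couch_lru:insert(<<"db/two">>, Lru1),
    %% Touching an entry moves it to the most-recently-used position.
    Lru3 = couch_lru:update(<<"db/one">>, Lru2),
    %% close/1 walks from least- to most-recently-used and evicts the first
    %% idle database, so <<"db/two">> is the first candidate here.
    Lru4 = couch_lru:close(Lru3).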
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_native_process.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
deleted file mode 100644
index 8ca56f0..0000000
--- a/src/couch/src/couch_native_process.erl
+++ /dev/null
@@ -1,422 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License");
-% you may not use this file except in compliance with the License.
-%
-% You may obtain a copy of the License at
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing,
-% software distributed under the License is distributed on an
-% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-% either express or implied.
-%
-% See the License for the specific language governing permissions
-% and limitations under the License.
-%
-% This file drew much inspiration from erlview, which was written by and
-% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
-%
-%
-% This module provides the smallest possible native view server.
-% With this module in place, you can add the following to your couch INI files:
-%  [native_query_servers]
-%  erlang={couch_native_process, start_link, []}
-%
-% This then allows the following example map function to be used:
-%
-%  fun({Doc}) ->
-%    % Below, we emit a single record - the _id as key, null as value
-%    DocId = couch_util:get_value(<<"_id">>, Doc, null),
-%    Emit(DocId, null)
-%  end.
-%
-% which should be roughly the same as the JavaScript:
-%    emit(doc._id, null);
-%
-% This module exposes enough functions that a native Erlang server can
-% act as a fully-fledged view server, but no 'helper' functions specifically
-% for simplifying your Erlang view code.  It is expected that other third-party
-% extensions will evolve which offer useful layers on top of this view server
-% to help simplify your view code.
--module(couch_native_process).
--behaviour(gen_server).
-
--export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
-         handle_info/2]).
--export([set_timeout/2, prompt/2, prompt_many/2]).
-
--define(STATE, native_proc_state).
--record(evstate, {ddocs, funs=[], query_config=[], list_pid=nil, timeout=5000}).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link(?MODULE, [], []).
-
-% this is a bit messy, see also couch_query_servers handle_info
-% stop(_Pid) ->
-%     ok.
-
-set_timeout(Pid, TimeOut) ->
-    gen_server:call(Pid, {set_timeout, TimeOut}).
-
-prompt(Pid, Data) when is_list(Data) ->
-    gen_server:call(Pid, {prompt, Data}).
-
-prompt_many(Pid, DataList) ->
-    prompt_many(Pid, DataList, []).
-
-prompt_many(_Pid, [], Acc) ->
-    {ok, lists:reverse(Acc)};
-prompt_many(Pid, [Data | Rest], Acc) ->
-    Result = prompt(Pid, Data),
-    prompt_many(Pid, Rest, [Result | Acc]).
-
-% gen_server callbacks
-init([]) ->
-    {ok, #evstate{ddocs=dict:new()}}.
-
-handle_call({set_timeout, TimeOut}, _From, State) ->
-    {reply, ok, State#evstate{timeout=TimeOut}};
-
-handle_call({prompt, Data}, _From, State) ->
-    ?LOG_DEBUG("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
-    {NewState, Resp} = try run(State, to_binary(Data)) of
-        {S, R} -> {S, R}
-        catch
-            throw:{error, Why} ->
-                {State, [<<"error">>, Why, Why]}
-        end,
-
-    case Resp of
-        {error, Reason} ->
-            Msg = io_lib:format("couch native server error: ~p", [Reason]),
-            {reply, [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)], NewState};
-        [<<"error">> | Rest] ->
-            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
-            % TODO: markh? (jan)
-            {reply, [<<"error">> | Rest], NewState};
-        [<<"fatal">> | Rest] ->
-            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
-            % TODO: markh? (jan)
-            {stop, fatal, [<<"error">> | Rest], NewState};
-        Resp ->
-            {reply, Resp, NewState}
-    end.
-
-handle_cast(garbage_collect, State) ->
-    erlang:garbage_collect(),
-    {noreply, State};
-handle_cast(foo, State) -> {noreply, State}.
-
-handle_info({'EXIT',_,normal}, State) -> {noreply, State};
-handle_info({'EXIT',_,Reason}, State) ->
-    {stop, Reason, State}.
-terminate(_Reason, _State) -> ok.
-code_change(_OldVersion, State, _Extra) -> {ok, State}.
-
-run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
-    Pid ! {self(), list_row, Row},
-    receive
-        {Pid, chunks, Data} ->
-            {State, [<<"chunks">>, Data]};
-        {Pid, list_end, Data} ->
-            receive
-                {'EXIT', Pid, normal} -> ok
-            after State#evstate.timeout ->
-                throw({timeout, list_cleanup})
-            end,
-            process_flag(trap_exit, erlang:get(do_trap)),
-            {State#evstate{list_pid=nil}, [<<"end">>, Data]}
-    after State#evstate.timeout ->
-        throw({timeout, list_row})
-    end;
-run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
-    Pid ! {self(), list_end},
-    Resp =
-    receive
-        {Pid, list_end, Data} ->
-            receive
-                {'EXIT', Pid, normal} -> ok
-            after State#evstate.timeout ->
-                throw({timeout, list_cleanup})
-            end,
-            [<<"end">>, Data]
-    after State#evstate.timeout ->
-        throw({timeout, list_end})
-    end,
-    process_flag(trap_exit, erlang:get(do_trap)),
-    {State#evstate{list_pid=nil}, Resp};
-run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
-    {State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
-    {#evstate{ddocs=DDocs}, true};
-run(#evstate{ddocs=DDocs}, [<<"reset">>, QueryConfig]) ->
-    {#evstate{ddocs=DDocs, query_config=QueryConfig}, true};
-run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
-    FunInfo = makefun(State, BinFunc),
-    {State#evstate{funs=Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">> , Doc]) ->
-    Resp = lists:map(fun({Sig, Fun}) ->
-        erlang:put(Sig, []),
-        Fun(Doc),
-        lists:reverse(erlang:get(Sig))
-    end, State#evstate.funs),
-    {State, Resp};
-run(State, [<<"reduce">>, Funs, KVs]) ->
-    {Keys, Vals} =
-    lists:foldl(fun([K, V], {KAcc, VAcc}) ->
-        {[K | KAcc], [V | VAcc]}
-    end, {[], []}, KVs),
-    Keys2 = lists:reverse(Keys),
-    Vals2 = lists:reverse(Vals),
-    {State, catch reduce(State, Funs, Keys2, Vals2, false)};
-run(State, [<<"rereduce">>, Funs, Vals]) ->
-    {State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
-    DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
-    {State#evstate{ddocs=DDocs2}, true};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
-    DDoc = load_ddoc(DDocs, DDocId),
-    ddoc(State, DDoc, Rest);
-run(_, Unknown) ->
-    ?LOG_ERROR("Native Process: Unknown command: ~p~n", [Unknown]),
-    throw({error, unknown_command}).
-    
-ddoc(State, {DDoc}, [FunPath, Args]) ->
-    % load fun from the FunPath
-    BFun = lists:foldl(fun
-        (Key, {Props}) when is_list(Props) ->
-            couch_util:get_value(Key, Props, nil);
-        (_Key, Fun) when is_binary(Fun) ->
-            Fun;
-        (_Key, nil) ->
-            throw({error, not_found});
-        (_Key, _Fun) ->
-            throw({error, malformed_ddoc})
-        end, {DDoc}, FunPath),
-    ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
-
-ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
-    {State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
-    FilterFunWrapper = fun(Doc) ->
-        case catch Fun(Doc, Req) of
-        true -> true;
-        false -> false;
-        {'EXIT', Error} -> ?LOG_ERROR("~p", [Error])
-        end
-    end,
-    Resp = lists:map(FilterFunWrapper, Docs),
-    {State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
-    Resp = case (catch apply(Fun, Args)) of
-        FunResp when is_list(FunResp) ->
-            FunResp;
-        {FunResp} ->
-            [<<"resp">>, {FunResp}];
-        FunResp ->
-            FunResp
-    end,
-    {State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
-    Resp = case (catch apply(Fun, Args)) of
-        [JsonDoc, JsonResp]  ->
-            [<<"up">>, JsonDoc, JsonResp]
-    end,
-    {State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
-    Self = self(),
-    SpawnFun = fun() ->
-        LastChunk = (catch apply(Fun, Args)),
-        case start_list_resp(Self, Sig) of
-            started ->
-                receive
-                    {Self, list_row, _Row} -> ignore;
-                    {Self, list_end} -> ignore
-                after State#evstate.timeout ->
-                    throw({timeout, list_cleanup_pid})
-                end;
-            _ ->
-                ok
-        end,
-        LastChunks =
-        case erlang:get(Sig) of
-            undefined -> [LastChunk];
-            OtherChunks -> [LastChunk | OtherChunks]
-        end,
-        Self ! {self(), list_end, lists:reverse(LastChunks)}
-    end,
-    erlang:put(do_trap, process_flag(trap_exit, true)),
-    Pid = spawn_link(SpawnFun),
-    Resp =
-    receive
-        {Pid, start, Chunks, JsonResp} ->
-            [<<"start">>, Chunks, JsonResp]
-    after State#evstate.timeout ->
-        throw({timeout, list_start})
-    end,
-    {State#evstate{list_pid=Pid}, Resp}.
-
-store_ddoc(DDocs, DDocId, DDoc) ->
-    dict:store(DDocId, DDoc, DDocs).
-load_ddoc(DDocs, DDocId) ->
-    try dict:fetch(DDocId, DDocs) of
-        {DDoc} -> {DDoc}
-    catch
-        _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
-    end.
-
-bindings(State, Sig) ->
-    bindings(State, Sig, nil).
-bindings(State, Sig, DDoc) ->
-    Self = self(),
-
-    Log = fun(Msg) ->
-        ?LOG_INFO(Msg, [])
-    end,
-
-    Emit = fun(Id, Value) ->
-        Curr = erlang:get(Sig),
-        erlang:put(Sig, [[Id, Value] | Curr])
-    end,
-
-    Start = fun(Headers) ->
-        erlang:put(list_headers, Headers)
-    end,
-
-    Send = fun(Chunk) ->
-        Curr =
-        case erlang:get(Sig) of
-            undefined -> [];
-            Else -> Else
-        end,
-        erlang:put(Sig, [Chunk | Curr])
-    end,
-
-    GetRow = fun() ->
-        case start_list_resp(Self, Sig) of
-            started ->
-                ok;
-            _ ->
-                Chunks =
-                case erlang:get(Sig) of
-                    undefined -> [];
-                    CurrChunks -> CurrChunks
-                end,
-                Self ! {self(), chunks, lists:reverse(Chunks)}
-        end,
-        erlang:put(Sig, []),
-        receive
-            {Self, list_row, Row} -> Row;
-            {Self, list_end} -> nil
-        after State#evstate.timeout ->
-            throw({timeout, list_pid_getrow})
-        end
-    end,
-   
-    FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
-
-    Bindings = [
-        {'Log', Log},
-        {'Emit', Emit},
-        {'Start', Start},
-        {'Send', Send},
-        {'GetRow', GetRow},
-        {'FoldRows', FoldRows}
-    ],
-    case DDoc of
-        {_Props} ->
-            Bindings ++ [{'DDoc', DDoc}];
-        _Else -> Bindings
-    end.
-
-% thanks to erlview, via:
-% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
-makefun(State, Source) ->
-    Sig = couch_util:md5(Source),
-    BindFuns = bindings(State, Sig),
-    {Sig, makefun(State, Source, BindFuns)}.
-makefun(State, Source, {DDoc}) ->
-    Sig = couch_util:md5(lists:flatten([Source, term_to_binary(DDoc)])),
-    BindFuns = bindings(State, Sig, {DDoc}),
-    {Sig, makefun(State, Source, BindFuns)};
-makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
-    FunStr = binary_to_list(Source),
-    {ok, Tokens, _} = erl_scan:string(FunStr),
-    Form = case (catch erl_parse:parse_exprs(Tokens)) of
-        {ok, [ParsedForm]} ->
-            ParsedForm;
-        {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
-            io:format(standard_error, "Syntax error on line: ~p~n", [LineNum]),
-            io:format(standard_error, "~s~p~n", [Mesg, Params]),
-            throw(Error)
-    end,
-    Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
-        erl_eval:add_binding(Name, Fun, Acc)
-    end, erl_eval:new_bindings(), BindFuns),
-    {value, Fun, _} = erl_eval:expr(Form, Bindings),
-    Fun.
-
-reduce(State, BinFuns, Keys, Vals, ReReduce) ->
-    Funs = case is_list(BinFuns) of
-        true ->
-            lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
-        _ ->
-            [makefun(State, BinFuns)]
-    end,
-    Reds = lists:map(fun({_Sig, Fun}) ->
-        Fun(Keys, Vals, ReReduce)
-    end, Funs),
-    [true, Reds].
-
-foldrows(GetRow, ProcRow, Acc) ->
-    case GetRow() of
-        nil ->
-            {ok, Acc};
-        Row ->
-            case (catch ProcRow(Row, Acc)) of
-                {ok, Acc2} ->
-                    foldrows(GetRow, ProcRow, Acc2);
-                {stop, Acc2} ->
-                    {ok, Acc2}
-            end
-    end.
-
-start_list_resp(Self, Sig) ->
-    case erlang:get(list_started) of
-        undefined ->
-            Headers =
-            case erlang:get(list_headers) of
-                undefined -> {[{<<"headers">>, {[]}}]};
-                CurrHdrs -> CurrHdrs
-            end,
-            Chunks =
-            case erlang:get(Sig) of
-                undefined -> [];
-                CurrChunks -> CurrChunks
-            end,
-            Self ! {self(), start, lists:reverse(Chunks), Headers},
-            erlang:put(list_started, true),
-            erlang:put(Sig, []),
-            started;
-        _ ->
-            ok
-    end.
-
-to_binary({Data}) ->
-    Pred = fun({Key, Value}) ->
-        {to_binary(Key), to_binary(Value)}
-    end,
-    {lists:map(Pred, Data)};
-to_binary(Data) when is_list(Data) ->
-    [to_binary(D) || D <- Data];
-to_binary(null) ->
-    null;
-to_binary(true) ->
-    true;
-to_binary(false) ->
-    false;
-to_binary(Data) when is_atom(Data) ->
-    list_to_binary(atom_to_list(Data));
-to_binary(Data) ->
-    Data.

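The header of couch_native_process.erl shows a map function, but the ddoc/4
clauses above also accept filters, shows, lists, updates and
validate_doc_update. A hedged sketch of a design-document filter in the same
style (the "type" field and its value are made up; per the filters clause, the
fun receives the doc and the request object):

    %% Pass only documents whose type field is <<"post">>.
    fun({Doc}, {_Req}) ->
        couch_util:get_value(<<"type">>, Doc) =:= <<"post">>
    end.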
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_os_daemons.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_os_daemons.erl b/src/couch/src/couch_os_daemons.erl
deleted file mode 100644
index 3560149..0000000
--- a/src/couch/src/couch_os_daemons.erl
+++ /dev/null
@@ -1,377 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_os_daemons).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, info/0, info/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(daemon, {
-    port,
-    name,
-    cmd,
-    kill,
-    status=running,
-    cfg_patterns=[],
-    errors=[],
-    buf=[]
-}).
-
--define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
--define(TIMEOUT, 5000).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-info() ->
-    info([]).
-
-info(Options) ->
-    gen_server:call(?MODULE, {daemon_info, Options}).
-
-init(_) ->
-    process_flag(trap_exit, true),
-    ok = config:listen_for_changes(?MODULE, nil),
-    Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
-    reload_daemons(Table),
-    {ok, Table}.
-
-terminate(_Reason, Table) ->
-    [stop_port(D) || D <- ets:tab2list(Table)],
-    ok.
-
-handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
-    case lists:member(table, Options) of
-        true ->
-            {reply, {ok, ets:tab2list(Table)}, Table};
-        _ ->
-            {reply, {ok, Table}, Table}
-    end;
-handle_call(Msg, From, Table) ->
-    ?LOG_ERROR("Unknown call message to ~p from ~p: ~p", [?MODULE, From, Msg]),
-    {stop, error, Table}.
-
-handle_cast({config_change, Sect, Key}, Table) ->
-    restart_daemons(Table, Sect, Key),
-    case Sect of
-        "os_daemons" -> reload_daemons(Table);
-        _ -> ok
-    end,
-    {noreply, Table};
-handle_cast(stop, Table) ->
-    {stop, normal, Table};
-handle_cast(Msg, Table) ->
-    ?LOG_ERROR("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
-    {stop, error, Table}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info({'EXIT', Port, Reason}, Table) ->
-    case ets:lookup(Table, Port) of
-        [] ->
-            ?LOG_INFO("Port ~p exited after stopping: ~p~n", [Port, Reason]);
-        [#daemon{status=stopping}] ->
-            true = ets:delete(Table, Port);
-        [#daemon{name=Name, status=restarting}=D] ->
-            ?LOG_INFO("Daemon ~p restarting after config change.", [Name]),
-            true = ets:delete(Table, Port),
-            {ok, Port2} = start_port(D#daemon.cmd),
-            true = ets:insert(Table, D#daemon{
-                port=Port2, status=running, kill=undefined, buf=[]
-            });
-        [#daemon{name=Name, status=halted}] ->
-            ?LOG_ERROR("Halted daemon process: ~p", [Name]);
-        [D] ->
-            ?LOG_ERROR("Invalid port state at exit: ~p", [D])
-    end,
-    {noreply, Table};
-handle_info({Port, closed}, Table) ->
-    handle_info({Port, {exit_status, closed}}, Table);
-handle_info({Port, {exit_status, Status}}, Table) ->
-    case ets:lookup(Table, Port) of
-        [] ->
-            ?LOG_ERROR("Unknown port ~p exiting ~p", [Port, Status]),
-            {stop, {error, unknown_port_died, Status}, Table};
-        [#daemon{name=Name, status=restarting}=D] ->
-            ?LOG_INFO("Daemon ~p restarting after config change.", [Name]),
-            true = ets:delete(Table, Port),
-            {ok, Port2} = start_port(D#daemon.cmd),
-            true = ets:insert(Table, D#daemon{
-                port=Port2, status=running, kill=undefined, buf=[]
-            }),
-            {noreply, Table};
-        [#daemon{status=stopping}=D] ->
-            % The configuration changed and this daemon is no
-            % longer needed.
-            ?LOG_DEBUG("Port ~p shut down.", [D#daemon.name]),
-            true = ets:delete(Table, Port),
-            {noreply, Table};
-        [D] ->
-            % Port died for an unknown reason. Check to see if it has
-            % died too many times or if we should boot it back up.
-            case should_halt([now() | D#daemon.errors]) of
-                {true, _} ->
-                    % Halting the process. We won't try and reboot
-                    % until the configuration changes.
-                    Fmt = "Daemon ~p halted with exit_status ~p",
-                    ?LOG_ERROR(Fmt, [D#daemon.name, Status]),
-                    D2 = D#daemon{status=halted, errors=nil, buf=nil},
-                    true = ets:insert(Table, D2),
-                    {noreply, Table};
-                {false, Errors} ->
-                    % We're guessing it was a transient error; this daemon
-                    % has behaved so far, so we'll give it another chance.
-                    Fmt = "Daemon ~p is being rebooted after exit_status ~p",
-                    ?LOG_INFO(Fmt, [D#daemon.name, Status]),
-                    true = ets:delete(Table, Port),
-                    {ok, Port2} = start_port(D#daemon.cmd),
-                    true = ets:insert(Table, D#daemon{
-                        port=Port2, status=running, kill=undefined,
-                                                errors=Errors, buf=[]
-                    }),
-                    {noreply, Table}
-            end;
-        _Else ->
-            throw(error)
-    end;
-handle_info({Port, {data, {noeol, Data}}}, Table) ->
-    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
-    true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
-    {noreply, Table};
-handle_info({Port, {data, {eol, Data}}}, Table) ->
-    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
-    Line = lists:reverse(Buf, Data),
-    % The first line echoed back is the kill command
-    % for when we go to get rid of the port. Lines after
-    % that are considered part of the stdio API.
-    case D#daemon.kill of
-        undefined ->
-            true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
-        _Else ->
-            D2 = case (catch ?JSON_DECODE(Line)) of
-                {invalid_json, Rejected} ->
-                    ?LOG_ERROR("Ignoring OS daemon request: ~p", [Rejected]),
-                    D;
-                JSON ->
-                    {ok, D3} = handle_port_message(D, JSON),
-                    D3
-            end,
-            true = ets:insert(Table, D2#daemon{buf=[]})
-    end,
-    {noreply, Table};
-handle_info({Port, Error}, Table) ->
-    ?LOG_ERROR("Unexpectd message from port ~p: ~p", [Port, Error]),
-    stop_port(Port),
-    [D] = ets:lookup(Table, Port),
-    true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
-    {noreply, Table};
-handle_info(Msg, Table) ->
-    ?LOG_ERROR("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
-    {stop, error, Table}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change(Section, Key, _, _, _) ->
-    gen_server:cast(?MODULE, {config_change, Section, Key}),
-    {ok, nil}.
-
-
-% Internal API
-
-%
-% Port management helpers
-%
-
-start_port(Command) ->
-    PrivDir = couch_util:priv_dir(),
-    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
-    Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, ?PORT_OPTIONS),
-    {ok, Port}.
-
-
-stop_port(#daemon{port=Port, kill=undefined}=D) ->
-    ?LOG_ERROR("Stopping daemon without a kill command: ~p", [D#daemon.name]),
-    catch port_close(Port);
-stop_port(#daemon{port=Port}=D) ->
-    ?LOG_DEBUG("Stopping daemon: ~p", [D#daemon.name]),
-    os:cmd(D#daemon.kill),
-    catch port_close(Port).
-
-
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
-    KVs = config:get(Section),
-    Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
-    Json = iolist_to_binary(?JSON_ENCODE({Data})),
-    port_command(Port, <<Json/binary, "\n">>),
-    {ok, Daemon};
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
-    Value = case config:get(Section, Key, null) of
-        null -> null;
-        String -> ?l2b(String)
-    end,
-    Json = iolist_to_binary(?JSON_ENCODE(Value)),
-    port_command(Port, <<Json/binary, "\n">>),
-    {ok, Daemon};
-handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
-    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
-    {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(Daemon, [<<"register">>, Sec, Key])
-                        when is_binary(Sec) andalso is_binary(Key) ->
-    Pattern = {?b2l(Sec), ?b2l(Key)},
-    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
-    {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
-    handle_log_message(Name, Msg, <<"info">>),
-    {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
-    Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
-    handle_log_message(Name, Msg, Level),
-    {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, Else) ->
-    ?LOG_ERROR("Daemon ~p made invalid request: ~p", [Name, Else]),
-    {ok, Daemon}.
-
-
-handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
-    ?LOG_ERROR("Invalid log message from daemon ~p: ~p", [Name, Msg]);
-handle_log_message(Name, Msg, <<"debug">>) ->
-    ?LOG_DEBUG("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"info">>) ->
-    ?LOG_INFO("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"error">>) ->
-    ?LOG_ERROR("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, Level) ->
-    ?LOG_ERROR("Invalid log level from daemon: ~p", [Level]),
-    ?LOG_INFO("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]).
-
-%
-% Daemon management helpers
-%
-
-reload_daemons(Table) ->
-    % List of daemons we want to have running.
-    Configured = lists:sort(config:get("os_daemons")),
-    
-    % Remove records for daemons that were halted.
-    MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
-    Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
-    ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
-    
-    % Stop daemons that are running
-    % Start newly configured daemons
-    MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
-    Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
-    ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
-    ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
-    ok.
-
-
-restart_daemons(Table, Sect, Key) ->
-    restart_daemons(Table, Sect, Key, ets:first(Table)).
-
-restart_daemons(_, _, _, '$end_of_table') ->
-    ok;
-restart_daemons(Table, Sect, Key, Port) ->
-    [D] = ets:lookup(Table, Port),
-    HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
-    HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
-    case HasSect or HasKey of
-        true ->
-            stop_port(D),
-            D2 = D#daemon{status=restarting, buf=nil},
-            true = ets:insert(Table, D2);
-        _ ->
-            ok
-    end,
-    restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
-    
-
-stop_os_daemons(_Table, []) ->
-    ok;
-stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
-    [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
-    [D] = ets:lookup(Table, Port),
-    case D#daemon.status of
-        halted ->
-            ets:delete(Table, Port);
-        _ ->
-            stop_port(D),
-            D2 = D#daemon{status=stopping, errors=nil, buf=nil},
-            true = ets:insert(Table, D2)
-    end,
-    stop_os_daemons(Table, Rest).
-    
-boot_os_daemons(_Table, []) ->
-    ok;
-boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
-    {ok, Port} = start_port(Cmd),
-    true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
-    boot_os_daemons(Table, Rest).
-    
-% Elements unique to the configured set need to be booted.
-find_to_boot([], _Rest, Acc) ->
-    % Nothing else configured.
-    Acc;
-find_to_boot([D | R1], [D | R2], Acc) ->
-    % Elements are equal, daemon already running.
-    find_to_boot(R1, R2, Acc);
-find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
-    find_to_boot(R1, A2, [D1 | Acc]);
-find_to_boot(A1, [_ | R2], Acc) ->
-    find_to_boot(A1, R2, Acc);
-find_to_boot(Rest, [], Acc) ->
-    % No more candidates for already running. Boot all.
-    Rest ++ Acc.
-
-% Elements unique to the running set need to be killed.
-find_to_stop([], Rest, Acc) ->
-    % The rest haven't been found, so they must all
-    % be ready to die.
-    Rest ++ Acc;
-find_to_stop([D | R1], [D | R2], Acc) ->
-    % Elements are equal, daemon already running.
-    find_to_stop(R1, R2, Acc);
-find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
-    find_to_stop(R1, A2, Acc);
-find_to_stop(A1, [D2 | R2], Acc) ->
-    find_to_stop(A1, R2, [D2 | Acc]);
-find_to_stop(_, [], Acc) ->
-    % No more running daemons to worry about.
-    Acc.
-
-should_halt(Errors) ->
-    RetryTimeCfg = config:get("os_daemon_settings", "retry_time", "5"),
-    RetryTime = list_to_integer(RetryTimeCfg),
-
-    Now = now(),
-    RecentErrors = lists:filter(fun(Time) ->
-        timer:now_diff(Now, Time) =< RetryTime * 1000000
-    end, Errors),
-
-    RetryCfg = config:get("os_daemon_settings", "max_retries", "3"),
-    Retries = list_to_integer(RetryCfg),
-
-    {length(RecentErrors) >= Retries, RecentErrors}.

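The stdio protocol handled by handle_port_message/2 above can be driven by any
program. A hedged escript sketch of a minimal daemon; the config section and
key queried here are hypothetical:

    #!/usr/bin/env escript
    %% Each request is one JSON line on stdout; replies to "get"
    %% arrive as one JSON line on stdin.
    main(_) ->
        io:format("[\"log\", \"daemon booted\"]~n"),
        io:format("[\"get\", \"couchdb\", \"database_dir\"]~n"),
        _Reply = io:get_line(""),   % the config value, JSON-encoded
        io:format("[\"register\", \"os_daemon_settings\"]~n"),
        loop().

    loop() ->
        timer:sleep(60000),  % stay alive; couch_os_daemons watches exit status
        loop().

Registering for a section means the daemon is restarted, not messaged, when
that section changes, per restart_daemons/3 above.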
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_os_process.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
deleted file mode 100644
index c6e6520..0000000
--- a/src/couch/src/couch_os_process.erl
+++ /dev/null
@@ -1,285 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_os_process).
--behaviour(gen_server).
-
--export([start_link/1, start_link/2, start_link/3, stop/1]).
--export([set_timeout/2, prompt/2, prompt_many/2, killer/1]).
--export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
-
--record(os_proc,
-    {command,
-     port,
-     writer,
-     reader,
-     timeout=5000,
-     idle
-    }).
-
-start_link(Command) ->
-    start_link(Command, []).
-start_link(Command, Options) ->
-    start_link(Command, Options, ?PORT_OPTIONS).
-start_link(Command, Options, PortOptions) ->
-    gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-% Read/Write API
-set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
-    ok = gen_server:call(Pid, {set_timeout, TimeOut}, infinity).
-
-% Used by couch_db_update_notifier.erl
-send(Pid, Data) ->
-    gen_server:cast(Pid, {send, Data}).
-
-prompt(Pid, Data) ->
-    case gen_server:call(Pid, {prompt, Data}, infinity) of
-        {ok, Result} ->
-            Result;
-        Error ->
-            ?LOG_ERROR("OS Process Error ~p :: ~p",[Pid,Error]),
-            throw(Error)
-    end.
-
-prompt_many(Pid, DataList) ->
-    OsProc = gen_server:call(Pid, get_os_proc, infinity),
-    true = port_connect(OsProc#os_proc.port, self()),
-    try
-        send_many(OsProc, DataList),
-        receive_many(length(DataList), OsProc, [])
-    after
-        % Can throw a badarg error when the OsProc Pid is dead or the port
-        % was closed by the readline function on error/timeout.
-        (catch port_connect(OsProc#os_proc.port, Pid)),
-        unlink(OsProc#os_proc.port),
-        drop_port_messages(OsProc#os_proc.port)
-    end.
-
-send_many(_OsProc, []) ->
-    ok;
-send_many(#os_proc{writer = Writer} = OsProc, [Data | Rest]) ->
-    Writer(OsProc, Data),
-    send_many(OsProc, Rest).
-
-receive_many(0, _OsProc, Acc) ->
-    {ok, lists:reverse(Acc)};
-receive_many(N, #os_proc{reader = Reader} = OsProc, Acc) ->
-    Line = Reader(OsProc),
-    receive_many(N - 1, OsProc, [Line | Acc]).
-
-drop_port_messages(Port) ->
-    receive
-    {Port, _} ->
-        drop_port_messages(Port)
-    after 0 ->
-        ok
-    end.
-
-% Utility functions for reading and writing
-% in custom functions
-writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
-    port_command(OsProc#os_proc.port, [Data, $\n]).
-
-readline(#os_proc{} = OsProc) ->
-    readline(OsProc, []).
-readline(#os_proc{port = Port} = OsProc, Acc) ->
-    receive
-    {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
-        readline(OsProc, <<Acc/binary,Data/binary>>);
-    {Port, {data, {noeol, Data}}} when is_binary(Data) ->
-        readline(OsProc, Data);
-    {Port, {data, {noeol, Data}}} ->
-        readline(OsProc, [Data|Acc]);
-    {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
-        [<<Acc/binary,Data/binary>>];
-    {Port, {data, {eol, Data}}} when is_binary(Data) ->
-        [Data];
-    {Port, {data, {eol, Data}}} ->
-        lists:reverse(Acc, Data);
-    {Port, Err} ->
-        catch port_close(Port),
-        throw({os_process_error, Err})
-    after OsProc#os_proc.timeout ->
-        catch port_close(Port),
-        throw({os_process_error, "OS process timed out."})
-    end.
-
-% Standard JSON functions
-writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
-    JsonData = ?JSON_ENCODE(Data),
-    ?LOG_DEBUG("OS Process ~p Input  :: ~s", [OsProc#os_proc.port, JsonData]),
-    true = writeline(OsProc, JsonData).
-
-readjson(OsProc) when is_record(OsProc, os_proc) ->
-    Line = iolist_to_binary(readline(OsProc)),
-    ?LOG_DEBUG("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
-    try
-        % Don't actually parse the whole JSON. Just try to see if it's
-        % a command or a doc map/reduce/filter/show/list/update output.
-        % If it's a command then parse the whole JSON and execute the
-        % command, otherwise return the raw JSON line to the caller.
-        pick_command(Line)
-    catch
-    throw:abort ->
-        {json, Line};
-    throw:{cmd, _Cmd} ->
-        case ?JSON_DECODE(Line) of
-        [<<"log">>, Msg] when is_binary(Msg) ->
-            % we got a message to log. Log it and continue
-            ?LOG_INFO("OS Process ~p Log :: ~s", [OsProc#os_proc.port, Msg]),
-            readjson(OsProc);
-        [<<"error">>, Id, Reason] ->
-            throw({error, {couch_util:to_existing_atom(Id),Reason}});
-        [<<"fatal">>, Id, Reason] ->
-            ?LOG_INFO("OS Process ~p Fatal Error :: ~s ~p",
-                [OsProc#os_proc.port, Id, Reason]),
-            throw({couch_util:to_existing_atom(Id),Reason});
-        _Result ->
-            {json, Line}
-        end
-    end.
-
-pick_command(Line) ->
-    json_stream_parse:events(Line, fun pick_command0/1).
-
-pick_command0(array_start) ->
-    fun pick_command1/1;
-pick_command0(_) ->
-    throw(abort).
-
-pick_command1(<<"log">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(<<"error">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(<<"fatal">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(_) ->
-    throw(abort).
-
-
-% gen_server API
-init([Command, Options, PortOptions]) ->
-    PrivDir = couch_util:priv_dir(),
-    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
-    V = config:get("query_server_config", "os_process_idle_limit", "300"),
-    IdleLimit = list_to_integer(V) * 1000,
-    BaseProc = #os_proc{
-        command=Command,
-        port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
-        writer=fun ?MODULE:writejson/2,
-        reader=fun ?MODULE:readjson/1,
-        idle=IdleLimit
-    },
-    KillCmd = iolist_to_binary(readline(BaseProc)),
-    Pid = self(),
-    ?LOG_DEBUG("OS Process Start :: ~p", [BaseProc#os_proc.port]),
-    spawn(fun() ->
-            % this ensures the real os process is killed when this process dies.
-            erlang:monitor(process, Pid),
-            receive _ -> ok end,
-            killer(?b2l(KillCmd))
-        end),
-    OsProc =
-    lists:foldl(fun(Opt, Proc) ->
-        case Opt of
-        {writer, Writer} when is_function(Writer) ->
-            Proc#os_proc{writer=Writer};
-        {reader, Reader} when is_function(Reader) ->
-            Proc#os_proc{reader=Reader};
-        {timeout, TimeOut} when is_integer(TimeOut) ->
-            Proc#os_proc{timeout=TimeOut}
-        end
-    end, BaseProc, Options),
-    {ok, OsProc, IdleLimit}.
-
-terminate(_Reason, #os_proc{port=Port}) ->
-    catch port_close(Port),
-    ok.
-
-handle_call(get_os_proc, _From, #os_proc{idle=Idle}=OsProc) ->
-    {reply, OsProc, OsProc, Idle};
-handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) ->
-    {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle};
-handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
-    #os_proc{writer=Writer, reader=Reader} = OsProc,
-    try
-        Writer(OsProc, Data),
-        {reply, {ok, Reader(OsProc)}, OsProc, Idle}
-    catch
-        throw:{error, OsError} ->
-            {reply, OsError, OsProc, Idle};
-        throw:{fatal, OsError} ->
-            {stop, normal, OsError, OsProc};
-        throw:OtherError ->
-            {stop, normal, OtherError, OsProc}
-    after
-        garbage_collect()
-    end.
-
-handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
-    try
-        Writer(OsProc, Data),
-        {noreply, OsProc, Idle}
-    catch
-        throw:OsError ->
-            ?LOG_ERROR("Failed sending data: ~p -> ~p", [Data, OsError]),
-            {stop, normal, OsProc}
-    end;
-handle_cast(stop, OsProc) ->
-    {stop, normal, OsProc};
-handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
-    ?LOG_DEBUG("OS Proc: Unknown cast: ~p", [Msg]),
-    {noreply, OsProc, Idle}.
-
-handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
-    gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
-    erlang:garbage_collect(),
-    {noreply, OsProc, Idle};
-handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
-    ?LOG_INFO("OS Process terminated normally", []),
-    {stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
-    ?LOG_ERROR("OS Process died with status: ~p", [Status]),
-    {stop, {exit_status, Status}, OsProc};
-handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
-    ?LOG_DEBUG("OS Proc: Unknown info: ~p", [Msg]),
-    {noreply, OsProc, Idle}.
-
-code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) ->
-    V = config:get("query_server_config","os_process_idle_limit","300"),
-    State = #os_proc{
-        command = Cmd,
-        port = Port,
-        writer = W,
-        reader = R,
-        timeout = Timeout,
-        idle = list_to_integer(V) * 1000
-    },
-    {ok, State};
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-killer(KillCmd) ->
-    receive _ ->
-        os:cmd(KillCmd)
-    after 1000 ->
-        ?MODULE:killer(KillCmd)
-    end.
-
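
The module above drove external query servers over a line-oriented JSON
protocol: one request line written per prompt, one response line read back.
A minimal sketch of that cycle from an Erlang shell, assuming a node with the
couch application loaded and using /usr/bin/cat as a stand-in query server
(cat merely echoes each JSON line back):

    {ok, Pid} = couch_os_process:start_link("/usr/bin/cat"),
    ok = couch_os_process:set_timeout(Pid, 5000),
    %% prompt/2 writes one JSON line and blocks for one line in reply;
    %% anything that is not a log/error/fatal command comes back as
    %% {json, RawLine} rather than as decoded terms.
    {json, Echoed} = couch_os_process:prompt(Pid, [<<"reset">>]),
    couch_os_process:stop(Pid).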

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_passwords.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
deleted file mode 100644
index d0f36cc..0000000
--- a/src/couch/src/couch_passwords.erl
+++ /dev/null
@@ -1,119 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_passwords).
-
--export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
--export([hash_admin_password/1, get_unhashed_admins/0]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(MAX_DERIVED_KEY_LENGTH, (1 bsl 32 - 1)).
--define(SHA1_OUTPUT_LENGTH, 20).
-
-%% legacy scheme, not used for new passwords.
--spec simple(binary(), binary()) -> binary().
-simple(Password, Salt) ->
-    ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).
-
-%% CouchDB utility functions
--spec hash_admin_password(binary()) -> binary().
-hash_admin_password(ClearPassword) ->
-    Iterations = config:get("couch_httpd_auth", "iterations", "10000"),
-    Salt = couch_uuids:random(),
-    DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword),
-                                        Salt, list_to_integer(Iterations)),
-    ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
-        ++ ?b2l(Salt) ++ ","
-        ++ Iterations).
-
--spec get_unhashed_admins() -> list().
-get_unhashed_admins() ->
-    lists:filter(
-        fun({_User, "-hashed-" ++ _}) ->
-            false; % already hashed
-        ({_User, "-pbkdf2-" ++ _}) ->
-            false; % already hashed
-        ({_User, _ClearPassword}) ->
-            true
-        end,
-    config:get("admins")).
-
-%% Current scheme, much stronger.
--spec pbkdf2(binary(), binary(), integer()) -> binary().
-pbkdf2(Password, Salt, Iterations) ->
-    {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
-    Result.
-
--spec pbkdf2(binary(), binary(), integer(), integer())
-    -> {ok, binary()} | {error, derived_key_too_long}.
-pbkdf2(_Password, _Salt, _Iterations, DerivedLength)
-    when DerivedLength > ?MAX_DERIVED_KEY_LENGTH ->
-    {error, derived_key_too_long};
-pbkdf2(Password, Salt, Iterations, DerivedLength) ->
-    L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
-    <<Bin:DerivedLength/binary,_/binary>> =
-        iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
-    {ok, ?l2b(couch_util:to_hex(Bin))}.
-
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist())
-    -> iolist().
-pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc)
-    when BlockIndex > BlockCount ->
-    lists:reverse(Acc);
-pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
-    Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
-    pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]).
-
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(),
-    binary(), binary()) -> binary().
-pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc)
-    when Iteration > Iterations ->
-    Acc;
-pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
-    InitialBlock = crypto:sha_mac(Password,
-        <<Salt/binary,BlockIndex:32/integer>>),
-    pbkdf2(Password, Salt, Iterations, BlockIndex, 2,
-        InitialBlock, InitialBlock);
-pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
-    Next = crypto:sha_mac(Password, Prev),
-    pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
-                   Next, crypto:exor(Next, Acc)).
-
-%% compare two lists for equality without short-circuiting, to avoid timing attacks.
--spec verify(string(), string(), integer()) -> boolean().
-verify([X|RestX], [Y|RestY], Result) ->
-    verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
-    Result == 0.
-
--spec verify(binary(), binary()) -> boolean();
-            (list(), list()) -> boolean().
-verify(<<X/binary>>, <<Y/binary>>) ->
-    verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
-    case length(X) == length(Y) of
-        true ->
-            verify(X, Y, 0);
-        false ->
-            false
-    end;
-verify(_X, _Y) -> false.
-
--spec ceiling(number()) -> integer().
-ceiling(X) ->
-    T = erlang:trunc(X),
-    case (X - T) of
-        Neg when Neg < 0 -> T;
-        Pos when Pos > 0 -> T + 1;
-        _ -> T
-    end.
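
The recursion above is PBKDF2 exactly as in RFC 2898: each block T_i is
U_1 xor ... xor U_c, with U_1 = HMAC-SHA1(Password, Salt || INT(i)) and
U_j = HMAC-SHA1(Password, U_{j-1}). A hedged sketch of deriving and checking
a key from a shell (the salt and iteration count here are illustrative; real
callers take the salt from couch_uuids:random/0):

    Password = <<"secret">>,
    Salt = <<"0123456789abcdef">>,  % illustrative only
    DerivedKey = couch_passwords:pbkdf2(Password, Salt, 10000),
    %% verify/2 walks both inputs in full before answering, so comparing a
    %% stored key with a freshly derived one does not leak timing information.
    true = couch_passwords:verify(DerivedKey,
        couch_passwords:pbkdf2(Password, Salt, 10000)).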

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
deleted file mode 100644
index 3ce8827..0000000
--- a/src/couch/src/couch_primary_sup.erl
+++ /dev/null
@@ -1,60 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_primary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
-    supervisor:start_link({local,couch_primary_services}, ?MODULE, []).
-
-init([]) ->
-    Children = [
-        {collation_driver,
-            {couch_drv, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_drv]},
-        {couch_task_status,
-            {couch_task_status, start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_task_status]},
-        {couch_server,
-            {couch_server, sup_start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_server]},
-        {couch_db_update_event,
-            {gen_event, start_link, [{local, couch_db_update}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replication_event,
-            {gen_event, start_link, [{local, couch_replication}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replicator_job_sup,
-            {couch_replicator_job_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_replicator_job_sup]}
-    ],
-    {ok, {{one_for_one, 10, 3600}, Children}}.
-
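
Since the supervisor registers itself locally as couch_primary_services, the
tree it builds can be inspected from any attached shell with standard OTP
calls, e.g.:

    %% One {Id, ChildPid, Type, Modules} tuple per child spec above.
    supervisor:which_children(couch_primary_services),
    supervisor:count_children(couch_primary_services).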

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_proc_manager.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
deleted file mode 100644
index 45b334f..0000000
--- a/src/couch/src/couch_proc_manager.erl
+++ /dev/null
@@ -1,307 +0,0 @@
--module(couch_proc_manager).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, 
-    code_change/3]).
-
--export([start_link/0, get_proc_count/0, new_proc/2, new_proc/4]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {
-    tab,
-    config
-}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_proc_count() ->
-    gen_server:call(?MODULE, get_proc_count).
-
-init([]) ->
-    process_flag(trap_exit, true),
-    ok = config:listen_for_changes(?MODULE, nil),
-    {ok, #state{
-        tab = ets:new(procs, [ordered_set, {keypos, #proc.pid}]),
-        config = get_proc_config()
-    }}.
-
-handle_call(get_table, _From, State) ->
-    {reply, State#state.tab, State};
-
-handle_call(get_proc_count, _From, State) ->
-    {reply, ets:info(State#state.tab, size), State};
-
-handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) ->
-    {Client, _} = From,
-    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
-    IterFun = fun(Proc, Acc) ->
-        case lists:member(DDocKey, Proc#proc.ddoc_keys) of
-            true ->
-                {stop, assign_proc(State#state.tab, Client, Proc)};
-            false ->
-                {ok, Acc}
-        end
-    end,
-    TeachFun = fun(Proc0, Acc) ->
-        try
-            {ok, Proc1} = teach_ddoc(DDoc, DDocKey, Proc0),
-            {stop, assign_proc(State#state.tab, Client, Proc1)}
-        catch _:_ ->
-            {ok, Acc}
-        end
-    end,
-    try iter_procs(State#state.tab, Lang, IterFun, nil) of
-    {not_found, _} ->
-        case iter_procs(State#state.tab, Lang, TeachFun, nil) of
-        {not_found, _} ->
-            spawn_link(?MODULE, new_proc, [From, Lang, DDoc, DDocKey]),
-            {noreply, State};
-        {ok, Proc} ->
-            {reply, {ok, Proc, State#state.config}, State}
-        end;
-    {ok, Proc} ->
-        {reply, {ok, Proc, State#state.config}, State}
-    catch error:Reason ->
-        ?LOG_ERROR("~p ~p ~p", [?MODULE, Reason, erlang:get_stacktrace()]),
-        {reply, {error, Reason}, State}
-    end;
-
-handle_call({get_proc, Lang}, {Client, _} = From, State) ->
-    IterFun = fun(Proc, _Acc) ->
-        {stop, assign_proc(State#state.tab, Client, Proc)}
-    end,
-    try iter_procs(State#state.tab, Lang, IterFun, nil) of
-    {not_found, _} ->
-        spawn_link(?MODULE, new_proc, [From, Lang]),
-        {noreply, State};
-    {ok, Proc} ->
-        {reply, {ok, Proc, State#state.config}, State}
-    catch error:Reason ->
-        ?LOG_ERROR("~p ~p ~p", [?MODULE, Reason, erlang:get_stacktrace()]),
-        {reply, {error, Reason}, State}
-    end;
-
-handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) ->
-    erlang:demonitor(Ref, [flush]),
-    % We need to check if the process is alive here, as the client could be
-    % handing us a #proc{} whose pid is already dead.  We would have already
-    % removed the #proc{} from our own table, so the alternative is to do a
-    % lookup in the table before the insert.  We don't know which approach
-    % is cheaper.
-    return_proc(State#state.tab, Proc),
-    {reply, true, State};
-
-handle_call(_Call, _From, State) ->
-    {reply, ignored, State}.
-
-handle_cast({os_proc_idle, Pid}, #state{tab=Tab}=State) ->
-    Limit = config:get("query_server_config", "os_process_soft_limit", "100"),
-    case ets:lookup(Tab, Pid) of
-        [#proc{client=nil}] ->
-            case ets:info(Tab, size) > list_to_integer(Limit) of
-                true ->
-                    ?LOG_INFO("Closing idle OS Process: ~p", [Pid]),
-                    ets:delete(Tab, Pid),
-                    case is_process_alive(Pid) of
-                        true ->
-                            unlink(Pid),
-                            gen_server:cast(Pid, stop);
-                        _ ->
-                            ok
-                    end;
-                _ ->
-                    ok
-            end;
-        _ ->
-            ok
-    end,
-    {noreply, State};
-handle_cast(reload_config, State) ->
-    {noreply, State#state{config = get_proc_config()}};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-
-handle_info(shutdown, State) ->
-    {stop, shutdown, State};
-
-handle_info({'EXIT', _, {ok, Proc0, {Client,_} = From}}, State) ->
-    link(Proc0#proc.pid),
-    Proc = assign_proc(State#state.tab, Client, Proc0),
-    gen_server:reply(From, {ok, Proc, State#state.config}),
-    {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, State) ->
-    ?LOG_INFO("~p ~p died ~p", [?MODULE, Pid, Reason]),
-    ets:delete(State#state.tab, Pid),
-    {noreply, State};
-
-handle_info({'DOWN', Ref, _, _, _Reason}, State) ->
-    case ets:match_object(State#state.tab, #proc{client=Ref, _='_'}) of
-    [] ->
-        ok;
-    [#proc{} = Proc] ->
-        return_proc(State#state.tab, Proc)
-    end,
-    {noreply, State};
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-
-handle_info(_Msg, State) ->
-    {noreply, State}.
-
-terminate(_Reason, #state{tab=Tab}) ->
-    ets:foldl(fun(#proc{pid=P}, _) -> couch_util:shutdown_sync(P) end, 0, Tab),
-    ok.
-
-code_change(_OldVsn, #state{tab = Tab} = State, _Extra) ->
-    NewTab = ets:new(procs, [ordered_set, {keypos, #proc.pid}]),
-    true = ets:insert(NewTab, ets:tab2list(Tab)),
-    true = ets:delete(Tab),
-    {ok, State#state{tab = NewTab}}.
-
-handle_config_change("query_server_config", _, _, _, _) ->
-    gen_server:cast(?MODULE, reload_config),
-    {ok, nil};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-iter_procs(Tab, Lang, Fun, Acc) when is_binary(Lang) ->
-    iter_procs(Tab, binary_to_list(Lang), Fun, Acc);
-iter_procs(Tab, Lang, Fun, Acc) ->
-    Pattern = #proc{lang=Lang, client=nil, _='_'},
-    MSpec = [{Pattern, [], ['$_']}],
-    case ets:select_reverse(Tab, MSpec, 25) of
-        '$end_of_table' ->
-            {not_found, Acc};
-        Continuation ->
-            iter_procs(Continuation, Fun, Acc)
-    end.
-
-iter_procs({[], Continuation0}, Fun, Acc) ->
-    case ets:select_reverse(Continuation0) of
-        '$end_of_table' ->
-            {not_found, Acc};
-        Continuation1 ->
-            iter_procs(Continuation1, Fun, Acc)
-    end;
-iter_procs({[Proc | Rest], Continuation}, Fun, Acc0) ->
-    case Fun(Proc, Acc0) of
-        {ok, Acc1} ->
-            iter_procs({Rest, Continuation}, Fun, Acc1);
-        {stop, Acc1} ->
-            {ok, Acc1}
-    end.
-
-new_proc(From, Lang) ->
-    case new_proc_int(From, Lang) of
-    {ok, Proc} ->
-        exit({ok, Proc, From});
-    Error ->
-        gen_server:reply(From, {error, Error})
-    end.
-
-new_proc(From, Lang, DDoc, DDocKey) ->
-    case new_proc_int(From, Lang) of
-    {ok, NewProc} ->
-        case proc_with_ddoc(DDoc, DDocKey, [NewProc]) of
-        {ok, Proc} ->
-            exit({ok, Proc, From});
-        {error, Reason} ->
-            gen_server:reply(From, {error, Reason})
-        end;
-    Error ->
-        gen_server:reply(From, {error, Error})
-    end.
-
-new_proc_int(From, Lang) when is_binary(Lang) ->
-    new_proc_int(From, binary_to_list(Lang));
-new_proc_int(From, Lang) when is_list(Lang) ->
-    case config:get("query_servers", Lang) of
-    undefined ->
-        case config:get("native_query_servers", Lang) of
-        undefined ->
-            gen_server:reply(From, {unknown_query_language, Lang});
-        SpecStr ->
-            {ok, {M,F,A}} = couch_util:parse_term(SpecStr),
-            {ok, Pid} = apply(M, F, A),
-            make_proc(Pid, Lang, M)
-        end;
-    Command ->
-        {ok, Pid} = couch_os_process:start_link(Command),
-        make_proc(Pid, Lang, couch_os_process)
-    end.
-
-make_proc(Pid, Lang, Mod) ->
-    Proc = #proc{
-        lang = Lang,
-        pid = Pid,
-        prompt_fun = {Mod, prompt},
-        prompt_many_fun = {Mod, prompt_many},
-        set_timeout_fun = {Mod, set_timeout},
-        stop_fun = {Mod, stop}
-    },
-    unlink(Pid),
-    {ok, Proc}.
-
-assign_proc(Tab, Client, #proc{client=nil}=Proc0) ->
-    Proc = Proc0#proc{client = erlang:monitor(process, Client)},
-    ets:insert(Tab, Proc),
-    Proc.
-
-return_proc(Tab, #proc{pid=Pid} = Proc) ->
-    case is_process_alive(Pid) of true ->
-        gen_server:cast(Pid, garbage_collect),
-        ets:insert(Tab, Proc#proc{client=nil});
-    false ->
-        ets:delete(Tab, Pid)
-    end.
-
-get_proc_config() ->
-    Limit = config:get("query_server_config", "reduce_limit", "true"),
-    Timeout = config:get("couchdb", "os_process_timeout", "5000"),
-    {[
-        {<<"reduce_limit">>, list_to_atom(Limit)},
-        {<<"timeout">>, list_to_integer(Timeout)}
-    ]}.
-
-proc_with_ddoc(DDoc, DDocKey, Procs) ->
-    Filter = fun(#proc{ddoc_keys=Keys}) -> not lists:member(DDocKey, Keys) end,
-    case lists:dropwhile(Filter, Procs) of
-    [DDocProc|_] ->
-        {ok, DDocProc};
-    [] ->
-        teach_any_proc(DDoc, DDocKey, Procs)
-    end.
-
-teach_any_proc(DDoc, DDocKey, [Proc|Rest]) ->
-    try
-        teach_ddoc(DDoc, DDocKey, Proc)
-    catch _:_ ->
-        teach_any_proc(DDoc, DDocKey, Rest)
-    end;
-teach_any_proc(_, _, []) ->
-    {error, noproc}.
-
-teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc{ddoc_keys=Keys}=Proc) ->
-    % Send the ddoc over the wire. We share the rev so the query server
-    % knows when to update its code, but it only keeps the latest copy
-    % of each ddoc around.
-    true = couch_query_servers:proc_prompt(Proc, [<<"ddoc">>, <<"new">>,
-        DDocId, couch_doc:to_json_obj(DDoc, [])]),
-    % we should remove any other ddoc keys for this docid
-    % because the query server overwrites without the rev
-    Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
-    % add ddoc to the proc
-    {ok, Proc#proc{ddoc_keys=[DDocKey|Keys2]}}.
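
The checkout protocol above is plain gen_server calls against the locally
registered manager. A hedged sketch of taking a proc for a language and
handing it back, assuming a running node with a javascript query server
configured:

    {ok, Proc, _QSConfig} = gen_server:call(couch_proc_manager,
        {get_proc, <<"javascript">>}, infinity),
    %% ... drive Proc through couch_query_servers:proc_prompt/2 ...
    true = gen_server:call(couch_proc_manager, {ret_proc, Proc}, infinity).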


[29/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/fabric


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/753e7462
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/753e7462
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/753e7462

Branch: refs/heads/1843-feature-bigcouch
Commit: 753e7462d7f5535ff275489042e7558f7bb0351f
Parents: ed8c2fb
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:41:11 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:41:11 2014 -0600

----------------------------------------------------------------------
 src/fabric/README.md                         |  24 -
 src/fabric/include/couch_db_tmp.hrl          | 296 -------------
 src/fabric/include/fabric.hrl                |  36 --
 src/fabric/src/fabric.app.src                |  49 --
 src/fabric/src/fabric.erl                    | 479 --------------------
 src/fabric/src/fabric_db_create.erl          | 159 -------
 src/fabric/src/fabric_db_delete.erl          |  93 ----
 src/fabric/src/fabric_db_doc_count.erl       |  66 ---
 src/fabric/src/fabric_db_info.erl            | 102 -----
 src/fabric/src/fabric_db_meta.erl            | 157 -------
 src/fabric/src/fabric_db_update_listener.erl | 150 -------
 src/fabric/src/fabric_dict.erl               |  49 --
 src/fabric/src/fabric_doc_attachments.erl    | 151 -------
 src/fabric/src/fabric_doc_missing_revs.erl   |  88 ----
 src/fabric/src/fabric_doc_open.erl           | 470 --------------------
 src/fabric/src/fabric_doc_open_revs.erl      | 305 -------------
 src/fabric/src/fabric_doc_update.erl         | 306 -------------
 src/fabric/src/fabric_group_info.erl         |  98 ----
 src/fabric/src/fabric_rpc.erl                | 516 ----------------------
 src/fabric/src/fabric_util.erl               | 171 -------
 src/fabric/src/fabric_view.erl               | 344 ---------------
 src/fabric/src/fabric_view_all_docs.erl      | 212 ---------
 src/fabric/src/fabric_view_changes.erl       | 422 ------------------
 src/fabric/src/fabric_view_map.erl           | 147 ------
 src/fabric/src/fabric_view_reduce.erl        | 127 ------
 25 files changed, 5017 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/README.md
----------------------------------------------------------------------
diff --git a/src/fabric/README.md b/src/fabric/README.md
deleted file mode 100644
index 6df941b..0000000
--- a/src/fabric/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## fabric
-
-Fabric is a collection of proxy functions for [CouchDB][1] operations in a cluster.  These functions are used in [BigCouch][2] as the remote procedure endpoints on each of the cluster nodes.
-
-For example, creating a database is a straightforward task in standalone CouchDB, but for BigCouch, each node that will store a shard for the database needs to receive and execute a fabric function.  The node handling the request also needs to compile the results from each of the nodes and respond accordingly to the client.
-
-Fabric is used in conjunction with 'Rexi', which is also an application within BigCouch.
-
-### Getting Started
-Fabric requires R13B03 or higher and can be built with [rebar][6], which comes bundled in the repository.
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
-[6]: http://github.com/basho/rebar
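
As a concrete illustration of the proxying described above, the
database-creation example looked roughly like this from any cluster node's
shell (a sketch; the q and n values are illustrative):

    ok = fabric:create_db(<<"mydb">>, [{q, "8"}, {n, "3"}]),
    {ok, Info} = fabric:get_db_info(<<"mydb">>),
    {ok, DocCount} = fabric:get_doc_count(<<"mydb">>).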

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/include/couch_db_tmp.hrl
----------------------------------------------------------------------
diff --git a/src/fabric/include/couch_db_tmp.hrl b/src/fabric/include/couch_db_tmp.hrl
deleted file mode 100644
index 96f3a2f..0000000
--- a/src/fabric/include/couch_db_tmp.hrl
+++ /dev/null
@@ -1,296 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(LOCAL_DOC_PREFIX, "_local/").
--define(DESIGN_DOC_PREFIX0, "_design").
--define(DESIGN_DOC_PREFIX, "_design/").
-
--define(MIN_STR, <<"">>).
--define(MAX_STR, <<255>>). % illegal UTF-8 string
-
--define(JSON_ENCODE(V), couch_util:json_encode(V)).
--define(JSON_DECODE(V), couch_util:json_decode(V)).
-
--define(b2l(V), binary_to_list(V)).
--define(l2b(V), list_to_binary(V)).
-
--define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-
--define(LOG_DEBUG(Format, Args), twig:log(debug, Format, Args)).
--define(LOG_INFO(Format, Args), twig:log(notice, Format, Args)).
--define(LOG_ERROR(Format, Args), twig:log(error, Format, Args)).
-
--record(rev_info,
-    {
-    rev,
-    seq = 0,
-    deleted = false,
-    body_sp = nil % stream pointer
-    }).
-
--record(doc_info,
-    {
-    id = <<"">>,
-    high_seq = 0,
-    revs = [] % rev_info
-    }).
-
--record(full_doc_info,
-    {id = <<"">>,
-    update_seq = 0,
-    deleted = false,
-    data_size = 0,
-    rev_tree = []
-    }).
-
--record(httpd,
-    {mochi_req,
-    peer,
-    method,
-    path_parts,
-    db_url_handlers,
-    user_ctx,
-    req_body = undefined,
-    design_url_handlers,
-    auth,
-    default_fun,
-    url_handlers
-    }).
-
-
--record(doc,
-    {
-    id = <<"">>,
-    revs = {0, []},
-
-    % the json body object.
-    body = {[]},
-
-    atts = [], % attachments
-
-    deleted = false,
-
-    % key/value tuple of meta information, provided when using special options:
-    % couch_db:open_doc(Db, Id, Options).
-    meta = []
-    }).
-
-
--record(att,
-    {
-    name,
-    type,
-    att_len,
-    disk_len, % length of the attachment in its identity form
-              % (that is, without a content encoding applied to it)
-              % differs from att_len when encoding /= identity
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity % currently supported values are:
-                      %     identity, gzip
-                      % additional values to support in the future:
-                      %     deflate, compress
-    }).
-
-
--record(user_ctx,
-    {
-    name=null,
-    roles=[],
-    handler
-    }).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 5).
-
--record(db_header,
-    {disk_version = ?LATEST_DISK_VERSION,
-     update_seq = 0,
-     unused = 0,
-     id_tree_state = nil,
-     seq_tree_state = nil,
-     local_tree_state = nil,
-     purge_seq = 0,
-     purged_docs = nil,
-     security_ptr = nil,
-     revs_limit = 1000
-    }).
-
--record(db,
-    {main_pid = nil,
-    update_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-    fd,
-    fd_monitor,
-    header = #db_header{},
-    committed_update_seq,
-    id_tree,
-    seq_tree,
-    local_tree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = undefined,
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    is_sys_db = false
-    }).
-
-
--record(view_query_args, {
-    start_key,
-    end_key,
-    start_docid = ?MIN_STR,
-    end_docid = ?MAX_STR,
-
-    direction = fwd,
-    inclusive_end=true, % aka a closed-interval
-
-    limit = 10000000000, % Huge number to simplify logic
-    skip = 0,
-
-    group_level = 0,
-
-    view_type = nil,
-    include_docs = false,
-    stale = false,
-    multi_get = false,
-    callback = nil,
-    list = nil,
-    keys = nil,
-    sorted = true,
-    extra = []
-}).
-
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
--record(extern_resp_args, {
-    code = 200,
-    stop = false,
-    data = <<>>,
-    ctype = "application/json",
-    headers = [],
-    json = nil
-}).
-
--record(group, {
-    sig=nil,
-    dbname,
-    fd=nil,
-    name,
-    def_lang,
-    design_options=[],
-    views,
-    id_btree=nil,
-    current_seq=0,
-    purge_seq=0,
-    query_server=nil,
-    waiting_delayed_commit=nil,
-    atts=[]
-    }).
-
--record(view,
-    {id_num,
-    map_names=[],
-    def,
-    btree=nil,
-    reduce_funs=[],
-    dbcopies=[],
-    options=[]
-    }).
-
--record(index_header,
-    {seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-    }).
-
--record(http_db, {
-    url,
-    auth = [],
-    resource = "",
-    headers = [
-        {"User-Agent", "CouchDB/"++couch:version()},
-        {"Accept", "application/json"},
-        {"Accept-Encoding", "gzip"}
-    ],
-    qs = [],
-    method = get,
-    body = nil,
-    options = [
-        {response_format,binary},
-        {inactivity_timeout, 30000}
-    ],
-    retries = 10,
-    pause = 500,
-    conn = nil
-}).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--record(changes_args, {
-    feed = "normal",
-    dir = fwd,
-    since = "0",
-    limit = 1000000000000000,
-    style = main_only,
-    heartbeat,
-    timeout,
-    filter,
-    include_docs = false
-}).
-
--record(proc, {
-    pid,
-    lang,
-    client = nil,
-    ddoc_keys = [],
-    prompt_fun,
-    set_timeout_fun,
-    stop_fun,
-    data_fun
-}).
-
--record(leaf, {
-    deleted,
-    ptr,
-    seq,
-    size = 0,
-    atts = []
-}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/include/fabric.hrl
----------------------------------------------------------------------
diff --git a/src/fabric/include/fabric.hrl b/src/fabric/include/fabric.hrl
deleted file mode 100644
index 94769bd..0000000
--- a/src/fabric/include/fabric.hrl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("eunit/include/eunit.hrl").
-
--record(collector, {
-    db_name=nil,
-    query_args,
-    callback,
-    counters,
-    buffer_size,
-    blocked = [],
-    total_rows = 0,
-    offset = 0,
-    rows = [],
-    skip,
-    limit,
-    keys,
-    os_proc,
-    reducer,
-    lang,
-    sorted,
-    user_acc
-}).
-
--record(view_row, {key, id, value, doc, worker}).
--record(change, {key, id, value, deleted=false, doc, worker}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric.app.src
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
deleted file mode 100644
index 5ac86ef..0000000
--- a/src/fabric/src/fabric.app.src
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, fabric, [
-    {description, "Routing and proxying layer for CouchDB cluster"},
-    {vsn, git},
-    {modules, [
-        fabric,
-        fabric_db_create,
-        fabric_db_delete,
-        fabric_db_doc_count,
-        fabric_db_info,
-        fabric_db_meta,
-        fabric_db_update_listener,
-        fabric_dict,
-        fabric_doc_attachments,
-        fabric_doc_missing_revs,
-        fabric_doc_open,
-        fabric_doc_open_revs,
-        fabric_doc_update,
-        fabric_group_info,
-        fabric_rpc,
-        fabric_util,
-        fabric_view,
-        fabric_view_all_docs,
-        fabric_view_changes,
-        fabric_view_map,
-        fabric_view_reduce
-    ]},
-    {registered, []},
-    {applications, [
-        kernel,
-        stdlib,
-        config,
-        couch,
-        rexi,
-        mem3,
-        twig
-    ]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
deleted file mode 100644
index 1f05ed6..0000000
--- a/src/fabric/src/fabric.erl
+++ /dev/null
@@ -1,479 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--define(ADMIN_CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>]}}).
-
-% DBs
--export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
-    delete_db/2, get_db_info/1, get_doc_count/1, set_revs_limit/3,
-    set_security/2, set_security/3, get_revs_limit/1, get_security/1,
-    get_security/2, get_all_security/1, get_all_security/2]).
-
-% Documents
--export([open_doc/3, open_revs/4, get_missing_revs/2, get_missing_revs/3,
-    update_doc/3, update_docs/3, purge_docs/2, att_receiver/2]).
-
-% Views
--export([all_docs/4, changes/4, query_view/3, query_view/4, query_view/6,
-    get_view_group_info/2]).
-
-% miscellany
--export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
-    cleanup_index_files/1]).
-
--include_lib("fabric/include/fabric.hrl").
-
--type dbname() :: (iodata() | #db{}).
--type docid() :: iodata().
--type revision() :: {integer(), binary()}.
--type callback() :: fun((any(), any()) -> {ok | stop, any()}).
--type json_obj() :: {[{binary() | atom(), any()}]}.
--type option() :: atom() | {atom(), any()}.
-
-%% db operations
-%% @equiv all_dbs(<<>>)
-all_dbs() ->
-    all_dbs(<<>>).
-
-%% @doc returns a list of all database names
--spec all_dbs(Prefix::iodata()) -> {ok, [binary()]}.
-all_dbs(Prefix) when is_binary(Prefix) ->
-    Length = byte_size(Prefix),
-    MatchingDbs = mem3:fold_shards(fun(#shard{dbname=DbName}, Acc) ->
-        case DbName of
-        <<Prefix:Length/binary, _/binary>> ->
-            [DbName | Acc];
-        _ ->
-            Acc
-        end
-    end, []),
-    {ok, lists:usort(MatchingDbs)};
-
-%% @equiv all_dbs(list_to_binary(Prefix))
-all_dbs(Prefix) when is_list(Prefix) ->
-    all_dbs(list_to_binary(Prefix)).
-
-%% @doc returns a property list of interesting properties
-%%      about the database such as `doc_count', `disk_size',
-%%      etc.
--spec get_db_info(dbname()) ->
-    {ok, [
-        {instance_start_time, binary()} |
-        {doc_count, non_neg_integer()} |
-        {doc_del_count, non_neg_integer()} |
-        {purge_seq, non_neg_integer()} |
-        {compact_running, boolean()} |
-        {disk_size, non_neg_integer()} |
-        {disk_format_version, pos_integer()}
-    ]}.
-get_db_info(DbName) ->
-    fabric_db_info:go(dbname(DbName)).
-
-%% @doc the number of docs in a database
--spec get_doc_count(dbname()) -> {ok, non_neg_integer()}.
-get_doc_count(DbName) ->
-    fabric_db_doc_count:go(dbname(DbName)).
-
-%% @equiv create_db(DbName, [])
-create_db(DbName) ->
-    create_db(DbName, []).
-
-%% @doc creates a database with the given name.
-%%
-%% Options can include values for q and n,
-%% for example `{q, "8"}' and `{n, "3"}', which
-%% control how many shards to split a database into
-%% and how many nodes each doc is copied to respectively.
-%%
--spec create_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-create_db(DbName, Options) ->
-    fabric_db_create:go(dbname(DbName), opts(Options)).
-
-%% @equiv delete_db([])
-delete_db(DbName) ->
-    delete_db(DbName, []).
-
-%% @doc delete a database
--spec delete_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
-delete_db(DbName, Options) ->
-    fabric_db_delete:go(dbname(DbName), opts(Options)).
-
-%% @doc provide an upper bound for the number of tracked document revisions
--spec set_revs_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
-    fabric_db_meta:set_revs_limit(dbname(DbName), Limit, opts(Options)).
-
-%% @doc retrieves the maximum number of document revisions
--spec get_revs_limit(dbname()) -> pos_integer() | no_return().
-get_revs_limit(DbName) ->
-    {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
-    try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj()) -> ok.
-set_security(DbName, SecObj) ->
-    fabric_db_meta:set_security(dbname(DbName), SecObj, [?ADMIN_CTX]).
-
-%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj(), [option()]) -> ok.
-set_security(DbName, SecObj, Options) ->
-    fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
-
-get_security(DbName) ->
-    get_security(DbName, [?ADMIN_CTX]).
-
-%% @doc retrieve the security object for a database
--spec get_security(dbname(), [option()]) -> json_obj() | no_return().
-get_security(DbName, Options) ->
-    {ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
-    try couch_db:get_security(Db) after catch couch_db:close(Db) end.
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname()) -> json_obj() | no_return().
-get_all_security(DbName) ->
-    get_all_security(DbName, []).
-
-%% @doc retrieve the security object for all shards of a database
--spec get_all_security(dbname(), [option()]) -> json_obj() | no_return().
-get_all_security(DbName, Options) ->
-    fabric_db_meta:get_all_security(dbname(DbName), opts(Options)).
-
-% doc operations
-
-%% @doc retrieve the doc with a given id
--spec open_doc(dbname(), docid(), [option()]) ->
-    {ok, #doc{}} |
-    {not_found, missing | deleted} |
-    {timeout, any()} |
-    {error, any()} |
-    {error, any(), any()}.
-open_doc(DbName, Id, Options) ->
-    fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options)).
-
-%% @doc retrieve a collection of revisions, possibly all
--spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
-    {ok, [{ok, #doc{}} | {{not_found,missing}, revision()}]} |
-    {timeout, any()} |
-    {error, any()} |
-    {error, any(), any()}.
-open_revs(DbName, Id, Revs, Options) ->
-    fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
-
-%% @equiv get_missing_revs(DbName, IdsRevs, [])
-get_missing_revs(DbName, IdsRevs) ->
-    get_missing_revs(DbName, IdsRevs, []).
-
-%% @doc retrieve missing revisions for a list of `{Id, Revs}'
--spec get_missing_revs(dbname(),[{docid(), [revision()]}], [option()]) ->
-    {ok, [{docid(), any(), [any()]}]}.
-get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
-    Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
-    fabric_doc_missing_revs:go(dbname(DbName), Sanitized, opts(Options)).
-
-%% @doc update a single doc
-%% @equiv update_docs(DbName,[Doc],Options)
--spec update_doc(dbname(), #doc{}, [option()]) ->
-    {ok, any()} | any().
-update_doc(DbName, Doc, Options) ->
-    case update_docs(DbName, [Doc], opts(Options)) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {accepted, [{accepted, NewRev}]} ->
-        {accepted, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        #doc{revs = {Pos, [RevId | _]}} = doc(Doc),
-        {ok, {Pos, RevId}}
-    end.
-
-%% @doc update a list of docs
--spec update_docs(dbname(), [#doc{}], [option()]) ->
-    {ok, any()} | any().
-update_docs(DbName, Docs, Options) ->
-    try
-        fabric_doc_update:go(dbname(DbName), docs(Docs), opts(Options)) of
-        {ok, Results} ->
-            {ok, Results};
-        {accepted, Results} ->
-            {accepted, Results};
-        Error ->
-            throw(Error)
-    catch {aborted, PreCommitFailures} ->
-        {aborted, PreCommitFailures}
-    end.
-
-purge_docs(_DbName, _IdsRevs) ->
-    not_implemented.
-
-%% @doc spawns a process to upload attachment data and
-%%      returns a function that shards can use to communicate
-%%      with the spawned middleman process
--spec att_receiver(#httpd{}, Length :: undefined | chunked | pos_integer() |
-        {unknown_transfer_encoding, any()}) ->
-    function() | binary().
-att_receiver(Req, Length) ->
-    fabric_doc_attachments:receiver(Req, Length).
-
-%% @doc retrieves all docs. Additional query parameters, such as `limit',
-%%      `start_key' and `end_key', `descending', and `include_docs', can
-%%      also be passed to further constrain the query. See <a href=
-%%      "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
-%%      all_docs</a> for details
--spec all_docs(dbname(), callback(), [] | tuple(), #mrargs{}) ->
-    {ok, [any()]}.
-all_docs(DbName, Callback, Acc0, #mrargs{} = QueryArgs) when
-        is_function(Callback, 2) ->
-    fabric_view_all_docs:go(dbname(DbName), QueryArgs, Callback, Acc0);
-
-%% @doc convenience function that takes a keylist rather than a record
-%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
-all_docs(DbName, Callback, Acc0, QueryArgs) ->
-    all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs)).
-
-
--spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(),any()}]) ->
-    {ok, any()}.
-changes(DbName, Callback, Acc0, #changes_args{}=Options) ->
-    Feed = Options#changes_args.feed,
-    fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
-
-%% @doc convenience function, takes keylist instead of record
-%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
-changes(DbName, Callback, Acc0, Options) ->
-    changes(DbName, Callback, Acc0, kl_to_changes_args(Options)).
-
-%% @equiv query_view(DbName, DesignName, ViewName, #mrargs{})
-query_view(DbName, DesignName, ViewName) ->
-    query_view(DbName, DesignName, ViewName, #mrargs{}).
-
-%% @equiv query_view(DbName, DesignName,
-%%                     ViewName, fun default_callback/2, [], QueryArgs)
-query_view(DbName, DesignName, ViewName, QueryArgs) ->
-    Callback = fun default_callback/2,
-    query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
-%% @doc execute a given view.
-%%      There are many additional query args that can be passed to a view,
-%%      see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
-%%      query args</a> for details.
--spec query_view(dbname(), #doc{} | binary(), iodata(), callback(), any(),
-        #mrargs{}) ->
-    any().
-query_view(DbName, Design, ViewName, Callback, Acc0, QueryArgs) ->
-    Db = dbname(DbName), View = name(ViewName),
-    case is_reduce_view(Db, Design, View, QueryArgs) of
-    true ->
-        Mod = fabric_view_reduce;
-    false ->
-        Mod = fabric_view_map
-    end,
-    Mod:go(Db, Design, View, QueryArgs, Callback, Acc0).
-
-%% @doc retrieve info about a view group, disk size, language, whether compaction
-%%      is running and so forth
--spec get_view_group_info(dbname(), #doc{} | docid()) ->
-    {ok, [
-        {signature, binary()} |
-        {language, binary()} |
-        {disk_size, non_neg_integer()} |
-        {compact_running, boolean()} |
-        {updater_running, boolean()} |
-        {waiting_commit, boolean()} |
-        {waiting_clients, non_neg_integer()} |
-        {update_seq, pos_integer()} |
-        {purge_seq, non_neg_integer()}
-    ]}.
-get_view_group_info(DbName, DesignId) ->
-    fabric_group_info:go(dbname(DbName), design_doc(DesignId)).
-
-%% @doc retrieve all the design docs from a database
--spec design_docs(dbname()) -> {ok, [json_obj()]}.
-design_docs(DbName) ->
-    QueryArgs = #mrargs{
-        start_key = <<"_design/">>,
-        end_key = <<"_design0">>,
-        include_docs=true
-    },
-    Callback = fun({total_and_offset, _, _}, []) ->
-        {ok, []};
-    ({row, {Props}}, Acc) ->
-        case couch_util:get_value(id, Props) of
-        <<"_design/", _/binary>> ->
-            {ok, [couch_util:get_value(doc, Props) | Acc]};
-        _ ->
-            {stop, Acc}
-        end;
-    (complete, Acc) ->
-        {ok, lists:reverse(Acc)};
-    ({error, Reason}, _Acc) ->
-        {error, Reason}
-    end,
-    fabric:all_docs(dbname(DbName), Callback, [], QueryArgs).
-
-%% @doc forces a reload of validation functions; this is performed after
-%%      design docs are updated
-%% NOTE: This function probably doesn't belong here as part of the API
--spec reset_validation_funs(dbname()) -> [reference()].
-reset_validation_funs(DbName) ->
-    [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
-        #shard{node=Node, name=Name} <-  mem3:shards(DbName)].
-
-%% @doc clean up index files for all Dbs
--spec cleanup_index_files() -> [ok].
-cleanup_index_files() ->
-    {ok, Dbs} = fabric:all_dbs(),
-    [cleanup_index_files(Db) || Db <- Dbs].
-
-%% @doc clean up index files for a specific db
--spec cleanup_index_files(dbname()) -> ok.
-cleanup_index_files(DbName) ->
-    {ok, DesignDocs} = fabric:design_docs(DbName),
-
-    ActiveSigs = lists:map(fun(#doc{id = GroupId}) ->
-        {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
-        binary_to_list(couch_util:get_value(signature, Info))
-    end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs]),
-
-    FileList = filelib:wildcard([config:get("couchdb", "view_index_dir"),
-        "/.shards/*/", couch_util:to_list(dbname(DbName)), ".[0-9]*_design/*"]),
-
-    DeleteFiles = if ActiveSigs =:= [] -> FileList; true ->
-        {ok, RegExp} = re:compile([$(, string:join(ActiveSigs, "|"), $)]),
-        lists:filter(fun(FilePath) ->
-            re:run(FilePath, RegExp, [{capture, none}]) == nomatch
-        end, FileList)
-    end,
-    [file:delete(File) || File <- DeleteFiles],
-    ok.
-
-%% some simple type validation and transcoding
-
-dbname(DbName) when is_list(DbName) ->
-    list_to_binary(DbName);
-dbname(DbName) when is_binary(DbName) ->
-    DbName;
-dbname(#db{name=Name}) ->
-    Name;
-dbname(DbName) ->
-    erlang:error({illegal_database_name, DbName}).
-
-name(Thing) ->
-    couch_util:to_binary(Thing).
-
-docid(DocId) when is_list(DocId) ->
-    list_to_binary(DocId);
-docid(DocId) when is_binary(DocId) ->
-    DocId;
-docid(DocId) ->
-    erlang:error({illegal_docid, DocId}).
-
-docs(Docs) when is_list(Docs) ->
-    [doc(D) || D <- Docs];
-docs(Docs) ->
-    erlang:error({illegal_docs_list, Docs}).
-
-doc(#doc{} = Doc) ->
-    Doc;
-doc({_} = Doc) ->
-    couch_doc:from_json_obj(Doc);
-doc(Doc) ->
-    erlang:error({illegal_doc_format, Doc}).
-
-design_doc(#doc{} = DDoc) ->
-    DDoc;
-design_doc(DocId) when is_list(DocId) ->
-    design_doc(list_to_binary(DocId));
-design_doc(<<"_design/", _/binary>> = DocId) ->
-    DocId;
-design_doc(GroupName) ->
-    <<"_design/", GroupName/binary>>.
-
-idrevs({Id, Revs}) when is_list(Revs) ->
-    {docid(Id), [rev(R) || R <- Revs]}.
-
-rev(Rev) when is_list(Rev); is_binary(Rev) ->
-    couch_doc:parse_rev(Rev);
-rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
-    Rev.
-
-%% @doc convenience method, useful when testing or calling fabric from the shell
-opts(Options) ->
-    add_option(user_ctx, add_option(io_priority, Options)).
-
-add_option(Key, Options) ->
-    case couch_util:get_value(Key, Options) of
-    undefined ->
-        case erlang:get(Key) of
-        undefined ->
-            Options;
-        Value ->
-            [{Key, Value} | Options]
-        end;
-    _ ->
-        Options
-    end.
-
-default_callback(complete, Acc) ->
-    {ok, lists:reverse(Acc)};
-default_callback(Row, Acc) ->
-    {ok, [Row | Acc]}.
-
-is_reduce_view(_, _, _, #mrargs{view_type=Reduce}) ->
-    Reduce =:= reduce.
-
-%% @doc convenience method for use in the shell, converts a keylist
-%%      to a `changes_args' record
-kl_to_changes_args(KeyList) ->
-    kl_to_record(KeyList, changes_args).
-
-%% @doc convenience method for use in the shell, converts a keylist
-%%      to a `mrargs' record
-kl_to_query_args(KeyList) ->
-    kl_to_record(KeyList, mrargs).
-
-%% @doc finds the index of the given Key in the record.
-%%      Note that record_info is only known at compile time,
-%%      so the code must be written this way. For each new
-%%      record type, add a case clause.
-lookup_index(Key,RecName) ->
-    Indexes =
-        case RecName of
-        changes_args ->
-            lists:zip(record_info(fields, changes_args),
-                        lists:seq(2, record_info(size, changes_args)));
-        mrargs ->
-            lists:zip(record_info(fields, mrargs),
-                        lists:seq(2, record_info(size, mrargs)))
-        end,
-    couch_util:get_value(Key, Indexes).
-
-%% @doc convert a keylist to record with given `RecName'
-%% @see lookup_index
-kl_to_record(KeyList,RecName) ->
-    Acc0 = case RecName of
-          changes_args -> #changes_args{};
-          mrargs -> #mrargs{}
-          end,
-    lists:foldl(fun({Key, Value}, Acc) ->
-        Index = lookup_index(couch_util:to_existing_atom(Key), RecName),
-        setelement(Index, Acc, Value)
-    end, Acc0, KeyList).
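
These keylist converters are what make the module usable directly from a
shell. A hedged sketch (database name and arguments illustrative):

    %% The proplist is turned into #mrargs{} via kl_to_query_args/1.
    Callback = fun(complete, Acc) -> {ok, lists:reverse(Acc)};
                  (Row, Acc) -> {ok, [Row | Acc]} end,
    {ok, Rows} = fabric:all_docs(<<"mydb">>, Callback, [],
        [{limit, 10}, {include_docs, true}]),
    %% kl_to_changes_args/1 backs the keylist form of changes/4 the same way.
    {ok, Changes} = fabric:changes(<<"mydb">>, Callback, [], [{limit, 10}]).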

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_create.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
deleted file mode 100644
index c8f2d45..0000000
--- a/src/fabric/src/fabric_db_create.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_create).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DBNAME_REGEX, "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$").
-
-%% @doc Create a new database, and all its partition files across the cluster.
-%%      Options is a proplist with user_ctx, n, q, and validate_name.
-go(DbName, Options) ->
-    case validate_dbname(DbName, Options) of
-    ok ->
-        {Shards, Doc} = generate_shard_map(DbName, Options),
-        case {create_shard_files(Shards), create_shard_db_doc(Doc)} of
-        {ok, {ok, Status}} ->
-            Status;
-        {file_exists, {ok, _}} ->
-            {error, file_exists};
-        {_, Error} ->
-            Error
-        end;
-    Error ->
-        Error
-    end.
-
-validate_dbname(DbName, Options) ->
-    case couch_util:get_value(validate_name, Options, true) of
-    false ->
-        ok;
-    true ->
-        case re:run(DbName, ?DBNAME_REGEX, [{capture,none}]) of
-        match ->
-            ok;
-        nomatch when DbName =:= <<"_users">> ->
-            ok;
-        nomatch when DbName =:= <<"_replicator">> ->
-            ok;
-        nomatch ->
-            {error, illegal_database_name}
-        end
-    end.
-
-generate_shard_map(DbName, Options) ->
-    {MegaSecs, Secs, _} = now(),
-    Suffix = "." ++ integer_to_list(MegaSecs*1000000 + Secs),
-    Shards = mem3:choose_shards(DbName, [{shard_suffix,Suffix} | Options]),
-    case mem3_util:open_db_doc(DbName) of
-    {ok, Doc} ->
-        % the DB already exists, and may have a different Suffix
-        ok;
-    {not_found, _} ->
-        Doc = make_document(Shards, Suffix)
-    end,
-    {Shards, Doc}.
-
-create_shard_files(Shards) ->
-    Workers = fabric_util:submit_jobs(Shards, create_db, []),
-    RexiMon = fabric_util:create_monitors(Shards),
-    try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
-    {error, file_exists} ->
-        file_exists;
-    _ ->
-        ok
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message(file_exists, _, _) ->
-    {error, file_exists};
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _, Workers) ->
-    case lists:filter(fun(S) -> S#shard.node =/= Node end, Workers) of
-    [] ->
-        {stop, ok};
-    RemainingWorkers ->
-        {ok, RemainingWorkers}
-    end;
-
-handle_message(_, Worker, Workers) ->
-    case lists:delete(Worker, Workers) of
-    [] ->
-        {stop, ok};
-    RemainingWorkers ->
-        {ok, RemainingWorkers}
-    end.
-
-create_shard_db_doc(Doc) ->
-    Shards = [#shard{node=N} || N <- mem3:nodes()],
-    RexiMon = fabric_util:create_monitors(Shards),
-    Workers = fabric_util:submit_jobs(Shards, create_shard_db_doc, [Doc]),
-    Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
-    try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
-    {timeout, _} ->
-        {error, timeout};
-    Else ->
-        Else
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
-    New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
-    maybe_stop(W, New);
-
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
-    maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
-handle_db_update(conflict, _, _) ->
-    % just fail when we get any conflicts
-    {error, conflict};
-
-handle_db_update(Msg, Worker, {W, Counters}) ->
-    maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
-    case fabric_dict:any(nil, Counters) of
-    true ->
-        {ok, {W, Counters}};
-    false ->
-        case lists:sum([1 || {_, ok} <- Counters]) of
-        W ->
-            {stop, ok};
-        NumOk when NumOk >= (W div 2 + 1) ->
-            {stop, accepted};
-        _ ->
-            {error, internal_server_error}
-        end
-    end.
-
-make_document([#shard{dbname=DbName}|_] = Shards, Suffix) ->
-    {RawOut, ByNodeOut, ByRangeOut} =
-    lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
-        Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
-            couch_util:to_hex(<<E:32/integer>>)]),
-        Node = couch_util:to_binary(N),
-        {[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
-            orddict:append(Range, Node, ByRange)}
-    end, {[], [], []}, Shards),
-    #doc{id=DbName, body = {[
-        {<<"shard_suffix">>, Suffix},
-        {<<"changelog">>, lists:sort(RawOut)},
-        {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
-        {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
-    ]}}.
-
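The write-quorum arithmetic in maybe_stop/2 above is worth a worked
example. A hand-checked sketch for the common case of W = 3 copies of
the shard-db document:

    %% All workers have replied (no nil counters remain):
    %%   3 x ok                          -> {stop, ok}
    %%   2 x ok, since 2 >= 3 div 2 + 1  -> {stop, accepted}
    %%   1 x ok                          -> {error, internal_server_error}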

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_delete.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
deleted file mode 100644
index 934f95b..0000000
--- a/src/fabric/src/fabric_db_delete.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_delete).
--export([go/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-%% @doc Options aren't used at all now in couch on delete but are left here
-%%      to be consistent with fabric_db_create for possible future use
-%% @see couch_server:delete_db
-%%
-go(DbName, _Options) ->
-    Shards = mem3:shards(DbName),
-    % delete doc from shard_db
-    try delete_shard_db_doc(DbName) of
-    {ok, ok} ->
-        ok;
-    {ok, accepted} ->
-        accepted;
-    {ok, not_found} ->
-        erlang:error(database_does_not_exist, DbName);
-    Error ->
-        Error
-    after
-        % delete the shard files
-        fabric_util:submit_jobs(Shards, delete_db, [])
-    end.
-
-delete_shard_db_doc(Doc) ->
-    Shards = [#shard{node=N} || N <- mem3:nodes()],
-    RexiMon = fabric_util:create_monitors(Shards),
-    Workers = fabric_util:submit_jobs(Shards, delete_shard_db_doc, [Doc]),
-    Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
-    try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
-    {timeout, _} ->
-        {error, timeout};
-    Else ->
-        Else
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
-    New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
-    maybe_stop(W, New);
-
-handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
-    maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
-handle_db_update(conflict, _, _) ->
-    % just fail when we get any conflicts
-    {error, conflict};
-
-handle_db_update(Msg, Worker, {W, Counters}) ->
-    maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
-
-maybe_stop(W, Counters) ->
-    case fabric_dict:any(nil, Counters) of
-    true ->
-        {ok, {W, Counters}};
-    false ->
-        {Ok,NotFound} = fabric_dict:fold(fun count_replies/3, {0,0}, Counters),
-        case {Ok + NotFound, Ok, NotFound} of
-        {W, 0, W} ->
-            {#shard{dbname=Name}, _} = hd(Counters),
-            twig:log(warn, "~p not_found ~s", [?MODULE, Name]),
-            {stop, not_found};
-        {W, _, _} ->
-            {stop, ok};
-        {N, M, _} when N >= (W div 2 + 1), M > 0 ->
-            {stop, accepted};
-        _ ->
-            {error, internal_server_error}
-        end
-    end.
-
-count_replies(_, ok, {Ok, NotFound}) ->
-    {Ok+1, NotFound};
-count_replies(_, not_found, {Ok, NotFound}) ->
-    {Ok, NotFound+1};
-count_replies(_, _, Acc) ->
-    Acc.
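Here maybe_stop/2 tallies the replies with count_replies/3 and then
matches on the triple {Ok + NotFound, Ok, NotFound}. A hand-checked
sketch for W = 3:

    %%   0 ok, 3 not_found -> {3,0,3} matches {W,0,W} -> {stop, not_found}
    %%   3 ok, 0 not_found -> {3,3,0} matches {W,_,_} -> {stop, ok}
    %%   2 ok, 0 not_found -> 2 >= 3 div 2 + 1 and 2 > 0 -> {stop, accepted}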

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_doc_count.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_doc_count.erl b/src/fabric/src/fabric_db_doc_count.erl
deleted file mode 100644
index dcc32aa..0000000
--- a/src/fabric/src/fabric_db_doc_count.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_doc_count).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
-    Shards = mem3:shards(DbName),
-    Workers = fabric_util:submit_jobs(Shards, get_doc_count, []),
-    RexiMon = fabric_util:create_monitors(Shards),
-    Acc0 = {fabric_dict:init(Workers, nil), 0},
-    try
-        fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
-    case fabric_util:remove_down_workers(Counters, NodeRef) of
-    {ok, NewCounters} ->
-        {ok, {NewCounters, Acc}};
-    error ->
-        {error, {nodedown, <<"progress not possible">>}}
-    end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
-    NewCounters = lists:keydelete(Shard, #shard.ref, Counters),
-    case fabric_view:is_progress_possible(NewCounters) of
-    true ->
-        {ok, {NewCounters, Acc}};
-    false ->
-        {error, Reason}
-    end;
-
-handle_message({ok, Count}, Shard, {Counters, Acc}) ->
-    case fabric_dict:lookup_element(Shard, Counters) of
-    undefined ->
-        % already heard from someone else in this range
-        {ok, {Counters, Acc}};
-    nil ->
-        C1 = fabric_dict:store(Shard, ok, Counters),
-        C2 = fabric_view:remove_overlapping_shards(Shard, C1),
-        case fabric_dict:any(nil, C2) of
-        true ->
-            {ok, {C2, Count+Acc}};
-        false ->
-            {stop, Count+Acc}
-        end
-    end;
-handle_message(_, _, Acc) ->
-    {ok, Acc}.
-
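The accumulator above adds exactly one count per shard range: the first
{ok, Count} for a range is recorded and the overlapping copies of that
range are dropped. A sketch for a database with two ranges:

    %% Acc starts at 0; n copies exist per range:
    %%   {ok, 100} from a copy of range R1 -> R1 duplicates removed, Acc = 100
    %%   {ok, 120} from a copy of range R2 -> no nil counters left -> {stop, 220}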

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_info.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
deleted file mode 100644
index 58139e8..0000000
--- a/src/fabric/src/fabric_db_info.erl
+++ /dev/null
@@ -1,102 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_info).
-
--export([go/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName) ->
-    Shards = mem3:shards(DbName),
-    Workers = fabric_util:submit_jobs(Shards, get_db_info, []),
-    RexiMon = fabric_util:create_monitors(Shards),
-    Acc0 = {fabric_dict:init(Workers, nil), []},
-    try
-        fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
-    case fabric_util:remove_down_workers(Counters, NodeRef) of
-    {ok, NewCounters} ->
-        {ok, {NewCounters, Acc}};
-    error ->
-        {error, {nodedown, <<"progress not possible">>}}
-    end;
-
-handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
-    NewCounters = lists:keydelete(Shard, #shard.ref, Counters),
-    case fabric_view:is_progress_possible(NewCounters) of
-    true ->
-        {ok, {NewCounters, Acc}};
-    false ->
-        {error, Reason}
-    end;
-
-handle_message({ok, Info}, #shard{dbname=Name} = Shard, {Counters, Acc}) ->
-    case fabric_dict:lookup_element(Shard, Counters) of
-    undefined ->
-        % already heard from someone else in this range
-        {ok, {Counters, Acc}};
-    nil ->
-        Seq = couch_util:get_value(update_seq, Info),
-        C1 = fabric_dict:store(Shard, Seq, Counters),
-        C2 = fabric_view:remove_overlapping_shards(Shard, C1),
-        case fabric_dict:any(nil, C2) of
-        true ->
-            {ok, {C2, [Info|Acc]}};
-        false ->
-            {stop, [
-                {db_name,Name},
-                {update_seq, fabric_view_changes:pack_seqs(C2)} |
-                merge_results(lists:flatten([Info|Acc]))
-            ]}
-        end
-    end;
-handle_message(_, _, Acc) ->
-    {ok, Acc}.
-
-merge_results(Info) ->
-    Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
-        orddict:new(), Info),
-    orddict:fold(fun
-        (doc_count, X, Acc) ->
-            [{doc_count, lists:sum(X)} | Acc];
-        (doc_del_count, X, Acc) ->
-            [{doc_del_count, lists:sum(X)} | Acc];
-        (purge_seq, X, Acc) ->
-            [{purge_seq, lists:sum(X)} | Acc];
-        (compact_running, X, Acc) ->
-            [{compact_running, lists:member(true, X)} | Acc];
-        (disk_size, X, Acc) ->
-            [{disk_size, lists:sum(X)} | Acc];
-        (other, X, Acc) ->
-            [{other, {merge_other_results(X)}} | Acc];
-        (disk_format_version, X, Acc) ->
-            [{disk_format_version, lists:max(X)} | Acc];
-        (_, _, Acc) ->
-            Acc
-    end, [{instance_start_time, <<"0">>}], Dict).
-
-merge_other_results(Results) ->
-    Dict = lists:foldl(fun({Props}, D) ->
-        lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
-    end, orddict:new(), Results),
-    orddict:fold(fun
-        (data_size, X, Acc) ->
-            [{data_size, lists:sum(X)} | Acc];
-        (_, _, Acc) ->
-            Acc
-    end, [], Dict).
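merge_results/1 folds the per-shard info lists into an orddict of
key -> [values] and then reduces each key with the appropriate operation
(sum, max, or member). A self-contained sketch of the same pattern,
runnable in any Erlang shell:

    Infos = [{doc_count, 10}, {doc_count, 32},
             {compact_running, false}, {compact_running, true}],
    D = lists:foldl(fun({K, V}, Acc) -> orddict:append(K, V, Acc) end,
                    orddict:new(), Infos),
    42   = lists:sum(orddict:fetch(doc_count, D)),         % summed across shards
    true = lists:member(true, orddict:fetch(compact_running, D)).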

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_meta.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_meta.erl b/src/fabric/src/fabric_db_meta.erl
deleted file mode 100644
index 78a3952..0000000
--- a/src/fabric/src/fabric_db_meta.erl
+++ /dev/null
@@ -1,157 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_meta).
-
--export([set_revs_limit/3, set_security/3, get_all_security/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(acc, {
-    workers,
-    finished,
-    num_workers
-}).
-
-
-set_revs_limit(DbName, Limit, Options) ->
-    Shards = mem3:shards(DbName),
-    Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
-    Handler = fun handle_revs_message/3,
-    Waiting = length(Workers) - 1,
-    case fabric_util:recv(Workers, #shard.ref, Handler, Waiting) of
-    {ok, ok} ->
-        ok;
-    Error ->
-        Error
-    end.
-
-handle_revs_message(ok, _, 0) ->
-    {stop, ok};
-handle_revs_message(ok, _, Waiting) ->
-    {ok, Waiting - 1};
-handle_revs_message(Error, _, _Waiting) ->
-    {error, Error}.
-
-
-set_security(DbName, SecObj, Options) ->
-    Shards = mem3:shards(DbName),
-    RexiMon = fabric_util:create_monitors(Shards),
-    Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
-    Handler = fun handle_set_message/3,
-    Acc = #acc{
-        workers=Workers,
-        finished=[],
-        num_workers=length(Workers)
-    },
-    try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
-    {ok, #acc{finished=Finished}} ->
-        case check_sec_set(length(Workers), Finished) of
-            ok -> ok;
-            Error -> Error
-        end;
-    Error ->
-        Error
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_set_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
-    RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
-    maybe_finish_set(Acc#acc{workers=RemWorkers});
-handle_set_message(ok, W, Acc) ->
-    NewAcc = Acc#acc{
-        workers = (Acc#acc.workers -- [W]),
-        finished = [W | Acc#acc.finished]
-    },
-    maybe_finish_set(NewAcc);
-handle_set_message(Error, W, Acc) ->
-    Dst = {W#shard.node, W#shard.name},
-    twig:log(err, "Failed to set security object on ~p :: ~p", [Dst, Error]),
-    NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
-    maybe_finish_set(NewAcc).
-
-maybe_finish_set(#acc{workers=[]}=Acc) ->
-    {stop, Acc};
-maybe_finish_set(#acc{finished=Finished, num_workers=NumWorkers}=Acc) ->
-    case check_sec_set(NumWorkers, Finished) of
-        ok -> {stop, Acc};
-        _ -> {ok, Acc}
-    end.
-
-check_sec_set(NumWorkers, SetWorkers) ->
-    try
-        check_sec_set_int(NumWorkers, SetWorkers)
-    catch throw:Reason ->
-        {error, Reason}
-    end.
-
-check_sec_set_int(NumWorkers, SetWorkers) ->
-    case length(SetWorkers) < ((NumWorkers div 2) + 1) of
-        true -> throw(no_majority);
-        false -> ok
-    end,
-    % Hack to reuse fabric_view:is_progress_possible/1
-    FakeCounters = [{S, 0} || S <- SetWorkers],
-    case fabric_view:is_progress_possible(FakeCounters) of
-        false -> throw(no_ring);
-        true -> ok
-    end,
-    ok.
-
-
-get_all_security(DbName, Options) ->
-    Shards = case proplists:get_value(shards, Options) of
-        Shards0 when is_list(Shards0) -> Shards0;
-        _ -> mem3:shards(DbName)
-    end,
-    Admin = [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}],
-    RexiMon = fabric_util:create_monitors(Shards),
-    Workers = fabric_util:submit_jobs(Shards, get_all_security, [Admin]),
-    Handler = fun handle_get_message/3,
-    Acc = #acc{
-        workers=Workers,
-        finished=[],
-        num_workers=length(Workers)
-    },
-    try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
-    {ok, #acc{finished=SecObjs}} when length(SecObjs) > length(Workers) / 2 ->
-        {ok, SecObjs};
-    {ok, _} ->
-        {error, no_majority};
-    Error ->
-        Error
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_get_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
-    RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
-    maybe_finish_get(Acc#acc{workers=RemWorkers});
-handle_get_message({Props}=SecObj, W, Acc) when is_list(Props) ->
-    NewAcc = Acc#acc{
-        workers = (Acc#acc.workers -- [W]),
-        finished = [{W, SecObj} | Acc#acc.finished]
-    },
-    maybe_finish_get(NewAcc);
-handle_get_message(Error, W, Acc) ->
-    Dst = {W#shard.node, W#shard.name},
-    twig:log(err, "Failed to get security object on ~p :: ~p", [Dst, Error]),
-    NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
-    maybe_finish_get(NewAcc).
-
-maybe_finish_get(#acc{workers=[]}=Acc) ->
-    {stop, Acc};
-maybe_finish_get(Acc) ->
-    {ok, Acc}.
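check_sec_set_int/2 above encodes the success condition for
set_security/3: a majority of workers must acknowledge, and the
acknowledging shards must still cover the whole hash ring. A
hand-checked sketch:

    %% With 6 shard copies, 6 div 2 + 1 = 4 acks are required, otherwise
    %% the result is {error, no_majority}; if the acks that did arrive
    %% leave a gap in the ring, fabric_view:is_progress_possible/1 fails
    %% and the result is {error, no_ring}.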

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_db_update_listener.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_db_update_listener.erl b/src/fabric/src/fabric_db_update_listener.erl
deleted file mode 100644
index 28e5972..0000000
--- a/src/fabric/src/fabric_db_update_listener.erl
+++ /dev/null
@@ -1,150 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_update_listener).
-
--export([go/4, start_update_notifier/1, stop/1, wait_db_updated/1]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
--record(worker, {
-    ref,
-    node,
-    pid
-}).
-
--record(acc, {
-    parent,
-    state
-}).
-
-go(Parent, ParentRef, DbName, Timeout) ->
-    Notifiers = start_update_notifiers(DbName),
-    MonRefs = lists:usort([{rexi_server, Node} || {Node, _Ref} <- Notifiers]),
-    RexiMon = rexi_monitor:start(MonRefs),
-    MonPid = start_cleanup_monitor(self(), Notifiers),
-    %% This is not a common pattern for rexi but to enable the calling
-    %% process to communicate via handle_message/3, we "fake" it as
-    %% a spawned worker.
-    Workers = [#worker{ref=ParentRef, pid=Parent} | Notifiers],
-    Resp = try
-        receive_results(Workers, #acc{parent=Parent, state=unset}, Timeout)
-    after
-        rexi_monitor:stop(RexiMon),
-        stop_cleanup_monitor(MonPid)
-    end,
-    case Resp of
-        {ok, _} -> ok;
-        {error, Error} -> erlang:error(Error);
-        Error -> erlang:error(Error)
-    end.
-
-start_update_notifiers(DbName) ->
-    EndPointDict = lists:foldl(fun(#shard{node=Node, name=Name}, Acc) ->
-        dict:append(Node, Name, Acc)
-    end, dict:new(), mem3:shards(DbName)),
-    lists:map(fun({Node, DbNames}) ->
-        Ref = rexi:cast(Node, {?MODULE, start_update_notifier, [DbNames]}),
-        #worker{ref=Ref, node=Node}
-    end, dict:to_list(EndPointDict)).
-
-% rexi endpoint
-start_update_notifier(DbNames) ->
-    {Caller, Ref} = get(rexi_from),
-    Fun = fun({_, X}) ->
-        case lists:member(X, DbNames) of
-            true -> erlang:send(Caller, {Ref, db_updated});
-            false -> ok
-        end
-    end,
-    Id = {couch_db_update_notifier, make_ref()},
-    ok = gen_event:add_sup_handler(couch_db_update, Id, Fun),
-    receive {gen_event_EXIT, Id, Reason} ->
-        rexi:reply({gen_event_EXIT, node(), Reason})
-    end.
-
-start_cleanup_monitor(Parent, Notifiers) ->
-    spawn(fun() ->
-        Ref = erlang:monitor(process, Parent),
-        cleanup_monitor(Parent, Ref, Notifiers)
-    end).
-
-stop_cleanup_monitor(MonPid) ->
-    MonPid ! {self(), stop}.
-
-cleanup_monitor(Parent, Ref, Notifiers) ->
-    receive
-        {'DOWN', Ref, _, _, _} ->
-            stop_update_notifiers(Notifiers);
-        {Parent, stop} ->
-            stop_update_notifiers(Notifiers);
-        Else ->
-            twig:log(error, "Unknown message in ~w :: ~w", [?MODULE, Else]),
-            stop_update_notifiers(Notifiers),
-            exit(Parent, {unknown_message, Else})
-    end.
-
-stop_update_notifiers(Notifiers) ->
-    [rexi:kill(Node, Ref) || #worker{node=Node, ref=Ref} <- Notifiers].
-
-stop({Pid, Ref}) ->
-    erlang:send(Pid, {Ref, done}).
-
-wait_db_updated({Pid, Ref}) ->
-    MonRef = erlang:monitor(process, Pid),
-    erlang:send(Pid, {Ref, get_state}),
-    receive
-        {state, Pid, State} ->
-            erlang:demonitor(MonRef, [flush]),
-            State;
-        {'DOWN', MonRef, process, Pid, Reason} ->
-            throw({changes_feed_died, Reason})
-    end.
-
-receive_results(Workers, Acc0, Timeout) ->
-    Fun = fun handle_message/3,
-    case rexi_utils:recv(Workers, #worker.ref, Fun, Acc0, infinity, Timeout) of
-    {timeout, #acc{state=updated}=Acc} ->
-        receive_results(Workers, Acc, Timeout);
-    {timeout, #acc{state=waiting}=Acc} ->
-        erlang:send(Acc#acc.parent, {state, self(), timeout}),
-        receive_results(Workers, Acc#acc{state=unset}, Timeout);
-    {timeout, Acc} ->
-        receive_results(Workers, Acc#acc{state=timeout}, Timeout);
-    {_, Acc} ->
-        {ok, Acc}
-    end.
-
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, _Acc) ->
-    {error, {nodedown, Node}};
-handle_message({rexi_EXIT, _Reason}, Worker, _Acc) ->
-    {error, {worker_exit, Worker}};
-handle_message({gen_event_EXIT, Node, Reason}, _Worker, _Acc) ->
-    {error, {gen_event_exit, Node, Reason}};
-handle_message(db_updated, _Worker, #acc{state=waiting}=Acc) ->
-    % propagate message to calling controller
-    erlang:send(Acc#acc.parent, {state, self(), updated}),
-    {ok, Acc#acc{state=unset}};
-handle_message(db_updated, _Worker, Acc) ->
-    {ok, Acc#acc{state=updated}};
-handle_message(get_state, _Worker, #acc{state=unset}=Acc) ->
-    {ok, Acc#acc{state=waiting}};
-handle_message(get_state, _Worker, Acc) ->
-    erlang:send(Acc#acc.parent, {state, self(), Acc#acc.state}),
-    {ok, Acc#acc{state=unset}};
-handle_message(done, _, _) ->
-    {stop, ok}.
-
-
-
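A hedged sketch of how a caller might drive this listener; the
functions on the caller's side are hypothetical, but the go/4,
wait_db_updated/1 and stop/1 calls match the exports above:

    Ref = make_ref(),
    Pid = spawn_link(fabric_db_update_listener, go,
                     [self(), Ref, <<"mydb">>, 30000]),
    case fabric_db_update_listener:wait_db_updated({Pid, Ref}) of
        updated -> respond_with_changes();   % hypothetical caller code
        timeout -> send_heartbeat()          % hypothetical caller code
    end,
    fabric_db_update_listener:stop({Pid, Ref}).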

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_dict.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_dict.erl b/src/fabric/src/fabric_dict.erl
deleted file mode 100644
index a9d7fea..0000000
--- a/src/fabric/src/fabric_dict.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_dict).
--compile(export_all).
-
-% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
-% have >> 100 shards, so a private interface is a good idea. - APK June 2010
-
-init(Keys, InitialValue) ->
-    orddict:from_list([{Key, InitialValue} || Key <- Keys]).
-
-
-decrement_all(Dict) ->
-    [{K,V-1} || {K,V} <- Dict].
-
-store(Key, Value, Dict) ->
-    orddict:store(Key, Value, Dict).
-
-erase(Key, Dict) ->
-    orddict:erase(Key, Dict).
-
-update_counter(Key, Incr, Dict0) ->
-    orddict:update_counter(Key, Incr, Dict0).
-
-
-lookup_element(Key, Dict) ->
-    couch_util:get_value(Key, Dict).
-
-size(Dict) ->
-    orddict:size(Dict).
-
-any(Value, Dict) ->
-    lists:keymember(Value, 2, Dict).
-
-filter(Fun, Dict) ->
-    orddict:filter(Fun, Dict).
-
-fold(Fun, Acc0, Dict) ->
-    orddict:fold(Fun, Acc0, Dict).
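The counters idiom used throughout fabric is easiest to see in the
shell. A sketch, with shard records replaced by plain atoms for brevity:

    D0 = fabric_dict:init([s1, s2, s3], nil),    % nothing heard yet
    D1 = fabric_dict:store(s1, ok, D0),
    true  = fabric_dict:any(nil, D1),            % still waiting on s2, s3
    D2 = fabric_dict:store(s3, ok, fabric_dict:store(s2, ok, D1)),
    false = fabric_dict:any(nil, D2).            % every worker has replied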

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_doc_attachments.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_doc_attachments.erl b/src/fabric/src/fabric_doc_attachments.erl
deleted file mode 100644
index b29e20f..0000000
--- a/src/fabric/src/fabric_doc_attachments.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_attachments).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% couch api calls
--export([receiver/2]).
-
-receiver(_Req, undefined) ->
-    <<"">>;
-receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
-    exit({unknown_transfer_encoding, Unknown});
-receiver(Req, chunked) ->
-    MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
-    fun(4096, ChunkFun, ok) ->
-        write_chunks(MiddleMan, ChunkFun)
-    end;
-receiver(_Req, 0) ->
-    <<"">>;
-receiver(Req, Length) when is_integer(Length) ->
-    maybe_send_continue(Req),
-    Middleman = spawn(fun() -> middleman(Req, Length) end),
-    fun() ->
-        Middleman ! {self(), gimme_data},
-        receive
-            {Middleman, Data} ->
-                rexi:reply(attachment_chunk_received),
-                iolist_to_binary(Data)
-        after 600000 ->
-            exit(timeout)
-        end
-    end;
-receiver(_Req, Length) ->
-    exit({length_not_integer, Length}).
-
-%%
-%% internal
-%%
-
-maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
-    case couch_httpd:header_value(Req, "expect") of
-    undefined ->
-        ok;
-    Expect ->
-        case string:to_lower(Expect) of
-        "100-continue" ->
-            MochiReq:start_raw_response({100, gb_trees:empty()});
-        _ ->
-            ok
-        end
-    end.
-
-write_chunks(MiddleMan, ChunkFun) ->
-    MiddleMan ! {self(), gimme_data},
-    receive
-    {MiddleMan, ChunkRecordList} ->
-        rexi:reply(attachment_chunk_received),
-        case flush_chunks(ChunkRecordList, ChunkFun) of
-        continue -> write_chunks(MiddleMan, ChunkFun);
-        done -> ok
-        end
-    after 600000 ->
-        exit(timeout)
-    end.
-
-flush_chunks([], _ChunkFun) ->
-    continue;
-flush_chunks([{0, _}], _ChunkFun) ->
-    done;
-flush_chunks([Chunk | Rest], ChunkFun) ->
-    ChunkFun(Chunk, ok),
-    flush_chunks(Rest, ChunkFun).
-
-receive_unchunked_attachment(_Req, 0) ->
-    ok;
-receive_unchunked_attachment(Req, Length) ->
-    receive {MiddleMan, go} ->
-        Data = couch_httpd:recv(Req, 0),
-        MiddleMan ! {self(), Data}
-    end,
-    receive_unchunked_attachment(Req, Length - size(Data)).
-
-middleman(Req, chunked) ->
-    % spawn a process to actually receive the uploaded data
-    RcvFun = fun(ChunkRecord, ok) ->
-        receive {From, go} -> From ! {self(), ChunkRecord} end, ok
-    end,
-    Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
-
-    % take requests from the DB writers and get data from the receiver
-    N = erlang:list_to_integer(config:get("cluster","n")),
-    middleman_loop(Receiver, N, [], []);
-
-middleman(Req, Length) ->
-    Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
-    N = erlang:list_to_integer(config:get("cluster","n")),
-    middleman_loop(Receiver, N, [], []).
-
-middleman_loop(Receiver, N, Counters0, ChunkList0) ->
-    receive {From, gimme_data} ->
-        % Figure out how far along this writer (From) is in the list
-        ListIndex = case fabric_dict:lookup_element(From, Counters0) of
-        undefined -> 0;
-        I -> I
-        end,
-
-        % Talk to the receiver to get another chunk if necessary
-        ChunkList1 = if ListIndex == length(ChunkList0) ->
-            Receiver ! {self(), go},
-            receive
-                {Receiver, ChunkRecord} ->
-                    ChunkList0 ++ [ChunkRecord]
-            end;
-        true -> ChunkList0 end,
-
-        % reply to the writer
-        Reply = lists:nthtail(ListIndex, ChunkList1),
-        From ! {self(), Reply},
-
-        % Update the counter for this writer
-        Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
-
-        % Drop any chunks that have been sent to all writers
-        Size = fabric_dict:size(Counters1),
-        NumToDrop = lists:min([I || {_, I} <- Counters1]),
-
-        {ChunkList3, Counters3} =
-        if Size == N andalso NumToDrop > 0 ->
-            ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
-            Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
-            {ChunkList2, Counters2};
-        true ->
-            {ChunkList1, Counters1}
-        end,
-
-        middleman_loop(Receiver, N, Counters3, ChunkList3)
-    after 10000 ->
-        ok
-    end.
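The buffer trimming at the end of middleman_loop/4 only drops a chunk
once all N writers have consumed it. A hand-checked sketch for N = 3:

    %% Counters = [{W1, 2}, {W2, 1}, {W3, 1}]   % chunks consumed per writer
    %% NumToDrop = lists:min([2, 1, 1]) = 1     % chunk 1 was seen by everyone
    %% -> drop one chunk from ChunkList and subtract 1 from every counter.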

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_doc_missing_revs.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_doc_missing_revs.erl b/src/fabric/src/fabric_doc_missing_revs.erl
deleted file mode 100644
index ec154ee..0000000
--- a/src/fabric/src/fabric_doc_missing_revs.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_missing_revs).
-
--export([go/2, go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName, AllIdsRevs) ->
-    go(DbName, AllIdsRevs, []).
-
-go(DbName, AllIdsRevs, Options) ->
-    Workers = lists:map(fun({#shard{name=Name, node=Node} = Shard, IdsRevs}) ->
-        Ref = rexi:cast(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs,
-            Options]}),
-        Shard#shard{ref=Ref}
-    end, group_idrevs_by_shard(DbName, AllIdsRevs)),
-    ResultDict = dict:from_list([{Id, {{nil,Revs},[]}} || {Id, Revs} <- AllIdsRevs]),
-    RexiMon = fabric_util:create_monitors(Workers),
-    Acc0 = {length(Workers), ResultDict, Workers},
-    try
-        fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
-    NewWorkers = [W || #shard{node=Node} = W <- Workers, Node =/= NodeRef],
-    skip_message({fabric_dict:size(NewWorkers), ResultDict, NewWorkers});
-handle_message({rexi_EXIT, _}, Worker, {W, D, Workers}) ->
-    skip_message({W-1,D,lists:delete(Worker, Workers)});
-handle_message({ok, Results}, _Worker, {1, D0, _}) ->
-    D = update_dict(D0, Results),
-    {stop, dict:fold(fun force_reply/3, [], D)};
-handle_message({ok, Results}, Worker, {WaitingCount, D0, Workers}) ->
-    D = update_dict(D0, Results),
-    case dict:fold(fun maybe_reply/3, {stop, []}, D) of
-    continue ->
-        % still haven't heard about some Ids
-        {ok, {WaitingCount - 1, D, lists:delete(Worker,Workers)}};
-    {stop, FinalReply} ->
-        % finished, stop the rest of the jobs
-        fabric_util:cleanup(lists:delete(Worker,Workers)),
-        {stop, FinalReply}
-    end.
-
-force_reply(Id, {{nil,Revs}, Anc}, Acc) ->
-    % never heard about this ID, assume it's missing
-    [{Id, Revs, Anc} | Acc];
-force_reply(_, {[], _}, Acc) ->
-    Acc;
-force_reply(Id, {Revs, Anc}, Acc) ->
-    [{Id, Revs, Anc} | Acc].
-
-maybe_reply(_, _, continue) ->
-    continue;
-maybe_reply(_, {{nil, _}, _}, _) ->
-    continue;
-maybe_reply(_, {[], _}, {stop, Acc}) ->
-    {stop, Acc};
-maybe_reply(Id, {Revs, Anc}, {stop, Acc}) ->
-    {stop, [{Id, Revs, Anc} | Acc]}.
-
-group_idrevs_by_shard(DbName, IdsRevs) ->
-    dict:to_list(lists:foldl(fun({Id, Revs}, D0) ->
-        lists:foldl(fun(Shard, D1) ->
-            dict:append(Shard, {Id, Revs}, D1)
-        end, D0, mem3:shards(DbName,Id))
-    end, dict:new(), IdsRevs)).
-
-update_dict(D0, KVs) ->
-    lists:foldl(fun({K,V,A}, D1) -> dict:store(K, {V,A}, D1) end, D0, KVs).
-
-skip_message({0, Dict, _Workers}) ->
-    {stop, dict:fold(fun force_reply/3, [], Dict)};
-skip_message(Acc) ->
-    {ok, Acc}.
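A hedged sketch of the call shape, with the ids and revs purely
illustrative:

    %% fabric_doc_missing_revs:go(<<"db">>, [{<<"docid">>, [R1, R2]}])
    %% -> {ok, [{<<"docid">>, MissingRevs, PossibleAncestors}]}
    %% MissingRevs is the subset of [R1, R2] that no shard copy has; ids
    %% whose revs are all present are omitted (the {[], _} clause above).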

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_doc_open.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
deleted file mode 100644
index caa389e..0000000
--- a/src/fabric/src/fabric_doc_open.erl
+++ /dev/null
@@ -1,470 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open).
-
--export([go/3]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--record(acc, {
-    dbname,
-    workers,
-    r,
-    state,
-    replies,
-    q_reply
-}).
-
-
-go(DbName, Id, Options) ->
-    Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_doc,
-        [Id, [deleted|Options]]),
-    SuppressDeletedDoc = not lists:member(deleted, Options),
-    N = mem3:n(DbName),
-    R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
-    Acc0 = #acc{
-        dbname = DbName,
-        workers = Workers,
-        r = erlang:min(N, list_to_integer(R)),
-        state = r_not_met,
-        replies = []
-    },
-    RexiMon = fabric_util:create_monitors(Workers),
-    try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
-    {ok, #acc{}=Acc} ->
-        Reply = handle_response(Acc),
-        format_reply(Reply, SuppressDeletedDoc);
-    Error ->
-        Error
-    after
-        rexi_monitor:stop(RexiMon)
-    end.
-
-handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
-    NewWorkers = [W || #shard{node=N}=W <- Acc#acc.workers, N /= Node],
-    case NewWorkers of
-    [] ->
-        {stop, Acc#acc{workers=[]}};
-    _ ->
-        {ok, Acc#acc{workers=NewWorkers}}
-    end;
-handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
-    NewWorkers = lists:delete(Worker, Acc#acc.workers),
-    case NewWorkers of
-    [] ->
-        {stop, Acc#acc{workers=[]}};
-    _ ->
-        {ok, Acc#acc{workers=NewWorkers}}
-    end;
-handle_message(Reply, Worker, Acc) ->
-    NewReplies = fabric_util:update_counter(Reply, 1, Acc#acc.replies),
-    NewAcc = Acc#acc{replies = NewReplies},
-    case is_r_met(Acc#acc.workers, NewReplies, Acc#acc.r) of
-    {true, QuorumReply} ->
-        fabric_util:cleanup(lists:delete(Worker, Acc#acc.workers)),
-        {stop, NewAcc#acc{workers=[], state=r_met, q_reply=QuorumReply}};
-    wait_for_more ->
-        NewWorkers = lists:delete(Worker, Acc#acc.workers),
-        {ok, NewAcc#acc{workers=NewWorkers}};
-    no_more_workers ->
-        {stop, NewAcc#acc{workers=[]}}
-    end.
-
-handle_response(#acc{state=r_met, replies=Replies, q_reply=QuorumReply}=Acc) ->
-    case {Replies, fabric_util:remove_ancestors(Replies, [])} of
-        {[_], [_]} ->
-            % Complete agreement amongst all copies
-            QuorumReply;
-        {[_|_], [{_, {QuorumReply, _}}]} ->
-            % Any divergent replies are ancestors of the QuorumReply,
-            % repair the document asynchronously
-            spawn(fun() -> read_repair(Acc) end),
-            QuorumReply;
-        _Else ->
-            % real disagreement amongst the workers, block for the repair
-            read_repair(Acc)
-    end;
-handle_response(Acc) ->
-    read_repair(Acc).
-
-is_r_met(Workers, Replies, R) ->
-    case lists:dropwhile(fun({_,{_, Count}}) -> Count < R end, Replies) of
-    [{_,{QuorumReply, _}} | _] ->
-        {true, QuorumReply};
-    [] when length(Workers) > 1 ->
-        wait_for_more;
-    [] ->
-        no_more_workers
-    end.
-
-read_repair(#acc{dbname=DbName, replies=Replies}) ->
-    Docs = [Doc || {_, {{ok, #doc{}=Doc}, _}} <- Replies],
-    case Docs of
-    % omit local docs from read repair
-    [#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
-        choose_reply(Docs);
-    [#doc{id=Id} | _] ->
-        Ctx = #user_ctx{roles=[<<"_admin">>]},
-        Opts = [replicated_changes, {user_ctx, Ctx}],
-        Res = fabric:update_docs(DbName, Docs, Opts),
-        twig:log(notice, "read_repair ~s ~s ~p", [DbName, Id, Res]),
-        choose_reply(Docs);
-    [] ->
-        % Try hard to return some sort of information
-        % to the client.
-        Values = [V || {_, {V, _}} <- Replies],
-        case lists:member({not_found, missing}, Values) of
-            true ->
-                {not_found, missing};
-            false when length(Values) > 0 ->
-                % Sort for stability in responses in
-                % case we have some weird condition
-                hd(lists:sort(Values));
-            false ->
-                {error, read_failure}
-        end
-    end.
-
-choose_reply(Docs) ->
-    % Sort descending by {not deleted, rev}. This should match
-    % the logic of couch_doc:to_doc_info/1.
-    [Winner | _] = lists:sort(fun(DocA, DocB) ->
-        InfoA = {not DocA#doc.deleted, DocA#doc.revs},
-        InfoB = {not DocB#doc.deleted, DocB#doc.revs},
-        InfoA > InfoB
-    end, Docs),
-    {ok, Winner}.
-
-format_reply({ok, #doc{deleted=true}}, true) ->
-    {not_found, deleted};
-format_reply(Else, _) ->
-    Else.
-
-
-is_r_met_test() ->
-    Workers0 = [],
-    Workers1 = [nil],
-    Workers2 = [nil,nil],
-
-    % Successful cases
-
-    ?assertEqual(
-        {true, foo},
-        is_r_met([], [fabric_util:kv(foo,2)], 2)
-    ),
-
-    ?assertEqual(
-        {true, foo},
-        is_r_met([], [fabric_util:kv(foo,3)], 2)
-    ),
-
-    ?assertEqual(
-        {true, foo},
-        is_r_met([], [fabric_util:kv(foo,1)], 1)
-    ),
-
-    ?assertEqual(
-        {true, foo},
-        is_r_met([], [fabric_util:kv(foo,2), fabric_util:kv(bar,1)], 2)
-    ),
-
-    ?assertEqual(
-        {true, bar},
-        is_r_met([], [fabric_util:kv(bar,1), fabric_util:kv(bar,2)], 2)
-    ),
-
-    ?assertEqual(
-        {true, bar},
-        is_r_met([], [fabric_util:kv(bar,2), fabric_util:kv(foo,1)], 2)
-    ),
-
-    % Not met, but wait for more messages
-
-    ?assertEqual(
-        wait_for_more,
-        is_r_met(Workers2, [fabric_util:kv(foo,1)], 2)
-    ),
-
-    ?assertEqual(
-        wait_for_more,
-        is_r_met(Workers2, [fabric_util:kv(foo,2)], 3)
-    ),
-
-    ?assertEqual(
-        wait_for_more,
-        is_r_met(Workers2, [fabric_util:kv(foo,1), fabric_util:kv(bar,1)], 2)
-    ),
-
-    % Not met, bail out
-
-    ?assertEqual(
-        no_more_workers,
-        is_r_met(Workers0, [fabric_util:kv(foo,1)], 2)
-    ),
-
-    ?assertEqual(
-        no_more_workers,
-        is_r_met(Workers1, [fabric_util:kv(foo,1)], 2)
-    ),
-
-    ?assertEqual(
-        no_more_workers,
-        is_r_met(Workers1, [fabric_util:kv(foo,1), fabric_util:kv(bar,1)], 2)
-    ),
-
-    ?assertEqual(
-        no_more_workers,
-        is_r_met(Workers1, [fabric_util:kv(foo,2)], 3)
-    ),
-
-    ok.
-
-handle_message_down_test() ->
-    Node0 = 'foo@localhost',
-    Node1 = 'bar@localhost',
-    Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
-    Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
-    Workers0 = [#shard{node=Node0} || _ <- [a, b]],
-    Worker1 = #shard{node=Node1},
-    Workers1 = Workers0 ++ [Worker1],
-
-    % Stop when no more workers are left
-    ?assertEqual(
-        {stop, #acc{workers=[]}},
-        handle_message(Down0, nil, #acc{workers=Workers0})
-    ),
-
-    % Continue when we have more workers
-    ?assertEqual(
-        {ok, #acc{workers=[Worker1]}},
-        handle_message(Down0, nil, #acc{workers=Workers1})
-    ),
-
-    % A second DOWN removes the remaining workers
-    ?assertEqual(
-        {stop, #acc{workers=[]}},
-        handle_message(Down1, nil, #acc{workers=[Worker1]})
-    ),
-
-    ok.
-
-handle_message_exit_test() ->
-    Exit = {rexi_EXIT, nil},
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-
-    % Only removes the specified worker
-    ?assertEqual(
-        {ok, #acc{workers=[Worker1]}},
-        handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
-    ),
-
-    ?assertEqual(
-        {ok, #acc{workers=[Worker0]}},
-        handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
-    ),
-
-    % We bail if it was the last worker
-    ?assertEqual(
-        {stop, #acc{workers=[]}},
-        handle_message(Exit, Worker0, #acc{workers=[Worker0]})
-    ),
-
-    ok.
-
-handle_message_reply_test() ->
-    start_meck_(),
-    meck:expect(rexi, kill, fun(_, _) -> ok end),
-
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-    Worker2 = #shard{ref=erlang:make_ref()},
-    Workers = [Worker0, Worker1, Worker2],
-    Acc0 = #acc{workers=Workers, r=2, replies=[]},
-
-    % Test that we continue when we haven't met R yet
-    ?assertEqual(
-        {ok, Acc0#acc{
-            workers=[Worker0, Worker1],
-            replies=[fabric_util:kv(foo,1)]
-        }},
-        handle_message(foo, Worker2, Acc0)
-    ),
-
-    ?assertEqual(
-        {ok, Acc0#acc{
-            workers=[Worker0, Worker1],
-            replies=[fabric_util:kv(bar,1), fabric_util:kv(foo,1)]
-        }},
-        handle_message(bar, Worker2, Acc0#acc{
-            replies=[fabric_util:kv(foo,1)]
-        })
-    ),
-
-    % Test that we don't get a quorum when R isn't met: q_reply
-    % isn't set, state remains unchanged, and {stop, NewAcc} is
-    % returned. The assertions here are a bit subtle.
-
-    ?assertEqual(
-        {stop, Acc0#acc{workers=[],replies=[fabric_util:kv(foo,1)]}},
-        handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
-    ),
-
-    ?assertEqual(
-        {stop, Acc0#acc{
-            workers=[],
-            replies=[fabric_util:kv(bar,1), fabric_util:kv(foo,1)]
-        }},
-        handle_message(bar, Worker0, Acc0#acc{
-            workers=[Worker0],
-            replies=[fabric_util:kv(foo,1)]
-        })
-    ),
-
-    % Check that when R is met we stop with a new state and
-    % a q_reply.
-
-    ?assertEqual(
-        {stop, Acc0#acc{
-            workers=[],
-            replies=[fabric_util:kv(foo,2)],
-            state=r_met,
-            q_reply=foo
-        }},
-        handle_message(foo, Worker1, Acc0#acc{
-            workers=[Worker0, Worker1],
-            replies=[fabric_util:kv(foo,1)]
-        })
-    ),
-
-    ?assertEqual(
-        {stop, Acc0#acc{
-            workers=[],
-            r=1,
-            replies=[fabric_util:kv(foo,1)],
-            state=r_met,
-            q_reply=foo
-        }},
-        handle_message(foo, Worker0, Acc0#acc{r=1})
-    ),
-
-    ?assertEqual(
-        {stop, Acc0#acc{
-            workers=[],
-            replies=[fabric_util:kv(bar,1), fabric_util:kv(foo,2)],
-            state=r_met,
-            q_reply=foo
-        }},
-        handle_message(foo, Worker0, Acc0#acc{
-            workers=[Worker0],
-            replies=[fabric_util:kv(bar,1), fabric_util:kv(foo,1)]
-        })
-    ),
-
-    stop_meck_(),
-    ok.
-
-read_repair_test() ->
-    start_meck_(),
-    meck:expect(twig, log, fun(_, _, _) -> ok end),
-
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    NFM = {not_found, missing},
-
-    % Test when we have actual doc data to repair
-
-    meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
-    Acc0 = #acc{
-        dbname = <<"name">>,
-        replies = [fabric_util:kv(Foo1,1)]
-    },
-    ?assertEqual(Foo1, read_repair(Acc0)),
-
-    meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
-    Acc1 = #acc{
-        dbname = <<"name">>,
-        replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
-    },
-    ?assertEqual(Foo2, read_repair(Acc1)),
-
-    % Test when we have nothing but errors
-
-    Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
-    ?assertEqual(NFM, read_repair(Acc2)),
-
-    Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
-    ?assertEqual(NFM, read_repair(Acc3)),
-
-    Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
-    ?assertEqual(bar, read_repair(Acc4)),
-
-    stop_meck_(),
-    ok.
-
-handle_response_quorum_met_test() ->
-    start_meck_(),
-    meck:expect(twig, log, fun(_, _, _) -> ok end),
-    meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
-
-    BasicOkAcc = #acc{
-        state=r_met,
-        replies=[fabric_util:kv(Foo1,2)],
-        q_reply=Foo1
-    },
-    ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-
-    WithAncestorsAcc = #acc{
-        state=r_met,
-        replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
-        q_reply=Foo2
-    },
-    ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-
-    % This also checks when the quorum isn't the most recent
-    % revision.
-    DeeperWinsAcc = #acc{
-        state=r_met,
-        replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
-        q_reply=Foo1
-    },
-    ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-
-    % Check that we return the proper doc based on rev
-    % (ie, pos is equal)
-    BiggerRevWinsAcc = #acc{
-        state=r_met,
-        replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
-        q_reply=Bar1
-    },
-    ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc)),
-
-    % r_not_met is a proxy to read_repair so we rely on
-    % read_repair_test for those conditions.
-
-    stop_meck_(),
-    ok.
-
-
-start_meck_() ->
-    meck:new([twig, rexi, fabric]).
-
-stop_meck_() ->
-    meck:unload([twig, rexi, fabric]).
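One detail worth calling out from go/3 at the top of this module: the
read quorum R defaults to the cluster quorum and is capped at N. A
hand-checked sketch, assuming mem3:quorum/1 returns n div 2 + 1:

    %% n = 3 copies, no r option passed:
    %%   R = min(3, 3 div 2 + 1) = 2
    %% so is_r_met/3 stops as soon as two identical replies arrive, e.g.
    %% replies = [fabric_util:kv(foo, 2)] -> {true, foo}.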


[09/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_auth.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
deleted file mode 100644
index a747869..0000000
--- a/src/couch/src/couch_httpd_auth.erl
+++ /dev/null
@@ -1,376 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_auth).
--include_lib("couch/include/couch_db.hrl").
-
--export([default_authentication_handler/1,special_test_authentication_handler/1]).
--export([cookie_authentication_handler/1]).
--export([null_authentication_handler/1]).
--export([proxy_authentification_handler/1]).
--export([cookie_auth_header/2]).
--export([handle_session_req/1]).
-
--import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
-
-special_test_authentication_handler(Req) ->
-    case header_value(Req, "WWW-Authenticate") of
-    "X-Couch-Test-Auth " ++ NamePass ->
-        % NamePass is a colon separated string: "joe schmoe:a password".
-        [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
-        case {Name, Pass} of
-        {"Jan Lehnardt", "apple"} -> ok;
-        {"Christopher Lenz", "dog food"} -> ok;
-        {"Noah Slater", "biggiesmalls endian"} -> ok;
-        {"Chris Anderson", "mp3"} -> ok;
-        {"Damien Katz", "pecan pie"} -> ok;
-        {_, _} ->
-            throw({unauthorized, <<"Name or password is incorrect.">>})
-        end,
-        Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
-    _ ->
-        % No X-Couch-Test-Auth credentials sent, give admin access so the
-        % previous authentication can be restored after the test
-        Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-    end.
-
-basic_name_pw(Req) ->
-    AuthorizationHeader = header_value(Req, "Authorization"),
-    case AuthorizationHeader of
-    "Basic " ++ Base64Value ->
-        case re:split(base64:decode(Base64Value), ":",
-                      [{return, list}, {parts, 2}]) of
-        ["_", "_"] ->
-            % special name and pass to be logged out
-            nil;
-        [User, Pass] ->
-            {User, Pass};
-        _ ->
-            nil
-        end;
-    _ ->
-        nil
-    end.
-
-default_authentication_handler(Req) ->
-    case basic_name_pw(Req) of
-    {User, Pass} ->
-        case couch_auth_cache:get_user_creds(User) of
-            nil ->
-                throw({unauthorized, <<"Name or password is incorrect.">>});
-            UserProps ->
-                case authenticate(?l2b(Pass), UserProps) of
-                    true ->
-                        Req#httpd{user_ctx=#user_ctx{
-                            name=?l2b(User),
-                            roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                        }};
-                    _Else ->
-                        throw({unauthorized, <<"Name or password is incorrect.">>})
-                end
-        end;
-    nil ->
-        case couch_server:has_admins() of
-        true ->
-            Req;
-        false ->
-            case config:get("couch_httpd_auth", "require_valid_user", "false") of
-                "true" -> Req;
-                % If no admins, and no user required, then everyone is admin!
-                % Yay, admin party!
-                _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-            end
-        end
-    end.
-
-null_authentication_handler(Req) ->
-    Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
-
-%% @doc proxy auth handler.
-%
-% This handler allows creation of a userCtx object for a user authenticated
-% remotely. The client simply passes specific headers to CouchDB and the
-% handler creates the userCtx. Header names can be defined in local.ini.
-% By default they are:
-%
-%   * X-Auth-CouchDB-UserName : contains the username (x_auth_username in
-%   the couch_httpd_auth section)
-%   * X-Auth-CouchDB-Roles : contains the user roles, a comma-separated
-%   list of roles (x_auth_roles in the couch_httpd_auth section)
-%   * X-Auth-CouchDB-Token : token to authenticate the authorization
-%   (x_auth_token in the couch_httpd_auth section). This token is an
-%   HMAC-SHA1 computed from the secret key and the username. The secret
-%   key should be the same on the client and the CouchDB node; it is the
-%   "secret" key in the couch_httpd_auth section of the ini. This token
-%   is optional unless the value of the proxy_use_secret key in the
-%   couch_httpd_auth section of the ini is true.
-%
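-%   Illustrative example (all values hypothetical): a trusted proxy in front
-%   of CouchDB could send
-%
-%     X-Auth-CouchDB-UserName: joe
-%     X-Auth-CouchDB-Roles: staff,reader
-%     X-Auth-CouchDB-Token: couch_util:to_hex(crypto:sha_mac(Secret, "joe"))
-%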
-proxy_authentification_handler(Req) ->
-    case proxy_auth_user(Req) of
-        nil -> Req;
-        Req2 -> Req2
-    end.
-    
-proxy_auth_user(Req) ->
-    XHeaderUserName = config:get("couch_httpd_auth", "x_auth_username",
-                                "X-Auth-CouchDB-UserName"),
-    XHeaderRoles = config:get("couch_httpd_auth", "x_auth_roles",
-                                "X-Auth-CouchDB-Roles"),
-    XHeaderToken = config:get("couch_httpd_auth", "x_auth_token",
-                                "X-Auth-CouchDB-Token"),
-    case header_value(Req, XHeaderUserName) of
-        undefined -> nil;
-        UserName ->
-            Roles = case header_value(Req, XHeaderRoles) of
-                undefined -> [];
-                Else ->
-                    [?l2b(R) || R <- string:tokens(Else, ",")]
-            end,
-            case config:get("couch_httpd_auth", "proxy_use_secret", "false") of
-                "true" ->
-                    case config:get("couch_httpd_auth", "secret", nil) of
-                        nil ->
-                            Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
-                        Secret ->
-                            ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
-                            case header_value(Req, XHeaderToken) of
-                                Token when Token == ExpectedToken ->
-                                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
-                                                            roles=Roles}};
-                                _ -> nil
-                            end
-                    end;
-                _ ->
-                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
-            end
-    end.
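
For reference, the token check in proxy_auth_user/1 pairs with a client-side
computation like the following sketch (the helper name and example values are
hypothetical; crypto:sha_mac/2 and couch_util:to_hex/1 are the same calls
used above):

    %% Hypothetical helper: compute the X-Auth-CouchDB-Token a proxy would
    %% send for UserName, given the shared [couch_httpd_auth] secret.
    proxy_token(Secret, UserName) ->
        couch_util:to_hex(crypto:sha_mac(Secret, UserName)).

    %% e.g. proxy_token("0123456789abcdef", "jan") yields the hex digest
    %% that proxy_auth_user/1 compares against ExpectedToken.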
-
-
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
-    case MochiReq:get_cookie_value("AuthSession") of
-    undefined -> Req;
-    [] -> Req;
-    Cookie ->
-        [User, TimeStr, HashStr] = try
-            AuthSession = couch_util:decodeBase64Url(Cookie),
-            [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
-                                     [{return, list}, {parts, 3}])
-        catch
-            _:_Error ->
-                Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
-                throw({bad_request, Reason})
-        end,
-        % Verify expiry and hash
-        CurrentTime = make_cookie_time(),
-        case config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            ?LOG_DEBUG("cookie auth secret is not set",[]),
-            Req;
-        SecretStr ->
-            Secret = ?l2b(SecretStr),
-            case couch_auth_cache:get_user_creds(User) of
-            nil -> Req;
-            UserProps ->
-                UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
-                FullSecret = <<Secret/binary, UserSalt/binary>>,
-                ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
-                Hash = ?l2b(HashStr),
-                Timeout = list_to_integer(
-                    config:get("couch_httpd_auth", "timeout", "600")),
-                ?LOG_DEBUG("timeout ~p", [Timeout]),
-                case (catch erlang:list_to_integer(TimeStr, 16)) of
-                    TimeStamp when CurrentTime < TimeStamp + Timeout ->
-                        case couch_passwords:verify(ExpectedHash, Hash) of
-                            true ->
-                                TimeLeft = TimeStamp + Timeout - CurrentTime,
-                                ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
-                                Req#httpd{user_ctx=#user_ctx{
-                                    name=?l2b(User),
-                                    roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                                }, auth={FullSecret, TimeLeft < Timeout*0.9}};
-                            _Else ->
-                                Req
-                        end;
-                    _Else ->
-                        Req
-                end
-            end
-        end
-    end.
-
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
-    % Note: we only set the AuthSession cookie if:
-    %  * a valid AuthSession cookie has been received
-    %  * we are outside a 10% timeout window
-    %  * and if an AuthSession cookie hasn't already been set e.g. by a login
-    %    or logout handler.
-    % The login and logout handlers need to set the AuthSession cookie
-    % themselves.
-    CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
-    Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
-    AuthSession = couch_util:get_value("AuthSession", Cookies),
-    if AuthSession == undefined ->
-        TimeStamp = make_cookie_time(),
-        [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
-    true ->
-        []
-    end;
-cookie_auth_header(_Req, _Headers) -> [].
-
-cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
-    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
-    Hash = crypto:sha_mac(Secret, SessionData),
-    mochiweb_cookies:cookie("AuthSession",
-        couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
-        [{path, "/"}] ++ cookie_scheme(Req) ++ max_age()).
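
Taken together with cookie_authentication_handler/1 above, the AuthSession
value is base64url(User ++ ":" ++ HexTimeStamp ++ ":" ++ Hash), where Hash is
an HMAC-SHA1 keyed with the server secret concatenated with the user's salt.
A minimal sketch of the inverse check (helper name hypothetical, calls as
used above):

    %% Hypothetical helper: decode an AuthSession cookie and recompute
    %% its hash against FullSecret (= secret ++ salt).
    check_auth_session(Cookie, FullSecret) ->
        [User, TimeStr, HashStr] =
            re:split(?b2l(couch_util:decodeBase64Url(Cookie)), ":",
                     [{return, list}, {parts, 3}]),
        Expected = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
        couch_passwords:verify(Expected, ?l2b(HashStr)).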
-
-ensure_cookie_auth_secret() ->
-    case config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            NewSecret = ?b2l(couch_uuids:random()),
-            config:set("couch_httpd_auth", "secret", NewSecret),
-            NewSecret;
-        Secret -> Secret
-    end.
-
-% session handlers
-% Login handler with user db
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
-    ReqBody = MochiReq:recv_body(),
-    Form = case MochiReq:get_primary_header_value("content-type") of
-        % accept form-encoded or JSON request bodies
-        "application/x-www-form-urlencoded" ++ _ ->
-            mochiweb_util:parse_qs(ReqBody);
-        "application/json" ++ _ ->
-            {Pairs} = ?JSON_DECODE(ReqBody),
-            lists:map(fun({Key, Value}) ->
-              {?b2l(Key), ?b2l(Value)}
-            end, Pairs);
-        _ ->
-            []
-    end,
-    UserName = ?l2b(couch_util:get_value("name", Form, "")),
-    Password = ?l2b(couch_util:get_value("password", Form, "")),
-    ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
-    User = case couch_auth_cache:get_user_creds(UserName) of
-        nil -> [];
-        Result -> Result
-    end,
-    UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
-    case authenticate(Password, User) of
-        true ->
-            % setup the session cookie
-            Secret = ?l2b(ensure_cookie_auth_secret()),
-            CurrentTime = make_cookie_time(),
-            Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
-            % TODO document the "next" feature in Futon
-            {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-                nil ->
-                    {200, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
-                {[
-                    {ok, true},
-                    {name, couch_util:get_value(<<"name">>, User, null)},
-                    {roles, couch_util:get_value(<<"roles">>, User, [])}
-                ]});
-        _Else ->
-            % clear the session
-            Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-            {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
-                nil ->
-                    {401, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
-    end;
-% get user info
-% GET /_session
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
-    Name = UserCtx#user_ctx.name,
-    ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
-    case {Name, ForceLogin} of
-        {null, "true"} ->
-            throw({unauthorized, <<"Please login.">>});
-        {Name, _} ->
-            send_json(Req, {[
-                % remove this ok
-                {ok, true},
-                {<<"userCtx">>, {[
-                    {name, Name},
-                    {roles, UserCtx#user_ctx.roles}
-                ]}},
-                {info, {[
-                    {authentication_db, ?l2b(config:get("couch_httpd_auth", "authentication_db"))},
-                    {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
-                            config:get("httpd", "authentication_handlers"))]}
-                ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
-                        auth_name(?b2l(Handler))
-                    end)}}
-            ]})
-    end;
-% logout by deleting the session
-handle_session_req(#httpd{method='DELETE'}=Req) ->
-    Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-    {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-        nil ->
-            {200, [Cookie]};
-        Redirect ->
-            {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-    end,
-    send_json(Req, Code, Headers, {[{ok, true}]});
-handle_session_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
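
A usage sketch for the POST clause above (values illustrative): a login
request such as

    POST /_session
    Content-Type: application/json

    {"name": "jan", "password": "apple"}

answers 200 with an ok/name/roles JSON body and a Set-Cookie AuthSession
header on success, or 401 with a cleared cookie on failure; the optional
?next= and ?fail= query parameters turn these responses into 302 redirects.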
-
-maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) ->
-    [{Key, Fun(Else)}].
-
-authenticate(Pass, UserProps) ->
-    UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
-    {PasswordHash, ExpectedHash} =
-        case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
-        <<"simple">> ->
-            {couch_passwords:simple(Pass, UserSalt),
-            couch_util:get_value(<<"password_sha">>, UserProps, nil)};
-        <<"pbkdf2">> ->
-            Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
-            {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
-             couch_util:get_value(<<"derived_key">>, UserProps, nil)}
-    end,
-    couch_passwords:verify(PasswordHash, ExpectedHash).
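
For orientation, authenticate/2 expects one of two credential shapes in
UserProps (all values below are made up):

    %% "simple" scheme: salted SHA-1 of the password
    [{<<"salt">>, <<"5b7c49d1">>},
     {<<"password_scheme">>, <<"simple">>},
     {<<"password_sha">>, <<"b7a4c2d9e1f0a3b5">>}]

    %% "pbkdf2" scheme: derived key plus iteration count
    [{<<"salt">>, <<"5b7c49d1">>},
     {<<"password_scheme">>, <<"pbkdf2">>},
     {<<"iterations">>, 10000},
     {<<"derived_key">>, <<"9e12af4cd87b">>}]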
-
-auth_name(String) when is_list(String) ->
-    [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
-    ?l2b(Name).
-
-make_cookie_time() ->
-    {NowMS, NowS, _} = erlang:now(),
-    NowMS * 1000000 + NowS.
-
-cookie_scheme(#httpd{mochi_req=MochiReq}) ->
-    [{http_only, true}] ++
-    case MochiReq:get(scheme) of
-        http -> [];
-        https -> [{secure, true}]
-    end.
-
-max_age() ->
-    case config:get("couch_httpd_auth", "allow_persistent_cookies", "false") of
-        "false" ->
-            [];
-        "true" ->
-            Timeout = list_to_integer(
-                config:get("couch_httpd_auth", "timeout", "600")),
-            [{max_age, Timeout}]
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_cors.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_cors.erl b/src/couch/src/couch_httpd_cors.erl
deleted file mode 100644
index d98357a..0000000
--- a/src/couch/src/couch_httpd_cors.erl
+++ /dev/null
@@ -1,343 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc module to handle Cross-Origin Resource Sharing
-%%
-%% This module handles CORS requests and preflight requests for a
-%% CouchDB node. Configuration is done in the ini file.
-
-
--module(couch_httpd_cors).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([is_preflight_request/1, cors_headers/2]).
-
--define(SUPPORTED_HEADERS, "Accept, Accept-Language, Content-Type," ++
-        "Expires, Last-Modified, Pragma, Origin, Content-Length," ++
-        "If-Match, Destination, X-Requested-With, " ++
-        "X-Http-Method-Override, Content-Range").
-
--define(SUPPORTED_METHODS, "GET, HEAD, POST, PUT, DELETE," ++
-        "TRACE, CONNECT, COPY, OPTIONS").
-
-% as defined in http://www.w3.org/TR/cors/#terminology
--define(SIMPLE_HEADERS, ["Cache-Control", "Content-Language",
-        "Content-Type", "Expires", "Last-Modified", "Pragma"]).
--define(SIMPLE_CONTENT_TYPE_VALUES, ["application/x-www-form-urlencoded",
-        "multipart/form-data", "text/plain"]).
-
-% TODO: - pick a sane default
--define(CORS_DEFAULT_MAX_AGE, 12345).
-
-%% is_preflight_request/1
-
-% http://www.w3.org/TR/cors/#resource-preflight-requests
-
-is_preflight_request(#httpd{method=Method}=Req) when Method /= 'OPTIONS' ->
-    Req;
-is_preflight_request(Req) ->
-    EnableCors = enable_cors(),
-    is_preflight_request(Req, EnableCors).
-
-is_preflight_request(Req, false) ->
-    Req;
-is_preflight_request(#httpd{mochi_req=MochiReq}=Req, true) ->
-    case preflight_request(MochiReq) of
-    {ok, PreflightHeaders} ->
-        send_preflight_response(Req, PreflightHeaders);
-    _ ->
-        Req
-    end.
-
-
-preflight_request(MochiReq) ->
-    Origin = MochiReq:get_header_value("Origin"),
-    preflight_request(MochiReq, Origin).
-
-preflight_request(MochiReq, undefined) ->
-    % If the Origin header is not present terminate this set of
-    % steps. The request is outside the scope of this specification.
-    % http://www.w3.org/TR/cors/#resource-preflight-requests
-    MochiReq;
-preflight_request(MochiReq, Origin) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    AcceptAll = lists:member("*", AcceptedOrigins),
-
-    HandlerFun = fun() ->
-        OriginList = couch_util:to_list(Origin),
-        handle_preflight_request(OriginList, Host, MochiReq)
-    end,
-
-    case AcceptAll of
-    true ->
-        % Always matching is acceptable since the list of
-        % origins can be unbounded.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        HandlerFun();
-    false ->
-        case lists:member(Origin, AcceptedOrigins) of
-        % The Origin header can only contain a single origin as
-        % the user agent will not follow redirects.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        % TODO: Square against multi origin thinger in Security Considerations
-        true ->
-            HandlerFun();
-        false ->
-            % If the value of the Origin header is not a
-            % case-sensitive match for any of the values
-            % in list of origins do not set any additional
-            % headers and terminate this set of steps.
-            % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-handle_preflight_request(Origin, Host, MochiReq) ->
-    %% get supported methods
-    SupportedMethods = split_list(cors_config(Host, "methods",
-                                              ?SUPPORTED_METHODS)),
-
-    % get supported headers
-    AllSupportedHeaders = split_list(cors_config(Host, "headers",
-                                                 ?SUPPORTED_HEADERS)),
-
-    SupportedHeaders = [string:to_lower(H) || H <- AllSupportedHeaders],
-
-    % get max age
-    MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
-
-    PreflightHeaders0 = maybe_add_credentials(Origin, Host, [
-        {"Access-Control-Allow-Origin", Origin},
-        {"Access-Control-Max-Age", MaxAge},
-        {"Access-Control-Allow-Methods",
-            string:join(SupportedMethods, ", ")}]),
-
-    case MochiReq:get_header_value("Access-Control-Request-Method") of
-    undefined ->
-        % If there is no Access-Control-Request-Method header
-        % or if parsing failed, do not set any additional headers
-        % and terminate this set of steps. The request is outside
-        % the scope of this specification.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        {ok, PreflightHeaders0};
-    Method ->
-        case lists:member(Method, SupportedMethods) of
-        true ->
-            % method ok, check headers
-            AccessHeaders = MochiReq:get_header_value(
-                    "Access-Control-Request-Headers"),
-            {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
-                undefined -> {"", []};
-                Headers ->
-                    % transform the header list into something we
-                    % can check; make sure every header name is a
-                    % lower-cased list
-                    RH = [string:to_lower(H)
-                          || H <- split_headers(Headers)],
-                    {Headers, RH}
-            end,
-            % check if headers are supported
-            case ReqHeaders -- SupportedHeaders of
-            [] ->
-                PreflightHeaders = PreflightHeaders0 ++
-                                   [{"Access-Control-Allow-Headers",
-                                     FinalReqHeaders}],
-                {ok, PreflightHeaders};
-            _ ->
-                false
-            end;
-        false ->
-        % If method is not a case-sensitive match for any of
-        % the values in list of methods do not set any additional
-        % headers and terminate this set of steps.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-send_preflight_response(#httpd{mochi_req=MochiReq}=Req, Headers) ->
-    couch_httpd:log_request(Req, 204),
-    couch_stats_collector:increment({httpd_status_codes, 204}),
-    Headers1 = couch_httpd:http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ couch_httpd:server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    {ok, MochiReq:respond({204, Headers2, <<>>})}.
-
-
-% cors_headers/1
-
-cors_headers(MochiReq, RequestHeaders) ->
-    EnableCors = enable_cors(),
-    CorsHeaders = do_cors_headers(MochiReq, EnableCors),
-    maybe_apply_cors_headers(CorsHeaders, RequestHeaders).
-
-do_cors_headers(#httpd{mochi_req=MochiReq}, true) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    case MochiReq:get_header_value("Origin") of
-    undefined ->
-        % If the Origin header is not present terminate
-        % this set of steps. The request is outside the scope
-        % of this specification.
-        % http://www.w3.org/TR/cors/#resource-processing-model
-        [];
-    Origin ->
-        handle_cors_headers(couch_util:to_list(Origin),
-                            Host, AcceptedOrigins)
-    end;
-do_cors_headers(_MochiReq, false) ->
-    [].
-
-maybe_apply_cors_headers([], RequestHeaders) ->
-    RequestHeaders;
-maybe_apply_cors_headers(CorsHeaders, RequestHeaders0) ->
-    % for each RequestHeader that isn't in SimpleHeaders,
-    % (or Content-Type with SIMPLE_CONTENT_TYPE_VALUES)
-    % append to Access-Control-Expose-Headers
-    % return: RequestHeaders ++ CorsHeaders ++ ACEH
-
-    RequestHeaders = [K || {K,_V} <- RequestHeaders0],
-    ExposedHeaders0 = reduce_headers(RequestHeaders, ?SIMPLE_HEADERS),
-
-    % here we may not have moved Content-Type into ExposedHeaders;
-    % now we need to check whether the Content-Type value is in
-    % ?SIMPLE_CONTENT_TYPE_VALUES and, if it isn't, add Content-Type
-    % to ExposedHeaders
-    ContentType = string:to_lower(
-        proplists:get_value("Content-Type", RequestHeaders0)),
-
-    IncludeContentType = lists:member(ContentType, ?SIMPLE_CONTENT_TYPE_VALUES),
-    ExposedHeaders = case IncludeContentType of
-    false ->
-        lists:umerge(ExposedHeaders0, ["Content-Type"]);
-    true ->
-        ExposedHeaders0
-    end,
-    CorsHeaders
-    ++ RequestHeaders0
-    ++ [{"Access-Control-Expose-Headers",
-            string:join(ExposedHeaders, ", ")}].
-
-
-reduce_headers(A, B) ->
-    reduce_headers0(A, B, []).
-
-reduce_headers0([], _B, Result) ->
-    Result;
-reduce_headers0([ElmA|RestA], B, Result) ->
-    R = case member_nocase(ElmA, B) of
-    true -> Result;
-    _Else -> [ElmA | Result]
-    end,
-    reduce_headers0(RestA, B, R).
-
-member_nocase(ElmA, List) ->
-    lists:any(fun(ElmB) ->
-        string:to_lower(ElmA) =:= string:to_lower(ElmB)
-    end, List).
-
-handle_cors_headers(_Origin, _Host, []) ->
-    [];
-handle_cors_headers(Origin, Host, AcceptedOrigins) ->
-    AcceptAll = lists:member("*", AcceptedOrigins),
-    case {AcceptAll, lists:member(Origin, AcceptedOrigins)} of
-    {true, _} ->
-        make_cors_header(Origin, Host);
-    {false, true}  ->
-        make_cors_header(Origin, Host);
-    _ ->
-        % If the value of the Origin header is not a
-        % case-sensitive match for any of the values
-        % in list of origins, do not set any additional
-        % headers and terminate this set of steps.
-        % http://www.w3.org/TR/cors/#resource-requests
-        []
-    end.
-
-
-make_cors_header(Origin, Host) ->
-    Headers = [{"Access-Control-Allow-Origin", Origin}],
-    maybe_add_credentials(Origin, Host, Headers).
-
-
-%% util
-
-maybe_add_credentials(Origin, Host, Headers) ->
-    maybe_add_credentials(Headers, allow_credentials(Origin, Host)).
-
-maybe_add_credentials(Headers, false) ->
-    Headers;
-maybe_add_credentials(Headers, true) ->
-    Headers ++ [{"Access-Control-Allow-Credentials", "true"}].
-
-
-allow_credentials("*", _Host) ->
-    false;
-allow_credentials(_Origin, Host) ->
-    Default = get_bool_config("cors", "credentials", false),
-    get_bool_config(cors_section(Host), "credentials", Default).
-
-
-
-cors_config(Host, Key, Default) ->
-    config:get(cors_section(Host), Key,
-                     config:get("cors", Key, Default)).
-
-cors_section(Host0) ->
-    {Host, _Port} = split_host_port(Host0),
-    "cors:" ++ Host.
-
-enable_cors() ->
-    get_bool_config("httpd", "enable_cors", false).
-
-get_bool_config(Section, Key, Default) ->
-    case config:get(Section, Key) of
-    undefined ->
-        Default;
-    "true" ->
-        true;
-    "false" ->
-        false
-    end.
-
-get_accepted_origins(Host) ->
-    split_list(cors_config(Host, "origins", [])).
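
Because cors_config/3 falls back from the per-vhost "cors:Host" section to
the global "cors" section, configuration layers cleanly. An illustrative
local.ini fragment (values made up):

    [httpd]
    enable_cors = true

    [cors]
    origins = https://example.com
    credentials = true

    [cors:example.com]
    ; overrides the global value for this vhost only
    credentials = false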
-
-split_list(S) ->
-    re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-split_headers(H) ->
-    re:split(H, ",\\s*", [{return,list}, trim]).
-
-split_host_port(HostAsString) ->
-    % split at the colon ":"
-    Split = string:rchr(HostAsString, $:),
-    split_host_port(HostAsString, Split).
-
-split_host_port(HostAsString, 0) ->
-    % no colon, so no port
-    {HostAsString, '*'};
-split_host_port(HostAsString, N) ->
-    HostPart = string:substr(HostAsString, 1, N-1),
-    % parse out port
-    % is there a nicer way?
-    case (catch erlang:list_to_integer(string:substr(HostAsString,
-                    N+1, length(HostAsString)))) of
-    {'EXIT', _} ->
-        {HostAsString, '*'};
-    Port ->
-        {HostPart, Port}
-    end.
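
For example:

    split_host_port("example.com:5984") -> {"example.com", 5984}
    split_host_port("example.com")      -> {"example.com", '*'}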

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_db.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
deleted file mode 100644
index 50fba6c..0000000
--- a/src/couch/src/couch_httpd_db.erl
+++ /dev/null
@@ -1,1210 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_db).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
-    db_req/2, couch_doc_open/4,handle_changes_req/2,
-    update_doc_result_to_json/1, update_doc_result_to_json/2,
-    handle_design_info_req/3]).
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, absolute_uri/2, send/2,
-    start_response_length/4, send_error/4]).
-
--record(doc_query_args, {
-    options = [],
-    rev = nil,
-    open_revs = [],
-    update_type = interactive_edit,
-    atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
-        db_url_handlers=DbUrlHandlers}=Req)->
-    case {Method, RestParts} of
-    {'PUT', []} ->
-        create_db_req(Req, DbName);
-    {'DELETE', []} ->
-        % if we get ?rev=... the user is using a faulty script where the
-        % document id is empty by accident. Let them recover safely.
-        case couch_httpd:qs_value(Req, "rev", false) of
-            false -> delete_db_req(Req, DbName);
-            _Rev -> throw({bad_request,
-                "You tried to DELETE a database with a ?rev= parameter. "
-                ++ "Did you mean to DELETE a document instead?"})
-        end;
-    {_, []} ->
-        do_db_req(Req, fun db_req/2);
-    {_, [SecondPart|_]} ->
-        Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
-        do_db_req(Req, Handler)
-    end.
-
-handle_changes_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_changes_req1(Req, #db{name=DbName}=Db) ->
-    AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
-    case AuthDbName of
-    DbName ->
-        % in the authentication database, _changes is admin-only.
-        ok = couch_db:check_is_admin(Db);
-    _Else ->
-        % on other databases, _changes is free for all.
-        ok
-    end,
-    handle_changes_req2(Req, Db).
-
-handle_changes_req2(Req, Db) ->
-    MakeCallback = fun(Resp) ->
-        fun({change, {ChangeProp}=Change, _}, "eventsource") ->
-            Seq = proplists:get_value(<<"seq">>, ChangeProp),
-            send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
-                              "\n", "id: ", ?JSON_ENCODE(Seq),
-                              "\n\n"]);
-        ({change, Change, _}, "continuous") ->
-            send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
-        ({change, Change, Prepend}, _) ->
-            send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
-        (start, "eventsource") ->
-            ok;
-        (start, "continuous") ->
-            ok;
-        (start, _) ->
-            send_chunk(Resp, "{\"results\":[\n");
-        ({stop, _EndSeq}, "eventsource") ->
-            end_json_response(Resp);
-        ({stop, EndSeq}, "continuous") ->
-            send_chunk(
-                Resp,
-                [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
-            ),
-            end_json_response(Resp);
-        ({stop, EndSeq}, _) ->
-            send_chunk(
-                Resp,
-                io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
-            ),
-            end_json_response(Resp);
-        (timeout, _) ->
-            send_chunk(Resp, "\n")
-        end
-    end,
-    ChangesArgs = parse_changes_query(Req, Db),
-    ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
-    WrapperFun = case ChangesArgs#changes_args.feed of
-    "normal" ->
-        {ok, Info} = couch_db:get_db_info(Db),
-        CurrentEtag = couch_httpd:make_etag(Info),
-        fun(FeedChangesFun) ->
-            couch_httpd:etag_respond(
-                Req,
-                CurrentEtag,
-                fun() ->
-                    {ok, Resp} = couch_httpd:start_json_response(
-                         Req, 200, [{"ETag", CurrentEtag}]
-                    ),
-                    FeedChangesFun(MakeCallback(Resp))
-                end
-            )
-        end;
-    "eventsource" ->
-        Headers = [
-            {"Content-Type", "text/event-stream"},
-            {"Cache-Control", "no-cache"}
-        ],
-        {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end;
-    _ ->
-        % "longpoll" or "continuous"
-        {ok, Resp} = couch_httpd:start_json_response(Req, 200),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end
-    end,
-    couch_stats_collector:increment(
-        {httpd, clients_requesting_changes}
-    ),
-    try
-        WrapperFun(ChangesFun)
-    after
-    couch_stats_collector:decrement(
-        {httpd, clients_requesting_changes}
-    )
-    end.
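
To make MakeCallback's framing concrete, the three feed styles wrap the same
change objects differently (sequence numbers illustrative):

    %% "normal":      {"results":[ <change>, <change> ],
    %%                  "last_seq":2}
    %% "continuous":  one <change> JSON object per line, terminated by
    %%                  {"last_seq":2}
    %% "eventsource": data: <change>
    %%                id: 2
    %%                (followed by a blank line per event)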
-
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
-    case Req#httpd.path_parts of
-        [_DbName, <<"_compact">>] ->
-            ok = couch_db:check_is_admin(Db),
-            couch_httpd:validate_ctype(Req, "application/json"),
-            {ok, _} = couch_db:start_compact(Db),
-            send_json(Req, 202, {[{ok, true}]});
-        [_DbName, <<"_compact">>, DesignName | _] ->
-            DesignId = <<"_design/", DesignName/binary>>,
-            DDoc = couch_httpd_db:couch_doc_open(
-                Db, DesignId, nil, [ejson_body]
-            ),
-            couch_mrview_http:handle_compact_req(Req, Db, DDoc)
-    end;
-
-handle_compact_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_design_req(#httpd{
-        path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
-        design_url_handlers = DesignUrlHandlers
-    }=Req, Db) ->
-    case couch_db:is_system_db(Db) of
-    true ->
-        case (catch couch_db:check_is_admin(Db)) of
-        ok -> ok;
-        _ ->
-            throw({forbidden, <<"Only admins can access design document",
-                " actions for system databases.">>})
-        end;
-    false -> ok
-    end,
-
-    % load ddoc
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
-        throw({not_found, <<"missing handler: ", Action/binary>>})
-    end),
-    Handler(Req, Db, DDoc);
-
-handle_design_req(Req, Db) ->
-    db_req(Req, Db).
-
-handle_design_info_req(#httpd{
-            method='GET',
-            path_parts=[_DbName, _Design, DesignName, _]
-        }=Req, Db, _DDoc) ->
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    couch_mrview_http:handle_info_req(Req, Db, DDoc).
-
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        couch_db:close(Db),
-        DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-        send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Options = case couch_httpd:qs_value(Req, "sync") of
-        "true" -> [sync, {user_ctx, UserCtx}];
-        _ -> [{user_ctx, UserCtx}]
-    end,
-    case couch_server:delete(DbName, Options) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
-    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        try
-            Fun(Req, Db)
-        after
-            catch couch_db:close(Db)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
-    {ok, DbInfo} = couch_db:get_db_info(Db),
-    send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
-    validate_attachment_names(Doc),
-    Doc2 = case Doc#doc.id of
-        <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
-        _ ->
-            Doc
-    end,
-    DocId = Doc2#doc.id,
-    update_doc(Req, Db, DocId, Doc2);
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    CommittedSeq = couch_db:get_committed_update_seq(Db),
-    {ok, StartTime} =
-    case couch_httpd:qs_value(Req, "seq") of
-    undefined ->
-        couch_db:ensure_full_commit(Db);
-    RequiredStr ->
-        RequiredSeq = list_to_integer(RequiredStr),
-        if RequiredSeq > UpdateSeq ->
-            throw({bad_request,
-                "can't do a full commit ahead of current update_seq"});
-        RequiredSeq > CommittedSeq ->
-            couch_db:ensure_full_commit(Db);
-        true ->
-            {ok, Db#db.instance_start_time}
-        end
-    end,
-    send_json(Req, 201, {[
-        {ok, true},
-        {instance_start_time, StartTime}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
-    couch_stats_collector:increment({httpd, bulk_requests}),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {JsonProps} = couch_httpd:json_body_obj(Req),
-    case couch_util:get_value(<<"docs">>, JsonProps) of
-    undefined ->
-        send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
-    DocsArray ->
-        case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-        "true" ->
-            Options = [full_commit];
-        "false" ->
-            Options = [delay_commit];
-        _ ->
-            Options = []
-        end,
-        case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
-        true ->
-            Docs = lists:map(
-                fun({ObjProps} = JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Id = case Doc#doc.id of
-                        <<>> -> couch_uuids:new();
-                        Id0 -> Id0
-                    end,
-                    case couch_util:get_value(<<"_rev">>, ObjProps) of
-                    undefined ->
-                        Revs = {0, []};
-                    Rev ->
-                        {Pos, RevId} = couch_doc:parse_rev(Rev),
-                        Revs = {Pos, [RevId]}
-                    end,
-                    Doc#doc{id=Id,revs=Revs}
-                end,
-                DocsArray),
-            Options2 =
-            case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
-            true  -> [all_or_nothing|Options];
-            _ -> Options
-            end,
-            case couch_db:update_docs(Db, Docs, Options2) of
-            {ok, Results} ->
-                % output the results
-                DocResults = lists:zipwith(fun update_doc_result_to_json/2,
-                    Docs, Results),
-                send_json(Req, 201, DocResults);
-            {aborted, Errors} ->
-                ErrorsJson =
-                    lists:map(fun update_doc_result_to_json/1, Errors),
-                send_json(Req, 417, ErrorsJson)
-            end;
-        false ->
-            Docs = lists:map(fun(JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Doc
-                end, DocsArray),
-            {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
-            ErrorsJson =
-                lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 201, ErrorsJson)
-        end
-    end;
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {IdsRevs} = couch_httpd:json_body_obj(Req),
-    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-
-    case couch_db:purge_docs(Db, IdsRevs2) of
-    {ok, PurgeSeq, PurgedIdsRevs} ->
-        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
-        send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
-    Error ->
-        throw(Error)
-    end;
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
-    send_json(Req, {[
-        {missing_revs, {Results2}}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 =
-        [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 =
-    lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
-        {Id,
-            {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
-                if PossibleAncestors == [] ->
-                    [];
-                true ->
-                    [{possible_ancestors,
-                        couch_doc:revs_to_strs(PossibleAncestors)}]
-                end}}
-    end, Results),
-    send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    SecObj = couch_httpd:json_body(Req),
-    ok = couch_db:set_security(Db, SecObj),
-    send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
-        Db) ->
-    Limit = couch_httpd:json_body(Req),
-    case is_integer(Limit) of
-    true ->
-        ok = couch_db:set_revs_limit(Db, Limit),
-        send_json(Req, {[{<<"ok">>, true}]});
-    false ->
-        throw({bad_request, <<"Rev limit has to be an integer">>})
-    end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
-    PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
-    [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
-        [{return, list}]),
-    couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
-        mochiweb_util:join(PathTail, "_design%2F"));
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
-    throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
-    db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, DocId, FileNameParts).
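
As a usage sketch for the _bulk_docs clause above, the request body carries a
"docs" array and optional flags (document contents illustrative):

    {"docs": [{"_id": "doc1", "value": 1},
              {"_id": "doc2", "_rev": "1-abc123", "_deleted": true}],
     "all_or_nothing": false}

With "new_edits": false each doc must carry its full revision and is written
via the replicated_changes path rather than as a new edit.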
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
-    % check for the existence of the doc to handle the 404 case.
-    couch_doc_open(Db, DocId, nil, []),
-    case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
-    Rev ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId,
-                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
-    end;
-
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
-    #doc_query_args{
-        rev = Rev,
-        open_revs = Revs,
-        options = Options1,
-        atts_since = AttsSince
-    } = parse_doc_query(Req),
-    Options = case AttsSince of
-    nil ->
-        Options1;
-    RevList when is_list(RevList) ->
-        [{atts_since, RevList}, attachments | Options1]
-    end,
-    case Revs of
-    [] ->
-        Doc = couch_doc_open(Db, DocId, Rev, Options),
-        send_doc(Req, Doc, Options);
-    _ ->
-        {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
-        case MochiReq:accepts_content_type("multipart/mixed") of
-        false ->
-            {ok, Resp} = start_json_response(Req, 200),
-            send_chunk(Resp, "["),
-            % We loop through the docs. The first time through the separator
-            % is whitespace, then a comma on subsequent iterations.
-            lists:foldl(
-                fun(Result, AccSeparator) ->
-                    case Result of
-                    {ok, Doc} ->
-                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
-                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
-                        send_chunk(Resp, AccSeparator ++ Json);
-                    {{not_found, missing}, RevId} ->
-                        RevStr = couch_doc:rev_to_str(RevId),
-                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-                        send_chunk(Resp, AccSeparator ++ Json)
-                    end,
-                    "," % AccSeparator now has a comma
-                end,
-                "", Results),
-            send_chunk(Resp, "]"),
-            end_json_response(Resp);
-        true ->
-            send_docs_multipart(Req, Results, Options)
-        end
-    end;
-
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
-    couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
-    couch_httpd:validate_ctype(Req, "multipart/form-data"),
-    Form = couch_httpd:parse_form(Req),
-    case couch_util:get_value("_doc", Form) of
-    undefined ->
-        Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
-        {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
-    Json ->
-        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
-    end,
-    UpdatedAtts = [
-        #att{name=validate_attachment_name(Name),
-            type=list_to_binary(ContentType),
-            data=Content} ||
-        {Name, {ContentType, _}, Content} <-
-        proplists:get_all_values("_attachments", Form)
-    ],
-    #doc{atts=OldAtts} = Doc,
-    OldAtts2 = lists:flatmap(
-        fun(#att{name=OldName}=Att) ->
-            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
-            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
-            _ -> [] % the attachment was in the UpdatedAtts, drop it
-            end
-        end, OldAtts),
-    NewDoc = Doc#doc{
-        atts = UpdatedAtts ++ OldAtts2
-    },
-    update_doc(Req, Db, DocId, NewDoc);
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
-    couch_doc:validate_docid(DocId),
-
-    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
-    ("multipart/related;" ++ _) = ContentType ->
-        {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
-            ContentType, fun() -> receive_request_data(Req) end),
-        Doc = couch_doc_from_req(Req, DocId, Doc0),
-        try
-            Result = update_doc(Req, Db, DocId, Doc),
-            WaitFun(),
-            Result
-        catch throw:Err ->
-            % Document rejected by a validate_doc_update function.
-            couch_doc:abort_multi_part_stream(Parser),
-            throw(Err)
-        end;
-    _Else ->
-        Body = couch_httpd:json_body(Req),
-        Doc = couch_doc_from_req(Req, DocId, Body),
-        update_doc(Req, Db, DocId, Doc)
-    end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
-    SourceRev =
-    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> nil;
-        Rev -> Rev
-    end,
-    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
-    % open old doc
-    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
-    % save new doc
-    update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
-
-db_doc_req(Req, _Db, _DocId) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-
-send_doc(Req, Doc, Options) ->
-    case Doc#doc.meta of
-    [] ->
-        DiskEtag = couch_httpd:doc_etag(Doc),
-        % output etag only when we have no meta
-        couch_httpd:etag_respond(Req, DiskEtag, fun() ->
-            send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
-        end);
-    _ ->
-        send_doc_efficiently(Req, Doc, [], Options)
-    end.
-
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
-    #doc{atts = Atts} = Doc, Headers, Options) ->
-    case lists:member(attachments, Options) of
-    true ->
-        case MochiReq:accepts_content_type("multipart/related") of
-        false ->
-            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-        true ->
-            Boundary = couch_uuids:random(),
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
-                    [attachments, follows, att_encoding_info | Options])),
-            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
-                    Boundary,JsonBytes, Atts, true),
-            CType = {<<"Content-Type">>, ContentType},
-            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
-            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
-                    fun(Data) -> couch_httpd:send(Resp, Data) end, true)
-        end;
-    false ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
-    end.
-
-send_docs_multipart(Req, Results, Options1) ->
-    OuterBoundary = couch_uuids:random(),
-    InnerBoundary = couch_uuids:random(),
-    Options = [attachments, follows, att_encoding_info | Options1],
-    CType = {"Content-Type",
-        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
-    lists:foreach(
-        fun({ok, #doc{atts=Atts}=Doc}) ->
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
-            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
-                    InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
-                    ContentType/binary, "\r\n\r\n">>),
-            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
-                    end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
-        ({{not_found, missing}, RevId}) ->
-             RevStr = couch_doc:rev_to_str(RevId),
-             Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-             couch_httpd:send_chunk(Resp,
-                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
-                Json,
-                <<"\r\n--", OuterBoundary/binary>>])
-         end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
-    Boundary = couch_uuids:random(),
-    CType = {"Content-Type",
-        "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 206, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
-    lists:foreach(fun({From, To}) ->
-        ContentRange = make_content_range(From, To, Len),
-        couch_httpd:send_chunk(Resp,
-            <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
-            "Content-Range: ", ContentRange/binary, "\r\n",
-           "\r\n">>),
-        couch_doc:range_att_foldl(Att, From, To + 1,
-            fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-        couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
-    end, Ranges),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp),
-    {ok, Resp}.
-
-receive_request_data(Req) ->
-    receive_request_data(Req, couch_httpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
-    Len = erlang:min(4096, LenLeft),
-    Data = couch_httpd:recv(Req, Len),
-    {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
-    throw(<<"expected more data">>).
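
receive_request_data/1 yields the request body as a lazy sequence of
{Data, NextFun} pairs in chunks of at most 4096 bytes; the multipart parser
above pulls from it on demand. A hypothetical consumer (names made up; in
practice the parser stops at the final multipart boundary rather than
relying on the throw):

    %% Accumulate all streamed chunks; the throw from
    %% receive_request_data/2 signals that the body is exhausted.
    drain_body(Req) ->
        drain(fun() -> receive_request_data(Req) end, []).

    drain(Next, Acc) ->
        try Next() of
            {Data, Next2} -> drain(Next2, [Data | Acc])
        catch
            throw:_ -> lists:reverse(Acc)
        end.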
-
-make_content_range(From, To, Len) ->
-    ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])).
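
For example, make_content_range(0, 99, 1000) produces
<<"bytes 0-99/1000">>, the Content-Range form used by the range responses
above.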
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
-        {_Code, Err, Msg} = couch_httpd:error_info(Error),
-        {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
-            {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
-    update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
-    {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
-    {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
-    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
-    Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
-    update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
-update_doc(Req, Db, DocId, Doc) ->
-    update_doc(Req, Db, DocId, Doc, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
-    #doc_query_args{
-        update_type = UpdateType
-    } = parse_doc_query(Req),
-    update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
-    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-    "true" ->
-        Options = [full_commit];
-    "false" ->
-        Options = [delay_commit];
-    _ ->
-        Options = []
-    end,
-    case couch_httpd:qs_value(Req, "batch") of
-    "ok" ->
-        % async batching
-        spawn(fun() ->
-                case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
-                {ok, _} -> ok;
-                Error ->
-                    ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
-                end
-            end),
-        send_json(Req, 202, Headers, {[
-            {ok, true},
-            {id, DocId}
-        ]});
-    _Normal ->
-        % normal
-        {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
-        NewRevStr = couch_doc:rev_to_str(NewRev),
-        ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
-        send_json(Req,
-            if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
-            true -> 201 end,
-            ResponseHeaders, {[
-                {ok, true},
-                {id, DocId},
-                {rev, NewRevStr}]})
-    end.
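
A note on the batch branch above: with ?batch=ok the update is spawned
asynchronously and the client immediately receives 202 Accepted with only
the ok and id members (no rev), since the write may not have been applied
yet; failures surface only in the log via ?LOG_INFO.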
-
-couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
-    validate_attachment_names(Doc),
-    Rev = case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        undefined;
-    QSRev ->
-        couch_doc:parse_rev(QSRev)
-    end,
-    Revs2 =
-    case Revs of
-    {Start, [RevId|_]} ->
-        if Rev /= undefined andalso Rev /= {Start, RevId} ->
-            throw({bad_request, "Document rev from request body and query "
-                   "string have different values"});
-        true ->
-            case extract_header_rev(Req, {Start, RevId}) of
-            missing_rev -> {0, []};
-            _ -> Revs
-            end
-        end;
-    _ ->
-        case extract_header_rev(Req, Rev) of
-        missing_rev -> {0, []};
-        {Pos, RevId2} -> {Pos, [RevId2]}
-        end
-    end,
-    Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
-    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-%   couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
-    case Rev of
-    nil -> % open most recent rev
-        case couch_db:open_doc(Db, DocId, Options) of
-        {ok, Doc} ->
-            Doc;
-        Error ->
-            throw(Error)
-        end;
-    _ -> % open a specific rev (deletions come back as stubs)
-        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
-        {ok, [{ok, Doc}]} ->
-            Doc;
-        {ok, [{{not_found, missing}, Rev}]} ->
-            throw(not_found);
-        {ok, [Else]} ->
-            throw(Else)
-        end
-    end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
-    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
-    #doc_query_args{
-        rev=Rev,
-        options=Options
-    } = parse_doc_query(Req),
-    #doc{
-        atts=Atts
-    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
-    case [A || A <- Atts, A#att.name == FileName] of
-    [] ->
-        throw({not_found, "Document is missing attachment"});
-    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
-        Etag = case Att#att.md5 of
-            <<>> -> couch_httpd:doc_etag(Doc);
-            Md5 -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
-        end,
-        ReqAcceptsAttEnc = lists:member(
-           atom_to_list(Enc),
-           couch_httpd:accepted_encodings(Req)
-        ),
-        Len = case {Enc, ReqAcceptsAttEnc} of
-        {identity, _} ->
-            % stored and served in identity form
-            DiskLen;
-        {_, false} when DiskLen =/= AttLen ->
-            % Stored encoded, but client doesn't accept the encoding we used,
-            % so we need to decode on the fly.  DiskLen is the identity length
-            % of the attachment.
-            DiskLen;
-        {_, true} ->
-            % Stored and served encoded.  AttLen is the encoded length.
-            AttLen;
-        _ ->
-            % We received an encoded attachment and stored it as such, so we
-            % don't know the identity length.  The client doesn't accept the
-            % encoding, and since we cannot serve a correct Content-Length
-            % header we'll fall back to a chunked response.
-            undefined
-        end,
-        Headers = [
-            {"ETag", Etag},
-            {"Cache-Control", "must-revalidate"},
-            {"Content-Type", binary_to_list(Type)}
-        ] ++ case ReqAcceptsAttEnc of
-        true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
-            % the Content-Encoding header
-            [{"Content-Encoding", atom_to_list(Enc)}];
-        _ ->
-            []
-        end ++ case Enc of
-            identity ->
-                [{"Accept-Ranges", "bytes"}];
-            _ ->
-                [{"Accept-Ranges", "none"}]
-        end,
-        AttFun = case ReqAcceptsAttEnc of
-        false ->
-            fun couch_doc:att_foldl_decode/3;
-        true ->
-            fun couch_doc:att_foldl/3
-        end,
-        couch_httpd:etag_respond(
-            Req,
-            Etag,
-            fun() ->
-                case Len of
-                undefined ->
-                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
-                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-                    last_chunk(Resp);
-                _ ->
-                    Ranges = parse_ranges(MochiReq:get(range), Len),
-                    case {Enc, Ranges} of
-                        {identity, [{From, To}]} ->
-                            Headers1 = [{<<"Content-Range">>, make_content_range(From, To, Len)}]
-                                ++ Headers,
-                            {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
-                            couch_doc:range_att_foldl(Att, From, To + 1,
-                                fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
-                        {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
-                            send_ranges_multipart(Req, Type, Len, Att, Ranges);
-                        _ ->
-                            Headers1 = Headers ++
-                                if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
-                                    [{"Content-MD5", base64:encode(Att#att.md5)}];
-                                true ->
-                                    []
-                            end,
-                            {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
-                            AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
-                    end
-                end
-            end
-        )
-    end;
-
-
-db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
-        when (Method == 'PUT') or (Method == 'DELETE') ->
-    FileName = validate_attachment_name(
-                    mochiweb_util:join(
-                        lists:map(fun binary_to_list/1,
-                            FileNameParts),"/")),
-
-    NewAtt = case Method of
-        'DELETE' ->
-            [];
-        _ ->
-            [#att{
-                name = FileName,
-                type = case couch_httpd:header_value(Req,"Content-Type") of
-                    undefined ->
-                        % We could throw an error here or guess by the FileName.
-                        % Currently, just giving it a default.
-                        <<"application/octet-stream">>;
-                    CType ->
-                        list_to_binary(CType)
-                    end,
-                data = case couch_httpd:body_length(Req) of
-                    undefined ->
-                        <<"">>;
-                    {unknown_transfer_encoding, Unknown} ->
-                        exit({unknown_transfer_encoding, Unknown});
-                    chunked ->
-                        fun(MaxChunkSize, ChunkFun, InitState) ->
-                            couch_httpd:recv_chunked(Req, MaxChunkSize,
-                                ChunkFun, InitState)
-                        end;
-                    0 ->
-                        <<"">>;
-                    Length when is_integer(Length) ->
-                        Expect = case couch_httpd:header_value(Req, "expect") of
-                                     undefined ->
-                                         undefined;
-                                     Value when is_list(Value) ->
-                                         string:to_lower(Value)
-                                 end,
-                        case Expect of
-                            "100-continue" ->
-                                MochiReq:start_raw_response({100, gb_trees:empty()});
-                            _Else ->
-                                ok
-                        end,
-
-                        fun(Size) -> couch_httpd:recv(Req, Size) end
-                    end,
-                att_len = case couch_httpd:header_value(Req,"Content-Length") of
-                    undefined ->
-                        undefined;
-                    Length ->
-                        list_to_integer(Length)
-                    end,
-                md5 = get_md5_header(Req),
-                encoding = case string:to_lower(string:strip(
-                    couch_httpd:header_value(Req,"Content-Encoding","identity")
-                )) of
-                "identity" ->
-                   identity;
-                "gzip" ->
-                   gzip;
-                _ ->
-                   throw({
-                       bad_ctype,
-                       "Only gzip and identity content-encodings are supported"
-                   })
-                end
-            }]
-    end,
-
-    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> % make the new doc
-            couch_doc:validate_docid(DocId),
-            #doc{id=DocId};
-        Rev ->
-            case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
-                {ok, [{ok, Doc0}]} -> Doc0;
-                {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
-                {ok, [Error]} -> throw(Error)
-            end
-    end,
-
-    #doc{atts=Atts} = Doc,
-    DocEdited = Doc#doc{
-        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
-    },
-
-    Headers = case Method of
-    'DELETE' ->
-        [];
-    _ ->
-        [{"Location", absolute_uri(Req, "/" ++
-            ?b2l(Db#db.name) ++ "/" ++
-            ?b2l(DocId) ++ "/" ++
-            ?b2l(FileName)
-        )}]
-    end,
-    update_doc(Req, Db, DocId, DocEdited, Headers);
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-parse_ranges(undefined, _Len) ->
-    undefined;
-parse_ranges(fail, _Len) ->
-    undefined;
-parse_ranges(Ranges, Len) ->
-    parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
-    lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
-    undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
-    throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
-    parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
-    parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
-    parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
-    parse_ranges(Rest, Len, [{From, To}] ++ Acc).
-
-get_md5_header(Req) ->
-    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
-    Length = couch_httpd:body_length(Req),
-    Trailer = couch_httpd:header_value(Req, "Trailer"),
-    case {ContentMD5, Length, Trailer} of
-        _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
-            base64:decode(ContentMD5);
-        {_, chunked, undefined} ->
-            <<>>;
-        {_, chunked, _} ->
-            case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
-                {match, _} ->
-                    md5_in_footer;
-                _ ->
-                    <<>>
-            end;
-        _ ->
-            <<>>
-    end.
-
-parse_doc_query(Req) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-        {"attachments", "true"} ->
-            Options = [attachments | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"meta", "true"} ->
-            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs", "true"} ->
-            Options = [revs | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"local_seq", "true"} ->
-            Options = [local_seq | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs_info", "true"} ->
-            Options = [revs_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"conflicts", "true"} ->
-            Options = [conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"deleted_conflicts", "true"} ->
-            Options = [deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"rev", Rev} ->
-            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
-        {"open_revs", "all"} ->
-            Args#doc_query_args{open_revs=all};
-        {"open_revs", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
-        {"latest", "true"} ->
-            Options = [latest | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"atts_since", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
-        {"new_edits", "false"} ->
-            Args#doc_query_args{update_type=replicated_changes};
-        {"new_edits", "true"} ->
-            Args#doc_query_args{update_type=interactive_edit};
-        {"att_encoding_info", "true"} ->
-            Options = [att_encoding_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #doc_query_args{}, couch_httpd:qs(Req)).
-
-parse_changes_query(Req, Db) ->
-    lists:foldl(fun({Key, Value}, Args) ->
-        case {string:to_lower(Key), Value} of
-        {"feed", _} ->
-            Args#changes_args{feed=Value};
-        {"descending", "true"} ->
-            Args#changes_args{dir=rev};
-        {"since", "now"} ->
-            UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
-                                        couch_db:get_update_seq(WDb)
-                                end),
-            Args#changes_args{since=UpdateSeq};
-        {"since", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"last-event-id", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"limit", _} ->
-            Args#changes_args{limit=list_to_integer(Value)};
-        {"style", _} ->
-            Args#changes_args{style=list_to_existing_atom(Value)};
-        {"heartbeat", "true"} ->
-            Args#changes_args{heartbeat=true};
-        {"heartbeat", _} ->
-            Args#changes_args{heartbeat=list_to_integer(Value)};
-        {"timeout", _} ->
-            Args#changes_args{timeout=list_to_integer(Value)};
-        {"include_docs", "true"} ->
-            Args#changes_args{include_docs=true};
-        {"conflicts", "true"} ->
-            Args#changes_args{conflicts=true};
-        {"filter", _} ->
-            Args#changes_args{filter=Value};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #changes_args{}, couch_httpd:qs(Req)).
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
-    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
-    Etag = case couch_httpd:header_value(Req, "If-Match") of
-        undefined -> undefined;
-        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
-    end,
-    case {ExplicitRev, Etag} of
-    {undefined, undefined} -> missing_rev;
-    {_, undefined} -> ExplicitRev;
-    {undefined, _} -> Etag;
-    _ when ExplicitRev == Etag -> Etag;
-    _ ->
-        throw({bad_request, "Document rev and etag have different values"})
-    end.
-
-
-parse_copy_destination_header(Req) ->
-    case couch_httpd:header_value(Req, "Destination") of
-    undefined ->
-        throw({bad_request, "Destination header is mandatory for COPY."});
-    Destination ->
-        case re:run(Destination, "^https?://", [{capture, none}]) of
-        match ->
-            throw({bad_request, "Destination URL must be relative."});
-        nomatch ->
-            % see if ?rev=revid got appended to the Destination header
-            case re:run(Destination, "\\?", [{capture, none}]) of
-            nomatch ->
-                {list_to_binary(Destination), {0, []}};
-            match ->
-                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
-                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
-                {Pos, RevId} = couch_doc:parse_rev(Rev),
-                {list_to_binary(DocId), {Pos, [RevId]}}
-            end
-        end
-    end.
-
-validate_attachment_names(Doc) ->
-    lists:foreach(fun(#att{name=Name}) ->
-        validate_attachment_name(Name)
-    end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
-    validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
-    throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
-    case couch_util:validate_utf8(Name) of
-        true -> Name;
-        false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
-    end.
-
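
For reference, the parse_ranges/2 clauses above normalize the tuples
mochiweb extracts from a Range header. A sketch of the main cases for a
1000-byte attachment (the function is module-internal, so the shell
transcript below is illustrative only):

    1> parse_ranges([{0, 499}], 1000).
    [{0,499}]        % bounded range, passed through
    2> parse_ranges([{none, 200}], 1000).
    [{800,999}]      % suffix form: the last 200 bytes
    3> parse_ranges([{500, none}], 1000).
    [{500,999}]      % open-ended, clamped to Len - 1
    4> parse_ranges([{0, 4999}], 1000).
    [{0,999}]        % To >= Len is clamped to Len - 1
    5> parse_ranges([{0, none}], 1000).
    undefined        % "bytes=0-" means the whole file; no 206 needed
    6> parse_ranges([{500, 100}], 1000).
    ** exception throw: requested_range_not_satisfiable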

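Similarly, parse_copy_destination_header/1 above splits an optional
?rev= query off the Destination header of a COPY request; the accepted
shapes, with made-up document ids and revisions:

    %% Destination: doc_b            -> {<<"doc_b">>, {0, []}}
    %% Destination: doc_b?rev=2-abcd -> {<<"doc_b">>, {2, [<<"abcd">>]}}
    %% Destination: http://a/b       -> throw({bad_request, ...})
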
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_external.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_external.erl b/src/couch/src/couch_httpd_external.erl
deleted file mode 100644
index 8322dcd..0000000
--- a/src/couch/src/couch_httpd_external.erl
+++ /dev/null
@@ -1,173 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_external).
-
--export([handle_external_req/2, handle_external_req/3]).
--export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
--export([default_or_content_type/2, parse_external_response/1]).
-
--import(couch_httpd,[send_error/4]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% handle_external_req/2
-% for the old type of config usage:
-% _external = {couch_httpd_external, handle_external_req}
-% with urls like
-% /db/_external/action/design/name
-handle_external_req(#httpd{
-                        path_parts=[_DbName, _External, UrlName | _Path]
-                    }=HttpReq, Db) ->
-    process_external_req(HttpReq, Db, UrlName);
-handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
-handle_external_req(Req, _) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-
-% handle_external_req/3
-% for this type of config usage:
-% _action = {couch_httpd_external, handle_external_req, <<"action">>}
-% with urls like
-% /db/_action/design/name
-handle_external_req(HttpReq, Db, Name) ->
-    process_external_req(HttpReq, Db, Name).
-
-process_external_req(HttpReq, Db, Name) ->
-
-    Response = couch_external_manager:execute(binary_to_list(Name),
-        json_req_obj(HttpReq, Db)),
-
-    case Response of
-    {unknown_external_server, Msg} ->
-        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
-    _ ->
-        send_external_response(HttpReq, Response)
-    end.
-
-json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
-               method=Method,
-               requested_path_parts=RequestedPath,
-               path_parts=Path,
-               req_body=ReqBody
-            }, Db, DocId) ->
-    Body = case ReqBody of
-        undefined ->
-            MaxSize = list_to_integer(
-                config:get("couchdb", "max_document_size", "4294967296")),
-            Req:recv_body(MaxSize);
-        Else -> Else
-    end,
-    ParsedForm = case Req:get_primary_header_value("content-type") of
-        "application/x-www-form-urlencoded" ++ _ ->
-            case Body of
-            undefined -> [];
-            _ -> mochiweb_util:parse_qs(Body)
-            end;
-        _ ->
-            []
-    end,
-    Headers = Req:get(headers),
-    Hlist = mochiweb_headers:to_list(Headers),
-    {ok, Info} = couch_db:get_db_info(Db),
-
-    % add headers...
-    {[{<<"info">>, {Info}},
-        {<<"id">>, DocId},
-        {<<"uuid">>, couch_uuids:new()},
-        {<<"method">>, Method},
-        {<<"requested_path">>, RequestedPath},
-        {<<"path">>, Path},
-        {<<"raw_path">>, ?l2b(Req:get(raw_path))},
-        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
-        {<<"headers">>, to_json_terms(Hlist)},
-        {<<"body">>, Body},
-        {<<"peer">>, ?l2b(Req:get(peer))},
-        {<<"form">>, to_json_terms(ParsedForm)},
-        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)},
-        {<<"secObj">>, couch_db:get_security(Db)}]}.
-
-to_json_terms(Data) ->
-    to_json_terms(Data, []).
-
-to_json_terms([], Acc) ->
-    {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
-    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
-    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
-
-json_query_keys({Json}) ->
-    json_query_keys(Json, []).
-json_query_keys([], Acc) ->
-    {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([Term | Rest], Acc) ->
-    json_query_keys(Rest, [Term|Acc]).
-
-send_external_response(#httpd{mochi_req=MochiReq}=Req, Response) ->
-    #extern_resp_args{
-        code = Code,
-        data = Data,
-        ctype = CType,
-        headers = Headers
-    } = parse_external_response(Response),
-    couch_httpd:log_request(Req, Code),
-    Resp = MochiReq:respond({Code,
-        default_or_content_type(CType, Headers ++ couch_httpd:server_header()), Data}),
-    {ok, Resp}.
-
-parse_external_response({Response}) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-            {"", _} ->
-                Args;
-            {<<"code">>, Value} ->
-                Args#extern_resp_args{code=Value};
-            {<<"stop">>, true} ->
-                Args#extern_resp_args{stop=true};
-            {<<"json">>, Value} ->
-                Args#extern_resp_args{
-                    data=?JSON_ENCODE(Value),
-                    ctype="application/json"};
-            {<<"body">>, Value} ->
-                Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
-            {<<"base64">>, Value} ->
-                Args#extern_resp_args{
-                    data=base64:decode(Value),
-                    ctype="application/binary"
-                };
-            {<<"headers">>, {Headers}} ->
-                NewHeaders = lists:map(fun({Header, HVal}) ->
-                    {binary_to_list(Header), binary_to_list(HVal)}
-                end, Headers),
-                Args#extern_resp_args{headers=NewHeaders};
-            _ -> % unknown key
-                Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
-                throw({external_response_error, Msg})
-            end
-        end, #extern_resp_args{}, Response).
-
-default_or_content_type(DefaultContentType, Headers) ->
-    IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
-    case lists:any(IsContentType, Headers) of
-    false ->
-        [{"Content-Type", DefaultContentType} | Headers];
-    true ->
-        Headers
-    end.
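
For reference, parse_external_response/1 above consumes an EJSON object
returned by the external process. A minimal sketch of a valid reply and
the record it produces (field values are made up):

    {[
        {<<"code">>, 200},
        {<<"headers">>, {[{<<"X-Handled-By">>, <<"demo">>}]}},
        {<<"json">>, {[{<<"ok">>, true}]}}
    ]}
    %% -> #extern_resp_args{code = 200,
    %%                      ctype = "application/json",
    %%                      data = <<"{\"ok\":true}">>,
    %%                      headers = [{"X-Handled-By", "demo"}]}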


[18/49] Remove src/couch_replicator

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/06-doc-missing-stubs.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/06-doc-missing-stubs.t b/src/couch_replicator/test/06-doc-missing-stubs.t
deleted file mode 100755
index 116550c..0000000
--- a/src/couch_replicator/test/06-doc-missing-stubs.t
+++ /dev/null
@@ -1,293 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of documents with attachments to a target database
-% whose revs_limit is lower than the number of document updates, so a
-% second replication must succeed even though the target is missing
-% the ancestor revisions needed to match attachment stubs.
-
--record(user_ctx, {
-    name = null,
-    roles = [],
-    handler
-}).
-
--record(doc, {
-    id = <<"">>,
-    revs = {0, []},
-    body = {[]},
-    atts = [],
-    deleted = false,
-    meta = []
-}).
-
--record(att, {
-    name,
-    type,
-    att_len,
-    disk_len,
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity
-}).
-
--define(b2l(B), binary_to_list(B)).
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-target_revs_limit() -> 3.
-
-
-main(_) ->
-    test_util:run(128, fun() -> test() end).
-
-
-% Test motivated by COUCHDB-1365.
-test() ->
-    test_util:start_couch(),
-    ibrowse:start(),
-
-    Pairs = [
-        {source_db_name(), target_db_name()},
-        {{remote, source_db_name()}, target_db_name()},
-        {source_db_name(), {remote, target_db_name()}},
-        {{remote, source_db_name()}, {remote, target_db_name()}}
-    ],
-
-    lists:foreach(
-        fun({Source, Target}) ->
-            {ok, SourceDb} = create_db(source_db_name()),
-            etap:diag("Populating source database"),
-            populate_db(SourceDb),
-            ok = couch_db:close(SourceDb),
-
-            etap:diag("Creating target database"),
-            {ok, TargetDb} = create_db(target_db_name()),
-            ok = couch_db:set_revs_limit(TargetDb, target_revs_limit()),
-            ok = couch_db:close(TargetDb),
-
-            etap:diag("Triggering replication"),
-            replicate(Source, Target),
-            etap:diag("Replication finished, comparing source and target databases"),
-            compare_dbs(SourceDb, TargetDb),
-
-            etap:diag("Updating source database docs"),
-            update_db_docs(couch_db:name(SourceDb), target_revs_limit() + 2),
-
-            etap:diag("Triggering replication again"),
-            replicate(Source, Target),
-            etap:diag("Replication finished, comparing source and target databases"),
-            compare_dbs(SourceDb, TargetDb),
-
-            etap:diag("Deleting databases"),
-            delete_db(TargetDb),
-            delete_db(SourceDb),
-            ok = timer:sleep(1000)
-        end,
-        Pairs),
-
-    test_util:stop_couch().
-
-
-populate_db(Db) ->
-    AttData = crypto:rand_bytes(6000),
-    Doc1 = #doc{
-        id = <<"doc1">>,
-        atts = [
-            #att{
-                name = <<"doc1_att1">>,
-                type = <<"application/foobar">>,
-                att_len = byte_size(AttData),
-                data = AttData
-            }
-        ]
-    },
-    {ok, _} = couch_db:update_doc(Db, Doc1, []).
-
-
-update_db_docs(DbName, Times) ->
-    {ok, Db} = couch_db:open_int(DbName, []),
-    {ok, _, _} = couch_db:enum_docs(
-        Db,
-        fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
-        {DbName, Times},
-        []),
-    ok = couch_db:close(Db).
-
-
-db_fold_fun(FullDocInfo, {DbName, Times}) ->
-    {ok, Db} = couch_db:open_int(DbName, []),
-    {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
-    lists:foldl(
-        fun(_, {Pos, RevId}) ->
-            {ok, Db2} = couch_db:reopen(Db),
-            NewDocVersion = Doc#doc{
-                revs = {Pos, [RevId]},
-                body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
-            },
-            {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
-            NewRev
-        end,
-        {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
-        lists:seq(1, Times)),
-    ok = couch_db:close(Db),
-    {ok, {DbName, Times}}.
-
-
-compare_dbs(Source, Target) ->
-    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
-    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
-
-    Fun = fun(FullDocInfo, _, Acc) ->
-        {ok, DocSource} = couch_db:open_doc(
-            SourceDb, FullDocInfo, [conflicts, deleted_conflicts]),
-        Id = DocSource#doc.id,
-
-        etap:diag("Verifying document " ++ ?b2l(Id)),
-
-        {ok, DocTarget} = couch_db:open_doc(
-            TargetDb, Id, [conflicts, deleted_conflicts]),
-        etap:is(DocTarget#doc.body, DocSource#doc.body,
-            "Same body in source and target databases"),
-
-        etap:is(
-            couch_doc:to_json_obj(DocTarget, []),
-            couch_doc:to_json_obj(DocSource, []),
-            "Same doc body in source and target databases"),
-
-        #doc{atts = SourceAtts} = DocSource,
-        #doc{atts = TargetAtts} = DocTarget,
-        etap:is(
-            lists:sort([N || #att{name = N} <- SourceAtts]),
-            lists:sort([N || #att{name = N} <- TargetAtts]),
-            "Document has same number (and names) of attachments in "
-            "source and target databases"),
-
-        lists:foreach(
-            fun(#att{name = AttName} = Att) ->
-                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
-
-                {ok, AttTarget} = find_att(TargetAtts, AttName),
-                SourceMd5 = att_md5(Att),
-                TargetMd5 = att_md5(AttTarget),
-                case AttName of
-                <<"att1">> ->
-                    etap:is(Att#att.encoding, gzip,
-                        "Attachment is gzip encoded in source database"),
-                    etap:is(AttTarget#att.encoding, gzip,
-                        "Attachment is gzip encoded in target database"),
-                    DecSourceMd5 = att_decoded_md5(Att),
-                    DecTargetMd5 = att_decoded_md5(AttTarget),
-                    etap:is(DecTargetMd5, DecSourceMd5,
-                        "Same identity content in source and target databases");
-                _ ->
-                    etap:is(Att#att.encoding, identity,
-                        "Attachment is not encoded in source database"),
-                    etap:is(AttTarget#att.encoding, identity,
-                        "Attachment is not encoded in target database")
-                end,
-                etap:is(TargetMd5, SourceMd5,
-                    "Same content in source and target databases"),
-                etap:is(is_integer(Att#att.disk_len), true,
-                    "#att.disk_len is an integer in source database"),
-                etap:is(is_integer(Att#att.att_len), true,
-                    "#att.att_len is an integer in source database"),
-                etap:is(is_integer(AttTarget#att.disk_len), true,
-                    "#att.disk_len is an integer in target database"),
-                etap:is(is_integer(AttTarget#att.att_len), true,
-                    "#att.att_len is an integer in target database"),
-                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
-                    "Same identity length in source and target databases"),
-                etap:is(Att#att.att_len, AttTarget#att.att_len,
-                    "Same encoded length in source and target databases"),
-                etap:is(Att#att.type, AttTarget#att.type,
-                    "Same type in source and target databases"),
-                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
-                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
-            end,
-            SourceAtts),
-
-        {ok, Acc}
-    end,
-
-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
-    ok = couch_db:close(SourceDb),
-    ok = couch_db:close(TargetDb).
-
-
-find_att([], _Name) ->
-    nil;
-find_att([#att{name = Name} = Att | _], Name) ->
-    {ok, Att};
-find_att([_ | Rest], Name) ->
-    find_att(Rest, Name).
-
-
-att_md5(Att) ->
-    Md50 = couch_doc:att_foldl(
-        Att,
-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
-        couch_util:md5_init()),
-    couch_util:md5_final(Md50).
-
-att_decoded_md5(Att) ->
-    Md50 = couch_doc:att_foldl_decode(
-        Att,
-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
-        couch_util:md5_init()),
-    couch_util:md5_final(Md50).
-
-
-db_url(DbName) ->
-    iolist_to_binary([
-        "http://", config:get("httpd", "bind_address", "127.0.0.1"),
-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
-        "/", DbName
-    ]).
-
-
-create_db(DbName) ->
-    couch_db:create(
-        DbName,
-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
-    ok = couch_server:delete(
-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
-    replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
-    replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
-    RepObject = {[
-        {<<"source">>, Source},
-        {<<"target">>, Target}
-    ]},
-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
-    {ok, Pid} = couch_replicator:async_replicate(Rep),
-    MonRef = erlang:monitor(process, Pid),
-    receive
-    {'DOWN', MonRef, process, Pid, Reason} ->
-        etap:is(Reason, normal, "Replication finished successfully")
-    after 300000 ->
-        etap:bail("Timeout waiting for replication to finish")
-    end.
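
For the remote/remote pair, replicate/2 together with db_url/1 above
reduces to roughly the following (a sketch; host and port depend on the
local httpd config):

    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
        {[{<<"source">>, <<"http://127.0.0.1:5984/couch_test_rep_db_a">>},
          {<<"target">>, <<"http://127.0.0.1:5984/couch_test_rep_db_b">>}]},
        #user_ctx{roles = [<<"_admin">>]}),
    {ok, Pid} = couch_replicator:async_replicate(Rep).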


[22/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/ddoc_cache


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/572ee3ce
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/572ee3ce
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/572ee3ce

Branch: refs/heads/1843-feature-bigcouch
Commit: 572ee3ce4603be89d8545b50142a183ded001d27
Parents: 550e820
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:40:28 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:40:28 2014 -0600

----------------------------------------------------------------------
 src/ddoc_cache/README.md                 |   4 -
 src/ddoc_cache/src/ddoc_cache.app.src    |  42 ------
 src/ddoc_cache/src/ddoc_cache.erl        |  81 -----------
 src/ddoc_cache/src/ddoc_cache_app.erl    |  25 ----
 src/ddoc_cache/src/ddoc_cache_opener.erl | 196 --------------------------
 src/ddoc_cache/src/ddoc_cache_sup.erl    |  67 ---------
 src/ddoc_cache/src/ddoc_cache_util.erl   |  34 -----
 7 files changed, 449 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/README.md
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/README.md b/src/ddoc_cache/README.md
deleted file mode 100644
index 81d600b..0000000
--- a/src/ddoc_cache/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Design Doc Cache
-================
-
-Pretty much covers it.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache.app.src
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache.app.src b/src/ddoc_cache/src/ddoc_cache.app.src
deleted file mode 100644
index a183dbd..0000000
--- a/src/ddoc_cache/src/ddoc_cache.app.src
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, ddoc_cache, [
-    {description, "Design Document Cache"},
-    {vsn, git},
-    {modules, [
-        ddoc_cache,
-        ddoc_cache_app,
-        ddoc_cache_opener,
-        ddoc_cache_sup,
-        ddoc_cache_util
-    ]},
-    {registered, [
-        ddoc_cache_lru,
-        ddoc_cache_opener
-    ]},
-    {applications, [
-        kernel,
-        stdlib,
-        crypto,
-        ets_lru,
-        mem3,
-        fabric,
-        twig
-    ]},
-    {mod, {ddoc_cache_app, []}},
-    {env, [
-        {max_objects, unlimited},
-        {max_size, 104857600}, % 100M
-        {max_lifetime, 60000} % 1m
-    ]}
-]}.
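
The env defaults above can be overridden per node in the usual OTP way,
e.g. via a sys.config (values are illustrative):

    [{ddoc_cache, [
        {max_objects, 5000},
        {max_size, 52428800},   % 50 MiB
        {max_lifetime, 30000}   % 30 seconds
    ]}].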

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache.erl
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
deleted file mode 100644
index e862667..0000000
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache).
-
-
--export([
-    start/0,
-    stop/0,
-    
-    open/2,
-    evict/2
-]).
-
-
--define(CACHE, ddoc_cache_lru).
--define(OPENER, ddoc_cache_opener).
-
-
-start() ->
-    application:start(ddoc_cache).
-
-
-stop() ->
-    application:stop(ddoc_cache).
-
-
-open(DbName, validation_funs) ->
-    open({DbName, validation_funs});
-open(DbName, <<"_design/", _/binary>>=DDocId) when is_binary(DbName) ->
-    open({DbName, DDocId});
-open(DbName, DDocId) when is_binary(DDocId) ->
-    open({DbName, <<"_design/", DDocId/binary>>}).
-
-
-open(Key) ->
-    try ets_lru:lookup_d(?CACHE, Key) of
-        {ok, _} = Resp ->
-            Resp;
-        _ ->
-            case gen_server:call(?OPENER, {open, Key}, infinity) of
-                {open_ok, Resp} ->
-                    Resp;
-                {open_error, throw, Error} ->
-                    throw(Error);
-                {open_error, error, Error} ->
-                    erlang:error(Error);
-                {open_error, exit, Error} ->
-                    exit(Error)
-            end
-    catch
-        error:badarg ->
-            recover(Key)
-    end.
-
-
-evict(ShardDbName, DDocIds) ->
-    DbName = mem3:dbname(ShardDbName),
-    gen_server:cast(?OPENER, {evict, DbName, DDocIds}).
-
-
-recover({DbName, validation_funs}) ->
-    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    {ok, Funs};
-recover({DbName, DDocId}) ->
-    fabric:open_doc(DbName, DDocId, [ejson_body]).
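
Typical calls into the API removed above (database and design document
names are made up; note that evict/2 takes a shard name and maps it
back through mem3:dbname/1):

    {ok, DDoc} = ddoc_cache:open(<<"mydb">>, <<"_design/widgets">>),
    {ok, DDoc} = ddoc_cache:open(<<"mydb">>, <<"widgets">>), % same cache key
    {ok, Funs} = ddoc_cache:open(<<"mydb">>, validation_funs),
    ok = ddoc_cache:evict(<<"shards/00000000-1fffffff/mydb.1391000000">>,
                          [<<"_design/widgets">>]).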

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache_app.erl
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache_app.erl b/src/ddoc_cache/src/ddoc_cache_app.erl
deleted file mode 100644
index 5afa7ac..0000000
--- a/src/ddoc_cache/src/ddoc_cache_app.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_app).
--behaviour(application).
-
-
--export([start/2, stop/1]).
-
-
-start(_StartType, _StartArgs) ->
-    ddoc_cache_sup:start_link().
-
-
-stop(_State) ->
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache_opener.erl
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
deleted file mode 100644
index 68b9127..0000000
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ /dev/null
@@ -1,196 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_opener).
--behaviour(gen_server).
-
-
--include_lib("mem3/include/mem3.hrl").
-
-
--export([
-    start_link/0
-]).
-
--export([
-    open_ddoc/1
-]).
-
--export([
-    init/1,
-    terminate/2,
-
-    handle_call/3,
-    handle_cast/2,
-    handle_info/2,
-
-    code_change/3
-]).
-
--export([
-    evictor/1
-]).
-
-
--define(OPENING, ddoc_cache_opening).
-
-
--record(opener, {
-    key,
-    pid,
-    clients
-}).
-
--record(st, {
-    db_ddocs,
-    evictor
-}).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
-    process_flag(trap_exit, true),
-    ets:new(?OPENING, [set, protected, named_table, {keypos, #opener.key}]),
-    {ok, Evictor} = couch_db_update_notifier:start_link(fun ?MODULE:evictor/1),
-    {ok, #st{
-        evictor = Evictor
-    }}.
-
-
-terminate(_Reason, St) ->
-    case is_pid(St#st.evictor) of
-        true -> exit(St#st.evictor, kill);
-        false -> ok
-    end,
-    ok.
-
-
-handle_call({open, {_DbName, _DDocId}=Key}, From, St) ->
-    case ets:lookup(?OPENING, Key) of
-        [#opener{clients=Clients}=O] ->
-            ets:insert(?OPENING, O#opener{clients=[From | Clients]}),
-            {noreply, St};
-        [] ->
-            Pid = spawn_link(?MODULE, open_ddoc, [Key]),
-            ets:insert(?OPENING, #opener{key=Key, pid=Pid, clients=[From]}),
-            {noreply, St}
-    end;
-
-handle_call(Msg, _From, St) ->
-    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_cast({evict, DbName}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
-    {noreply, St};
-
-handle_cast({evict, DbName, DDocIds}, St) ->
-    gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName, DDocIds}),
-    {noreply, St};
-
-handle_cast({do_evict, DbName}, St) ->
-    % Bit of a hack to introspect the ets_lru ETS tables directly,
-    % but I think this is better than having to manage our own
-    % DbName -> DDocIdList table
-    DDocIds = ets:foldl(fun(Obj, Acc) ->
-        entry = element(1, Obj), % assert this is an entry record
-        {EntryDbName, EntryDDocId} = element(2, Obj),
-        case EntryDbName == DbName of
-            true -> [EntryDDocId | Acc];
-            false -> Acc
-        end
-    end, [], ddoc_cache_lru_objects),
-    handle_cast({do_evict, DbName, DDocIds}, St);
-
-handle_cast({do_evict, DbName, DDocIds}, St) ->
-    ets_lru:remove(ddoc_cache_lru, {DbName, validation_funs}),
-    lists:foreach(fun(DDocId) ->
-        ets_lru:remove(ddoc_cache_lru, {DbName, DDocId})
-    end, DDocIds),
-    {noreply, St};
-
-handle_cast(Msg, St) ->
-    {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info({'EXIT', Pid, Reason}, #st{evictor=Pid}=St) ->
-    twig:log(err, "ddoc_cache_opener evictor died ~w", [Reason]),
-    {ok, Evictor} = couch_db_update_notifier:start_link(fun ?MODULE:evictor/1),
-    {noreply, St#st{evictor=Evictor}};
-
-handle_info({'EXIT', _Pid, {open_ok, Key, Resp}}, St) ->
-    respond(Key, {open_ok, Resp}),
-    {noreply, St};
-
-handle_info({'EXIT', _Pid, {open_error, Key, Type, Error}}, St) ->
-    respond(Key, {open_error, Type, Error}),
-    {noreply, St};
-
-handle_info({'EXIT', Pid, Reason}, St) ->
-    Pattern = #opener{pid=Pid, _='_'},
-    case ets:match_object(?OPENING, Pattern) of
-        [#opener{key=Key, clients=Clients}] ->
-            [gen_server:reply(C, {error, Reason}) || C <- Clients],
-            ets:delete(?OPENING, Key),
-            {noreply, St};
-        [] ->
-            {stop, {unknown_pid_died, {Pid, Reason}}, St}
-    end;
-
-handle_info(Msg, St) ->
-    {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-evictor({created, ShardDbName}) ->
-    DbName = mem3:dbname(ShardDbName),
-    gen_server:cast(?MODULE, {evict, DbName});
-evictor({deleted, ShardDbName}) ->
-    DbName = mem3:dbname(ShardDbName),
-    gen_server:cast(?MODULE, {evict, DbName});
-evictor(_) ->
-    ok.
-
-
-open_ddoc({DbName, validation_funs}=Key) ->
-    {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    ok = ets_lru:insert(ddoc_cache_lru, {DbName, validation_funs}, Funs),
-    exit({open_ok, Key, {ok, Funs}});
-open_ddoc({DbName, DDocId}=Key) ->
-    try fabric:open_doc(DbName, DDocId, [ejson_body]) of
-        {ok, Doc} ->
-            ok = ets_lru:insert(ddoc_cache_lru, {DbName, DDocId}, Doc),
-            exit({open_ok, Key, {ok, Doc}});
-        Else ->
-            exit({open_ok, Key, Else})
-    catch
-        Type:Reason ->
-            exit({open_error, Key, Type, Reason})
-    end.
-
-
-respond(Key, Resp) ->
-    [#opener{clients=Clients}] = ets:lookup(?OPENING, Key),
-    [gen_server:reply(C, Resp) || C <- Clients],
-    ets:delete(?OPENING, Key).
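
One property worth noting in the opener above: concurrent callers for
the same key are appended to the #opener.clients list, so N
simultaneous cache misses produce a single fabric fetch. A sketch of
the effect (the key is made up):

    Key = {<<"mydb">>, <<"_design/widgets">>},
    Parent = self(),
    [spawn(fun() ->
        Parent ! gen_server:call(ddoc_cache_opener, {open, Key}, infinity)
    end) || _ <- lists:seq(1, 10)],
    %% all ten processes receive the same {open_ok, {ok, DDoc}} reply,
    %% delivered by respond/2 when the single opener pid exits.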

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache_sup.erl
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
deleted file mode 100644
index 5ffd7e6..0000000
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ /dev/null
@@ -1,67 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_sup).
--behaviour(supervisor).
-
-
--export([
-    start_link/0,
-    init/1
-]).
-
-
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-init([]) ->
-    Children = [
-        {
-            ddoc_cache_lru,
-            {ets_lru, start_link, [ddoc_cache_lru, lru_opts()]},
-            permanent,
-            5000,
-            worker,
-            [ets_lru]
-        },
-        {
-            ddoc_cache_opener,
-            {ddoc_cache_opener, start_link, []},
-            permanent,
-            5000,
-            worker,
-            [ddoc_cache_opener]
-        }
-    ],
-    {ok, {{one_for_one, 5, 10}, Children}}.
-
-
-lru_opts() ->
-    case application:get_env(ddoc_cache, max_objects) of
-        {ok, MxObjs} when is_integer(MxObjs), MxObjs > 0 ->
-            [{max_objects, MxObjs}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_size) of
-        {ok, MxSize} when is_integer(MxSize), MxSize > 0 ->
-            [{max_size, MxSize}];
-        _ ->
-            []
-    end ++
-    case application:get_env(ddoc_cache, max_lifetime) of
-        {ok, MxLT} when is_integer(MxLT), MxLT > 0 ->
-            [{max_lifetime, MxLT}];
-        _ ->
-            []
-    end.
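
With the defaults from ddoc_cache.app.src, lru_opts/0 above evaluates
to:

    [{max_size, 104857600}, {max_lifetime, 60000}]
    %% max_objects defaults to the atom 'unlimited', so its
    %% is_integer/1 guard fails and that option is simply omitted.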

http://git-wip-us.apache.org/repos/asf/couchdb/blob/572ee3ce/src/ddoc_cache/src/ddoc_cache_util.erl
----------------------------------------------------------------------
diff --git a/src/ddoc_cache/src/ddoc_cache_util.erl b/src/ddoc_cache/src/ddoc_cache_util.erl
deleted file mode 100644
index fb3c0b9..0000000
--- a/src/ddoc_cache/src/ddoc_cache_util.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ddoc_cache_util).
-
-
--export([
-    new_uuid/0
-]).
-
-
-new_uuid() ->
-    to_hex(crypto:rand_bytes(16), []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
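
By example, new_uuid/0 above hex-encodes 16 random bytes with uppercase
digits (output is random; the value shown is illustrative):

    1> ddoc_cache_util:new_uuid().
    <<"3F2A9C014B7E8D10A55E60C2D4F19B77">>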


[03/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/chttpd


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/e41cfa40
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/e41cfa40
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/e41cfa40

Branch: refs/heads/1843-feature-bigcouch
Commit: e41cfa40ea519312f677b7c4ca0905c4f106636b
Parents: cd36fd1
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:37:21 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:39:02 2014 -0600

----------------------------------------------------------------------
 src/chttpd/src/chttpd.app.src                   |   41 -
 src/chttpd/src/chttpd.erl                       |  848 ------------
 src/chttpd/src/chttpd_app.erl                   |   21 -
 src/chttpd/src/chttpd_config_listener.erl       |   80 --
 src/chttpd/src/chttpd_db.erl                    | 1280 ------------------
 src/chttpd/src/chttpd_external.erl              |  177 ---
 src/chttpd/src/chttpd_misc.erl                  |  312 -----
 src/chttpd/src/chttpd_rewrite.erl               |  456 -------
 src/chttpd/src/chttpd_show.erl                  |  322 -----
 src/chttpd/src/chttpd_sup.erl                   |   29 -
 src/chttpd/src/chttpd_view.erl                  |  405 ------
 .../test/chttpd_delayed_response_test.erl       |   41 -
 src/chttpd/test/mock_request.erl                |   37 -
 13 files changed, 4049 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd.app.src
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd.app.src b/src/chttpd/src/chttpd.app.src
deleted file mode 100644
index 9ab91c8..0000000
--- a/src/chttpd/src/chttpd.app.src
+++ /dev/null
@@ -1,41 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
- {application, chttpd, [
-    {description, "HTTP interface for CouchDB cluster"},
-    {vsn, git},
-    {modules, [
-        chttpd,
-        chttpd_app,
-        chttpd_config_listener,
-        chttpd_db,
-        chttpd_external,
-        chttpd_misc,
-        chttpd_rewrite,
-        chttpd_show,
-        chttpd_sup,
-        chttpd_view
-    ]},
-    {registered, [
-        chttpd_sup,
-        chttpd
-    ]},
-    {applications, [
-        kernel,
-        stdlib,
-        twig,
-        config,
-        couch,
-        fabric
-    ]},
-    {mod, {chttpd_app,[]}}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
deleted file mode 100644
index b7f8013..0000000
--- a/src/chttpd/src/chttpd.erl
+++ /dev/null
@@ -1,848 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/0, start_link/1, start_link/2,
-    stop/0, handle_request/1,
-    primary_header_value/2, header_value/2, header_value/3, qs_value/2,
-    qs_value/3, qs/1, qs_json_value/3, path/1, absolute_uri/2, body_length/1,
-    verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4,
-    error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
-    doc_etag/1, make_etag/1, etag_respond/3, partition/1, serve_file/3,
-    server_header/0, start_chunked_response/3,send_chunk/2,
-    start_response_length/4, send/2, start_json_response/2,
-    start_json_response/3, end_json_response/1, send_response/4,
-    send_method_not_allowed/2, send_error/2, send_error/4, send_redirect/2,
-    send_chunked_error/2, send_json/2,send_json/3,send_json/4]).
-
--export([start_delayed_json_response/2, start_delayed_json_response/3,
-    start_delayed_json_response/4,
-    start_delayed_chunked_response/3, start_delayed_chunked_response/4,
-    send_delayed_chunk/2, send_delayed_last_chunk/1,
-    send_delayed_error/2, end_delayed_json_response/1,
-    get_delayed_req/1]).
-
--record(delayed_resp, {
-    start_fun,
-    req,
-    code,
-    headers,
-    first_chunk,
-    resp=nil
-}).
-
-start_link() ->
-    start_link(http).
-start_link(http) ->
-    Port = config:get("chttpd", "port", "5984"),
-    start_link(?MODULE, [{port, Port}]);
-
-start_link(https) ->
-    Port = config:get("chttps", "port", "6984"),
-    CertFile = config:get("chttps", "cert_file", nil),
-    KeyFile = config:get("chttps", "key_file", nil),
-    Options = case CertFile /= nil andalso KeyFile /= nil of
-        true ->
-            SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
-
-            %% set password if one is needed for the cert
-            SslOpts1 = case config:get("chttps", "password", nil) of
-                nil -> SslOpts;
-                Password ->
-                    SslOpts ++ [{password, Password}]
-            end,
-            % do we verify certificates ?
-            FinalSslOpts = case config:get("chttps",
-                    "verify_ssl_certificates", "false") of
-                "false" -> SslOpts1;
-                "true" ->
-                    case config:get("chttps",
-                            "cacert_file", nil) of
-                        nil ->
-                            io:format("SSL certificate verification is "
-                                ++"enabled but the file containing "
-                                ++"PEM encoded CA certificates is "
-                                ++"missing", []),
-                            throw({error, missing_cacerts});
-                        CaCertFile ->
-                            Depth = list_to_integer(config:get("chttps",
-                                    "ssl_certificate_max_depth",
-                                    "1")),
-                            FinalOpts = [
-                                {cacertfile, CaCertFile},
-                                {depth, Depth},
-                                {verify, verify_peer}],
-                            % allows custom verify fun.
-                            case config:get("chttps",
-                                    "verify_fun", nil) of
-                                nil -> FinalOpts;
-                                SpecStr ->
-                                    FinalOpts
-                                    ++ [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
-                            end
-                    end
-            end,
-
-            [{port, Port},
-                {ssl, true},
-                {ssl_opts, FinalSslOpts}];
-        false ->
-            io:format("SSL enabled but PEM certificates are missing.", []),
-            throw({error, missing_certs})
-    end,
-    start_link(https, Options).
-
-start_link(Name, Options) ->
-    Options1 = Options ++ [
-        {loop, fun ?MODULE:handle_request/1},
-        {name, Name},
-        {ip, config:get("chttpd", "bind_address", any)}
-    ],
-    ServerOptsCfg = config:get("chttpd", "server_options", "[]"),
-    {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
-    Options2 = lists:keymerge(1, lists:sort(Options1), lists:sort(ServerOpts)),
-    case mochiweb_http:start(Options2) of
-    {ok, Pid} ->
-        {ok, Pid};
-    {error, Reason} ->
-        io:format("Failure to start Mochiweb: ~s~n", [Reason]),
-        {error, Reason}
-    end.
-
-stop() ->
-    catch mochiweb_http:stop(https),
-    mochiweb_http:stop(?MODULE).
-
-handle_request(MochiReq) ->
-    Begin = os:timestamp(),
-
-    case config:get("chttpd", "socket_options") of
-    undefined ->
-        ok;
-    SocketOptsCfg ->
-        {ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
-        ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts)
-    end,
-
-    AuthenticationFuns = [
-        fun couch_httpd_auth:cookie_authentication_handler/1,
-        fun couch_httpd_auth:default_authentication_handler/1
-    ],
-
-    % for the path, use the raw path with the query string and fragment
-    % removed, but URL quoting left intact
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-    {HandlerKey, _, _} = mochiweb_util:partition(Path, "/"),
-
-    Peer = MochiReq:get(peer),
-    LogForClosedSocket = io_lib:format("mochiweb_recv_error for ~s - ~p ~s", [
-        Peer,
-        MochiReq:get(method),
-        RawUri
-    ]),
-
-    Method1 =
-    case MochiReq:get(method) of
-        % already an atom
-        Meth when is_atom(Meth) -> Meth;
-
-        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.), so convert
-        % when possible (the atom exists only if some module already references it).
-        Meth -> couch_util:to_existing_atom(Meth)
-    end,
-    increment_method_stats(Method1),
-
-    % Allow broken HTTP clients to fake a full method vocabulary with an
-    % X-HTTP-Method-Override header.
-    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
-    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"]) of
-    true ->
-        twig:log(notice, "MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
-        case Method1 of
-        'POST' -> couch_util:to_existing_atom(MethodOverride);
-        _ ->
-            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
-            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
-            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
-            Method1
-        end;
-    _ -> Method1
-    end,
-
-    % alias HEAD to GET as mochiweb takes care of stripping the body
-    Method = case Method2 of
-        'HEAD' -> 'GET';
-        Other -> Other
-    end,
-
-    HttpReq = #httpd{
-        mochi_req = MochiReq,
-        method = Method,
-        path_parts = [list_to_binary(chttpd:unquote(Part))
-                || Part <- string:tokens(Path, "/")],
-        db_url_handlers = db_url_handlers(),
-        design_url_handlers = design_url_handlers()
-    },
-
-    % Put a small token in the process dictionary to tie requests to
-    % backend calls.
-    erlang:put(nonce, couch_util:to_hex(crypto:rand_bytes(4))),
-
-    Result =
-    try
-        case authenticate_request(HttpReq, AuthenticationFuns) of
-        #httpd{} = Req ->
-            HandlerFun = url_handler(HandlerKey),
-            HandlerFun(possibly_hack(Req));
-        Response ->
-            Response
-        end
-    catch
-        throw:{http_head_abort, Resp0} ->
-            {ok, Resp0};
-        throw:{http_abort, Resp0, Reason0} ->
-            {aborted, Resp0, Reason0};
-        throw:{invalid_json, _} ->
-            send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"});
-        exit:{mochiweb_recv_error, E} ->
-            twig:log(notice, LogForClosedSocket ++ " - ~p", [E]),
-            exit(normal);
-        throw:Error ->
-            send_error(HttpReq, Error);
-        error:database_does_not_exist ->
-            send_error(HttpReq, database_does_not_exist);
-        Tag:Error ->
-            Stack = erlang:get_stacktrace(),
-            % TODO improve logging and metrics collection for client disconnects
-            case {Tag, Error, Stack} of
-                {exit, normal, [{mochiweb_request, send, _, _} | _]} ->
-                    exit(normal); % Client disconnect (R15+)
-                {exit, normal, [{mochiweb_request, send, _} | _]} ->
-                    exit(normal); % Client disconnect (R14)
-                _Else ->
-                    JsonStack = json_stack({Error, nil, Stack}),
-                    twig:log(error, "req_err ~p:~p ~p", [Tag, Error, JsonStack]),
-                    send_error(HttpReq, {Error, nil, Stack})
-            end
-    end,
-
-    RequestTime = timer:now_diff(os:timestamp(), Begin)/1000,
-    {Status, Code} = case Result of
-    {ok, #delayed_resp{resp=Resp}} ->
-        {ok, Resp:get(code)};
-    {ok, Resp} ->
-        {ok, Resp:get(code)};
-    {aborted, Resp, _} ->
-        {aborted, Resp:get(code)}
-    end,
-    Host = MochiReq:get_header_value("Host"),
-    twig:log(notice, "~s ~s ~s ~s ~B ~p ~B", [Peer, Host,
-        atom_to_list(Method1), RawUri, Code, Status, round(RequestTime)]),
-    couch_stats_collector:record({couchdb, request_time}, RequestTime),
-    case Result of
-    {ok, _} ->
-        couch_stats_collector:increment({httpd, requests}),
-        {ok, Resp};
-    {aborted, _, Reason} ->
-        couch_stats_collector:increment({httpd, aborted_requests}),
-        twig:log(error, "Response abnormally terminated: ~p", [Reason]),
-        exit(normal)
-    end.
-
-%% HACK: replication currently handles two forms of input, #db{} style
-%% and #http_db{} style. We need a third that makes use of fabric. #db{}
-%% works fine for replicating the dbs and nodes databases because they
-%% aren't sharded. So for now, when a local db is specified as the source
-%% or the target, it is rewritten into a full url and treated as remote.
-possibly_hack(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
-    {Props0} = chttpd:json_body_obj(Req),
-    Props1 = fix_uri(Req, Props0, <<"source">>),
-    Props2 = fix_uri(Req, Props1, <<"target">>),
-    put(post_body, {Props2}),
-    Req;
-possibly_hack(Req) ->
-    Req.
-
-fix_uri(Req, Props, Type) ->
-    case is_http(replication_uri(Type, Props)) of
-    true ->
-        Props;
-    false ->
-        Uri = make_uri(Req, replication_uri(Type, Props)),
-        [{Type, Uri} | proplists:delete(Type, Props)]
-    end.
-
-replication_uri(Type, PostProps) ->
-    case couch_util:get_value(Type, PostProps) of
-    {Props} ->
-        couch_util:get_value(<<"url">>, Props);
-    Else ->
-        Else
-    end.
-
-is_http(<<"http://", _/binary>>) ->
-    true;
-is_http(<<"https://", _/binary>>) ->
-    true;
-is_http(_) ->
-    false.
-
-make_uri(Req, Raw) ->
-    Url = list_to_binary(["http://", config:get("httpd", "bind_address"),
-                         ":", config:get("chttpd", "port"), "/", Raw]),
-    Headers = [
-        {<<"authorization">>, ?l2b(header_value(Req,"authorization",""))},
-        {<<"cookie">>, ?l2b(header_value(Req,"cookie",""))}
-    ],
-    {[{<<"url">>,Url}, {<<"headers">>,{Headers}}]}.
-%%% end hack
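
For reference, a sketch of what this hack does to a _replicate body. The db
name, host and port are illustrative, assuming a bind_address of 127.0.0.1
and a chttpd port of 5984:

    %% In:  {[{<<"source">>, <<"mydb">>},
    %%        {<<"target">>, <<"http://example.org/db">>}]}
    %% Out: {[{<<"source">>, {[{<<"url">>, <<"http://127.0.0.1:5984/mydb">>},
    %%                         {<<"headers">>, {[...]}}]}},
    %%       {<<"target">>, <<"http://example.org/db">>}]}
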
-
-
-% Try authentication handlers in order until one returns a result
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthFuns) ->
-    Req;
-authenticate_request(#httpd{} = Req, [AuthFun|Rest]) ->
-    authenticate_request(AuthFun(Req), Rest);
-authenticate_request(#httpd{} = Req, []) ->
-    case config:get("chttpd", "require_valid_user", "false") of
-    "true" ->
-        throw({unauthorized, <<"Authentication required.">>});
-    "false" ->
-        case config:get("admins") of
-        [] ->
-            Ctx = #user_ctx{roles=[<<"_reader">>, <<"_writer">>, <<"_admin">>]},
-            Req#httpd{user_ctx = Ctx};
-        _ ->
-            Req#httpd{user_ctx=#user_ctx{}}
-        end
-    end;
-authenticate_request(Response, _AuthFuns) ->
-    Response.
-
-increment_method_stats(Method) ->
-    couch_stats_collector:increment({httpd_request_methods, Method}).
-
-url_handler("") ->              fun chttpd_misc:handle_welcome_req/1;
-url_handler("favicon.ico") ->   fun chttpd_misc:handle_favicon_req/1;
-url_handler("_utils") ->        fun chttpd_misc:handle_utils_dir_req/1;
-url_handler("_all_dbs") ->      fun chttpd_misc:handle_all_dbs_req/1;
-url_handler("_active_tasks") -> fun chttpd_misc:handle_task_status_req/1;
-url_handler("_config") ->       fun chttpd_misc:handle_config_req/1;
-url_handler("_replicate") ->    fun chttpd_misc:handle_replicate_req/1;
-url_handler("_uuids") ->        fun chttpd_misc:handle_uuids_req/1;
-url_handler("_log") ->          fun chttpd_misc:handle_log_req/1;
-url_handler("_sleep") ->        fun chttpd_misc:handle_sleep_req/1;
-url_handler("_session") ->      fun couch_httpd_auth:handle_session_req/1;
-url_handler("_oauth") ->        fun couch_httpd_oauth:handle_oauth_req/1;
-url_handler("_up") ->           fun chttpd_misc:handle_up_req/1;
-url_handler("_membership") ->   fun mem3_httpd:handle_membership_req/1;
-url_handler(_) ->               fun chttpd_db:handle_request/1.
-
-db_url_handlers() ->
-    [
-        {<<"_view_cleanup">>,   fun chttpd_db:handle_view_cleanup_req/2},
-        {<<"_compact">>,        fun chttpd_db:handle_compact_req/2},
-        {<<"_design">>,         fun chttpd_db:handle_design_req/2},
-        {<<"_temp_view">>,      fun chttpd_view:handle_temp_view_req/2},
-        {<<"_changes">>,        fun chttpd_db:handle_changes_req/2}
-    ].
-
-design_url_handlers() ->
-    [
-        {<<"_view">>,           fun chttpd_view:handle_view_req/3},
-        {<<"_show">>,           fun chttpd_show:handle_doc_show_req/3},
-        {<<"_list">>,           fun chttpd_show:handle_view_list_req/3},
-        {<<"_update">>,         fun chttpd_show:handle_doc_update_req/3},
-        {<<"_info">>,           fun chttpd_db:handle_design_info_req/3},
-        {<<"_rewrite">>,        fun chttpd_rewrite:handle_rewrite_req/3}
-    ].
-
-% Utilities
-
-partition(Path) ->
-    mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
-    case MochiReq:get_header_value(Key) of
-    undefined -> Default;
-    Value -> Value
-    end.
-
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_primary_header_value(Key).
-
-serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot) ->
-    {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
-        server_header() ++ couch_httpd_auth:cookie_auth_header(Req, []))}.
-
-qs_value(Req, Key) ->
-    qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
-    couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
-    case qs_value(Req, Key, Default) of
-        Default ->
-            Default;
-        Result ->
-            ?JSON_DECODE(Result)
-    end.
-
-qs(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:parse_qs().
-
-path(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(path).
-
-absolute_uri(#httpd{mochi_req=MochiReq}, Path) ->
-    XHost = config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
-    Host = case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined ->
-                    {ok, {Address, Port}} = inet:sockname(MochiReq:get(socket)),
-                    inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
-                Value1 ->
-                    Value1
-            end;
-        Value -> Value
-    end,
-    XSsl = config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
-    Scheme = case MochiReq:get_header_value(XSsl) of
-        "on" -> "https";
-        _ ->
-            XProto = config:get("httpd", "x_forwarded_proto",
-                "X-Forwarded-Proto"),
-            case MochiReq:get_header_value(XProto) of
-                % Restrict to "https" and "http" schemes only
-                "https" -> "https";
-                _ ->
-                    case MochiReq:get(scheme) of
-                        https ->
-                            "https";
-                        http ->
-                            "http"
-                    end
-            end
-    end,
-    Scheme ++ "://" ++ Host ++ Path.
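
For illustration, with the default header names the proxy headers take
precedence over socket-level information:

    %% X-Forwarded-Host: db.example.org  ->  "http://db.example.org" ++ Path
    %% X-Forwarded-Ssl: on               ->  scheme becomes "https"
    %% no proxy headers                  ->  scheme and host from the request
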
-
-unquote(UrlEncodedString) ->
-    mochiweb_util:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
-    mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req=MochiReq}) ->
-    mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req=MochiReq}, Len) ->
-    MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
-    % ChunkFun is called once per chunk as ChunkFun({Length, Binary}, State),
-    % and once more with Length == 0 for the final chunk.
-    MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-
-body_length(Req) ->
-    case header_value(Req, "Transfer-Encoding") of
-        undefined ->
-            case header_value(Req, "Content-Length") of
-                undefined -> undefined;
-                Length -> list_to_integer(Length)
-            end;
-        "chunked" -> chunked;
-        Unknown -> {unknown_transfer_encoding, Unknown}
-    end.
-
-body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
-    case ReqBody of
-        undefined ->
-            % Maximum size of document PUT request body (4GB)
-            MaxSize = list_to_integer(
-                config:get("couchdb", "max_document_size", "4294967296")),
-            Begin = os:timestamp(),
-            try
-                MochiReq:recv_body(MaxSize)
-            after
-                T = timer:now_diff(os:timestamp(), Begin) div 1000,
-                put(body_time, T)
-            end;
-        _Else ->
-            ReqBody
-    end.
-
-json_body(Httpd) ->
-    case body(Httpd) of
-        undefined ->
-            throw({bad_request, "Missing request body"});
-        Body ->
-            ?JSON_DECODE(Body)
-    end.
-
-json_body_obj(Httpd) ->
-    case json_body(Httpd) of
-        {Props} -> {Props};
-        _Else ->
-            throw({bad_request, "Request body must be a JSON object"})
-    end.
-
-
-doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
-    "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
-
-make_etag(Term) ->
-    <<SigInt:128/integer>> = erlang:md5(term_to_binary(Term)),
-    list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
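
A quick sanity check of the etag helper in a shell (an illustrative
session; the actual digest is elided):

    1> E = chttpd:make_etag({1, <<"body">>}).
    <<"\"...\"">>
    2> E =:= chttpd:make_etag({1, <<"body">>}).
    true
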
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
-    etag_match(Req, binary_to_list(CurrentEtag));
-
-etag_match(Req, CurrentEtag) ->
-    EtagsToMatch = string:tokens(
-        chttpd:header_value(Req, "If-None-Match", ""), ", "),
-    lists:member(CurrentEtag, EtagsToMatch).
-
-etag_respond(Req, CurrentEtag, RespFun) ->
-    case etag_match(Req, CurrentEtag) of
-    true ->
-        % The client already has this in its cache.
-        chttpd:send_response(Req, 304, [{"Etag", CurrentEtag}], <<>>);
-    false ->
-        % Run the function.
-        RespFun()
-    end.
-
-verify_is_server_admin(#httpd{user_ctx=#user_ctx{roles=Roles}}) ->
-    case lists:member(<<"_admin">>, Roles) of
-    true -> ok;
-    false -> throw({unauthorized, <<"You are not a server admin.">>})
-    end.
-
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Resp = MochiReq:start_response_length({Code, Headers ++ server_header() ++
-        couch_httpd_auth:cookie_auth_header(Req, Headers), Length}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-send(Resp, Data) ->
-    Resp:send(Data),
-    {ok, Resp}.
-
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Resp = MochiReq:respond({Code, Headers ++ server_header() ++
-        couch_httpd_auth:cookie_auth_header(Req, Headers), chunked}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-send_chunk(Resp, Data) ->
-    Resp:write_chunk(Data),
-    {ok, Resp}.
-
-send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    {ok, MochiReq:respond({Code, Headers ++ server_header() ++
-        couch_httpd_auth:cookie_auth_header(Req, Headers), Body})}.
-
-send_method_not_allowed(Req, Methods) ->
-    send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>,
-        ?l2b("Only " ++ Methods ++ " allowed"), []).
-
-send_json(Req, Value) ->
-    send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
-    send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
-    couch_httpd:send_json(Req, Code, [timing(), reqid() | Headers], Value).
-
-start_json_response(Req, Code) ->
-    start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
-    couch_httpd:start_json_response(Req, Code, [timing(), reqid() | Headers]).
-
-end_json_response(Resp) ->
-    couch_httpd:end_json_response(Resp).
-
-start_delayed_json_response(Req, Code) ->
-    start_delayed_json_response(Req, Code, []).
-
-start_delayed_json_response(Req, Code, Headers) ->
-    start_delayed_json_response(Req, Code, Headers, "").
-
-start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
-    {ok, #delayed_resp{
-        start_fun = fun start_json_response/3,
-        req = Req,
-        code = Code,
-        headers = Headers,
-        first_chunk = FirstChunk}}.
-
-start_delayed_chunked_response(Req, Code, Headers) ->
-    start_delayed_chunked_response(Req, Code, Headers, "").
-
-start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
-    {ok, #delayed_resp{
-        start_fun = fun start_chunked_response/3,
-        req = Req,
-        code = Code,
-        headers = Headers,
-        first_chunk = FirstChunk}}.
-
-send_delayed_chunk(#delayed_resp{}=DelayedResp, Chunk) ->
-    {ok, #delayed_resp{resp=Resp}=DelayedResp1} =
-        start_delayed_response(DelayedResp),
-    {ok, Resp} = send_chunk(Resp, Chunk),
-    {ok, DelayedResp1}.
-
-send_delayed_last_chunk(DelayedResp) ->
-    send_delayed_chunk(DelayedResp, []).
-
-send_delayed_error(#delayed_resp{req=Req,resp=nil}, Reason) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Reason),
-    send_error(Req, Code, ErrorStr, ReasonStr);
-send_delayed_error(#delayed_resp{resp=Resp}, Reason) ->
-    throw({http_abort, Resp, Reason}).
-
-end_delayed_json_response(#delayed_resp{}=DelayedResp) ->
-    {ok, #delayed_resp{resp=Resp}} =
-        start_delayed_response(DelayedResp),
-    end_json_response(Resp).
-
-get_delayed_req(#delayed_resp{req=#httpd{mochi_req=MochiReq}}) ->
-    MochiReq;
-get_delayed_req(Resp) ->
-    Resp:get(request).
-
-start_delayed_response(#delayed_resp{resp=nil}=DelayedResp) ->
-    #delayed_resp{
-        start_fun=StartFun,
-        req=Req,
-        code=Code,
-        headers=Headers,
-        first_chunk=FirstChunk
-    }=DelayedResp,
-    {ok, Resp} = StartFun(Req, Code, Headers),
-    case FirstChunk of
-        "" -> ok;
-        _ -> {ok, Resp} = send_chunk(Resp, FirstChunk)
-    end,
-    {ok, DelayedResp#delayed_resp{resp=Resp}};
-start_delayed_response(#delayed_resp{}=DelayedResp) ->
-    {ok, DelayedResp}.
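
Taken together, the delayed-response helpers are used along these lines (a
sketch): nothing is written to the socket until the first chunk goes out,
so send_delayed_error/2 can still replace the whole response with a proper
error code.

    {ok, D0} = start_delayed_json_response(Req, 200),
    {ok, D1} = send_delayed_chunk(D0, <<"{\"rows\":[">>),
    {ok, D2} = send_delayed_chunk(D1, <<"]}">>),
    end_delayed_json_response(D2).
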
-
-error_info({Error, Reason}) when is_list(Reason) ->
-    error_info({Error, couch_util:to_binary(Reason)});
-error_info(bad_request) ->
-    {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
-    {400, <<"bad_request">>, Reason};
-error_info({bad_request, Error, Reason}) ->
-    {400, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info({query_parse_error, Reason}) ->
-    {400, <<"query_parse_error">>, Reason};
-error_info(database_does_not_exist) ->
-    {404, <<"not_found">>, <<"Database does not exist.">>};
-error_info(not_found) ->
-    {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
-    {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
-    {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
-    {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({conflict, _}) ->
-    {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Error, Msg}) ->
-    {403, Error, Msg};
-error_info({forbidden, Msg}) ->
-    {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
-    {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
-    {412, <<"file_exists">>, <<"The database could not be "
-        "created, the file already exists.">>};
-error_info({r_quorum_not_met, Reason}) ->
-    {412, <<"read_quorum_not_met">>, Reason};
-error_info({w_quorum_not_met, Reason}) ->
-    {500, <<"write_quorum_not_met">>, Reason};
-error_info({bad_ctype, Reason}) ->
-    {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
-    {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, illegal_database_name}) ->
-    {400, <<"illegal_database_name">>, <<"Only lowercase letters (a-z), "
-        "digits (0-9), and any of the characters _, $, (, ), +, -, and / are "
-        "allowed. Moreover, the database name must begin with a letter.">>};
-error_info({missing_stub, Reason}) ->
-    {412, <<"missing_stub">>, Reason};
-error_info(not_implemented) ->
-    {501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
-error_info({Error, null}) ->
-    {500, couch_util:to_binary(Error), null};
-error_info(timeout) ->
-    {500, <<"timeout">>, <<"The request could not be processed in a reasonable"
-        " amount of time.">>};
-error_info({Error, Reason}) ->
-    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info({Error, nil, _Stack}) ->
-    error_info(Error);
-error_info({Error, Reason, _Stack}) ->
-    error_info({Error, Reason});
-error_info(Error) ->
-    {500, couch_util:to_binary(Error), null}.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, 401=Code, ErrorStr, ReasonStr) ->
-    % this is where the basic auth popup is triggered
-    case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
-    undefined ->
-        case config:get("httpd", "WWW-Authenticate", nil) of
-        nil ->
-            % If the client is a browser and the basic auth popup isn't
-            % turned on, redirect to the session page.
-            case ErrorStr of
-            <<"unauthorized">> ->
-                case config:get("couch_httpd_auth", "authentication_redirect", nil) of
-                nil -> {Code, []};
-                AuthRedirect ->
-                    case config:get("couch_httpd_auth", "require_valid_user", "false") of
-                    "true" ->
-                        % Always send the browser popup header when
-                        % require_valid_user is set.
-                        {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
-                    _False ->
-                        % If the Accept header matches html, do the redirect;
-                        % otherwise proceed as usual.
-                        Accepts = case MochiReq:get_header_value("Accept") of
-                        undefined ->
-                           % According to the HTTP 1.1 spec, if the Accept
-                           % header is missing, it means the client accepts
-                           % all media types.
-                           "html";
-                        Else ->
-                            Else
-                        end,
-                        case re:run(Accepts, "\\bhtml\\b",
-                                [{capture, none}, caseless]) of
-                        nomatch ->
-                            {Code, []};
-                        match ->
-                            AuthRedirectBin = ?l2b(AuthRedirect),
-                            % Redirect to the path the user requested, not
-                            % the one that is used internally.
-                            UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                                undefined -> MochiReq:get(path);
-                                VHostPath -> VHostPath
-                            end,
-                            UrlReturn = ?l2b(couch_util:url_encode(UrlReturnRaw)),
-                            UrlReason = ?l2b(couch_util:url_encode(ReasonStr)),
-                            {302, [{"Location", couch_httpd:absolute_uri(Req, <<AuthRedirectBin/binary,"?return=",UrlReturn/binary,"&reason=",UrlReason/binary>>)}]}
-                        end
-                    end
-                end;
-            _Else ->
-                {Code, []}
-            end;
-        Type ->
-            {Code, [{"WWW-Authenticate", Type}]}
-        end;
-    Type ->
-       {Code, [{"WWW-Authenticate", Type}]}
-    end;
-error_headers(_, Code, _, _) ->
-    {Code, []}.
-
-send_error(_Req, {already_sent, Resp, _Error}) ->
-    {ok, Resp};
-
-send_error(Req, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
-    send_error(Req, Code1, Headers, ErrorStr, ReasonStr, json_stack(Error)).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
-    send_error(Req, Code, [], ErrorStr, ReasonStr, []).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
-    send_json(Req, Code, Headers,
-        {[{<<"error">>,  ErrorStr},
-        {<<"reason">>, ReasonStr} |
-        case Stack of [] -> []; _ -> [{stack, Stack}] end
-    ]}).
-
-% give the option for list functions to output html or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
-    send_chunk(Resp, Reason),
-    send_chunk(Resp, []);
-
-send_chunked_error(Resp, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    JsonError = {[{<<"code">>, Code},
-        {<<"error">>,  ErrorStr},
-        {<<"reason">>, ReasonStr} |
-        case json_stack(Error) of [] -> []; Stack -> [{stack, Stack}] end
-    ]},
-    send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
-    send_chunk(Resp, []).
-
-send_redirect(Req, Path) ->
-     Headers = [{"Location", chttpd:absolute_uri(Req, Path)}],
-     send_response(Req, 301, Headers, <<>>).
-
-server_header() ->
-    couch_httpd:server_header().
-
-timing() ->
-    case get(body_time) of
-        undefined ->
-            {"X-CouchDB-Body-Time", "0"};
-        Time ->
-            {"X-CouchDB-Body-Time", integer_to_list(Time)}
-    end.
-
-reqid() ->
-    {"X-Couch-Request-ID", get(nonce)}.
-
-json_stack({bad_request, _, _}) ->
-    [];
-json_stack({_Error, _Reason, Stack}) ->
-    lists:map(fun json_stack_item/1, Stack);
-json_stack(_) ->
-    [].
-
-json_stack_item({M,F,A}) ->
-    list_to_binary(io_lib:format("~s:~s/~B", [M, F, json_stack_arity(A)]));
-json_stack_item({M,F,A,L}) ->
-    case proplists:get_value(line, L) of
-    undefined -> json_stack_item({M,F,A});
-    Line -> list_to_binary(io_lib:format("~s:~s/~B L~B",
-        [M, F, json_stack_arity(A), Line]))
-    end;
-json_stack_item(_) ->
-    <<"bad entry in stacktrace">>.
-
-json_stack_arity(A) ->
-    if is_integer(A) -> A; is_list(A) -> length(A); true -> 0 end.
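
For instance, a stack entry that carries line information renders as:

    %% json_stack_item({chttpd, handle_request, 1, [{line, 42}]})
    %%   -> <<"chttpd:handle_request/1 L42">>
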

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_app.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_app.erl b/src/chttpd/src/chttpd_app.erl
deleted file mode 100644
index d7a5aef..0000000
--- a/src/chttpd/src/chttpd_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
-    chttpd_sup:start_link(StartArgs).
-
-stop(_State) ->
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_config_listener.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_config_listener.erl b/src/chttpd/src/chttpd_config_listener.erl
deleted file mode 100644
index 4d07b83..0000000
--- a/src/chttpd/src/chttpd_config_listener.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_config_listener).
--behaviour(gen_server).
--behaviour(config_listener).
-
-% public interface
--export([start_link/0]).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-    code_change/3, terminate/2]).
-
-% config_listener callback
--export([handle_config_change/5]).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
-    Settings = [
-        {bind_address, config:get("chttpd", "bind_address")},
-        {port, config:get("chttpd", "port")},
-        {backlog, config:get("chttpd", "backlog")},
-        {server_options, config:get("chttpd", "server_options")}
-    ],
-    ok = config:listen_for_changes(?MODULE, Settings),
-    {ok, Settings}.
-
-handle_config_change("chttpd", "bind_address", Value, _, Settings) ->
-    maybe_replace(bind_address, Value, Settings);
-handle_config_change("chttpd", "port", Value, _, Settings) ->
-    maybe_replace(port, Value, Settings);
-handle_config_change("chttpd", "backlog", Value, _, Settings) ->
-    maybe_replace(backlog, Value, Settings);
-handle_config_change("chttpd", "server_options", Value, _, Settings) ->
-    maybe_replace(server_options, Value, Settings);
-handle_config_change(_, _, _, _, Settings) ->
-    {ok, Settings}.
-
-handle_call(_, _, State) ->
-    {reply, ignored, State}.
-
-handle_cast(_, State) ->
-    {noreply, State}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, State),
-    {noreply, State};
-handle_info(_Msg, State) ->
-    {noreply, State}.
-
-terminate(_, _State) ->
-    ok.
-
-code_change(_, State, _) ->
-    {ok, State}.
-
-% private
-maybe_replace(Key, Value, Settings) ->
-    case couch_util:get_value(Key, Settings) of
-    Value ->
-        {ok, Settings};
-    _ ->
-        chttpd:stop(),
-        {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
-    end.
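
The intended behavior, for illustration: an unchanged value is a no-op,
while a changed value stops chttpd so its supervisor restarts it with the
new setting:

    %% maybe_replace(port, "5984", [{port, "5984"} | Rest]) -> {ok, Settings}
    %% maybe_replace(port, "5985", [{port, "5984"} | Rest]) -> stops chttpd,
    %%     returns the settings list with {port, "5985"} in place
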


[30/49] Remove src/ibrowse

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_lb.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_lb.erl b/src/ibrowse/src/ibrowse_lb.erl
deleted file mode 100644
index 2ef67d9..0000000
--- a/src/ibrowse/src/ibrowse_lb.erl
+++ /dev/null
@@ -1,252 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% File    : ibrowse_lb.erl
-%%% Author  : chandru <ch...@t-mobile.co.uk>
-%%% Description : 
-%%%
-%%% Created :  6 Mar 2008 by chandru <ch...@t-mobile.co.uk>
-%%%-------------------------------------------------------------------
--module(ibrowse_lb).
--author(chandru).
--behaviour(gen_server).
-%%--------------------------------------------------------------------
-%% Include files
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% External exports
--export([
-	 start_link/1,
-	 spawn_connection/5,
-         stop/1
-	]).
-
-%% gen_server callbacks
--export([
-	 init/1,
-	 handle_call/3,
-	 handle_cast/2,
-	 handle_info/2,
-	 terminate/2,
-	 code_change/3
-	]).
-
--record(state, {parent_pid,
-		ets_tid,
-		host,
-		port,
-		max_sessions,
-		max_pipeline_size,
-		num_cur_sessions = 0,
-                proc_state
-               }).
-
--include_lib("ibrowse/include/ibrowse.hrl").
-
-%%====================================================================
-%% External functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: start_link/0
-%% Description: Starts the server
-%%--------------------------------------------------------------------
-start_link(Args) ->
-    gen_server:start_link(?MODULE, Args, []).
-
-%%====================================================================
-%% Server functions
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init/1
-%% Description: Initiates the server
-%% Returns: {ok, State}          |
-%%          {ok, State, Timeout} |
-%%          ignore               |
-%%          {stop, Reason}
-%%--------------------------------------------------------------------
-init([Host, Port]) ->
-    process_flag(trap_exit, true),
-    Max_sessions = ibrowse:get_config_value({max_sessions, Host, Port}, 10),
-    Max_pipe_sz = ibrowse:get_config_value({max_pipeline_size, Host, Port}, 10),
-    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    put(ibrowse_trace_token, ["LB: ", Host, $:, integer_to_list(Port)]),
-    Tid = ets:new(ibrowse_lb, [public, ordered_set]),
-    {ok, #state{parent_pid = whereis(ibrowse),
-		host = Host,
-		port = Port,
-		ets_tid = Tid,
-		max_pipeline_size = Max_pipe_sz,
-	        max_sessions = Max_sessions}}.
-
-spawn_connection(Lb_pid, Url,
-		 Max_sessions,
-		 Max_pipeline_size,
-		 SSL_options)
-  when is_pid(Lb_pid),
-       is_record(Url, url),
-       is_integer(Max_pipeline_size),
-       is_integer(Max_sessions) ->
-    gen_server:call(Lb_pid,
-		    {spawn_connection, Url, Max_sessions, Max_pipeline_size, SSL_options}).
-
-stop(Lb_pid) ->
-    case catch gen_server:call(Lb_pid, stop) of
-        {'EXIT', {timeout, _}} ->
-            exit(Lb_pid, kill);
-        ok ->
-            ok
-    end.
-%%--------------------------------------------------------------------
-%% Function: handle_call/3
-%% Description: Handling call messages
-%% Returns: {reply, Reply, State}          |
-%%          {reply, Reply, State, Timeout} |
-%%          {noreply, State}               |
-%%          {noreply, State, Timeout}      |
-%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-
-handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
-    gen_server:reply(_From, ok),
-    {stop, normal, State};
-
-handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
-    ets:foldl(fun({Pid, _, _}, Acc) ->
-                      ibrowse_http_client:stop(Pid),
-                      Acc
-              end, [], Tid),
-    gen_server:reply(_From, ok),
-    {stop, normal, State};
-
-handle_call(_, _From, #state{proc_state = shutting_down} = State) ->
-    {reply, {error, shutting_down}, State};
-
-%% Update max_sessions in #state with supplied value
-handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
-	    #state{num_cur_sessions = Num} = State) 
-    when Num >= Max_sess ->
-    State_1 = maybe_create_ets(State),
-    Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
-    {reply, Reply, State_1#state{max_sessions = Max_sess,
-                                 max_pipeline_size = Max_pipe}};
-
-handle_call({spawn_connection, Url, Max_sess, Max_pipe, SSL_options}, _From,
-	    #state{num_cur_sessions = Cur} = State) ->
-    State_1 = maybe_create_ets(State),
-    Tid = State_1#state.ets_tid,
-    {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
-    ets:insert(Tid, {Pid, 0, 0}),
-    {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1,
-                                     max_sessions = Max_sess,
-                                     max_pipeline_size = Max_pipe}};
-
-handle_call(Request, _From, State) ->
-    Reply = {unknown_request, Request},
-    {reply, Reply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast/2
-%% Description: Handling cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info/2
-%% Description: Handling all non call/cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_info({'EXIT', Parent, _Reason}, #state{parent_pid = Parent} = State) ->
-    {stop, normal, State};
-
-handle_info({'EXIT', _Pid, _Reason}, #state{ets_tid = undefined} = State) ->
-    {noreply, State};
-
-handle_info({'EXIT', Pid, _Reason},
-	    #state{num_cur_sessions = Cur,
-		   ets_tid = Tid} = State) ->
-    % Table rows are {Pid, Cur_sz, Speculative_sz}, keyed on the pid.
-    ets:delete(Tid, Pid),
-    Cur_1 = Cur - 1,
-    case Cur_1 of
-        0 ->
-            ets:delete(Tid),
-            {noreply, State#state{ets_tid = undefined, num_cur_sessions = 0}, 10000};
-        _ ->
-            {noreply, State#state{num_cur_sessions = Cur_1}}
-    end;
-
-handle_info({trace, Bool}, #state{ets_tid = undefined} = State) ->
-    put(my_trace_flag, Bool),
-    {noreply, State};
-
-handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
-    ets:foldl(fun({Pid, _, _}, Acc) when is_pid(Pid) ->
-		      catch Pid ! {trace, Bool},
-		      Acc;
-		 (_, Acc) ->
-		      Acc
-	      end, undefined, Tid),
-    put(my_trace_flag, Bool),
-    {noreply, State};
-
-handle_info(timeout, State) ->
-    %% We can't shut down the process immediately because a request
-    %% might be in flight, so we first remove the entry from the
-    %% ibrowse_lb ets table and then shut down a couple of seconds
-    %% later.
-    ets:delete(ibrowse_lb, {State#state.host, State#state.port}),
-    erlang:send_after(2000, self(), shutdown),
-    {noreply, State#state{proc_state = shutting_down}};
-
-handle_info(shutdown, State) ->
-    {stop, normal, State};
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: terminate/2
-%% Description: Shutdown the server
-%% Returns: any (ignored by gen_server)
-%%--------------------------------------------------------------------
-terminate(_Reason, _State) ->
-    ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change/3
-%% Purpose: Convert process state when code is changed
-%% Returns: {ok, NewState}
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-find_best_connection(Tid, Max_pipe) ->
-    Res = find_best_connection(ets:first(Tid), Tid, Max_pipe),
-    Res.
-
-find_best_connection('$end_of_table', _, _) ->
-    {error, retry_later};
-find_best_connection(Pid, Tid, Max_pipe) ->
-    case ets:lookup(Tid, Pid) of
-        [{Pid, Cur_sz, Speculative_sz}] when Cur_sz < Max_pipe,
-                                             Speculative_sz < Max_pipe ->
-            ets:update_counter(Tid, Pid, {3, 1, 9999999, 9999999}),
-            {ok, Pid};
-        _ ->
-            find_best_connection(ets:next(Tid, Pid), Tid, Max_pipe)
-    end.
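
To make the selection concrete (values are illustrative): with Max_pipe = 2
and a table holding {P1, 2, 2} and {P2, 0, 1}, P1 is skipped because its
pipeline is full, and {ok, P2} is returned after ets:update_counter/3 bumps
P2's speculative size from 1 to 2.
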
-
-maybe_create_ets(#state{ets_tid = undefined} = State) ->
-    Tid = ets:new(ibrowse_lb, [public, ordered_set]),
-    State#state{ets_tid = Tid};
-maybe_create_ets(State) ->
-    State.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_lib.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_lib.erl b/src/ibrowse/src/ibrowse_lib.erl
deleted file mode 100644
index 25873c0..0000000
--- a/src/ibrowse/src/ibrowse_lib.erl
+++ /dev/null
@@ -1,441 +0,0 @@
-%%% File    : ibrowse_lib.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : 
-%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%% @doc Module with a few useful functions
-
--module(ibrowse_lib).
--author('chandru').
--ifdef(debug).
--compile(export_all).
--endif.
-
--include_lib("ibrowse/include/ibrowse.hrl").
-
--ifdef(EUNIT).
--include_lib("eunit/include/eunit.hrl").
--endif.
-
--export([
-         get_trace_status/2,
-         do_trace/2,
-         do_trace/3,
-         url_encode/1,
-         decode_rfc822_date/1,
-         status_code/1,
-         encode_base64/1,
-         decode_base64/1,
-         get_value/2,
-         get_value/3,
-         parse_url/1,
-         printable_date/0
-        ]).
-
-get_trace_status(Host, Port) ->
-    ibrowse:get_config_value({trace, Host, Port}, false).
-
-%% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
-%% @spec url_encode(Str) -> UrlEncodedStr
-%% Str = string()
-%% UrlEncodedStr = string()
-url_encode(Str) when is_list(Str) ->
-    url_encode_char(lists:reverse(Str), []).
-
-url_encode_char([X | T], Acc) when X >= $0, X =< $9 ->
-    url_encode_char(T, [X | Acc]);
-url_encode_char([X | T], Acc) when X >= $a, X =< $z ->
-    url_encode_char(T, [X | Acc]);
-url_encode_char([X | T], Acc) when X >= $A, X =< $Z ->
-    url_encode_char(T, [X | Acc]);
-url_encode_char([X | T], Acc) when X == $-; X == $_; X == $. ->
-    url_encode_char(T, [X | Acc]);
-url_encode_char([32 | T], Acc) ->
-    url_encode_char(T, [$+ | Acc]);
-url_encode_char([X | T], Acc) ->
-    url_encode_char(T, [$%, d2h(X bsr 4), d2h(X band 16#0f) | Acc]);
-url_encode_char([], Acc) ->
-    Acc.
-
-d2h(N) when N<10 -> N+$0;
-d2h(N) -> N+$a-10.
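
Example of the encoding (an illustrative shell session):

    1> ibrowse_lib:url_encode("foo bar/baz").
    "foo+bar%2fbaz"
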
-
-decode_rfc822_date(String) when is_list(String) ->
-    case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
-        {'EXIT', _} ->
-            {error, invalid_date};
-        Res ->
-            Res
-    end.
-
-% TODO: Have to handle the time zone.
-decode_rfc822_date_1([_,DayInt,Month,Year, Time,Zone]) ->
-    decode_rfc822_date_1([DayInt,Month,Year, Time,Zone]);
-decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
-    DayI = list_to_integer(Day),
-    MonthI = month_int(Month),
-    YearI = list_to_integer(Year),
-    TimeTup = case string:tokens(Time, ":") of
-                  [H,M] ->
-                      {list_to_integer(H),
-                       list_to_integer(M),
-                       0};
-                  [H,M,S] ->
-                      {list_to_integer(H),
-                       list_to_integer(M),
-                       list_to_integer(S)}
-              end,
-    {{YearI,MonthI,DayI}, TimeTup}.
-
-month_int("Jan") -> 1;
-month_int("Feb") -> 2;
-month_int("Mar") -> 3;
-month_int("Apr") -> 4;
-month_int("May") -> 5;
-month_int("Jun") -> 6;
-month_int("Jul") -> 7;
-month_int("Aug") -> 8;
-month_int("Sep") -> 9;
-month_int("Oct") -> 10;
-month_int("Nov") -> 11;
-month_int("Dec") -> 12.
-
-%% @doc Given a status code, returns an atom describing the status code. 
-%% @spec status_code(StatusCode::status_code()) -> StatusDescription
-%% status_code() = string() | integer()
-%% StatusDescription = atom()
-status_code(100) -> continue;
-status_code(101) -> switching_protocols;
-status_code(102) -> processing;
-status_code(200) -> ok;
-status_code(201) -> created;
-status_code(202) -> accepted;
-status_code(203) -> non_authoritative_information;
-status_code(204) -> no_content;
-status_code(205) -> reset_content;
-status_code(206) -> partial_content;
-status_code(207) -> multi_status;
-status_code(300) -> multiple_choices;
-status_code(301) -> moved_permanently;
-status_code(302) -> found;
-status_code(303) -> see_other;
-status_code(304) -> not_modified;
-status_code(305) -> use_proxy;
-status_code(306) -> unused;
-status_code(307) -> temporary_redirect;
-status_code(400) -> bad_request;
-status_code(401) -> unauthorized;
-status_code(402) -> payment_required;
-status_code(403) -> forbidden;
-status_code(404) -> not_found;
-status_code(405) -> method_not_allowed;
-status_code(406) -> not_acceptable;
-status_code(407) -> proxy_authentication_required;
-status_code(408) -> request_timeout;
-status_code(409) -> conflict;
-status_code(410) -> gone;
-status_code(411) -> length_required;
-status_code(412) -> precondition_failed;
-status_code(413) -> request_entity_too_large;
-status_code(414) -> request_uri_too_long;
-status_code(415) -> unsupported_media_type;
-status_code(416) -> requested_range_not_satisfiable;
-status_code(417) -> expectation_failed;
-status_code(422) -> unprocessable_entity;
-status_code(423) -> locked;
-status_code(424) -> failed_dependency;
-status_code(500) -> internal_server_error;
-status_code(501) -> not_implemented;
-status_code(502) -> bad_gateway;
-status_code(503) -> service_unavailable;
-status_code(504) -> gateway_timeout;
-status_code(505) -> http_version_not_supported;
-status_code(507) -> insufficient_storage;
-status_code(X) when is_list(X) -> status_code(list_to_integer(X));
-status_code(_)   -> unknown_status_code.
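
Usage is straightforward; string input is converted to an integer first:

    1> ibrowse_lib:status_code(404).
    not_found
    2> ibrowse_lib:status_code("200").
    ok
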
-
-%% @doc Implements the base64 encoding algorithm. The output data type matches the input data type.
-%% @spec encode_base64(In) -> Out
-%% In = string() | binary()
-%% Out = string() | binary()
-encode_base64(List) when is_list(List) ->
-    binary_to_list(base64:encode(List));
-encode_base64(Bin) when is_binary(Bin) ->
-    base64:encode(Bin).
-
-%% @doc Implements the base64 decoding algorithm. The output data type matches the input data type.
-%% @spec decode_base64(In) -> Out | exit({error, invalid_input})
-%% In = string() | binary()
-%% Out = string() | binary()
-decode_base64(List) when is_list(List) ->
-    binary_to_list(base64:decode(List));
-decode_base64(Bin) when is_binary(Bin) ->
-    base64:decode(Bin).
-
-get_value(Tag, TVL, DefVal) ->
-    case lists:keysearch(Tag, 1, TVL) of
-        false ->
-            DefVal;
-        {value, {_, Val}} ->
-            Val
-    end.
-
-get_value(Tag, TVL) ->
-    {value, {_, V}} = lists:keysearch(Tag,1,TVL),
-    V.
-
-parse_url(Url) ->
-    try
-        case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
-            #url{host_type = undefined, host = Host} = UrlRec ->
-                case inet_parse:address(Host) of
-                    {ok, {_, _, _, _, _, _, _, _}} ->
-                        UrlRec#url{host_type = ipv6_address};
-                    {ok, {_, _, _, _}} ->
-                        UrlRec#url{host_type = ipv4_address};
-                    _ ->
-                        UrlRec#url{host_type = hostname}
-                end;
-            #url{} = UrlRec ->
-                UrlRec;
-            _ ->
-                {error, invalid_uri}
-        end
-    catch _:_ ->
-            {error, invalid_uri}
-    end.
-
-parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
-    {invalid_uri_1, Url};
-parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
-    Prot = list_to_existing_atom(lists:reverse(TmpAcc)),
-    parse_url(T, get_username, 
-              Url#url{protocol = Prot},
-              []);
-parse_url([H | T], get_username, Url, TmpAcc) when H == $/;
-                                                   H == $? ->
-    Path = case H of
-               $/ ->
-                   [$/ | T];
-               $? ->
-                   [$/, $? | T]
-           end,
-    %% No username/password and no port number.
-    Url#url{host = lists:reverse(TmpAcc),
-            port = default_port(Url#url.protocol),
-           path = Path};
-parse_url([$: | T], get_username, Url, TmpAcc) ->
-    %% It is possible that no username/password has been specified, but
-    %% we'll continue on the assumption that there is one. If we encounter
-    %% a '@' later on, there was indeed a username/password. If we
-    %% encounter a '/', what we collected was actually the hostname.
-    parse_url(T, get_password, 
-              Url#url{username = lists:reverse(TmpAcc)},
-              []);
-parse_url([$@ | T], get_username, Url, TmpAcc) ->
-    parse_url(T, get_host, 
-              Url#url{username = lists:reverse(TmpAcc),
-                      password = ""},
-              []);
-parse_url([$[ | T], get_username, Url, []) ->
-    % IPv6 address literals are enclosed by square brackets:
-    %     http://www.ietf.org/rfc/rfc2732.txt
-    parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
-parse_url([$[ | T], get_username, _Url, TmpAcc) ->
-    {error, {invalid_username_or_host, lists:reverse(TmpAcc) ++ "[" ++ T}};
-parse_url([$[ | _], get_password, _Url, []) ->
-    {error, missing_password};
-parse_url([$[ | T], get_password, Url, TmpAcc) ->
-    % IPv6 address literals are enclosed by square brackets:
-    %     http://www.ietf.org/rfc/rfc2732.txt
-    parse_url(T, get_ipv6_address,
-              Url#url{host_type = ipv6_address,
-                      password = lists:reverse(TmpAcc)},
-              []);
-parse_url([$@ | T], get_password, Url, TmpAcc) ->
-    parse_url(T, get_host, 
-              Url#url{password = lists:reverse(TmpAcc)},
-              []);
-parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
-                                                   H == $? ->
-    %% Ok, what we thought was the username/password was the hostname
-    %% and port number.
-    #url{username=User} = Url,
-    Port = list_to_integer(lists:reverse(TmpAcc)),
-    Path = case H of
-               $/ ->
-                   [$/ | T];
-               $? ->
-                   [$/, $? | T]
-           end,
-    Url#url{host = User,
-            port = Port,
-            username = undefined,
-            password = undefined,
-           path = Path};
-parse_url([$] | T], get_ipv6_address, #url{protocol = Prot} = Url, TmpAcc) ->
-    Addr = lists:reverse(TmpAcc),
-    case inet_parse:address(Addr) of
-        {ok, {_, _, _, _, _, _, _, _}} ->
-            Url2 = Url#url{host = Addr, port = default_port(Prot)},
-            case T of
-                [$: | T2] ->
-                    parse_url(T2, get_port, Url2, []);
-                [$/ | T2] ->
-                    Url2#url{path = [$/ | T2]};
-                [$? | T2] ->
-                    Url2#url{path = [$/, $? | T2]};
-                [] ->
-                    Url2#url{path = "/"};
-                _ ->
-                    {error, {invalid_host, "[" ++ Addr ++ "]" ++ T}}
-            end;
-        _ ->
-            {error, {invalid_ipv6_address, Addr}}
-    end;
-parse_url([$[ | T], get_host, #url{} = Url, []) ->
-    parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
-parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
-    parse_url(T, get_port, 
-              Url#url{host = lists:reverse(TmpAcc)},
-              []);
-parse_url([H | T], get_host, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
-                                                                     H == $? ->
-    Path = case H of
-               $/ ->
-                   [$/ | T];
-               $? ->
-                   [$/, $? | T]
-           end,
-    Url#url{host = lists:reverse(TmpAcc),
-            port = default_port(Prot),
-           path = Path};
-parse_url([H | T], get_port, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
-                                                                     H == $? ->
-    Path = case H of
-               $/ ->
-                   [$/ | T];
-               $? ->
-                   [$/, $? | T]
-           end,
-    Port = case TmpAcc of
-               [] ->
-                   default_port(Prot);
-               _ ->
-                   list_to_integer(lists:reverse(TmpAcc))
-           end,
-    Url#url{port = Port, path = Path};
-parse_url([H | T], State, Url, TmpAcc) ->
-    parse_url(T, State, Url, [H | TmpAcc]);
-parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
-    Url#url{host = lists:reverse(TmpAcc),
-            port = default_port(Url#url.protocol),
-            path = "/"};
-parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
-    Url#url{host = lists:reverse(TmpAcc),
-            port = default_port(Url#url.protocol),
-            path = "/"};
-parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
-    Port = case TmpAcc of
-               [] ->
-                   default_port(Prot);
-               _ ->
-                   list_to_integer(lists:reverse(TmpAcc))
-           end,
-    Url#url{port = Port, 
-            path = "/"};
-parse_url([], get_password, Url, TmpAcc) ->
-    %% Ok, what we thought was the username/password was the hostname
-    %% and port number.
-    #url{username=User} = Url,
-    Port = case TmpAcc of
-               [] ->
-                   default_port(Url#url.protocol);
-               _ ->
-                   list_to_integer(lists:reverse(TmpAcc))
-           end,
-    Url#url{host = User,
-            port = Port,
-            username = undefined,
-            password = undefined,
-            path = "/"};
-parse_url([], State, Url, TmpAcc) ->
-    {invalid_uri_2, State, Url, TmpAcc}.
-
-default_port(http)  -> 80;
-default_port(https) -> 443;
-default_port(ftp)   -> 21.
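
A worked example of the parser (an illustrative session; the record print
order depends on the #url{} definition in ibrowse.hrl):

    1> ibrowse_lib:parse_url("http://user:pw@example.org:8080/db?x=1").
    #url{abspath = "http://user:pw@example.org:8080/db?x=1",
         host = "example.org", port = 8080, username = "user",
         password = "pw", path = "/db?x=1", protocol = http,
         host_type = hostname}
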
-
-printable_date() ->
-    {{Y,Mo,D},{H, M, S}} = calendar:local_time(),
-    {_,_,MicroSecs} = now(),
-    [integer_to_list(Y),
-     $-,
-     integer_to_list(Mo),
-     $-,
-     integer_to_list(D),
-     $_,
-     integer_to_list(H),
-     $:,
-     integer_to_list(M),
-     $:,
-     integer_to_list(S),
-     $:,
-     integer_to_list(MicroSecs div 1000)].
-
-do_trace(Fmt, Args) ->
-    do_trace(get(my_trace_flag), Fmt, Args).
-
--ifdef(DEBUG).
-do_trace(_, Fmt, Args) ->
-    io:format("~s -- (~s) - "++Fmt,
-              [printable_date(), 
-               get(ibrowse_trace_token) | Args]).
--else.
-do_trace(true, Fmt, Args) ->
-    io:format("~s -- (~s) - "++Fmt,
-              [printable_date(), 
-               get(ibrowse_trace_token) | Args]);
-do_trace(_, _, _) ->
-    ok.
--endif.
-
--ifdef(EUNIT).
-
-parse_url_test() ->
-    Urls = [{"http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html",
-             #url{abspath = "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html",
-                  host = "FEDC:BA98:7654:3210:FEDC:BA98:7654:3210",
-                  port = 80, protocol = http, path = "/index.html",
-                  host_type = ipv6_address}},
-            {"http://[1080:0:0:0:8:800:200C:417A]/index.html",
-             #url{abspath = "http://[1080:0:0:0:8:800:200C:417A]/index.html",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "1080:0:0:0:8:800:200C:417A", path = "/index.html"}},
-            {"http://[3ffe:2a00:100:7031::1]",
-             #url{abspath = "http://[3ffe:2a00:100:7031::1]",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "3ffe:2a00:100:7031::1", path = "/"}},
-            {"http://[1080::8:800:200C:417A]/foo",
-             #url{abspath = "http://[1080::8:800:200C:417A]/foo",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "1080::8:800:200C:417A", path = "/foo"}},
-            {"http://[::192.9.5.5]/ipng",
-             #url{abspath = "http://[::192.9.5.5]/ipng",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "::192.9.5.5", path = "/ipng"}},
-            {"http://[::FFFF:129.144.52.38]:80/index.html",
-             #url{abspath = "http://[::FFFF:129.144.52.38]:80/index.html",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "::FFFF:129.144.52.38", path = "/index.html"}},
-            {"http://[2010:836B:4179::836B:4179]",
-             #url{abspath = "http://[2010:836B:4179::836B:4179]",
-                  host_type = ipv6_address, port = 80, protocol = http,
-                  host = "2010:836B:4179::836B:4179", path = "/"}}
-           ],
-    lists:foreach(
-      fun({Url, Expected_result}) ->
-              ?assertMatch(Expected_result, parse_url(Url))
-      end, Urls).
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_sup.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_sup.erl b/src/ibrowse/src/ibrowse_sup.erl
deleted file mode 100644
index ace33d1..0000000
--- a/src/ibrowse/src/ibrowse_sup.erl
+++ /dev/null
@@ -1,63 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% File    : ibrowse_sup.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : Supervisor for the ibrowse application
-%%%
-%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%%-------------------------------------------------------------------
--module(ibrowse_sup).
--behaviour(supervisor).
-%%--------------------------------------------------------------------
-%% Include files
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% External exports
-%%--------------------------------------------------------------------
--export([
-	 start_link/0
-        ]).
-
-%%--------------------------------------------------------------------
-%% Internal exports
-%%--------------------------------------------------------------------
--export([
-	 init/1
-        ]).
-
-%%--------------------------------------------------------------------
-%% Macros
-%%--------------------------------------------------------------------
--define(SERVER, ?MODULE).
-
-%%--------------------------------------------------------------------
-%% Records
-%%--------------------------------------------------------------------
-
-%%====================================================================
-%% External functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: start_link/0
-%% Description: Starts the supervisor
-%%--------------------------------------------------------------------
-start_link() ->
-    supervisor:start_link({local, ?SERVER}, ?MODULE, []).
-
-%%====================================================================
-%% Server functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Func: init/1
-%% Returns: {ok,  {SupFlags,  [ChildSpec]}} |
-%%          ignore                          |
-%%          {error, Reason}   
-%%--------------------------------------------------------------------
-init([]) ->
-    AChild = {ibrowse,{ibrowse,start_link,[]},
-	      permanent,2000,worker,[ibrowse, ibrowse_http_client]},
-    {ok,{{one_for_all,10,1}, [AChild]}}.
-
-%%====================================================================
-%% Internal functions
-%%====================================================================

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_test.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_test.erl b/src/ibrowse/src/ibrowse_test.erl
deleted file mode 100644
index d97f76c..0000000
--- a/src/ibrowse/src/ibrowse_test.erl
+++ /dev/null
@@ -1,625 +0,0 @@
-%%% File    : ibrowse_test.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : Test ibrowse
-%%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-
--module(ibrowse_test).
--export([
-	 load_test/3,
-	 send_reqs_1/3,
-	 do_send_req/2,
-	 unit_tests/0,
-	 unit_tests/1,
-	 unit_tests_1/2,
-	 ue_test/0,
-	 ue_test/1,
-	 verify_chunked_streaming/0,
-	 verify_chunked_streaming/1,
-         test_chunked_streaming_once/0,
-	 i_do_async_req_list/4,
-	 test_stream_once/3,
-	 test_stream_once/4,
-         test_20122010/0,
-         test_20122010/1,
-         test_pipeline_head_timeout/0,
-         test_pipeline_head_timeout/1,
-         do_test_pipeline_head_timeout/4,
-         test_head_transfer_encoding/0,
-         test_head_transfer_encoding/1,
-         test_head_response_with_body/0,
-         test_head_response_with_body/1
-	]).
-
-test_stream_once(Url, Method, Options) ->
-    test_stream_once(Url, Method, Options, 5000).
-
-test_stream_once(Url, Method, Options, Timeout) ->
-    case ibrowse:send_req(Url, [], Method, [], [{stream_to, {self(), once}} | Options], Timeout) of
-	{ibrowse_req_id, Req_id} ->
-	    case ibrowse:stream_next(Req_id) of
-		ok ->
-		    test_stream_once(Req_id);
-		Err ->
-		    Err
-	    end;
-	Err ->
-	    Err
-    end.
-
-test_stream_once(Req_id) ->
-    receive
-	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
-	    io:format("Recvd headers~n~p~n", [{ibrowse_async_headers, Req_id, StatCode, Headers}]),
-	    case ibrowse:stream_next(Req_id) of
-		ok ->
-		    test_stream_once(Req_id);
-		Err ->
-		    Err
-	    end;
-	{ibrowse_async_response, Req_id, {error, Err}} ->
-	    io:format("Recvd error: ~p~n", [Err]);
-	{ibrowse_async_response, Req_id, Body_1} ->
-	    io:format("Recvd body part: ~n~p~n", [{ibrowse_async_response, Req_id, Body_1}]),
-	    case ibrowse:stream_next(Req_id) of
-		ok ->
-		    test_stream_once(Req_id);
-		Err ->
-		    Err
-	    end;
-	{ibrowse_async_response_end, Req_id} ->
-	    ok
-    end.
-%% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
-%% tweak settings before running the load test. The defaults are 10 and 10.
-load_test(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
-                                                  is_integer(NumWorkers),
-                                                  is_integer(NumReqsPerWorker),
-                                                  NumWorkers > 0,
-                                                  NumReqsPerWorker > 0 ->
-    proc_lib:spawn(?MODULE, send_reqs_1, [Url, NumWorkers, NumReqsPerWorker]).
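-
-%% Example (editor's sketch; host and numbers are hypothetical): raise
-%% both defaults for a target server, then run 20 workers sending 100
-%% requests each:
-%%   ibrowse:set_max_sessions("www.example.com", 80, 100),
-%%   ibrowse:set_max_pipeline_size("www.example.com", 80, 50),
-%%   ibrowse_test:load_test("http://www.example.com/", 20, 100).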
-
-send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
-    Start_time = now(),
-    ets:new(pid_table, [named_table, public]),
-    ets:new(ibrowse_test_results, [named_table, public]),
-    ets:new(ibrowse_errors, [named_table, public, ordered_set]),
-    init_results(),
-    process_flag(trap_exit, true),
-    log_msg("Starting spawning of workers...~n", []),
-    spawn_workers(Url, NumWorkers, NumReqsPerWorker),
-    log_msg("Finished spawning workers...~n", []),
-    do_wait(Url),
-    End_time = now(),
-    log_msg("All workers are done...~n", []),
-    log_msg("ibrowse_test_results table: ~n~p~n", [ets:tab2list(ibrowse_test_results)]),
-    log_msg("Start time: ~1000.p~n", [calendar:now_to_local_time(Start_time)]),
-    log_msg("End time  : ~1000.p~n", [calendar:now_to_local_time(End_time)]),
-    Elapsed_time_secs = trunc(timer:now_diff(End_time, Start_time) / 1000000),
-    log_msg("Elapsed   : ~p~n", [Elapsed_time_secs]),
-    log_msg("Reqs/sec  : ~p~n", [round(trunc((NumWorkers*NumReqsPerWorker) / Elapsed_time_secs))]),
-    dump_errors().
-
-init_results() ->
-    ets:insert(ibrowse_test_results, {crash, 0}),
-    ets:insert(ibrowse_test_results, {send_failed, 0}),
-    ets:insert(ibrowse_test_results, {other_error, 0}),
-    ets:insert(ibrowse_test_results, {success, 0}),
-    ets:insert(ibrowse_test_results, {retry_later, 0}),
-    ets:insert(ibrowse_test_results, {trid_mismatch, 0}),
-    ets:insert(ibrowse_test_results, {success_no_trid, 0}),
-    ets:insert(ibrowse_test_results, {failed, 0}),
-    ets:insert(ibrowse_test_results, {timeout, 0}),
-    ets:insert(ibrowse_test_results, {req_id, 0}).
-
-spawn_workers(_Url, 0, _) ->
-    ok;
-spawn_workers(Url, NumWorkers, NumReqsPerWorker) ->
-    Pid = proc_lib:spawn_link(?MODULE, do_send_req, [Url, NumReqsPerWorker]),
-    ets:insert(pid_table, {Pid, []}),
-    spawn_workers(Url, NumWorkers - 1, NumReqsPerWorker).
-
-do_wait(Url) ->
-    receive
-	{'EXIT', _, normal} ->
-            catch ibrowse:show_dest_status(Url),
-            catch ibrowse:show_dest_status(),
-	    do_wait(Url);
-	{'EXIT', Pid, Reason} ->
-	    ets:delete(pid_table, Pid),
-	    ets:insert(ibrowse_errors, {Pid, Reason}),
-	    ets:update_counter(ibrowse_test_results, crash, 1),
-	    do_wait(Url);
-	Msg ->
-	    io:format("Recvd unknown message...~p~n", [Msg]),
-	    do_wait(Url)
-    after 1000 ->
-	    case ets:info(pid_table, size) of
-		0 ->
-		    done;
-		_ ->
-                    catch ibrowse:show_dest_status(Url),
-                    catch ibrowse:show_dest_status(),
-		    do_wait(Url)
-	    end
-    end.
-
-do_send_req(Url, NumReqs) ->
-    do_send_req_1(Url, NumReqs).
-
-do_send_req_1(_Url, 0) ->
-    ets:delete(pid_table, self());
-do_send_req_1(Url, NumReqs) ->
-    Counter = integer_to_list(ets:update_counter(ibrowse_test_results, req_id, 1)),
-    case ibrowse:send_req(Url, [{"ib_req_id", Counter}], get, [], [], 10000) of
-	{ok, _Status, Headers, _Body} ->
-	    case lists:keysearch("ib_req_id", 1, Headers) of
-		{value, {_, Counter}} ->
-		    ets:update_counter(ibrowse_test_results, success, 1);
-		{value, _} ->
-		    ets:update_counter(ibrowse_test_results, trid_mismatch, 1);
-		false ->
-		    ets:update_counter(ibrowse_test_results, success_no_trid, 1)
-	    end;
-	{error, req_timedout} ->
-	    ets:update_counter(ibrowse_test_results, timeout, 1);
-	{error, send_failed} ->
-	    ets:update_counter(ibrowse_test_results, send_failed, 1);
-	{error, retry_later} ->
-	    ets:update_counter(ibrowse_test_results, retry_later, 1);
-	Err ->
-	    ets:insert(ibrowse_errors, {now(), Err}),
-	    ets:update_counter(ibrowse_test_results, other_error, 1),
-	    ok
-    end,
-    do_send_req_1(Url, NumReqs-1).
-
-dump_errors() ->
-    case ets:info(ibrowse_errors, size) of
-	0 ->
-	    ok;
-	_ ->
-	    {A, B, C} = now(),
-	    Filename = lists:flatten(
-			 io_lib:format("ibrowse_errors_~p_~p_~p.txt" , [A, B, C])),
-	    case file:open(Filename, [write, delayed_write, raw]) of
-		{ok, Iod} ->
-		    dump_errors(ets:first(ibrowse_errors), Iod);
-		Err ->
-		    io:format("failed to create file ~s. Reason: ~p~n", [Filename, Err]),
-		    ok
-	    end
-    end.
-
-dump_errors('$end_of_table', Iod) ->
-    file:close(Iod);
-dump_errors(Key, Iod) ->
-    [{_, Term}] = ets:lookup(ibrowse_errors, Key),
-    file:write(Iod, io_lib:format("~p~n", [Term])),
-    dump_errors(ets:next(ibrowse_errors, Key), Iod).
-
-%%------------------------------------------------------------------------------
-%% Unit Tests
-%%------------------------------------------------------------------------------
--define(TEST_LIST, [{"http://intranet/messenger", get},
-		    {"http://www.google.co.uk", get},
-		    {"http://www.google.com", get},
-		    {"http://www.google.com", options},
-                    {"https://mail.google.com", get},
-		    {"http://www.sun.com", get},
-		    {"http://www.oracle.com", get},
-		    {"http://www.bbc.co.uk", get},
-		    {"http://www.bbc.co.uk", trace},
-		    {"http://www.bbc.co.uk", options},
-		    {"http://yaws.hyber.org", get},
-		    {"http://jigsaw.w3.org/HTTP/ChunkedScript", get},
-		    {"http://jigsaw.w3.org/HTTP/TE/foo.txt", get},
-		    {"http://jigsaw.w3.org/HTTP/TE/bar.txt", get},
-		    {"http://jigsaw.w3.org/HTTP/connection.html", get},
-		    {"http://jigsaw.w3.org/HTTP/cc.html", get},
-		    {"http://jigsaw.w3.org/HTTP/cc-private.html", get},
-		    {"http://jigsaw.w3.org/HTTP/cc-proxy-revalidate.html", get},
-		    {"http://jigsaw.w3.org/HTTP/cc-nocache.html", get},
-		    {"http://jigsaw.w3.org/HTTP/h-content-md5.html", get},
-		    {"http://jigsaw.w3.org/HTTP/h-retry-after.html", get},
-		    {"http://jigsaw.w3.org/HTTP/h-retry-after-date.html", get},
-		    {"http://jigsaw.w3.org/HTTP/neg", get},
-		    {"http://jigsaw.w3.org/HTTP/negbad", get},
-		    {"http://jigsaw.w3.org/HTTP/400/toolong/", get},
-		    {"http://jigsaw.w3.org/HTTP/300/", get},
-		    {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
-		    {"http://jigsaw.w3.org/HTTP/CL/", get},
-		    {"http://www.httpwatch.com/httpgallery/chunked/", get},
-                    {"https://github.com", get, [{ssl_options, [{depth, 2}]}]},
-                    {local_test_fun, test_20122010, []},
-                    {local_test_fun, test_pipeline_head_timeout, []},
-                    {local_test_fun, test_head_transfer_encoding, []},
-                    {local_test_fun, test_head_response_with_body, []}
-		   ]).
-
-unit_tests() ->
-    unit_tests([]).
-
-unit_tests(Options) ->
-    application:start(crypto),
-    application:start(public_key),
-    application:start(ssl),
-    (catch ibrowse_test_server:start_server(8181, tcp)),
-    ibrowse:start(),
-    Options_1 = Options ++ [{connect_timeout, 5000}],
-    Test_timeout = proplists:get_value(test_timeout, Options, 60000),
-    {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
-    receive 
-	{done, Pid} ->
-	    ok;
-	{'DOWN', Ref, _, _, Info} ->
-	    io:format("Test process crashed: ~p~n", [Info])
-    after Test_timeout ->
-	    exit(Pid, kill),
-	    io:format("Timed out waiting for tests to complete~n", [])
-    end,
-    catch ibrowse_test_server:stop_server(8181),
-    ok.
-
-unit_tests_1(Parent, Options) ->
-    lists:foreach(fun({local_test_fun, Fun_name, Args}) ->
-                          execute_req(local_test_fun, Fun_name, Args);
-                     ({Url, Method}) ->
-			  execute_req(Url, Method, Options);
-		     ({Url, Method, X_Opts}) ->
-			  execute_req(Url, Method, X_Opts ++ Options)
-		  end, ?TEST_LIST),
-    Parent ! {done, self()}.
-
-verify_chunked_streaming() ->
-    verify_chunked_streaming([]).
-
-verify_chunked_streaming(Options) ->
-    io:format("~nVerifying that chunked streaming is working...~n", []),
-    Url = "http://www.httpwatch.com/httpgallery/chunked/",
-    io:format("  URL: ~s~n", [Url]),
-    io:format("  Fetching data without streaming...~n", []),
-    Result_without_streaming = ibrowse:send_req(
-				 Url, [], get, [],
-				 [{response_format, binary} | Options]),
-    io:format("  Fetching data with streaming as list...~n", []),
-    Async_response_list = do_async_req_list(
-			    Url, get, [{response_format, list} | Options]),
-    io:format("  Fetching data with streaming as binary...~n", []),
-    Async_response_bin = do_async_req_list(
-			   Url, get, [{response_format, binary} | Options]),
-    io:format("  Fetching data with streaming as binary, {active, once}...~n", []),
-    Async_response_bin_once = do_async_req_list(
-                                Url, get, [once, {response_format, binary} | Options]),
-    Res1 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
-    Res2 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once),
-    case {Res1, Res2} of
-        {success, success} ->
-            io:format("  Chunked streaming working~n", []);
-        _ ->
-            ok
-    end.
-
-test_chunked_streaming_once() ->
-    test_chunked_streaming_once([]).
-
-test_chunked_streaming_once(Options) ->
-    io:format("~nTesting chunked streaming with the {stream_to, {Pid, once}} option...~n", []),
-    Url = "http://www.httpwatch.com/httpgallery/chunked/",
-    io:format("  URL: ~s~n", [Url]),
-    io:format("  Fetching data with streaming as binary, {active, once}...~n", []),
-    case do_async_req_list(Url, get, [once, {response_format, binary} | Options]) of
-        {ok, _, _, _} ->
-            io:format("  Success!~n", []);
-        Err ->
-            io:format("  Fail: ~p~n", [Err])
-    end.
-
-compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
-    success;
-compare_responses({ok, St_code, _, Body_1}, {ok, St_code, _, Body_2}, {ok, St_code, _, Body_3}) ->
-    case Body_1 of
-	Body_2 ->
-	    io:format("Body_1 and Body_2 match~n", []);
-	Body_3 ->
-	    io:format("Body_1 and Body_3 match~n", []);
-	_ when Body_2 == Body_3 ->
-	    io:format("Body_2 and Body_3 match~n", []);
-	_ ->
-	    io:format("All three bodies are different!~n", [])
-    end,
-    io:format("Body_1 -> ~p~n", [Body_1]),
-    io:format("Body_2 -> ~p~n", [Body_2]),
-    io:format("Body_3 -> ~p~n", [Body_3]),
-    fail_bodies_mismatch;
-compare_responses(R1, R2, R3) ->
-    io:format("R1 -> ~p~n", [R1]),
-    io:format("R2 -> ~p~n", [R2]),
-    io:format("R3 -> ~p~n", [R3]),
-    fail.
-
-%% do_async_req_list(Url) ->
-%%     do_async_req_list(Url, get).
-
-%% do_async_req_list(Url, Method) ->
-%%     do_async_req_list(Url, Method, [{stream_to, self()},
-%% 				    {stream_chunk_size, 1000}]).
-
-do_async_req_list(Url, Method, Options) ->
-    {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
-				   [self(), Url, Method, 
-				    Options ++ [{stream_chunk_size, 1000}]]),
-%%    io:format("Spawned process ~p~n", [Pid]),
-    wait_for_resp(Pid).
-
-wait_for_resp(Pid) ->
-    receive
-	{async_result, Pid, Res} ->
-	    Res;
-	{async_result, Other_pid, _} ->
-	    io:format("~p: Waiting for result from ~p: got from ~p~n", [self(), Pid, Other_pid]),
-	    wait_for_resp(Pid);
-	{'DOWN', _, _, Pid, Reason} ->
-	    {'EXIT', Reason};
-	{'DOWN', _, _, _, _} ->
-	    wait_for_resp(Pid);
-	Msg ->
-	    io:format("Recvd unknown message: ~p~n", [Msg]),
-	    wait_for_resp(Pid)
-    after 100000 ->
-	  {error, timeout}
-    end.
-
-i_do_async_req_list(Parent, Url, Method, Options) ->
-    Options_1 = case lists:member(once, Options) of
-                    true ->
-                        [{stream_to, {self(), once}} | (Options -- [once])];
-                    false ->
-                        [{stream_to, self()} | Options]
-                end,
-    Res = ibrowse:send_req(Url, [], Method, [], Options_1),
-    case Res of
-	{ibrowse_req_id, Req_id} ->
-	    Result = wait_for_async_resp(Req_id, Options, undefined, undefined, []),
-	    Parent ! {async_result, self(), Result};
-	Err ->
-	    Parent ! {async_result, self(), Err}
-    end.
-
-wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) ->    
-    receive
-	{ibrowse_async_headers, Req_id, StatCode, Headers} ->
-            %% io:format("Recvd headers...~n", []),
-            maybe_stream_next(Req_id, Options),
-	    wait_for_async_resp(Req_id, Options, StatCode, Headers, Body);
-	{ibrowse_async_response_end, Req_id} ->
-            %% io:format("Recvd end of response.~n", []),
-	    Body_1 = list_to_binary(lists:reverse(Body)),
-	    {ok, Acc_Stat_code, Acc_Headers, Body_1};
-	{ibrowse_async_response, Req_id, Data} ->
-            maybe_stream_next(Req_id, Options),
-            %% io:format("Recvd data...~n", []),
-	    wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, [Data | Body]);
-	{ibrowse_async_response, Req_id, {error, _} = Err} ->
-            {ok, Acc_Stat_code, Acc_Headers, Err};
-	Err ->
-	    {ok, Acc_Stat_code, Acc_Headers, Err}
-    after 10000 ->
-            {timeout, Acc_Stat_code, Acc_Headers, Body}
-    end.
-
-maybe_stream_next(Req_id, Options) ->
-    case lists:member(once, Options) of
-        true ->
-            ibrowse:stream_next(Req_id);
-        false ->
-            ok
-    end.
-
-execute_req(local_test_fun, Method, Args) ->
-    io:format("     ~-54.54w: ", [Method]),
-    Result = (catch apply(?MODULE, Method, Args)),
-    io:format("~p~n", [Result]);
-execute_req(Url, Method, Options) ->
-    io:format("~7.7w, ~50.50s: ", [Method, Url]),
-    Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
-    case Result of
-	{ok, SCode, _H, _B} ->
-	    io:format("Status code: ~p~n", [SCode]);
-	Err ->
-	    io:format("~p~n", [Err])
-    end.
-
-ue_test() ->
-    ue_test(lists:duplicate(1024, $?)).
-ue_test(Data) ->
-    {Time, Res} = timer:tc(ibrowse_lib, url_encode, [Data]),
-    io:format("Time -> ~p~n", [Time]),
-    io:format("Data Length -> ~p~n", [length(Data)]),
-    io:format("Res Length -> ~p~n", [length(Res)]).
-%    io:format("Result -> ~s~n", [Res]).
-
-log_msg(Fmt, Args) ->
-    io:format("~s -- " ++ Fmt,
-	      [ibrowse_lib:printable_date() | Args]).
-
-%%------------------------------------------------------------------------------
-%% Test what happens when the response to a HEAD request is a
-%% Chunked-Encoding response with a non-empty body. Issue #67 on
-%% Github
-%%------------------------------------------------------------------------------
-test_head_transfer_encoding() ->
-    clear_msg_q(),
-    test_head_transfer_encoding("http://localhost:8181/ibrowse_head_test").
-
-test_head_transfer_encoding(Url) ->
-    case ibrowse:send_req(Url, [], head) of
-        {ok, "200", _, _} ->
-            success;
-        Res ->
-            {test_failed, Res}
-    end.
-
-%%------------------------------------------------------------------------------
-%% Test the 'head_response_with_body' workaround for servers which send
-%% a non-empty body in response to a HEAD request
-%%------------------------------------------------------------------------------
-test_head_response_with_body() ->
-    clear_msg_q(),
-    test_head_response_with_body("http://localhost:8181/ibrowse_head_transfer_enc").
-
-test_head_response_with_body(Url) ->
-    case ibrowse:send_req(Url, [], head, [], [{workaround, head_response_with_body}]) of
-        {ok, "400", _, _} ->
-            success;
-        Res ->
-            {test_failed, Res}
-    end.
-
-%%------------------------------------------------------------------------------
-%% Test what happens when the request at the head of a pipeline times out
-%%------------------------------------------------------------------------------
-test_pipeline_head_timeout() ->
-    clear_msg_q(),
-    test_pipeline_head_timeout("http://localhost:8181/ibrowse_inac_timeout_test").
-
-test_pipeline_head_timeout(Url) ->
-    {ok, Pid} = ibrowse:spawn_worker_process(Url),
-    Test_parent = self(),
-    Fun = fun({fixed, Timeout}) ->
-                  spawn(fun() ->
-                                do_test_pipeline_head_timeout(Url, Pid, Test_parent, Timeout)
-                        end);
-             (Timeout_mult) ->
-                  spawn(fun() ->
-                                Timeout = 1000 + Timeout_mult*1000,
-                                do_test_pipeline_head_timeout(Url, Pid, Test_parent, Timeout)
-                        end)
-          end,
-    Pids = [Fun(X) || X <- [{fixed, 32000} | lists:seq(1,10)]],
-    Result = accumulate_worker_resp(Pids),
-    case lists:all(fun({_, X_res}) ->
-                           X_res == {error,req_timedout}
-                   end, Result) of
-        true ->
-            success;
-        false ->
-            {test_failed, Result}
-    end.
-
-do_test_pipeline_head_timeout(Url, Pid, Test_parent, Req_timeout) ->
-    Resp = ibrowse:send_req_direct(
-                                 Pid,
-                                 Url,
-                                 [], get, [],
-                                 [{socket_options,[{keepalive,true}]},
-                                  {inactivity_timeout,180000},
-                                  {connect_timeout,180000}], Req_timeout),
-    Test_parent ! {self(), Resp}.
-
-accumulate_worker_resp(Pids) ->
-    accumulate_worker_resp(Pids, []).
-
-accumulate_worker_resp([_ | _] = Pids, Acc) ->
-    receive
-        {Pid, Res} when is_pid(Pid) ->
-            accumulate_worker_resp(Pids -- [Pid], [{Pid, Res} | Acc]);
-        Err ->
-            io:format("Received unexpected: ~p~n", [Err])
-    end;
-accumulate_worker_resp([], Acc) ->
-    lists:reverse(Acc).
-
-clear_msg_q() ->
-    receive
-        _ ->
-            clear_msg_q()
-    after 0 ->
-            ok
-    end.
-%%------------------------------------------------------------------------------
-%% Regression test (20/12/2010): pipelined requests using the
-%% {stream_to, {pid(), once}} option against a chunked response
-%%------------------------------------------------------------------------------
-
-test_20122010() ->
-    test_20122010("http://localhost:8181").
-
-test_20122010(Url) ->
-    {ok, Pid} = ibrowse:spawn_worker_process(Url),
-    Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>,
-    Test_parent = self(),
-    Fun = fun() ->
-                  do_test_20122010(Url, Pid, Expected_resp, Test_parent)
-          end,
-    Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)],
-    wait_for_workers(Pids).
-
-wait_for_workers([{Pid, _Ref} | Pids]) ->
-    receive
-        {Pid, success} ->
-            wait_for_workers(Pids)
-    after 60000 ->
-            test_failed
-    end;
-wait_for_workers([]) ->
-    success.
-
-do_test_20122010(Url, Pid, Expected_resp, Test_parent) ->
-    do_test_20122010(10, Url, Pid, Expected_resp, Test_parent).
-
-do_test_20122010(0, _Url, _Pid, _Expected_resp, Test_parent) ->
-    Test_parent ! {self(), success};
-do_test_20122010(Rem_count, Url, Pid, Expected_resp, Test_parent) ->
-    {ibrowse_req_id, Req_id} = ibrowse:send_req_direct(
-                                 Pid,
-                                 Url ++ "/ibrowse_stream_once_chunk_pipeline_test",
-                                 [], get, [],
-                                 [{stream_to, {self(), once}},
-                                  {inactivity_timeout, 10000},
-                                  {include_ibrowse_req_id, true}]),
-    do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]),
-    Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])),
-    receive
-        {ibrowse_async_headers, Req_id, "200", Headers} ->
-            case lists:keysearch("x-ibrowse-request-id", 1, Headers) of
-                {value, {_, Req_id_str}} ->
-                    ok;
-                {value, {_, Req_id_1}} ->
-                    do_trace("~p -- Sent req-id: ~1000.p. Recvd: ~1000.p~n",
-                              [self(), Req_id, Req_id_1]),
-                    exit(req_id_mismatch)
-            end
-    after 5000 ->
-            do_trace("~p -- response headers not received~n", [self()]),
-            exit({timeout, test_failed})
-    end,
-    do_trace("~p -- response headers received~n", [self()]),
-    ok = ibrowse:stream_next(Req_id),
-    case do_test_20122010_1(Expected_resp, Req_id, []) of
-        true ->
-            do_test_20122010(Rem_count - 1, Url, Pid, Expected_resp, Test_parent);
-        false ->
-            Test_parent ! {self(), failed}
-    end.
-
-do_test_20122010_1(Expected_resp, Req_id, Acc) ->
-    receive
-        {ibrowse_async_response, Req_id, Body_part} ->
-            ok = ibrowse:stream_next(Req_id),
-            do_test_20122010_1(Expected_resp, Req_id, [Body_part | Acc]);
-        {ibrowse_async_response_end, Req_id} ->
-            Acc_1 = list_to_binary(lists:reverse(Acc)),
-            Result = Acc_1 == Expected_resp,
-            do_trace("~p -- End of response. Result: ~p~n", [self(), Result]),
-            Result
-    after 1000 ->
-            exit({timeout, test_failed})
-    end.
-
-do_trace(Fmt, Args) ->
-    do_trace(get(my_trace_flag), Fmt, Args).
-
-do_trace(true, Fmt, Args) ->
-    io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]);
-do_trace(_, _, _) ->
-    ok.


[49/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Don't stop running tests on the first failure


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/3069c013
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/3069c013
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/3069c013

Branch: refs/heads/1843-feature-bigcouch
Commit: 3069c0134ef8e8eb416035d1ca2b7018f82e7b88
Parents: 0bb58f5
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 22:19:41 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:53 2014 -0600

----------------------------------------------------------------------
 test/javascript/cli_runner.js | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/3069c013/test/javascript/cli_runner.js
----------------------------------------------------------------------
diff --git a/test/javascript/cli_runner.js b/test/javascript/cli_runner.js
index e8ebd2e..da2eed0 100644
--- a/test/javascript/cli_runner.js
+++ b/test/javascript/cli_runner.js
@@ -38,7 +38,6 @@ function runTest() {
   } catch(e) {
     console.log("FAIL\nReason: " + e.message);
     fmtStack(e.stack);
-    quit(1);
   }
 }
 


[32/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/ibrowse


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/3822d8f4
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/3822d8f4
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/3822d8f4

Branch: refs/heads/1843-feature-bigcouch
Commit: 3822d8f4923cbf9c026a933f15759c2857cc20a0
Parents: 753e746
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:41:31 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:41:31 2014 -0600

----------------------------------------------------------------------
 src/ibrowse/include/ibrowse.hrl         |   21 -
 src/ibrowse/src/ibrowse.app.src         |    7 -
 src/ibrowse/src/ibrowse.erl             |  929 -------------
 src/ibrowse/src/ibrowse_app.erl         |   63 -
 src/ibrowse/src/ibrowse_http_client.erl | 1921 --------------------------
 src/ibrowse/src/ibrowse_lb.erl          |  252 ----
 src/ibrowse/src/ibrowse_lib.erl         |  441 ------
 src/ibrowse/src/ibrowse_sup.erl         |   63 -
 src/ibrowse/src/ibrowse_test.erl        |  625 ---------
 9 files changed, 4322 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/include/ibrowse.hrl
----------------------------------------------------------------------
diff --git a/src/ibrowse/include/ibrowse.hrl b/src/ibrowse/include/ibrowse.hrl
deleted file mode 100644
index 18dde82..0000000
--- a/src/ibrowse/include/ibrowse.hrl
+++ /dev/null
@@ -1,21 +0,0 @@
--ifndef(IBROWSE_HRL).
--define(IBROWSE_HRL, "ibrowse.hrl").
-
--record(url, {
-          abspath,
-          host,
-          port,
-          username,
-          password,
-          path,
-          protocol,
-          host_type  % 'hostname', 'ipv4_address' or 'ipv6_address'
-}).
-
--record(lb_pid, {host_port, pid}).
-
--record(client_conn, {key, cur_pipeline_size = 0, reqs_served = 0}).
-
--record(ibrowse_conf, {key, value}).
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse.app.src
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse.app.src b/src/ibrowse/src/ibrowse.app.src
deleted file mode 100644
index f65ba44..0000000
--- a/src/ibrowse/src/ibrowse.app.src
+++ /dev/null
@@ -1,7 +0,0 @@
-{application, ibrowse,
-        [{description, "Erlang HTTP client application"},
-         {vsn, git},
-         {registered, [ibrowse_sup, ibrowse]},
-         {applications, [kernel,stdlib]},
-	 {env, []},
-	 {mod, {ibrowse_app, []}}]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse.erl b/src/ibrowse/src/ibrowse.erl
deleted file mode 100644
index b2a7919..0000000
--- a/src/ibrowse/src/ibrowse.erl
+++ /dev/null
@@ -1,929 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% File    : ibrowse.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : Load balancer process for HTTP client connections.
-%%%
-%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%%-------------------------------------------------------------------
-%% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2012 Chandrashekhar Mullaparthi
-%% @doc The ibrowse application implements an HTTP 1.1 client in Erlang. This
-%% module implements the API of the HTTP client. There is one named
-%% process called 'ibrowse' which assists in load balancing and maintaining
-%% configuration. There is one load balancing process per unique webserver.
-%% There is one process to handle one TCP connection to a webserver
-%% (implemented in the module ibrowse_http_client). Multiple connections to a
-%% webserver are set up based on the settings for each webserver. The
-%% ibrowse process also determines which connection to pipeline a
-%% certain request on. The functions to call are send_req/3,
-%% send_req/4, send_req/5, send_req/6.
-%%
-%% <p>Here are a few sample invocations.</p>
-%%
-%% <code>
-%% ibrowse:send_req("http://intranet/messenger/", [], get). 
-%% <br/><br/>
-%% 
-%% ibrowse:send_req("http://www.google.com/", [], get, [], 
-%%               [{proxy_user, "XXXXX"},
-%%                {proxy_password, "XXXXX"},
-%%                {proxy_host, "proxy"},
-%%                {proxy_port, 8080}], 1000). 
-%% <br/><br/>
-%%
-%%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
-%%               [{proxy_user, "XXXXX"},
-%%                {proxy_password, "XXXXX"},
-%%                {proxy_host, "proxy"},
-%%                {proxy_port, 8080},
-%%                {save_response_to_file, true}], 1000).
-%% <br/><br/>
-%%
-%% ibrowse:send_req("http://www.erlang.org", [], head).
-%%
-%% <br/><br/>
-%% ibrowse:send_req("http://www.sun.com", [], options).
-%%
-%% <br/><br/>
-%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
-%%
-%% <br/><br/>
-%% ibrowse:send_req("http://www.google.com", [], get, [], 
-%%                   [{stream_to, self()}]).
-%% </code>
-%%
-
--module(ibrowse).
--behaviour(gen_server).
-%%--------------------------------------------------------------------
-%% Include files
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% External exports
--export([start_link/0, start/0, stop/0]).
-
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-         terminate/2, code_change/3]).
-
-%% API interface
--export([
-         rescan_config/0,
-         rescan_config/1,
-         add_config/1,
-         get_config_value/1,
-         get_config_value/2,
-         spawn_worker_process/1,
-         spawn_worker_process/2,
-         spawn_link_worker_process/1,
-         spawn_link_worker_process/2,
-         stop_worker_process/1,
-         send_req/3,
-         send_req/4,
-         send_req/5,
-         send_req/6,
-         send_req_direct/4,
-         send_req_direct/5,
-         send_req_direct/6,
-         send_req_direct/7,
-         stream_next/1,
-         stream_close/1,
-         set_max_sessions/3,
-         set_max_pipeline_size/3,
-         set_dest/3,
-         trace_on/0,
-         trace_off/0,
-         trace_on/2,
-         trace_off/2,
-         all_trace_off/0,
-         show_dest_status/0,
-         show_dest_status/1,
-         show_dest_status/2,
-         get_metrics/0,
-         get_metrics/2
-        ]).
-
--ifdef(debug).
--compile(export_all).
--endif.
-
--import(ibrowse_lib, [
-                      parse_url/1,
-                      get_value/3,
-                      do_trace/2
-                     ]).
-                      
--record(state, {trace = false}).
-
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("stdlib/include/ms_transform.hrl").
-
--define(DEF_MAX_SESSIONS,10).
--define(DEF_MAX_PIPELINE_SIZE,10).
-
-%%====================================================================
-%% External functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: start_link/0
-%% Description: Starts the server
-%%--------------------------------------------------------------------
-%% @doc Starts the ibrowse process linked to the calling process. Usually invoked by the supervisor ibrowse_sup
-%% @spec start_link() -> {ok, pid()}
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc Starts the ibrowse process without linking. Useful when testing using the shell
-start() ->
-    gen_server:start({local, ?MODULE}, ?MODULE, [], [{debug, []}]).
-
-%% @doc Stop the ibrowse process. Useful when testing using the shell.
-stop() ->
-    case catch gen_server:call(ibrowse, stop) of
-        {'EXIT',{noproc,_}} ->
-            ok;
-        Res ->
-            Res
-    end.
-
-%% @doc This is the basic function to send an HTTP request.
-%% The Status return value indicates the HTTP status code returned by the webserver.
-%% @spec send_req(Url::string(), Headers::headerList(), Method::method()) -> response()
-%% headerList() = [{header(), value()}]
-%% header() = atom() | string()
-%% value() = term()
-%% method() = get | post | head | options | put | delete | trace | mkcol | propfind | proppatch | lock | unlock | move | copy
-%% Status = string()
-%% ResponseHeaders = [respHeader()]
-%% respHeader() = {headerName(), headerValue()}
-%% headerName() = string()
-%% headerValue() = string()
-%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, req_id() } | {error, Reason}
-%% req_id() = term()
-%% ResponseBody = string() | {file, Filename}
-%% Reason = term()
-send_req(Url, Headers, Method) ->
-    send_req(Url, Headers, Method, [], []).
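-
-%% A minimal usage sketch (hypothetical URL), matching the response()
-%% type documented above:
-%%   case ibrowse:send_req("http://www.example.com/", [], get) of
-%%       {ok, Status, _Resp_headers, Resp_body} ->
-%%           io:format("HTTP ~s, ~p bytes~n", [Status, length(Resp_body)]);
-%%       {error, Reason} ->
-%%           io:format("Request failed: ~p~n", [Reason])
-%%   end.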
-
-%% @doc Same as send_req/3. 
-%% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
-%% If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
-%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
-%% @spec send_req(Url, Headers, Method::method(), Body::body()) -> response()
-%% body() = [] | string() | binary() | fun_arity_0() | {fun_arity_1(), initial_state()}
-%% initial_state() = term()
-send_req(Url, Headers, Method, Body) ->
-    send_req(Url, Headers, Method, Body, []).
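-
-%% Sketch of a fun/1 request body (hypothetical file name): the
-%% connection handling process pulls the body in 1024-byte pieces
-%% until eof:
-%%   {ok, Fd} = file:open("data.bin", [read, binary]),
-%%   Body_fun = fun(F) ->
-%%                      case file:read(F, 1024) of
-%%                          {ok, Data} -> {ok, Data, F};
-%%                          eof        -> eof
-%%                      end
-%%              end,
-%%   ibrowse:send_req("http://www.example.com/upload", [], put,
-%%                    {Body_fun, Fd}).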
-
-%% @doc Same as send_req/4. 
-%% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
-%% HTTP Version to use is not specified, the default is 1.1.
-%% <br/>
-%% <ul>
-%% <li>The <code>host_header</code> option is useful in the case where ibrowse is
-%% connecting to a component such as <a
-%% href="http://www.stunnel.org">stunnel</a> which then sets up a
-%% secure connection to a webserver. In this case, the URL supplied to
-%% ibrowse must have the stunnel host/port details, but that won't
-%% make sense to the destination webserver. This option can then be
-%% used to specify what should go in the <code>Host</code> header in
-%% the request.</li>
-%% <li>The <code>stream_to</code> option can be used to have the HTTP
-%% response streamed to a process as messages as data arrives on the
-%% socket. If the calling process wishes to control the rate at which
-%% data is received from the server, the option <code>{stream_to,
-%% {process(), once}}</code> can be specified. The calling process
-%% will have to invoke <code>ibrowse:stream_next(Request_id)</code> to
-%% receive the next packet.</li>
-%%
-%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code> 
-%% are specified, the former takes precedence.</li>
-%%
-%% <li>For the <code>save_response_to_file</code> option, the response body is saved to
-%% file only if the status code is in the 200-299 range. If not, the response body is returned
-%% as a string.</li>
-%% <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
-%% information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
-%% is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
-%%
-%% <li>The <code>inactivity_timeout</code> option is useful when
-%% dealing with large response bodies and/or slow links, where it can
-%% be hard to estimate how long a request will take to complete. In
-%% such cases, the client might want to time out if no data has been
-%% received on the link for a certain time interval.
-%% 
-%% This value is also used to close connections which are not in use for 
-%% the specified timeout value.
-%% </li>
-%%
-%% <li>
-%% The <code>connect_timeout</code> option is to specify how long the
-%% client process should wait for connection establishment. This is
-%% useful in scenarios where connections to servers are usually setup
-%% very fast, but responses might take much longer compared to
-%% connection setup. In such cases, it is better for the calling
-%% process to time out faster if there is a problem (DNS lookup
-%% delays/failures, network routing issues, etc). The total timeout
-%% value specified for the request will be enforced. To illustrate using
-%% an example:
-%% <code>
-%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
-%% </code>
-%% In the above invocation, if the connection isn't established within
-%% 100 milliseconds, the request will fail with 
-%% <code>{error, conn_failed}</code>.<br/>
-%% If connection setup succeeds, the total time allowed for the
-%% request to complete will be 1000 milliseconds minus the time taken
-%% for connection setup.
-%% </li>
-%% 
-%% <li> The <code>socket_options</code> option can be used to set
-%% specific options on the socket. The <code>{active, true | false | once}</code> 
-%% and <code>{packet_type, Packet_type}</code> options will be filtered out by ibrowse.  </li>
-%%
-%% <li> The <code>headers_as_is</code> option is to enable the caller
-%% to send headers exactly as specified in the request without ibrowse
-%% adding some of its own. Required for some picky servers apparently.  </li>
-%%
-%% <li>The <code>give_raw_headers</code> option is to enable the
-%% caller to get access to the raw status line and raw unparsed
-%% headers. Not quite sure why someone would want this, but one of my
-%% users asked for it, so here it is. </li>
-%%
-%% <li> The <code>preserve_chunked_encoding</code> option enables the caller
-%% to receive the raw data stream when the Transfer-Encoding of the server
-%% response is Chunked.
-%% </li>
-%% </ul>
-%%
-%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
-%% optionList() = [option()]
-%% option() = {max_sessions, integer()}        |
-%%          {response_format,response_format()}|
-%%          {stream_chunk_size, integer()}     |
-%%          {max_pipeline_size, integer()}     |
-%%          {trace, boolean()}                 | 
-%%          {is_ssl, boolean()}                |
-%%          {ssl_options, [SSLOpt]}            |
-%%          {pool_name, atom()}                |
-%%          {proxy_host, string()}             |
-%%          {proxy_port, integer()}            |
-%%          {proxy_user, string()}             |
-%%          {proxy_password, string()}         |
-%%          {use_absolute_uri, boolean()}      |
-%%          {basic_auth, {username(), password()}} |
-%%          {cookie, string()}                 |
-%%          {content_length, integer()}        |
-%%          {content_type, string()}           |
-%%          {save_response_to_file, srtf()}    |
-%%          {stream_to, stream_to()}           |
-%%          {http_vsn, {MajorVsn, MinorVsn}}   |
-%%          {host_header, string()}            |
-%%          {inactivity_timeout, integer()}    |
-%%          {connect_timeout, integer()}       |
-%%          {socket_options, Sock_opts}        |
-%%          {transfer_encoding, {chunked, ChunkSize}} | 
-%%          {headers_as_is, boolean()}         |
-%%          {give_raw_headers, boolean()}      |
-%%          {preserve_chunked_encoding,boolean()}     |
-%%          {workaround, head_response_with_body}
-%%
-%% stream_to() = process() | {process(), once}
-%% process() = pid() | atom()
-%% username() = string()
-%% password() = string()
-%% SSLOpt = term()
-%% Sock_opts = [Sock_opt]
-%% Sock_opt = term()
-%% ChunkSize = integer()
-%% srtf() = boolean() | filename() | {append, filename()}
-%% filename() = string()
-%% response_format() = list | binary
-send_req(Url, Headers, Method, Body, Options) ->
-    send_req(Url, Headers, Method, Body, Options, 30000).
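-
-%% Option usage sketch (hypothetical URL and credentials): fetch a
-%% basic-auth protected resource as a binary:
-%%   ibrowse:send_req("http://www.example.com/private", [], get, [],
-%%                    [{basic_auth, {"user", "secret"}},
-%%                     {response_format, binary}]).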
-
-%% @doc Same as send_req/5. 
-%% All timeout values are in milliseconds.
-%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
-%% Timeout = integer() | infinity
-send_req(Url, Headers, Method, Body, Options, Timeout) ->
-    case catch parse_url(Url) of
-        #url{host = Host,
-             port = Port,
-             protocol = Protocol} = Parsed_url ->
-            Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
-                         [] ->
-                             get_lb_pid(Parsed_url);
-                         [#lb_pid{pid = Lb_pid_1}] ->
-                             Lb_pid_1
-                     end,
-            Max_sessions = get_max_sessions(Host, Port, Options),
-            Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
-            Options_1 = merge_options(Host, Port, Options),
-            {SSLOptions, IsSSL} =
-                case (Protocol == https) orelse
-                    get_value(is_ssl, Options_1, false) of
-                    false -> {[], false};
-                    true -> {get_value(ssl_options, Options_1, []), true}
-                end,
-            try_routing_request(Lb_pid, Parsed_url,
-                                Max_sessions, 
-                                Max_pipeline_size,
-                                {SSLOptions, IsSSL}, 
-                                Headers, Method, Body, Options_1, Timeout, 0);
-        Err ->
-            {error, {url_parsing_failed, Err}}
-    end.
-
-try_routing_request(Lb_pid, Parsed_url,
-                    Max_sessions, 
-                    Max_pipeline_size,
-                    {SSLOptions, IsSSL}, 
-                    Headers, Method, Body, Options_1, Timeout, Try_count) when Try_count < 3 ->
-    case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
-                                             Max_sessions, 
-                                             Max_pipeline_size,
-                                             {SSLOptions, IsSSL}) of
-        {ok, Conn_Pid} ->
-            case do_send_req(Conn_Pid, Parsed_url, Headers,
-                             Method, Body, Options_1, Timeout) of
-                {error, sel_conn_closed} ->
-                    try_routing_request(Lb_pid, Parsed_url,
-                                        Max_sessions, 
-                                        Max_pipeline_size,
-                                        {SSLOptions, IsSSL}, 
-                                        Headers, Method, Body, Options_1, Timeout, Try_count + 1);
-                Res ->
-                    Res
-            end;
-        Err ->
-            Err
-    end;
-try_routing_request(_, _, _, _, _, _, _, _, _, _, _) ->
-    {error, retry_later}.
-
-merge_options(Host, Port, Options) ->
-    Config_options = get_config_value({options, Host, Port}, []) ++
-                     get_config_value({options, global}, []),
-    lists:foldl(
-      fun({Key, Val}, Acc) ->
-              case lists:keysearch(Key, 1, Options) of
-                  false ->
-                      [{Key, Val} | Acc];
-                  _ ->
-                      Acc
-              end
-      end, Options, Config_options).
-
-get_lb_pid(Url) ->
-    gen_server:call(?MODULE, {get_lb_pid, Url}).
-
-get_max_sessions(Host, Port, Options) ->
-    get_value(max_sessions, Options,
-              get_config_value({max_sessions, Host, Port},
-                               default_max_sessions())).
-
-get_max_pipeline_size(Host, Port, Options) ->
-    get_value(max_pipeline_size, Options,
-              get_config_value({max_pipeline_size, Host, Port},
-                               default_max_pipeline_size())).
-
-default_max_sessions() ->
-    safe_get_env(ibrowse, default_max_sessions, ?DEF_MAX_SESSIONS).
-
-default_max_pipeline_size() ->
-    safe_get_env(ibrowse, default_max_pipeline_size, ?DEF_MAX_PIPELINE_SIZE).
-
-safe_get_env(App, Key, Def_val) ->
-    case application:get_env(App, Key) of
-        undefined ->
-            Def_val;
-        {ok, Val} ->
-            Val
-    end.
-
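-%% Both defaults above can also be set through the application
-%% environment (sketch; values are illustrative), e.g. in a sys.config
-%% entry:
-%%   {ibrowse, [{default_max_sessions, 50},
-%%              {default_max_pipeline_size, 20}]}
-%% or at runtime:
-%%   application:set_env(ibrowse, default_max_sessions, 50).
-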
-%% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
-%% to achieve the same effect.
-set_dest(Host, Port, [{max_sessions, Max} | T]) ->
-    set_max_sessions(Host, Port, Max),
-    set_dest(Host, Port, T);
-set_dest(Host, Port, [{max_pipeline_size, Max} | T]) ->
-    set_max_pipeline_size(Host, Port, Max),
-    set_dest(Host, Port, T);
-set_dest(Host, Port, [{trace, Bool} | T]) when Bool == true; Bool == false ->
-    ibrowse ! {trace, Bool, Host, Port},
-    set_dest(Host, Port, T);
-set_dest(_Host, _Port, [H | _]) ->
-    exit({invalid_option, H});
-set_dest(_, _, []) ->
-    ok.
-    
-%% @doc Set the maximum number of connections allowed to a specific Host:Port.
-%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
-set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
-    gen_server:call(?MODULE, {set_config_value, {max_sessions, Host, Port}, Max}).
-
-%% @doc Set the maximum pipeline size for each connection to a specific Host:Port.
-%% @spec set_max_pipeline_size(Host::string(), Port::integer(), Max::integer()) -> ok
-set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
-    gen_server:call(?MODULE, {set_config_value, {max_pipeline_size, Host, Port}, Max}).
-
-do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
-    case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
-                                            Headers, Method, ensure_bin(Body),
-                                            Options, Timeout) of
-        {'EXIT', {timeout, _}} ->
-            {error, req_timedout};
-        {'EXIT', {noproc, {gen_server, call, [Conn_Pid, _, _]}}} ->
-            {error, sel_conn_closed};
-        {'EXIT', {normal, _}} ->
-            {error, req_timedout};
-        {error, connection_closed} ->
-            {error, sel_conn_closed};
-        {'EXIT', Reason} ->
-            {error, {'EXIT', Reason}};
-        %% Fresh variable names are needed here: reusing the already-bound
-        %% Headers/Body from the function head would only match responses
-        %% whose headers and body happen to equal those of the request.
-        {ok, St_code, Resp_headers, Resp_body} = Ret when is_binary(Resp_body) ->
-            case get_value(response_format, Options, list) of
-                list ->
-                    {ok, St_code, Resp_headers, binary_to_list(Resp_body)};
-                binary ->
-                    Ret
-            end;
-        Ret ->
-            Ret
-    end.
-
-ensure_bin(L) when is_list(L)                     -> list_to_binary(L);
-ensure_bin(B) when is_binary(B)                   -> B;
-ensure_bin(Fun) when is_function(Fun)             -> Fun;
-ensure_bin({Fun}) when is_function(Fun)           -> Fun;
-ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
-
-%% @doc Creates an HTTP client process to the specified Host:Port which
-%% is not part of the load balancing pool. This is useful in cases
-%% where some requests to a webserver might take a long time whereas
-%% some might take a very short time. To avoid getting these quick
-%% requests stuck in the pipeline behind time consuming requests, use
-%% this function to get a handle to a connection process. <br/>
-%% <b>Note:</b> Calling this function only creates a worker process. No connection
-%% is set up. The connection attempt is made only when the first
-%% request is sent via any of the send_req_direct/4,5,6,7 functions.<br/>
-%% <b>Note:</b> It is the responsibility of the calling process to control
-%% pipeline size on such connections.
-%%
-%% @spec spawn_worker_process(Url::string()) -> {ok, pid()}
-spawn_worker_process(Url) ->
-    ibrowse_http_client:start(Url).
-
-%% @doc Same as spawn_worker_process/1 but takes as input a Host and Port
-%% instead of a URL.
-%% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
-spawn_worker_process(Host, Port) ->
-    ibrowse_http_client:start({Host, Port}).
-
-%% @doc Same as spawn_worker_process/1 except that the calling process
-%% is linked to the worker process which is spawned.
-%% @spec spawn_link_worker_process(Url::string()) -> {ok, pid()}
-spawn_link_worker_process(Url) ->
-    ibrowse_http_client:start_link(Url).
-
-%% @doc Same as spawn_worker_process/2 except that the calling process
-%% is linked to the worker process which is spawned.
-%% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
-spawn_link_worker_process(Host, Port) ->
-    ibrowse_http_client:start_link({Host, Port}).
-
-%% @doc Terminate a worker process spawned using
-%% spawn_worker_process/2 or spawn_link_worker_process/2. Requests in
-%% progress will get the error response <pre>{error, closing_on_request}</pre>
-%% @spec stop_worker_process(Conn_pid::pid()) -> ok
-stop_worker_process(Conn_pid) ->
-    ibrowse_http_client:stop(Conn_pid).
-
-%% @doc Same as send_req/3 except that the first argument is the PID
-%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
-send_req_direct(Conn_pid, Url, Headers, Method) ->
-    send_req_direct(Conn_pid, Url, Headers, Method, [], []).
-
-%% @doc Same as send_req/4 except that the first argument is the PID
-%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
-send_req_direct(Conn_pid, Url, Headers, Method, Body) ->
-    send_req_direct(Conn_pid, Url, Headers, Method, Body, []).
-
-%% @doc Same as send_req/5 except that the first argument is the PID
-%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
-send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
-    send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, 30000).
-
-%% @doc Same as send_req/6 except that the first argument is the PID
-%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
-send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
-    case catch parse_url(Url) of
-        #url{host = Host,
-             port = Port} = Parsed_url ->
-            Options_1 = merge_options(Host, Port, Options),
-            case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
-                {error, {'EXIT', {noproc, _}}} ->
-                    {error, worker_is_dead};
-                Ret ->
-                    Ret
-            end;
-        Err ->
-            {error, {url_parsing_failed, Err}}
-    end.
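-
-%% Putting the worker-process API together (editor's sketch with a
-%% hypothetical URL):
-%%   {ok, Conn_pid} = ibrowse:spawn_worker_process("http://www.example.com"),
-%%   {ok, _Status, _Headers, _Body} =
-%%       ibrowse:send_req_direct(Conn_pid, "http://www.example.com/slow",
-%%                               [], get, [], [], 60000),
-%%   ok = ibrowse:stop_worker_process(Conn_pid).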
-
-%% @doc Tell ibrowse to stream the next chunk of data to the
-%% caller. Should be used in conjunction with the
-%% <code>stream_to</code> option
-%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_next(Req_id) ->    
-    case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
-        [] ->
-            {error, unknown_req_id};
-        [{_, Pid}] ->
-            catch Pid ! {stream_next, Req_id},
-            ok
-    end.
-
-%% @doc Tell ibrowse to close the connection associated with the
-%% specified stream.  Should be used in conjunction with the
-%% <code>stream_to</code> option. Note that all requests in progress on
-%% the connection which is serving this Req_id will be aborted, and an
-%% error returned.
-%% @spec stream_close(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_close(Req_id) ->    
-    case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
-        [] ->
-            {error, unknown_req_id};
-        [{_, Pid}] ->
-            catch Pid ! {stream_close, Req_id},
-            ok
-    end.
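-
-%% Sketch of the receive loop a caller runs when using the
-%% {stream_to, {self(), once}} option (message shapes as demonstrated
-%% by test_stream_once in ibrowse_test.erl):
-%%   loop(Req_id) ->
-%%       receive
-%%           {ibrowse_async_headers, Req_id, _Status, _Headers} ->
-%%               ok = ibrowse:stream_next(Req_id),
-%%               loop(Req_id);
-%%           {ibrowse_async_response, Req_id, _Data} ->
-%%               ok = ibrowse:stream_next(Req_id),
-%%               loop(Req_id);
-%%           {ibrowse_async_response_end, Req_id} ->
-%%               done
-%%       end.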
-
-%% @doc Turn tracing on for the ibrowse process
-trace_on() ->
-    ibrowse ! {trace, true}.
-%% @doc Turn tracing off for the ibrowse process
-trace_off() ->
-    ibrowse ! {trace, false}.
-
-%% @doc Turn tracing on for all connections to the specified HTTP
-%% server. Host is whatever is specified as the domain name in the URL
-%% @spec trace_on(Host, Port) -> ok
-%% Host = string() 
-%% Port = integer()
-trace_on(Host, Port) ->
-    ibrowse ! {trace, true, Host, Port},
-    ok.
-
-%% @doc Turn tracing OFF for all connections to the specified HTTP
-%% server.
-%% @spec trace_off(Host, Port) -> ok
-trace_off(Host, Port) ->
-    ibrowse ! {trace, false, Host, Port},
-    ok.
-
-%% @doc Turn Off ALL tracing
-%% @spec all_trace_off() -> ok
-all_trace_off() ->
-    ibrowse ! all_trace_off,
-    ok.
-
-%% @doc Shows some internal information about load balancing. Info
-%% about workers spawned using spawn_worker_process/2 or
-%% spawn_link_worker_process/2 is not included.
-show_dest_status() ->
-    io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
-              ["Server:port", "ETS", "Num conns", "LB Pid"]),
-    io:format("~80.80.=s~n", [""]),
-    Metrics = get_metrics(),
-    lists:foreach(
-      fun({Host, Port, Lb_pid, Tid, Size}) ->
-              %% Tid may be the atom 'unknown', so format with ~w rather
-              %% than integer_to_list/1; column widths match the header.
-              io:format("~-40.40s | ~-5w | ~-10w | ~p~n",
-                        [Host ++ ":" ++ integer_to_list(Port),
-                         Tid, Size, Lb_pid])
-      end, Metrics).
-
-show_dest_status(Url) ->
-    #url{host = Host, port = Port} = ibrowse_lib:parse_url(Url),
-    show_dest_status(Host, Port).
-
-%% @doc Shows some internal information about load balancing to a
-%% specified Host:Port. Info about workers spawned using
-%% spawn_worker_process/2 or spawn_link_worker_process/2 is not
-%% included.
-show_dest_status(Host, Port) ->
-    case get_metrics(Host, Port) of
-        {Lb_pid, MsgQueueSize, Tid, Size,
-         {{First_p_sz, First_speculative_sz},
-          {Last_p_sz, Last_speculative_sz}}} ->
-            io:format("Load Balancer Pid     : ~p~n"
-                      "LB process msg q size : ~p~n"
-                      "LB ETS table id       : ~p~n"
-                      "Num Connections       : ~p~n"
-                      "Smallest pipeline     : ~p:~p~n"
-                      "Largest pipeline      : ~p:~p~n",
-                      [Lb_pid, MsgQueueSize, Tid, Size, 
-                       First_p_sz, First_speculative_sz,
-                       Last_p_sz, Last_speculative_sz]);
-        _Err ->
-            io:format("Metrics not available~n", [])
-    end.
-
-get_metrics() ->
-    Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
-                                                             is_integer(Port) ->
-                                 true;
-                            (_) ->
-                                 false
-                         end, ets:tab2list(ibrowse_lb)),
-    All_ets = ets:all(),
-    lists:map(fun({lb_pid, {Host, Port}, Lb_pid}) ->
-                  case lists:dropwhile(
-                         fun(Tid) ->
-                                 ets:info(Tid, owner) /= Lb_pid
-                         end, All_ets) of
-                      [] ->
-                          {Host, Port, Lb_pid, unknown, 0};
-                      [Tid | _] ->
-                          Size = case catch (ets:info(Tid, size)) of
-                                     N when is_integer(N) -> N;
-                                     _ -> 0
-                                 end,
-                          {Host, Port, Lb_pid, Tid, Size}
-                  end
-              end, Dests).
-
-get_metrics(Host, Port) ->
-    case ets:lookup(ibrowse_lb, {Host, Port}) of
-        [] ->
-            no_active_processes;
-        [#lb_pid{pid = Lb_pid}] ->
-            MsgQueueSize = (catch process_info(Lb_pid, message_queue_len)),
-            case lists:dropwhile(
-                   fun(Tid) ->
-                           ets:info(Tid, owner) /= Lb_pid
-                   end, ets:all()) of
-                [] ->
-                    {Lb_pid, MsgQueueSize, unknown, 0, unknown};
-                [Tid | _] ->
-                    try
-                        Size = ets:info(Tid, size),
-                        case Size of
-                            0 ->
-                                %% Empty table: return a result in the same
-                                %% shape as the non-empty case so callers
-                                %% such as show_dest_status/2 can match it.
-                                {Lb_pid, MsgQueueSize, Tid, 0,
-                                 {{0, 0}, {0, 0}}};
-                            _ ->
-                                First = ets:first(Tid),
-                                Last = ets:last(Tid),
-                                [{_, First_p_sz, First_speculative_sz}] = ets:lookup(Tid, First),
-                                [{_, Last_p_sz, Last_speculative_sz}] = ets:lookup(Tid, Last),
-                                {Lb_pid, MsgQueueSize, Tid, Size,
-                                 {{First_p_sz, First_speculative_sz}, {Last_p_sz, Last_speculative_sz}}}
-                        end
-                    catch _:_ ->
-                            not_available
-                    end
-            end
-    end.
-
-%% @doc Clear current configuration for ibrowse and load from the file
-%% ibrowse.conf in the IBROWSE_EBIN/../priv directory. Current
-%% configuration is cleared only if the ibrowse.conf file is readable
-%% using file:consult/1
-rescan_config() ->
-    gen_server:call(?MODULE, rescan_config).
-
-%% @doc Clear the current configuration for ibrowse and load it from the
-%% specified file, or from a list of configuration terms. The current
-%% configuration is cleared only if the specified file is readable using
-%% file:consult/1
-rescan_config([{_,_}|_]=Terms) ->
-    gen_server:call(?MODULE, {rescan_config_terms, Terms});
-rescan_config(File) when is_list(File) ->
-    gen_server:call(?MODULE, {rescan_config, File}).
-
-%% @doc Add additional configuration elements at runtime.
-add_config([{_,_}|_]=Terms) ->
-    gen_server:call(?MODULE, {add_config_terms, Terms}).
-
-%%====================================================================
-%% Server functions
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init/1
-%% Description: Initiates the server
-%% Returns: {ok, State}          |
-%%          {ok, State, Timeout} |
-%%          ignore               |
-%%          {stop, Reason}
-%%--------------------------------------------------------------------
-init(_) ->
-    process_flag(trap_exit, true),
-    State = #state{},
-    put(my_trace_flag, State#state.trace),
-    put(ibrowse_trace_token, "ibrowse"),
-    ibrowse_lb     = ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
-    ibrowse_conf   = ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
-    ibrowse_stream = ets:new(ibrowse_stream, [named_table, public]),
-    import_config(),
-    {ok, State}.
-
-import_config() ->
-    case code:priv_dir(ibrowse) of
-        {error, _} ->
-            ok;
-        PrivDir ->
-            Filename = filename:join(PrivDir, "ibrowse.conf"),
-            import_config(Filename)
-    end.
-
-import_config(Filename) ->
-    case file:consult(Filename) of
-        {ok, Terms} ->
-            apply_config(Terms);
-        _Err ->
-            ok
-    end.
-
-apply_config(Terms) ->
-    ets:delete_all_objects(ibrowse_conf),
-    insert_config(Terms).
-
-insert_config(Terms) ->
-    Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options}) 
-             when is_list(Host), is_integer(Port),
-                  is_integer(MaxSess), MaxSess > 0,
-                  is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
-                  I = [{{max_sessions, Host, Port}, MaxSess},
-                       {{max_pipeline_size, Host, Port}, MaxPipe},
-                       {{options, Host, Port}, Options}],
-                  lists:foreach(
-                    fun({X, Y}) ->
-                            ets:insert(ibrowse_conf,
-                                       #ibrowse_conf{key = X, 
-                                                     value = Y})
-                    end, I);
-             ({K, V}) ->
-                  ets:insert(ibrowse_conf,
-                             #ibrowse_conf{key = K,
-                                           value = V});
-             (X) ->
-                  io:format("Skipping unrecognised term: ~p~n", [X])
-          end,
-    lists:foreach(Fun, Terms).
-
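-%% A sample ibrowse.conf matching the term shapes accepted above (host,
-%% port and values are illustrative; any other {Key, Value} pair is
-%% stored verbatim in the ibrowse_conf table):
-%%
-%%   {dest, "localhost", 5984, 10, 10, [{is_ssl, false}]}.
-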
-%% @doc Internal export
-get_config_value(Key) ->
-    try
-        [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
-        V
-    catch
-        error:badarg ->
-            throw({error, ibrowse_not_running})
-    end.
-
-%% @doc Internal export
-get_config_value(Key, DefVal) ->
-    try
-        case ets:lookup(ibrowse_conf, Key) of
-            [] ->
-                DefVal;
-            [#ibrowse_conf{value = V}] ->
-                V
-        end
-    catch
-        error:badarg ->
-            throw({error, ibrowse_not_running})
-    end.
-
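-%% For example (key shape as stored by insert_config/1 above; the values
-%% are illustrative):
-%%
-%%   get_config_value({options, "localhost", 5984}, [])
-%%
-%% returns the per-destination options, or [] when none are configured.
-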
-set_config_value(Key, Val) ->
-    ets:insert(ibrowse_conf, #ibrowse_conf{key = Key, value = Val}).
-%%--------------------------------------------------------------------
-%% Function: handle_call/3
-%% Description: Handling call messages
-%% Returns: {reply, Reply, State}          |
-%%          {reply, Reply, State, Timeout} |
-%%          {noreply, State}               |
-%%          {noreply, State, Timeout}      |
-%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_call({get_lb_pid, #url{host = Host, port = Port} = Url}, _From, State) ->
-    Pid = do_get_connection(Url, ets:lookup(ibrowse_lb, {Host, Port})),
-    {reply, Pid, State};
-
-handle_call(stop, _From, State) ->
-    do_trace("IBROWSE shutting down~n", []),
-    ets:foldl(fun(#lb_pid{pid = Pid}, Acc) ->
-                      ibrowse_lb:stop(Pid),
-                      Acc
-              end, [], ibrowse_lb),
-    {stop, normal, ok, State};
-
-handle_call({set_config_value, Key, Val}, _From, State) ->
-    set_config_value(Key, Val),
-    {reply, ok, State};
-
-handle_call(rescan_config, _From, State) ->
-    Ret = (catch import_config()),
-    {reply, Ret, State};
-
-handle_call({rescan_config, File}, _From, State) ->
-    Ret = (catch import_config(File)),
-    {reply, Ret, State};
-
-handle_call({rescan_config_terms, Terms}, _From, State) ->
-    Ret = (catch apply_config(Terms)),
-    {reply, Ret, State};
-
-handle_call({add_config_terms, Terms}, _From, State) ->
-    Ret = (catch insert_config(Terms)),
-    {reply, Ret, State};
-
-handle_call(Request, _From, State) ->
-    Reply = {unknown_request, Request},
-    {reply, Reply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast/2
-%% Description: Handling cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info/2
-%% Description: Handling all non call/cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_info(all_trace_off, State) ->
-    Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
-    Trace_on_dests = ets:select(ibrowse_conf, Mspec),
-    Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
-                  case lists:member({H, P}, Trace_on_dests) of
-                      false ->
-                          ok;
-                      true ->
-                          catch Pid ! {trace, false}
-                  end;
-             (_, Acc) ->
-                  Acc
-          end,
-    ets:foldl(Fun, undefined, ibrowse_lb),
-    ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
-    {noreply, State};
-                                  
-handle_info({trace, Bool}, State) ->
-    put(my_trace_flag, Bool),
-    {noreply, State};
-
-handle_info({trace, Bool, Host, Port}, State) ->
-    Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
-             when H == Host,
-                  P == Port ->
-                  catch Pid ! {trace, Bool};
-             (_, Acc) ->
-                  Acc
-          end,
-    ets:foldl(Fun, undefined, ibrowse_lb),
-    ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
-                                           value = Bool}),
-    {noreply, State};
-                     
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: terminate/2
-%% Description: Shutdown the server
-%% Returns: any (ignored by gen_server)
-%%--------------------------------------------------------------------
-terminate(_Reason, _State) ->
-    ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change/3
-%% Purpose: Convert process state when code is changed
-%% Returns: {ok, NewState}
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-do_get_connection(#url{host = Host, port = Port}, []) ->
-    {ok, Pid} = ibrowse_lb:start_link([Host, Port]),
-    ets:insert(ibrowse_lb, #lb_pid{host_port = {Host, Port}, pid = Pid}),
-    Pid;
-do_get_connection(_Url, [#lb_pid{pid = Pid}]) ->
-    Pid.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_app.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_app.erl b/src/ibrowse/src/ibrowse_app.erl
deleted file mode 100644
index d3a0f7b..0000000
--- a/src/ibrowse/src/ibrowse_app.erl
+++ /dev/null
@@ -1,63 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% File    : ibrowse_app.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : 
-%%%
-%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%%-------------------------------------------------------------------
--module(ibrowse_app).
-
--behaviour(application).
-%%--------------------------------------------------------------------
-%% Include files
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% External exports
-%%--------------------------------------------------------------------
--export([
-	 start/2,
-	 stop/1
-        ]).
-
-%%--------------------------------------------------------------------
-%% Internal exports
-%%--------------------------------------------------------------------
--export([
-        ]).
-
-%%--------------------------------------------------------------------
-%% Macros
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% Records
-%%--------------------------------------------------------------------
-
-%%====================================================================
-%% External functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Func: start/2
-%% Returns: {ok, Pid}        |
-%%          {ok, Pid, State} |
-%%          {error, Reason}   
-%%--------------------------------------------------------------------
-start(_Type, _StartArgs) ->
-    case ibrowse_sup:start_link() of
-	{ok, Pid} -> 
-	    {ok, Pid};
-	Error ->
-	    Error
-    end.
-
-%%--------------------------------------------------------------------
-%% Func: stop/1
-%% Returns: any 
-%%--------------------------------------------------------------------
-stop(_State) ->
-    ok.
-
-%%====================================================================
-%% Internal functions
-%%====================================================================


[19/49] Remove src/couch_replicator

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_worker.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
deleted file mode 100644
index 78d66ea..0000000
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ /dev/null
@@ -1,514 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_worker).
--behaviour(gen_server).
-
-% public API
--export([start_link/5]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
-% TODO: maybe make both buffer max sizes configurable
--define(DOC_BUFFER_BYTE_SIZE, 512 * 1024).   % for remote targets
--define(DOC_BUFFER_LEN, 10).                 % for local targets, # of documents
--define(MAX_BULK_ATT_SIZE, 64 * 1024).
--define(MAX_BULK_ATTS_PER_DOC, 8).
--define(STATS_DELAY, 10000000).              % 10 seconds (in microseconds)
-
--define(inc_stat(StatPos, Stats, Inc),
-    setelement(StatPos, Stats, element(StatPos, Stats) + Inc)).
-
--import(couch_replicator_utils, [
-    open_db/1,
-    close_db/1,
-    start_db_compaction_notifier/2,
-    stop_db_compaction_notifier/1
-]).
--import(couch_util, [
-    to_binary/1,
-    get_value/3
-]).
-
-
--record(batch, {
-    docs = [],
-    size = 0
-}).
-
--record(state, {
-    cp,
-    loop,
-    max_parallel_conns,
-    source,
-    target,
-    readers = [],
-    writer = nil,
-    pending_fetch = nil,
-    flush_waiter = nil,
-    stats = #rep_stats{},
-    source_db_compaction_notifier = nil,
-    target_db_compaction_notifier = nil,
-    batch = #batch{}
-}).
-
-
-
-start_link(Cp, #db{} = Source, Target, ChangesManager, _MaxConns) ->
-    Pid = spawn_link(fun() ->
-        erlang:put(last_stats_report, now()),
-        queue_fetch_loop(Source, Target, Cp, Cp, ChangesManager)
-    end),
-    {ok, Pid};
-
-start_link(Cp, Source, Target, ChangesManager, MaxConns) ->
-    gen_server:start_link(
-        ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []).
-
-
-init({Cp, Source, Target, ChangesManager, MaxConns}) ->
-    process_flag(trap_exit, true),
-    Parent = self(),
-    LoopPid = spawn_link(fun() ->
-        queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
-    end),
-    erlang:put(last_stats_report, now()),
-    State = #state{
-        cp = Cp,
-        max_parallel_conns = MaxConns,
-        loop = LoopPid,
-        source = open_db(Source),
-        target = open_db(Target),
-        source_db_compaction_notifier =
-            start_db_compaction_notifier(Source, self()),
-        target_db_compaction_notifier =
-            start_db_compaction_notifier(Target, self())
-    },
-    {ok, State}.
-
-
-handle_call({fetch_doc, {_Id, _Revs, _PAs} = Params}, {Pid, _} = From,
-    #state{loop = Pid, readers = Readers, pending_fetch = nil,
-        source = Src, target = Tgt, max_parallel_conns = MaxConns} = State) ->
-    case length(Readers) of
-    Size when Size < MaxConns ->
-        Reader = spawn_doc_reader(Src, Tgt, Params),
-        NewState = State#state{
-            readers = [Reader | Readers]
-        },
-        {reply, ok, NewState};
-    _ ->
-        NewState = State#state{
-            pending_fetch = {From, Params}
-        },
-        {noreply, NewState}
-    end;
-
-handle_call({batch_doc, Doc}, From, State) ->
-    gen_server:reply(From, ok),
-    {noreply, maybe_flush_docs(Doc, State)};
-
-handle_call({add_stats, IncStats}, From, #state{stats = Stats} = State) ->
-    gen_server:reply(From, ok),
-    NewStats = couch_replicator_utils:sum_stats(Stats, IncStats),
-    NewStats2 = maybe_report_stats(State#state.cp, NewStats),
-    {noreply, State#state{stats = NewStats2}};
-
-handle_call(flush, {Pid, _} = From,
-    #state{loop = Pid, writer = nil, flush_waiter = nil,
-        target = Target, batch = Batch} = State) ->
-    State2 = case State#state.readers of
-    [] ->
-        State#state{writer = spawn_writer(Target, Batch)};
-    _ ->
-        State
-    end,
-    {noreply, State2#state{flush_waiter = From}}.
-
-
-handle_cast({db_compacted, DbName},
-    #state{source = #db{name = DbName} = Source} = State) ->
-    {ok, NewSource} = couch_db:reopen(Source),
-    {noreply, State#state{source = NewSource}};
-
-handle_cast({db_compacted, DbName},
-    #state{target = #db{name = DbName} = Target} = State) ->
-    {ok, NewTarget} = couch_db:reopen(Target),
-    {noreply, State#state{target = NewTarget}};
-
-handle_cast(Msg, State) ->
-    {stop, {unexpected_async_call, Msg}, State}.
-
-
-handle_info({'EXIT', Pid, normal}, #state{loop = Pid} = State) ->
-    #state{
-        batch = #batch{docs = []}, readers = [], writer = nil,
-        pending_fetch = nil, flush_waiter = nil
-    } = State,
-    {stop, normal, State};
-
-handle_info({'EXIT', Pid, normal}, #state{writer = Pid} = State) ->
-    {noreply, after_full_flush(State)};
-
-handle_info({'EXIT', Pid, normal}, #state{writer = nil} = State) ->
-    #state{
-        readers = Readers, writer = Writer, batch = Batch,
-        source = Source, target = Target,
-        pending_fetch = Fetch, flush_waiter = FlushWaiter
-    } = State,
-    case Readers -- [Pid] of
-    Readers ->
-        {noreply, State};
-    Readers2 ->
-        State2 = case Fetch of
-        nil ->
-            case (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
-                (Readers2 =:= [])  of
-            true ->
-                State#state{
-                    readers = Readers2,
-                    writer = spawn_writer(Target, Batch)
-                };
-            false ->
-                State#state{readers = Readers2}
-            end;
-        {From, FetchParams} ->
-            Reader = spawn_doc_reader(Source, Target, FetchParams),
-            gen_server:reply(From, ok),
-            State#state{
-                readers = [Reader | Readers2],
-                pending_fetch = nil
-            }
-        end,
-        {noreply, State2}
-    end;
-
-handle_info({'EXIT', Pid, Reason}, State) ->
-   {stop, {process_died, Pid, Reason}, State}.
-
-
-terminate(_Reason, State) ->
-    close_db(State#state.source),
-    close_db(State#state.target),
-    stop_db_compaction_notifier(State#state.source_db_compaction_notifier),
-    stop_db_compaction_notifier(State#state.target_db_compaction_notifier).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
-    ChangesManager ! {get_changes, self()},
-    receive
-    {closed, ChangesManager} ->
-        ok;
-    {changes, ChangesManager, Changes, ReportSeq} ->
-        Target2 = open_db(Target),
-        {IdRevs, Stats0} = find_missing(Changes, Target2),
-        case Source of
-        #db{} ->
-            Source2 = open_db(Source),
-            Stats = local_process_batch(
-                IdRevs, Cp, Source2, Target2, #batch{}, Stats0),
-            close_db(Source2);
-        #httpdb{} ->
-            ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
-            remote_process_batch(IdRevs, Parent),
-            {ok, Stats} = gen_server:call(Parent, flush, infinity)
-        end,
-        close_db(Target2),
-        ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
-        erlang:put(last_stats_report, now()),
-        twig:log(debug,"Worker reported completion of seq ~p", [ReportSeq]),
-        queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
-    end.
-
-
-local_process_batch([], _Cp, _Src, _Tgt, #batch{docs = []}, Stats) ->
-    Stats;
-
-local_process_batch([], Cp, Source, Target, #batch{docs = Docs, size = Size}, Stats) ->
-    case Target of
-    #httpdb{} ->
-        twig:log(debug,"Worker flushing doc batch of size ~p bytes", [Size]);
-    #db{} ->
-        twig:log(debug,"Worker flushing doc batch of ~p docs", [Size])
-    end,
-    Stats2 = flush_docs(Target, Docs),
-    Stats3 = couch_replicator_utils:sum_stats(Stats, Stats2),
-    local_process_batch([], Cp, Source, Target, #batch{}, Stats3);
-
-local_process_batch([IdRevs | Rest], Cp, Source, Target, Batch, Stats) ->
-    {ok, {_, DocList, Stats2, _}} = fetch_doc(
-        Source, IdRevs, fun local_doc_handler/2, {Target, [], Stats, Cp}),
-    {Batch2, Stats3} = lists:foldl(
-        fun(Doc, {Batch0, Stats0}) ->
-            {Batch1, S} = maybe_flush_docs(Target, Batch0, Doc),
-            {Batch1, couch_replicator_utils:sum_stats(Stats0, S)}
-        end,
-        {Batch, Stats2}, DocList),
-    local_process_batch(Rest, Cp, Source, Target, Batch2, Stats3).
-
-
-remote_process_batch([], _Parent) ->
-    ok;
-
-remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
-    % When the source is a remote database, we fetch a single document revision
-    % per HTTP request. This is mostly to facilitate retrying of HTTP requests
-    % that fail due to transient network failures. It also helps avoid exceeding
-    % the maximum URL length allowed by proxies and by Mochiweb.
-    lists:foreach(
-        fun(Rev) ->
-            ok = gen_server:call(Parent, {fetch_doc, {Id, [Rev], PAs}}, infinity)
-        end,
-        Revs),
-    remote_process_batch(Rest, Parent).
-
-
-spawn_doc_reader(Source, Target, FetchParams) ->
-    Parent = self(),
-    spawn_link(fun() ->
-        Source2 = open_db(Source),
-        fetch_doc(
-            Source2, FetchParams, fun remote_doc_handler/2, {Parent, Target}),
-        close_db(Source2)
-    end).
-
-
-fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
-    try
-        couch_replicator_api_wrap:open_doc_revs(
-            Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc)
-    catch
-    throw:{missing_stub, _} ->
-        twig:log(error,"Retrying fetch and update of document `~s` due to out of "
-            "sync attachment stubs. Missing revisions are: ~s",
-            [Id, couch_doc:revs_to_strs(Revs)]),
-        couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
-    end.
-
-
-local_doc_handler({ok, Doc}, {Target, DocList, Stats, Cp}) ->
-    Stats2 = ?inc_stat(#rep_stats.docs_read, Stats, 1),
-    case batch_doc(Doc) of
-    true ->
-        {ok, {Target, [Doc | DocList], Stats2, Cp}};
-    false ->
-        twig:log(debug,"Worker flushing doc with attachments", []),
-        Target2 = open_db(Target),
-        Success = (flush_doc(Target2, Doc) =:= ok),
-        close_db(Target2),
-        Stats3 = case Success of
-        true ->
-            ?inc_stat(#rep_stats.docs_written, Stats2, 1);
-        false ->
-            ?inc_stat(#rep_stats.doc_write_failures, Stats2, 1)
-        end,
-        Stats4 = maybe_report_stats(Cp, Stats3),
-        {ok, {Target, DocList, Stats4, Cp}}
-    end;
-local_doc_handler(_, Acc) ->
-    {ok, Acc}.
-
-
-remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
-    ok = gen_server:call(Parent, {batch_doc, Doc}, infinity),
-    {ok, Acc};
-remote_doc_handler({ok, Doc}, {Parent, Target} = Acc) ->
-    % Immediately flush documents with attachments received from a remote
-    % source. The data property of each attachment is a function that starts
-    % streaming the attachment data from the remote source, so it should be
-    % called as soon as possible to avoid ibrowse inactivity timeouts.
-    Stats = #rep_stats{docs_read = 1},
-    twig:log(debug,"Worker flushing doc with attachments", []),
-    Target2 = open_db(Target),
-    Success = (flush_doc(Target2, Doc) =:= ok),
-    close_db(Target2),
-    {Result, Stats2} = case Success of
-    true ->
-        {{ok, Acc}, ?inc_stat(#rep_stats.docs_written, Stats, 1)};
-    false ->
-        {{skip, Acc}, ?inc_stat(#rep_stats.doc_write_failures, Stats, 1)}
-    end,
-    ok = gen_server:call(Parent, {add_stats, Stats2}, infinity),
-    Result;
-remote_doc_handler(_, Acc) ->
-    {ok, Acc}.
-
-
-spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
-    case {Target, Size > 0} of
-    {#httpdb{}, true} ->
-        twig:log(debug,"Worker flushing doc batch of size ~p bytes", [Size]);
-    {#db{}, true} ->
-        twig:log(debug,"Worker flushing doc batch of ~p docs", [Size]);
-    _ ->
-        ok
-    end,
-    Parent = self(),
-    spawn_link(
-        fun() ->
-            Target2 = open_db(Target),
-            Stats = flush_docs(Target2, DocList),
-            close_db(Target2),
-            ok = gen_server:call(Parent, {add_stats, Stats}, infinity)
-        end).
-
-
-after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
-    gen_server:reply(Waiter, {ok, Stats}),
-    erlang:put(last_stats_report, now()),
-    State#state{
-        stats = #rep_stats{},
-        flush_waiter = nil,
-        writer = nil,
-        batch = #batch{}
-    }.
-
-
-maybe_flush_docs(Doc,State) ->
-    #state{
-        target = Target, batch = Batch,
-        stats = Stats, cp = Cp
-    } = State,
-    {Batch2, WStats} = maybe_flush_docs(Target, Batch, Doc),
-    Stats2 = couch_replicator_utils:sum_stats(Stats, WStats),
-    Stats3 = ?inc_stat(#rep_stats.docs_read, Stats2, 1),
-    Stats4 = maybe_report_stats(Cp, Stats3),
-    State#state{stats = Stats4, batch = Batch2}.
-
-
-maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
-    #batch{docs = DocAcc, size = SizeAcc} = Batch,
-    case batch_doc(Doc) of
-    false ->
-        twig:log(debug,"Worker flushing doc with attachments", []),
-        case flush_doc(Target, Doc) of
-        ok ->
-            {Batch, #rep_stats{docs_written = 1}};
-        _ ->
-            {Batch, #rep_stats{doc_write_failures = 1}}
-        end;
-    true ->
-        JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
-        case SizeAcc + iolist_size(JsonDoc) of
-        SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
-            twig:log(debug,"Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
-            Stats = flush_docs(Target, [JsonDoc | DocAcc]),
-            {#batch{}, Stats};
-        SizeAcc2 ->
-            {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, #rep_stats{}}
-        end
-    end;
-
-maybe_flush_docs(#db{} = Target, #batch{docs = DocAcc, size = SizeAcc}, Doc) ->
-    case SizeAcc + 1 of
-    SizeAcc2 when SizeAcc2 >= ?DOC_BUFFER_LEN ->
-        twig:log(debug,"Worker flushing doc batch of ~p docs", [SizeAcc2]),
-        Stats = flush_docs(Target, [Doc | DocAcc]),
-        {#batch{}, Stats};
-    SizeAcc2 ->
-        {#batch{docs = [Doc | DocAcc], size = SizeAcc2}, #rep_stats{}}
-    end.
-
-
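-%% A doc is eligible for bulk batching only when every attachment is
-%% small (=< ?MAX_BULK_ATT_SIZE bytes), there are at most
-%% ?MAX_BULK_ATTS_PER_DOC of them and none is a stub; otherwise the doc
-%% is flushed on its own so its attachments can be streamed.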
-batch_doc(#doc{atts = []}) ->
-    true;
-batch_doc(#doc{atts = Atts}) ->
-    (length(Atts) =< ?MAX_BULK_ATTS_PER_DOC) andalso
-        lists:all(
-            fun(#att{disk_len = L, data = Data}) ->
-                (L =< ?MAX_BULK_ATT_SIZE) andalso (Data =/= stub)
-            end, Atts).
-
-
-flush_docs(_Target, []) ->
-    #rep_stats{};
-
-flush_docs(Target, DocList) ->
-    {ok, Errors} = couch_replicator_api_wrap:update_docs(
-        Target, DocList, [delay_commit], replicated_changes),
-    DbUri = couch_replicator_api_wrap:db_uri(Target),
-    lists:foreach(
-        fun({Props}) ->
-            twig:log(error,"Replicator: couldn't write document `~s`, revision `~s`,"
-                " to target database `~s`. Error: `~s`, reason: `~s`.",
-                [get_value(id, Props, ""), get_value(rev, Props, ""), DbUri,
-                    get_value(error, Props, ""), get_value(reason, Props, "")])
-        end, Errors),
-    #rep_stats{
-        docs_written = length(DocList) - length(Errors),
-        doc_write_failures = length(Errors)
-    }.
-
-flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
-    try couch_replicator_api_wrap:update_doc(Target, Doc, [], replicated_changes) of
-    {ok, _} ->
-        ok;
-    Error ->
-        twig:log(error,"Replicator: error writing document `~s` to `~s`: ~s",
-            [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]),
-        Error
-    catch
-    throw:{missing_stub, _} = MissingStub ->
-        throw(MissingStub);
-    throw:{Error, Reason} ->
-        twig:log(error,"Replicator: couldn't write document `~s`, revision `~s`,"
-            " to target database `~s`. Error: `~s`, reason: `~s`.",
-            [Id, couch_doc:rev_to_str({Pos, RevId}),
-                couch_replicator_api_wrap:db_uri(Target), to_binary(Error), to_binary(Reason)]),
-        {error, Error};
-    throw:Err ->
-        twig:log(error,"Replicator: couldn't write document `~s`, revision `~s`,"
-            " to target database `~s`. Error: `~s`.",
-            [Id, couch_doc:rev_to_str({Pos, RevId}),
-                couch_replicator_api_wrap:db_uri(Target), to_binary(Err)]),
-        {error, Err}
-    end.
-
-
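-%% Ask the target which of the changed revisions it does not yet have;
-%% only those will be fetched from the source. The returned stats record
-%% how many revisions were checked versus found missing.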
-find_missing(DocInfos, Target) ->
-    {IdRevs, AllRevsCount} = lists:foldr(
-        fun(#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
-            Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
-            {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
-        end,
-        {[], 0}, DocInfos),
-    {ok, Missing} = couch_replicator_api_wrap:get_missing_revs(Target, IdRevs),
-    MissingRevsCount = lists:foldl(
-        fun({_Id, MissingRevs, _PAs}, Acc) -> Acc + length(MissingRevs) end,
-        0, Missing),
-    Stats = #rep_stats{
-        missing_checked = AllRevsCount,
-        missing_found = MissingRevsCount
-    },
-    {Missing, Stats}.
-
-
-maybe_report_stats(Cp, Stats) ->
-    Now = now(),
-    %% now_diff(T2, T1) returns T2 - T1, so the newer timestamp goes first.
-    case timer:now_diff(Now, erlang:get(last_stats_report)) >= ?STATS_DELAY of
-    true ->
-        ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
-        erlang:put(last_stats_report, Now),
-        #rep_stats{};
-    false ->
-        Stats
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/json_stream_parse.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl
deleted file mode 100644
index b63e011..0000000
--- a/src/couch_replicator/src/json_stream_parse.erl
+++ /dev/null
@@ -1,432 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(json_stream_parse).
-
-
--export([events/2, to_ejson/1, collect_object/2]).
-
--define(IS_WS(X), (X == $\  orelse X == $\t orelse X == $\n orelse X == $\r)).
--define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
--define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
-
-
-
-% Parses the JSON into events.
-%
-% The DataFun param is a function that produces the data for parsing. When
-% called it must yield a tuple, or the atom done. The first element in the
-% tuple is the data itself, and the second element is a function to be called
-% next to get the next chunk of data in the stream.
-%
-% The EventFun is called every time a JSON element is parsed. It must produce
-% a new function to be called for the next event.
-%
-% Events happen each time a new element in the JSON string is parsed.
-% For simple value types, the data itself is returned:
-% Strings
-% Integers
-% Floats
-% true
-% false
-% null
-%
-% For arrays, the start of the array is signaled by the event array_start
-% atom. The end is signaled by array_end. The events before the end are the
-% values, or nested values.
-%
-% For objects, the start of the object is signaled by the event object_start
-% atom. The end is signaled by object_end. Each key is signaled by
-% {key, KeyString}, and the following event is the value, or start of the
-% value (array_start, object_start).
-%
-events(Data,EventFun) when is_list(Data)->
-    events(list_to_binary(Data),EventFun);
-events(Data,EventFun) when is_binary(Data)->
-    events(fun() -> {Data, fun() -> done end} end,EventFun);
-events(DataFun,EventFun) ->
-    parse_one(DataFun, EventFun, <<>>).
-
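-% A minimal sketch of the event protocol described above: print each
-% event as it arrives (an event fun must return the fun to be called
-% with the next event).
-%
-%    print_events(Ev) ->
-%        io:format("event: ~p~n", [Ev]),
-%        fun print_events/1.
-%
-%    events(<<"{\"a\": [1, 2]}">>, fun print_events/1).
-%    % prints: object_start, {key,<<"a">>}, array_start, 1, 2,
-%    %         array_end, object_end
-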
-% Converts the JSON directly to the Erlang representation of JSON (EJSON).
-to_ejson(DF) ->
-    {_DF2, EF, _Rest} = events(DF, fun(Ev) -> collect_events(Ev, []) end),
-    [[EJson]] = make_ejson(EF(get_results), [[]]),
-    EJson.
-
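-% For example (illustrative input):
-%
-%    to_ejson(<<"{\"a\": [1, true]}">>) =:= {[{<<"a">>, [1, true]}]}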
-
-% This function is used to return complete objects while parsing streams.
-%
-% Return this function from inside an event function right after getting an
-% object_start event. It then collects the remaining events for that object
-% and converts them to the Erlang representation of JSON.
-%
-% It then calls your ReturnControl function with the Erlang object. Your
-% ReturnControl function should then yield another event function.
-%
-% This example stream-parses an array of objects, calling
-% fun do_something_with_the_object/1 for each object.
-%
-%    ev_array(array_start) ->
-%        fun(Ev) -> ev_object_loop(Ev) end.
-%
-%    ev_object_loop(object_start) ->
-%        fun(Ev) ->
-%            json_stream_parse:collect_object(Ev,
-%                fun(Obj) ->
-%                    do_something_with_the_object(Obj),
-%                    fun(Ev2) -> ev_object_loop(Ev2) end
-%                end)
-%        end;
-%    ev_object_loop(array_end) ->
-%        ok.
-%
-%    % invoke the parse
-%    main() ->
-%        ...
-%        events(Data, fun(Ev) -> ev_array(Ev) end).
-
-collect_object(Ev, ReturnControl) ->
-    collect_object(Ev, 0, ReturnControl, [object_start]).
-
-
-
-% internal methods
-
-parse_one(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-        none;
-    {Token, DF2, Rest} ->
-        case Token of
-        "{" ->
-            EF2 = EF(object_start),
-            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
-            {DF3, EF3(object_end), Rest2};
-        "[" ->
-            EF2 = EF(array_start),
-            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
-            {DF3, EF3(array_end), Rest2};
-        Int when is_integer(Int)->
-            {DF2, EF(Int), Rest};
-        Float when is_float(Float)->
-            {DF2, EF(Float), Rest};
-        Atom when is_atom(Atom)->
-            {DF2, EF(Atom), Rest};
-        String when is_binary(String)->
-            {DF2, EF(String), Rest};
-        _OtherToken ->
-            err(unexpected_token)
-        end
-    end.
-
-must_parse_one(DF,EF,Acc,Error)->
-    case parse_one(DF, EF, Acc) of
-    none ->
-        err(Error);
-    Else ->
-        Else
-    end.
-
-must_toke(DF, Data, Error) ->
-    case toke(DF, Data) of
-    none ->
-        err(Error);
-    Result ->
-        Result
-    end.
-
-toke(DF, <<>>) ->
-    case DF() of
-    done ->
-        none;
-    {Data, DF2} ->
-        toke(DF2, Data)
-    end;
-toke(DF, <<C,Rest/binary>>) when ?IS_WS(C)->
-    toke(DF, Rest);
-toke(DF, <<${,Rest/binary>>) ->
-    {"{", DF, Rest};
-toke(DF, <<$},Rest/binary>>) ->
-    {"}", DF, Rest};
-toke(DF, <<$[,Rest/binary>>) ->
-    {"[", DF, Rest};
-toke(DF, <<$],Rest/binary>>) ->
-    {"]", DF, Rest};
-toke(DF, <<$",Rest/binary>>) ->
-    toke_string(DF,Rest,[]);
-toke(DF, <<$,,Rest/binary>>) ->
-    {",", DF, Rest};
-toke(DF, <<$:,Rest/binary>>) ->
-    {":", DF, Rest};
-toke(DF, <<$-,Rest/binary>>) ->
-    {<<C,_/binary>> = Data, DF2} = must_df(DF,1,Rest,expected_number),
-    case ?IS_DIGIT(C) of
-    true ->
-        toke_number_leading(DF2, Data, "-");
-    false ->
-        err(expected_number)
-    end;
-toke(DF, <<C,_/binary>> = Data) when ?IS_DIGIT(C) ->
-    toke_number_leading(DF, Data, []);
-toke(DF, <<$t,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"rue">>, DF, Rest),
-    {true, DF2, Data};
-toke(DF, <<$f,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"alse">>, DF, Rest),
-    {false, DF2, Data};
-toke(DF, <<$n,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"ull">>, DF, Rest),
-    {null, DF2, Data};
-toke(_, _) ->
-    err(bad_token).
-
-
-must_match(Pattern, DF, Data) ->
-    Size = size(Pattern),
-    case must_df(DF, Size, Data, bad_token) of
-    {<<Pattern:Size/binary,Data2/binary>>, DF2} ->
-        {Data2, DF2};
-    {_, _} ->
-        err(bad_token)
-    end.
-
-must_df(DF,Error)->
-    case DF() of
-    done ->
-        err(Error);
-    {Data, DF2} ->
-        {Data, DF2}
-    end.
-
-
-must_df(DF,NeedLen,Acc,Error)->
-    if size(Acc) >= NeedLen ->
-        {Acc, DF};
-    true ->
-        case DF() of
-        done ->
-            err(Error);
-        {Data, DF2} ->
-            must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
-        end
-    end.
-
-
-parse_object(DF,EF,Acc) ->
-    case must_toke(DF, Acc, unterminated_object) of
-    {String, DF2, Rest} when is_binary(String)->
-        EF2 = EF({key,String}),
-        case must_toke(DF2,Rest,unterminated_object) of
-        {":", DF3, Rest2} ->
-            {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
-            case must_toke(DF4,Rest3, unterminated_object) of
-            {",", DF5, Rest4} ->
-                parse_object(DF5, EF3, Rest4);
-            {"}", DF5, Rest4} ->
-                {DF5, EF3, Rest4};
-            {_, _, _} ->
-                err(unexpected_token)
-            end;
-        _Else ->
-            err(expected_colon)
-        end;
-    {"}", DF2, Rest} ->
-        {DF2, EF, Rest};
-    {_, _, _} ->
-        err(unexpected_token)
-    end.
-
-parse_array0(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-        err(unterminated_array);
-    {",", DF2, Rest} ->
-        parse_array(DF2,EF,Rest);
-    {"]", DF2, Rest} ->
-        {DF2,EF,Rest};
-    _ ->
-        err(unexpected_token)
-    end.
-
-parse_array(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-         err(unterminated_array);
-    {Token, DF2, Rest} ->
-        case Token of
-        "{" ->
-            EF2 = EF(object_start),
-            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
-            parse_array0(DF3, EF3(object_end), Rest2);
-        "[" ->
-            EF2 = EF(array_start),
-            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
-            parse_array0(DF3, EF3(array_end), Rest2);
-        Int when is_integer(Int)->
-            parse_array0(DF2, EF(Int), Rest);
-        Float when is_float(Float)->
-            parse_array0(DF2, EF(Float), Rest);
-        Atom when is_atom(Atom)->
-            parse_array0(DF2, EF(Atom), Rest);
-        String when is_binary(String)->
-            parse_array0(DF2, EF(String), Rest);
-        "]" ->
-            {DF2, EF, Rest};
-        _ ->
-            err(unexpected_token)
-        end
-    end.
-
-
-toke_string(DF, <<>>, Acc) ->
-    {Data, DF2} = must_df(DF, unterminated_string),
-    toke_string(DF2, Data, Acc);
-toke_string(DF, <<$\\,$",Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$" | Acc]);
-toke_string(DF, <<$\\,$\\,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\\ | Acc]);
-toke_string(DF, <<$\\,$/,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$/ | Acc]);
-toke_string(DF, <<$\\,$b,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\b | Acc]);
-toke_string(DF, <<$\\,$f,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\f | Acc]);
-toke_string(DF, <<$\\,$n,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\n | Acc]);
-toke_string(DF, <<$\\,$r,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\r | Acc]);
-toke_string(DF, <<$\\,$t,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\t | Acc]);
-toke_string(DF, <<$\\,$u,Rest/binary>>, Acc) ->
-    {<<A,B,C,D,Data/binary>>, DF2} = must_df(DF,4,Rest,missing_hex),
-    UTFChar = erlang:list_to_integer([A, B, C, D], 16),
-    if UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
-        err(invalid_utf_char);
-    true ->
-        ok
-    end,
-    Chars = xmerl_ucs:to_utf8(UTFChar),
-    toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
-toke_string(DF, <<$\\>>, Acc) ->
-    {Data, DF2} = must_df(DF, unterminated_string),
-    toke_string(DF2, <<$\\,Data/binary>>, Acc);
-toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
-    err(bad_escape);
-toke_string(DF, <<$", Rest/binary>>, Acc) ->
-    {list_to_binary(lists:reverse(Acc)), DF, Rest};
-toke_string(DF, <<C, Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [C | Acc]).
-
-
-toke_number_leading(DF, <<Digit,Rest/binary>>, Acc)
-        when ?IS_DIGIT(Digit) ->
-    toke_number_leading(DF, Rest, [Digit | Acc]);
-toke_number_leading(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_integer(lists:reverse(Acc)), DF, Rest};
-toke_number_leading(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-         {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_leading(DF2, Data, Acc)
-    end;
-toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
-    toke_number_trailing(DF, Rest, [$.|Acc]);
-toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
-    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
-toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
-    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
-toke_number_leading(_, _, _) ->
-    err(unexpected_character_in_number).
-
-toke_number_trailing(DF, <<Digit,Rest/binary>>, Acc)
-        when ?IS_DIGIT(Digit) ->
-    toke_number_trailing(DF, Rest, [Digit | Acc]);
-toke_number_trailing(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_trailing(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_trailing(DF2, Data, Acc)
-    end;
-toke_number_trailing(DF, <<"e", Rest/binary>>, [C|_]=Acc) when C /= $. ->
-    toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(DF, <<"E", Rest/binary>>, [C|_]=Acc) when C /= $. ->
-    toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(_, _, _) ->
-    err(unexpected_character_in_number).
-
-
-toke_number_exponent(DF, <<Digit,Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
-    toke_number_exponent(DF, Rest, [Digit | Acc]);
-toke_number_exponent(DF, <<Sign,Rest/binary>>, [$e|_]=Acc)
-        when Sign == $+ orelse Sign == $- ->
-    toke_number_exponent(DF, Rest, [Sign | Acc]);
-toke_number_exponent(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_exponent(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_exponent(DF2, Data, Acc)
-    end;
-toke_number_exponent(_, _, _) ->
-        err(unexpected_character_in_number).
-
-
-err(Error)->
-    throw({parse_error,Error}).
-
-
-make_ejson([], Stack) ->
-    Stack;
-make_ejson([array_start | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
-    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
-make_ejson([array_end | RevEvs], Stack) ->
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
-    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
-make_ejson([object_end | RevEvs], Stack) ->
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{key, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
-    make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
-make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
-    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
-
-collect_events(get_results, Acc) ->
-    Acc;
-collect_events(Ev, Acc) ->
-    fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
-
-
-collect_object(object_end, 0, ReturnControl, Acc) ->
-    [[Obj]] = make_ejson([object_end | Acc], [[]]),
-    ReturnControl(Obj);
-collect_object(object_end, NestCount, ReturnControl, Acc) ->
-    fun(Ev) ->
-        collect_object(Ev, NestCount - 1, ReturnControl, [object_end | Acc])
-    end;
-collect_object(object_start, NestCount, ReturnControl, Acc) ->
-    fun(Ev) ->
-        collect_object(Ev, NestCount + 1, ReturnControl, [object_start | Acc])
-    end;
-collect_object(Ev, NestCount, ReturnControl, Acc) ->
-    fun(Ev2) ->
-        collect_object(Ev2, NestCount, ReturnControl, [Ev | Acc])
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/01-load.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/01-load.t b/src/couch_replicator/test/01-load.t
deleted file mode 100644
index 8bd82dd..0000000
--- a/src/couch_replicator/test/01-load.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test that we can load each module.
-
-main(_) ->
-    test_util:init_code_path(),
-    Modules = [
-        couch_replicator_api_wrap,
-        couch_replicator_httpc,
-        couch_replicator_httpd,
-        couch_replicator_manager,
-        couch_replicator_notifier,
-        couch_replicator,
-        couch_replicator_worker,
-        couch_replicator_utils,
-        couch_replicator_job_sup
-    ],
-
-    etap:plan(length(Modules)),
-    lists:foreach(
-        fun(Module) ->
-            etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
-        end, Modules),
-    etap:end_tests().

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/02-httpc-pool.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/02-httpc-pool.t b/src/couch_replicator/test/02-httpc-pool.t
deleted file mode 100755
index fc86cce..0000000
--- a/src/couch_replicator/test/02-httpc-pool.t
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(55, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-    ibrowse:start(),
-
-    test_pool_full(),
-    test_worker_dead_pool_non_full(),
-    test_worker_dead_pool_full(),
-
-    couch_server_sup:stop(),
-    ok.
-
-
-test_pool_full() ->
-    Pool = spawn_pool(),
-    Client1 = spawn_client(Pool),
-    Client2 = spawn_client(Pool),
-    Client3 = spawn_client(Pool),
-
-    etap:diag("Check that we can spawn the max number of connections."),
-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
-    etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
-    Worker1 = get_client_worker(Client1, "1"),
-    Worker2 = get_client_worker(Client2, "2"),
-    Worker3 = get_client_worker(Client3, "3"),
-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
-
-    etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
-    etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
-    etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
-
-    etap:diag("Check that client 4 blocks waiting for a worker."),
-    Client4 = spawn_client(Pool),
-    etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
-
-    etap:diag("Check that stopping a client gives up its worker."),
-    etap:is(stop_client(Client1), ok, "First client stopped."),
-
-    etap:diag("And check that our blocked client has been unblocked."),
-    etap:is(ping_client(Client4), ok, "Client 4 was unblocked."),
-
-    Worker4 = get_client_worker(Client4, "4"),
-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
-    etap:is(Worker4, Worker1, "Client 4 got worker that client 1 got before."),
-
-    lists:foreach(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]),
-    stop_pool(Pool).
-
-
-test_worker_dead_pool_non_full() ->
-    Pool = spawn_pool(),
-    Client1 = spawn_client(Pool),
-
-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
-    Worker1 = get_client_worker(Client1, "1"),
-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
-
-    etap:diag("Kill client's 1 worker."),
-    etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
-    etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
-
-    etap:is(stop_client(Client1), ok, "First client stopped and released its worker."),
-
-    Client2 = spawn_client(Pool),
-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
-    Worker2 = get_client_worker(Client2, "2"),
-    etap:isnt(Worker2, Worker1, "Client 2 got a different worker from client 1"),
-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
-
-    etap:is(stop_client(Client2), ok, "Second client stopped."),
-    stop_pool(Pool).
-
-
-test_worker_dead_pool_full() ->
-    Pool = spawn_pool(),
-    Client1 = spawn_client(Pool),
-    Client2 = spawn_client(Pool),
-    Client3 = spawn_client(Pool),
-
-    etap:diag("Check that we can spawn the max number of connections."),
-    etap:is(ping_client(Client1), ok, "Client 1 started ok."),
-    etap:is(ping_client(Client2), ok, "Client 2 started ok."),
-    etap:is(ping_client(Client3), ok, "Client 3 started ok."),
-
-    Worker1 = get_client_worker(Client1, "1"),
-    Worker2 = get_client_worker(Client2, "2"),
-    Worker3 = get_client_worker(Client3, "3"),
-    etap:is(is_process_alive(Worker1), true, "Client's 1 worker is alive."),
-    etap:is(is_process_alive(Worker2), true, "Client's 2 worker is alive."),
-    etap:is(is_process_alive(Worker3), true, "Client's 3 worker is alive."),
-
-    etap:isnt(Worker1, Worker2, "Clients 1 and 2 got different workers."),
-    etap:isnt(Worker2, Worker3, "Clients 2 and 3 got different workers."),
-    etap:isnt(Worker1, Worker3, "Clients 1 and 3 got different workers."),
-
-    etap:diag("Check that client 4 blocks waiting for a worker."),
-    Client4 = spawn_client(Pool),
-    etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."),
-
-    etap:diag("Kill client's 1 worker."),
-    etap:is(kill_client_worker(Client1), ok, "Killed client's 1 worker."),
-    etap:is(is_process_alive(Worker1), false, "Client's 1 worker process is dead."),
-
-    etap:diag("Check client 4 got unblocked after first worker's death"),
-    etap:is(ping_client(Client4), ok, "Client 4 not blocked anymore."),
-
-    Worker4 = get_client_worker(Client4, "4"),
-    etap:is(is_process_alive(Worker4), true, "Client's 4 worker is alive."),
-    etap:isnt(Worker4, Worker1, "Client 4 got a worker different from client 1."),
-    etap:isnt(Worker4, Worker2, "Client 4 got a worker different from client 2."),
-    etap:isnt(Worker4, Worker3, "Client 4 got a worker different from client 3."),
-
-    etap:diag("Check that stopping client 1 is a noop."),
-    etap:is(stop_client(Client1), ok, "First client stopped."),
-
-    etap:is(is_process_alive(Worker2), true, "Client 2's worker is still alive."),
-    etap:is(is_process_alive(Worker3), true, "Client 3's worker is still alive."),
-    etap:is(is_process_alive(Worker4), true, "Client 4's worker is still alive."),
-
-    etap:diag("Check that client 5 blocks waiting for a worker."),
-    Client5 = spawn_client(Pool),
-    etap:is(ping_client(Client5), timeout, "Client 5 blocked while waiting."),
-
-    etap:diag("Check that stopping client 2 gives up its worker."),
-    etap:is(stop_client(Client2), ok, "Second client stopped."),
-
-    etap:diag("Now check that client 5 has been unblocked."),
-    etap:is(ping_client(Client5), ok, "Client 5 was unblocked."),
-
-    Worker5 = get_client_worker(Client5, "5"),
-    etap:is(is_process_alive(Worker5), true, "Client 5's worker is alive."),
-    etap:isnt(Worker5, Worker1, "Client 5 got a worker different from client 1."),
-    etap:is(Worker5, Worker2, "Client 5 got same worker as client 2."),
-    etap:isnt(Worker5, Worker3, "Client 5 got a worker different from client 3."),
-    etap:isnt(Worker5, Worker4, "Client 5 got a worker different from client 4."),
-
-    etap:is(is_process_alive(Worker3), true, "Client 3's worker is still alive."),
-    etap:is(is_process_alive(Worker4), true, "Client 4's worker is still alive."),
-    etap:is(is_process_alive(Worker5), true, "Client 5's worker is still alive."),
-
-    lists:foreach(fun(C) -> ok = stop_client(C) end, [Client3, Client4, Client5]),
-    stop_pool(Pool).
-
-
-spawn_client(Pool) ->
-    Parent = self(),
-    Ref = make_ref(),
-    Pid = spawn(fun() ->
-        {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
-        loop(Parent, Ref, Worker, Pool)
-    end),
-    {Pid, Ref}.
-
-
-ping_client({Pid, Ref}) ->
-    Pid ! ping,
-    receive
-        {pong, Ref} ->
-            ok
-    after 3000 ->
-        timeout
-    end.
-
-
-get_client_worker({Pid, Ref}, ClientName) ->
-    Pid ! get_worker,
-    receive
-        {worker, Ref, Worker} ->
-            Worker
-    after 3000 ->
-        etap:bail("Timeout getting client " ++ ClientName ++ " worker.")
-    end.
-
-
-stop_client({Pid, Ref}) ->
-    Pid ! stop,
-    receive
-        {stop, Ref} ->
-            ok
-    after 3000 ->
-        timeout
-    end.
-
-
-kill_client_worker({Pid, Ref}) ->
-    Pid ! get_worker,
-    receive
-        {worker, Ref, Worker} ->
-            exit(Worker, kill),
-            ok
-    after 3000 ->
-        timeout
-    end.
-
-
-loop(Parent, Ref, Worker, Pool) ->
-    receive
-        ping ->
-            Parent ! {pong, Ref},
-            loop(Parent, Ref, Worker, Pool);
-        get_worker  ->
-            Parent ! {worker, Ref, Worker},
-            loop(Parent, Ref, Worker, Pool);
-        stop ->
-            couch_replicator_httpc_pool:release_worker(Pool, Worker),
-            Parent ! {stop, Ref}
-    end.
-
-
-spawn_pool() ->
-    Host = config:get("httpd", "bind_address", "127.0.0.1"),
-    Port = config:get("httpd", "port", "5984"),
-    {ok, Pool} = couch_replicator_httpc_pool:start_link(
-        "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]),
-    Pool.
-
-
-stop_pool(Pool) ->
-    ok = couch_replicator_httpc_pool:stop(Pool).

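For context, the checkout/checkin cycle these pool tests exercise reduces to the sketch below. Only start_link/2, get_worker/1, release_worker/2 and stop/1 are taken from the test code above; the URL, the option value and the wrapper name are illustrative.

    % Minimal sketch of the pool checkout/checkin cycle exercised above.
    % Once max_connections workers are checked out, get_worker/1 blocks
    % until a worker is released or dies.
    with_pooled_worker(Fun) ->
        {ok, Pool} = couch_replicator_httpc_pool:start_link(
            "http://127.0.0.1:5984", [{max_connections, 3}]),
        {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
        try
            Fun(Worker)
        after
            % Releasing (or killing) the worker unblocks a waiting get_worker/1.
            couch_replicator_httpc_pool:release_worker(Pool, Worker),
            ok = couch_replicator_httpc_pool:stop(Pool)
        end.
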
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/03-replication-compact.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/03-replication-compact.t b/src/couch_replicator/test/03-replication-compact.t
deleted file mode 100755
index 888d123..0000000
--- a/src/couch_replicator/test/03-replication-compact.t
+++ /dev/null
@@ -1,493 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Verify that compacting databases that are being used as the source or
-% target of a replication doesn't affect the replication and that the
-% replication doesn't hold their reference counters forever.
-
--mode(compile).
-
--define(b2l(B), binary_to_list(B)).
-
--record(user_ctx, {
-    name = null,
-    roles = [],
-    handler
-}).
-
--record(db, {
-    main_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-    fd,
-    fd_monitor,
-    header = nil,
-    committed_update_seq,
-    id_tree,
-    seq_tree,
-    local_tree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = [],
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    options = [],
-    compression,
-    before_doc_update,
-    after_doc_read
-}).
-
--record(rep, {
-    id,
-    source,
-    target,
-    options,
-    user_ctx,
-    doc_id
-}).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-
-main(_) ->
-    test_util:run(376, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-    ibrowse:start(),
-
-    Pairs = [
-        {source_db_name(), target_db_name()},
-        {{remote, source_db_name()}, target_db_name()},
-        {source_db_name(), {remote, target_db_name()}},
-        {{remote, source_db_name()}, {remote, target_db_name()}}
-    ],
-
-    lists:foreach(
-        fun({Source, Target}) ->
-            {ok, SourceDb} = create_db(source_db_name()),
-            etap:is(couch_db:is_idle(SourceDb), true,
-                "Source database is idle before starting replication"),
-
-            {ok, TargetDb} = create_db(target_db_name()),
-            etap:is(couch_db:is_idle(TargetDb), true,
-                "Target database is idle before starting replication"),
-
-            {ok, RepPid, RepId} = replicate(Source, Target),
-            check_active_tasks(RepPid, RepId, Source, Target),
-            {ok, DocsWritten} = populate_and_compact_test(
-                RepPid, SourceDb, TargetDb),
-
-            wait_target_in_sync(DocsWritten, TargetDb),
-            check_active_tasks(RepPid, RepId, Source, Target),
-            cancel_replication(RepId, RepPid),
-            compare_dbs(SourceDb, TargetDb),
-
-            delete_db(SourceDb),
-            delete_db(TargetDb),
-            ok = test_util:stop_couch(),
-            ok = timer:sleep(1000),
-            ok = test_util:start_couch()
-        end,
-        Pairs),
-
-    ok = test_util:stop_couch(),
-    ok.
-
-
-populate_and_compact_test(RepPid, SourceDb0, TargetDb0) ->
-    etap:is(is_process_alive(RepPid), true, "Replication process is alive"),
-    check_db_alive("source", SourceDb0),
-    check_db_alive("target", TargetDb0),
-
-    Writer = spawn_writer(SourceDb0),
-
-    lists:foldl(
-        fun(_, {SourceDb, TargetDb, DocCount}) ->
-            pause_writer(Writer),
-
-            compact_db("source", SourceDb),
-            etap:is(is_process_alive(RepPid), true,
-                "Replication process is alive after source database compaction"),
-            check_db_alive("source", SourceDb),
-            check_fd("source", SourceDb),
-
-            compact_db("target", TargetDb),
-            etap:is(is_process_alive(RepPid), true,
-                "Replication process is alive after target database compaction"),
-            check_db_alive("target", TargetDb),
-            check_fd("target", TargetDb),
-
-            {ok, SourceDb2} = reopen_db(SourceDb),
-            {ok, TargetDb2} = reopen_db(TargetDb),
-
-            resume_writer(Writer),
-            wait_writer(Writer, DocCount),
-
-            compact_db("source", SourceDb2),
-            etap:is(is_process_alive(RepPid), true,
-                "Replication process is alive after source database compaction"),
-            check_db_alive("source", SourceDb2),
-            pause_writer(Writer),
-            check_fd("source", SourceDb2),
-            resume_writer(Writer),
-
-            compact_db("target", TargetDb2),
-            etap:is(is_process_alive(RepPid), true,
-                "Replication process is alive after target database compaction"),
-            check_db_alive("target", TargetDb2),
-            pause_writer(Writer),
-            check_fd("target", TargetDb2),
-            resume_writer(Writer),
-
-            {ok, SourceDb3} = reopen_db(SourceDb2),
-            {ok, TargetDb3} = reopen_db(TargetDb2),
-            {SourceDb3, TargetDb3, DocCount + 50}
-        end,
-        {SourceDb0, TargetDb0, 50}, lists:seq(1, 5)),
-
-    DocsWritten = stop_writer(Writer),
-    {ok, DocsWritten}.
-
-
-check_db_alive(Type, #db{main_pid = Pid}) ->
-    etap:is(is_process_alive(Pid), true,
-        "Local " ++ Type ++ " database main pid is alive").
-
-
-compact_db(Type, #db{name = Name}) ->
-    {ok, Db} = couch_db:open_int(Name, []),
-    {ok, CompactPid} = couch_db:start_compact(Db),
-    MonRef = erlang:monitor(process, CompactPid),
-    receive
-    {'DOWN', MonRef, process, CompactPid, normal} ->
-        ok;
-    {'DOWN', MonRef, process, CompactPid, Reason} ->
-        etap:bail("Error compacting " ++ Type ++ " database " ++ ?b2l(Name) ++
-            ": " ++ couch_util:to_list(Reason))
-    after 30000 ->
-        etap:bail("Compaction for " ++ Type ++ " database " ++ ?b2l(Name) ++
-            " didn't finish")
-    end,
-    ok = couch_db:close(Db).
-
-
-check_fd(Type, #db{name = Name, fd = Fd, fd_monitor=OldMonRef}) ->
-    {_, MonRef} = spawn_monitor(fun() ->
-        MC = fun(F) ->
-            % Speed up the close after the switch
-            Fd ! maybe_close,
-            {ok, Db} = couch_db:open_int(Name, []),
-            couch_db:close(Db),
-            case Db#db.fd of
-                Fd ->
-                    receive _ -> ok after 1000 -> ok end,
-                    F(F);
-                _ ->
-                    ok
-            end
-        end,
-        MC(MC)
-    end),
-    receive {'DOWN', MonRef, process, _, _} ->
-        etap:diag("Old " ++ Type ++ " database fd terminated")
-    after 30000 ->
-        etap:bail("Old " ++ Type ++ " database fd didn't terminate")
-    end,
-    {ok, #db{fd_monitor = NewMonRef} = Db} = couch_db:open_int(Name, []),
-    ok = couch_db:close(Db),
-    etap:isnt(
-        NewMonRef, OldMonRef, Type ++ " database has new fd monitor").
-
-
-reopen_db(#db{name = Name}) ->
-    {ok, Db} = couch_db:open_int(Name, []),
-    ok = couch_db:close(Db),
-    {ok, Db}.
-
-
-wait_target_in_sync(DocCount, #db{name = TargetName}) ->
-    wait_target_in_sync_loop(DocCount, TargetName, 300).
-
-
-wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
-    etap:bail("Could not get source and target databases in sync");
-wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
-    {ok, Target} = couch_db:open_int(TargetName, []),
-    {ok, TargetInfo} = couch_db:get_db_info(Target),
-    ok = couch_db:close(Target),
-    TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
-    case TargetDocCount == DocCount of
-    true ->
-        etap:diag("Source and target databases are in sync");
-    false ->
-        ok = timer:sleep(100),
-        wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
-    end.
-
-
-compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
-    {ok, SourceDb} = couch_db:open_int(SourceName, []),
-    {ok, TargetDb} = couch_db:open_int(TargetName, []),
-    Fun = fun(FullDocInfo, _, Acc) ->
-        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
-        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
-        DocId = couch_util:get_value(<<"_id">>, Props),
-        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
-        {ok, DocT} ->
-            DocT;
-        Error ->
-            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
-                "' from target: " ++ couch_util:to_list(Error))
-        end,
-        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
-        case DocTargetJson of
-        DocJson ->
-            ok;
-        _ ->
-            etap:bail("Content from document '" ++ ?b2l(DocId) ++
-                "' differs in target database")
-        end,
-        {ok, Acc}
-    end,
-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
-    etap:diag("Target database has the same documents as the source database"),
-    ok = couch_db:close(SourceDb),
-    ok = couch_db:close(TargetDb).
-
-
-check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
-    timer:sleep(1000),
-    Source = case Src of
-    {remote, NameSrc} ->
-        <<(db_url(NameSrc))/binary, $/>>;
-    _ ->
-        Src
-    end,
-    Target = case Tgt of
-    {remote, NameTgt} ->
-        <<(db_url(NameTgt))/binary, $/>>;
-    _ ->
-        Tgt
-    end,
-    FullRepId = list_to_binary(BaseId ++ Ext),
-    Pid = list_to_binary(pid_to_list(RepPid)),
-    [RepTask] = couch_task_status:all(),
-    etap:is(couch_util:get_value(pid, RepTask), Pid,
-        "_active_tasks entry has correct pid property"),
-    etap:is(couch_util:get_value(replication_id, RepTask), FullRepId,
-        "_active_tasks entry has right replication id"),
-    etap:is(couch_util:get_value(continuous, RepTask), true,
-        "_active_tasks entry has continuous property set to true"),
-    etap:is(couch_util:get_value(source, RepTask), Source,
-        "_active_tasks entry has correct source property"),
-    etap:is(couch_util:get_value(target, RepTask), Target,
-        "_active_tasks entry has correct target property"),
-    etap:is(is_integer(couch_util:get_value(docs_read, RepTask)), true,
-        "_active_tasks entry has integer docs_read property"),
-    etap:is(is_integer(couch_util:get_value(docs_written, RepTask)), true,
-        "_active_tasks entry has integer docs_written property"),
-    etap:is(is_integer(couch_util:get_value(doc_write_failures, RepTask)), true,
-        "_active_tasks entry has integer doc_write_failures property"),
-    etap:is(is_integer(couch_util:get_value(revisions_checked, RepTask)), true,
-        "_active_tasks entry has integer revisions_checked property"),
-    etap:is(is_integer(couch_util:get_value(missing_revisions_found, RepTask)), true,
-        "_active_tasks entry has integer missing_revisions_found property"),
-    etap:is(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask)), true,
-        "_active_tasks entry has integer checkpointed_source_seq property"),
-    etap:is(is_integer(couch_util:get_value(source_seq, RepTask)), true,
-        "_active_tasks entry has integer source_seq property"),
-    Progress = couch_util:get_value(progress, RepTask),
-    etap:is(is_integer(Progress), true,
-        "_active_tasks entry has an integer progress property"),
-    etap:is(Progress =< 100, true, "Progress is not greater than 100%").
-
-
-wait_writer(Pid, NumDocs) ->
-    case get_writer_num_docs_written(Pid) of
-    N when N >= NumDocs ->
-        ok;
-    _ ->
-        wait_writer(Pid, NumDocs)
-    end.
-
-
-spawn_writer(Db) ->
-    Parent = self(),
-    Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
-    etap:diag("Started source database writer"),
-    Pid.
-
-
-pause_writer(Pid) ->
-    Ref = make_ref(),
-    Pid ! {pause, Ref},
-    receive
-    {paused, Ref} ->
-        ok
-    after 30000 ->
-        etap:bail("Failed to pause source database writer")
-    end.
-
-
-resume_writer(Pid) ->
-    Ref = make_ref(),
-    Pid ! {continue, Ref},
-    receive
-    {ok, Ref} ->
-        ok
-    after 30000 ->
-        etap:bail("Failed to unpause source database writer")
-    end.
-
-
-get_writer_num_docs_written(Pid) ->
-    Ref = make_ref(),
-    Pid ! {get_count, Ref},
-    receive
-    {count, Ref, Count} ->
-        Count
-    after 30000 ->
-        etap:bail("Timeout getting number of documents written from "
-            "source database writer")
-    end.
-
-
-stop_writer(Pid) ->
-    Ref = make_ref(),
-    Pid ! {stop, Ref},
-    receive
-    {stopped, Ref, DocsWritten} ->
-        MonRef = erlang:monitor(process, Pid),
-        receive
-        {'DOWN', MonRef, process, Pid, _Reason} ->
-            etap:diag("Stopped source database writer"),
-            DocsWritten
-        after 30000 ->
-            etap:bail("Timeout stopping source database writer")
-        end
-    after 30000 ->
-        etap:bail("Timeout stopping source database writer")
-    end.
-
-
-writer_loop(#db{name = DbName}, Parent, Counter) ->
-    maybe_pause(Parent, Counter),
-    Doc = couch_doc:from_json_obj({[
-        {<<"_id">>, list_to_binary(integer_to_list(Counter + 1))},
-        {<<"value">>, Counter + 1},
-        {<<"_attachments">>, {[
-            {<<"icon1.png">>, {[
-                {<<"data">>, base64:encode(att_data())},
-                {<<"content_type">>, <<"image/png">>}
-            ]}},
-            {<<"icon2.png">>, {[
-                {<<"data">>, base64:encode(iolist_to_binary(
-                    [att_data(), att_data()]))},
-                {<<"content_type">>, <<"image/png">>}
-            ]}}
-        ]}}
-    ]}),
-    maybe_pause(Parent, Counter),
-    {ok, Db} = couch_db:open_int(DbName, []),
-    {ok, _} = couch_db:update_doc(Db, Doc, []),
-    ok = couch_db:close(Db),
-    receive
-    {get_count, Ref} ->
-        Parent ! {count, Ref, Counter + 1},
-        writer_loop(Db, Parent, Counter + 1);
-    {stop, Ref} ->
-        Parent ! {stopped, Ref, Counter + 1}
-    after 0 ->
-        ok = timer:sleep(500),
-        writer_loop(Db, Parent, Counter + 1)
-    end.
-
-
-maybe_pause(Parent, Counter) ->
-    receive
-    {get_count, Ref} ->
-        Parent ! {count, Ref, Counter};
-    {pause, Ref} ->
-        Parent ! {paused, Ref},
-        receive {continue, Ref2} -> Parent ! {ok, Ref2} end
-    after 0 ->
-        ok
-    end.
-
-
-db_url(DbName) ->
-    iolist_to_binary([
-        "http://", config:get("httpd", "bind_address", "127.0.0.1"),
-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
-        "/", DbName
-    ]).
-
-
-create_db(DbName) ->
-    {ok, Db} = couch_db:create(
-        DbName,
-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
-    couch_db:close(Db),
-    {ok, Db}.
-
-
-delete_db(#db{name = DbName, main_pid = Pid}) ->
-    ok = couch_server:delete(
-        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
-    MonRef = erlang:monitor(process, Pid),
-    receive
-    {'DOWN', MonRef, process, Pid, _Reason} ->
-        ok
-    after 30000 ->
-        etap:bail("Timeout deleting database")
-    end.
-
-
-replicate({remote, Db}, Target) ->
-    replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
-    replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
-    RepObject = {[
-        {<<"source">>, Source},
-        {<<"target">>, Target},
-        {<<"continuous">>, true}
-    ]},
-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
-    {ok, Pid} = couch_replicator:async_replicate(Rep),
-    {ok, Pid, Rep#rep.id}.
-
-
-cancel_replication(RepId, RepPid) ->
-    {ok, _} = couch_replicator:cancel_replication(RepId),
-    etap:is(is_process_alive(RepPid), false,
-        "Replication process is no longer alive after cancel").
-
-
-att_data() ->
-    {ok, Data} = file:read_file(
-        test_util:source_file("share/www/image/logo.png")),
-    Data.

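The compact_db/2 helper above shows the standard way to drive compaction synchronously: start the compactor, monitor its pid, and treat anything but a normal exit as failure. A condensed sketch (the wrapper name is illustrative; the calls and the 30-second timeout mirror the test):

    % Trigger compaction and block until the compactor exits normally.
    compact_and_wait(DbName) ->
        {ok, Db} = couch_db:open_int(DbName, []),
        {ok, CompactPid} = couch_db:start_compact(Db),
        MonRef = erlang:monitor(process, CompactPid),
        try
            receive
            {'DOWN', MonRef, process, CompactPid, normal} ->
                ok;
            {'DOWN', MonRef, process, CompactPid, Reason} ->
                erlang:error({compaction_failed, DbName, Reason})
            after 30000 ->
                erlang:error({compaction_timeout, DbName})
            end
        after
            ok = couch_db:close(Db)
        end.
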
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/04-replication-large-atts.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/04-replication-large-atts.t b/src/couch_replicator/test/04-replication-large-atts.t
deleted file mode 100755
index 9adaead..0000000
--- a/src/couch_replicator/test/04-replication-large-atts.t
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of large attachments. Verify that both source and
-% target have the same attachment data and metadata.
-
--define(b2l(Bin), binary_to_list(Bin)).
-
--record(user_ctx, {
-    name = null,
-    roles = [],
-    handler
-}).
-
--record(doc, {
-    id = <<"">>,
-    revs = {0, []},
-    body = {[]},
-    atts = [],
-    deleted = false,
-    meta = []
-}).
-
--record(att, {
-    name,
-    type,
-    att_len,
-    disk_len,
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity
-}).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-
-main(_) ->
-    test_util:run(1192, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-    ibrowse:start(),
-    crypto:start(),
-    config:set("attachments", "compressible_types", "text/*", false),
-
-    Pairs = [
-        {source_db_name(), target_db_name()},
-        {{remote, source_db_name()}, target_db_name()},
-        {source_db_name(), {remote, target_db_name()}},
-        {{remote, source_db_name()}, {remote, target_db_name()}}
-    ],
-
-    {ok, SourceDb} = create_db(source_db_name()),
-    etap:diag("Populating source database"),
-    populate_db(SourceDb, 11),
-    ok = couch_db:close(SourceDb),
-
-    lists:foreach(
-        fun({Source, Target}) ->
-            etap:diag("Creating target database"),
-            {ok, TargetDb} = create_db(target_db_name()),
-
-            ok = couch_db:close(TargetDb),
-            etap:diag("Triggering replication"),
-            replicate(Source, Target),
-            etap:diag("Replication finished, comparing source and target databases"),
-            compare_dbs(SourceDb, TargetDb),
-
-            etap:diag("Deleting target database"),
-            delete_db(TargetDb),
-            ok = timer:sleep(1000)
-        end,
-        Pairs),
-
-    delete_db(SourceDb),
-    test_util:stop_couch().
-
-
-populate_db(Db, DocCount) ->
-    Docs = lists:foldl(
-        fun(DocIdCounter, Acc) ->
-            Doc = #doc{
-                id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
-                body = {[]},
-                atts = [
-                    att(<<"att1">>, 2 * 1024 * 1024, <<"text/plain">>),
-                    att(<<"att2">>, round(6.6 * 1024 * 1024), <<"app/binary">>)
-                ]
-            },
-            [Doc | Acc]
-        end,
-        [], lists:seq(1, DocCount)),
-    {ok, _} = couch_db:update_docs(Db, Docs, []).
-
-
-att(Name, Size, Type) ->
-    #att{
-        name = Name,
-        type = Type,
-        att_len = Size,
-        data = fun(Count) -> crypto:rand_bytes(Count) end
-    }.
-
-
-compare_dbs(Source, Target) ->
-    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
-    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
-
-    Fun = fun(FullDocInfo, _, Acc) ->
-        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
-        Id = DocSource#doc.id,
-
-        etap:diag("Verifying document " ++ ?b2l(Id)),
-
-        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
-        etap:is(DocTarget#doc.body, DocSource#doc.body,
-            "Same body in source and target databases"),
-
-        #doc{atts = SourceAtts} = DocSource,
-        #doc{atts = TargetAtts} = DocTarget,
-        etap:is(
-            lists:sort([N || #att{name = N} <- SourceAtts]),
-            lists:sort([N || #att{name = N} <- TargetAtts]),
-            "Document has same number (and names) of attachments in "
-            "source and target databases"),
-
-        lists:foreach(
-            fun(#att{name = AttName} = Att) ->
-                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
-
-                {ok, AttTarget} = find_att(TargetAtts, AttName),
-                SourceMd5 = att_md5(Att),
-                TargetMd5 = att_md5(AttTarget),
-                case AttName of
-                <<"att1">> ->
-                    etap:is(Att#att.encoding, gzip,
-                        "Attachment is gzip encoded in source database"),
-                    etap:is(AttTarget#att.encoding, gzip,
-                        "Attachment is gzip encoded in target database"),
-                    DecSourceMd5 = att_decoded_md5(Att),
-                    DecTargetMd5 = att_decoded_md5(AttTarget),
-                    etap:is(DecTargetMd5, DecSourceMd5,
-                        "Same identity content in source and target databases");
-                _ ->
-                    etap:is(Att#att.encoding, identity,
-                        "Attachment is not encoded in source database"),
-                    etap:is(AttTarget#att.encoding, identity,
-                        "Attachment is not encoded in target database")
-                end,
-                etap:is(TargetMd5, SourceMd5,
-                    "Same content in source and target databases"),
-                etap:is(is_integer(Att#att.disk_len), true,
-                    "#att.disk_len is an integer in source database"),
-                etap:is(is_integer(Att#att.att_len), true,
-                    "#att.att_len is an integer in source database"),
-                etap:is(is_integer(AttTarget#att.disk_len), true,
-                    "#att.disk_len is an integer in target database"),
-                etap:is(is_integer(AttTarget#att.att_len), true,
-                    "#att.att_len is an integer in target database"),
-                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
-                    "Same identity length in source and target databases"),
-                etap:is(Att#att.att_len, AttTarget#att.att_len,
-                    "Same encoded length in source and target databases"),
-                etap:is(Att#att.type, AttTarget#att.type,
-                    "Same type in source and target databases"),
-                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
-                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
-            end,
-            SourceAtts),
-
-        {ok, Acc}
-    end,
-
-    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
-    ok = couch_db:close(SourceDb),
-    ok = couch_db:close(TargetDb).
-
-
-find_att([], _Name) ->
-    nil;
-find_att([#att{name = Name} = Att | _], Name) ->
-    {ok, Att};
-find_att([_ | Rest], Name) ->
-    find_att(Rest, Name).
-
-
-att_md5(Att) ->
-    Md50 = couch_doc:att_foldl(
-        Att,
-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
-        couch_util:md5_init()),
-    couch_util:md5_final(Md50).
-
-att_decoded_md5(Att) ->
-    Md50 = couch_doc:att_foldl_decode(
-        Att,
-        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
-        couch_util:md5_init()),
-    couch_util:md5_final(Md50).
-
-
-db_url(DbName) ->
-    iolist_to_binary([
-        "http://", config:get("httpd", "bind_address", "127.0.0.1"),
-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
-        "/", DbName
-    ]).
-
-
-create_db(DbName) ->
-    couch_db:create(
-        DbName,
-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
-    ok = couch_server:delete(
-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
-    replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
-    replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
-    RepObject = {[
-        {<<"source">>, Source},
-        {<<"target">>, Target}
-    ]},
-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
-    {ok, Pid} = couch_replicator:async_replicate(Rep),
-    MonRef = erlang:monitor(process, Pid),
-    receive
-    {'DOWN', MonRef, process, Pid, Reason} ->
-        etap:is(Reason, normal, "Replication finished successfully")
-    after 300000 ->
-        etap:bail("Timeout waiting for replication to finish")
-    end.

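Note how att/3 above sets #att.data to a fun rather than a binary: the attachment bytes are generated on demand while the document is written, so multi-megabyte attachments never need to exist in memory as a single binary. A minimal sketch of writing one such document (record definitions as in the test; the id, name and size are illustrative):

    % One 2 MiB attachment whose bytes are produced lazily by the data fun.
    write_doc_with_streamed_att(Db) ->
        Att = #att{
            name = <<"blob">>,
            type = <<"application/octet-stream">>,
            att_len = 2 * 1024 * 1024,
            data = fun(Count) -> crypto:rand_bytes(Count) end
        },
        Doc = #doc{id = <<"doc_with_blob">>, atts = [Att]},
        {ok, _Rev} = couch_db:update_doc(Db, Doc, []).
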
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/test/05-replication-many-leaves.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/05-replication-many-leaves.t b/src/couch_replicator/test/05-replication-many-leaves.t
deleted file mode 100755
index bd795ec..0000000
--- a/src/couch_replicator/test/05-replication-many-leaves.t
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test replication of documents with many leaf revisions.
-% Motivated by COUCHDB-1340 and other similar issues where a document
-% GET with a too long ?open_revs revision list doesn't work due to
-% maximum web server limits for the HTTP request path.
-
--record(user_ctx, {
-    name = null,
-    roles = [],
-    handler
-}).
-
--record(doc, {
-    id = <<"">>,
-    revs = {0, []},
-    body = {[]},
-    atts = [],
-    deleted = false,
-    meta = []
-}).
-
--record(att, {
-    name,
-    type,
-    att_len,
-    disk_len,
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity
-}).
-
--define(b2l(B), binary_to_list(B)).
--define(l2b(L), list_to_binary(L)).
--define(i2l(I), integer_to_list(I)).
-
-
-source_db_name() -> <<"couch_test_rep_db_a">>.
-target_db_name() -> <<"couch_test_rep_db_b">>.
-
-doc_ids() ->
-    [<<"doc1">>, <<"doc2">>, <<"doc3">>].
-
-doc_num_conflicts(<<"doc1">>) -> 10;
-doc_num_conflicts(<<"doc2">>) -> 100;
-% a number > MaxURLlength (7000) / length(DocRevisionString)
-doc_num_conflicts(<<"doc3">>) -> 210.
-
-
-main(_) ->
-    test_util:run(56, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-    ibrowse:start(),
-    crypto:start(),
-    config:set("replicator", "connection_timeout", "90000", false),
-
-    Pairs = [
-        {source_db_name(), target_db_name()},
-        {{remote, source_db_name()}, target_db_name()},
-        {source_db_name(), {remote, target_db_name()}},
-        {{remote, source_db_name()}, {remote, target_db_name()}}
-    ],
-
-    lists:foreach(
-        fun({Source, Target}) ->
-            {ok, SourceDb} = create_db(source_db_name()),
-            etap:diag("Populating source database"),
-            {ok, DocRevs} = populate_db(SourceDb),
-            ok = couch_db:close(SourceDb),
-            etap:diag("Creating target database"),
-            {ok, TargetDb} = create_db(target_db_name()),
-
-            ok = couch_db:close(TargetDb),
-            etap:diag("Triggering replication"),
-            replicate(Source, Target),
-            etap:diag("Replication finished, comparing source and target databases"),
-            {ok, SourceDb2} = couch_db:open_int(source_db_name(), []),
-            {ok, TargetDb2} = couch_db:open_int(target_db_name(), []),
-            verify_target(SourceDb2, TargetDb2, DocRevs),
-            ok = couch_db:close(SourceDb2),
-            ok = couch_db:close(TargetDb2),
-
-            {ok, SourceDb3} = couch_db:open_int(source_db_name(), []),
-            {ok, DocRevs2} = add_attachments(SourceDb3, DocRevs, 2),
-            ok = couch_db:close(SourceDb3),
-            etap:diag("Triggering replication again"),
-            replicate(Source, Target),
-            etap:diag("Replication finished, comparing source and target databases"),
-            {ok, SourceDb4} = couch_db:open_int(source_db_name(), []),
-            {ok, TargetDb4} = couch_db:open_int(target_db_name(), []),
-            verify_target(SourceDb4, TargetDb4, DocRevs2),
-            ok = couch_db:close(SourceDb4),
-            ok = couch_db:close(TargetDb4),
-
-            etap:diag("Deleting source and target databases"),
-            delete_db(TargetDb),
-            delete_db(SourceDb),
-            ok = timer:sleep(1000)
-        end,
-        Pairs),
-
-    test_util:stop_couch().
-
-
-populate_db(Db) ->
-    DocRevsDict = lists:foldl(
-        fun(DocId, Acc) ->
-            Value = <<"0">>,
-            Doc = #doc{
-                id = DocId,
-                body = {[ {<<"value">>, Value} ]}
-            },
-            {ok, Rev} = couch_db:update_doc(Db, Doc, []),
-            {ok, DocRevs} = add_doc_siblings(Db, DocId, doc_num_conflicts(DocId)),
-            dict:store(DocId, [Rev | DocRevs], Acc)
-        end,
-        dict:new(), doc_ids()),
-    {ok, dict:to_list(DocRevsDict)}.
-
-
-add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
-    add_doc_siblings(Db, DocId, NumLeaves, [], []).
-
-
-add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
-    {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
-    {ok, AccRevs};
-
-add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
-    Value = list_to_binary(integer_to_list(NumLeaves)),
-    Rev = couch_util:md5(Value),
-    Doc = #doc{
-        id = DocId,
-        revs = {1, [Rev]},
-        body = {[ {<<"value">>, Value} ]}
-    },
-    add_doc_siblings(Db, DocId, NumLeaves - 1, [Doc | AccDocs], [{1, Rev} | AccRevs]).
-
-
-verify_target(_SourceDb, _TargetDb, []) ->
-    ok;
-
-verify_target(SourceDb, TargetDb, [{DocId, RevList} | Rest]) ->
-    {ok, Lookups} = couch_db:open_doc_revs(
-        TargetDb,
-        DocId,
-        RevList,
-        [conflicts, deleted_conflicts]),
-    Docs = [Doc || {ok, Doc} <- Lookups],
-    {ok, SourceLookups} = couch_db:open_doc_revs(
-        SourceDb,
-        DocId,
-        RevList,
-        [conflicts, deleted_conflicts]),
-    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
-    Total = doc_num_conflicts(DocId) + 1,
-    etap:is(
-        length(Docs),
-        Total,
-        "Target has " ++ ?i2l(Total) ++ " leaf revisions of document " ++ ?b2l(DocId)),
-    etap:diag("Verifying all revisions of document " ++ ?b2l(DocId)),
-    lists:foreach(
-        fun({#doc{id = Id, revs = Revs} = TgtDoc, #doc{id = Id, revs = Revs} = SrcDoc}) ->
-            SourceJson = couch_doc:to_json_obj(SrcDoc, [attachments]),
-            TargetJson = couch_doc:to_json_obj(TgtDoc, [attachments]),
-            case TargetJson of
-            SourceJson ->
-                ok;
-            _ ->
-                {Pos, [Rev | _]} = Revs,
-                etap:bail("Wrong value for revision " ++
-                    ?b2l(couch_doc:rev_to_str({Pos, Rev})) ++
-                    " of document " ++ ?b2l(DocId))
-            end
-        end,
-        lists:zip(Docs, SourceDocs)),
-    verify_target(SourceDb, TargetDb, Rest).
-
-
-add_attachments(Source, DocIdRevs, NumAtts) ->
-    add_attachments(Source, DocIdRevs, NumAtts, []).
-
-add_attachments(_SourceDb, [], _NumAtts, Acc) ->
-    {ok, Acc};
-
-add_attachments(SourceDb, [{DocId, RevList} | Rest], NumAtts, IdRevsAcc) ->
-    {ok, SourceLookups} = couch_db:open_doc_revs(
-        SourceDb,
-        DocId,
-        RevList,
-        []),
-    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
-    Total = doc_num_conflicts(DocId) + 1,
-    etap:is(
-        length(SourceDocs),
-        Total,
-        "Source still has " ++ ?i2l(Total) ++
-            " leaf revisions of document " ++ ?b2l(DocId)),
-    etap:diag("Adding " ++ ?i2l(NumAtts) ++
-        " attachments to each revision of the document " ++ ?b2l(DocId)),
-    NewDocs = lists:foldl(
-        fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
-            NewAtts = lists:foldl(
-                fun(I, AttAcc) ->
-                    AttData = crypto:rand_bytes(100),
-                    NewAtt = #att{
-                        name = iolist_to_binary(
-                            ["att_", ?i2l(I), "_", couch_doc:rev_to_str({Pos, Rev})]),
-                        type = <<"application/foobar">>,
-                        att_len = byte_size(AttData),
-                        data = AttData
-                    },
-                    [NewAtt | AttAcc]
-                end,
-                [], lists:seq(1, NumAtts)),
-            [Doc#doc{atts = Atts ++ NewAtts} | Acc]
-        end,
-        [], SourceDocs),
-    {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
-    NewRevs = [R || {ok, R} <- UpdateResults],
-    etap:is(
-        length(NewRevs),
-        length(NewDocs),
-        "Document revisions updated with " ++ ?i2l(NumAtts) ++ " attachments"),
-    add_attachments(SourceDb, Rest, NumAtts, [{DocId, NewRevs} | IdRevsAcc]).
-
-
-db_url(DbName) ->
-    iolist_to_binary([
-        "http://", config:get("httpd", "bind_address", "127.0.0.1"),
-        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
-        "/", DbName
-    ]).
-
-
-create_db(DbName) ->
-    couch_db:create(
-        DbName,
-        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
-
-
-delete_db(Db) ->
-    ok = couch_server:delete(
-        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
-
-
-replicate({remote, Db}, Target) ->
-    replicate(db_url(Db), Target);
-
-replicate(Source, {remote, Db}) ->
-    replicate(Source, db_url(Db));
-
-replicate(Source, Target) ->
-    RepObject = {[
-        {<<"source">>, Source},
-        {<<"target">>, Target}
-    ]},
-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
-        RepObject, #user_ctx{roles = [<<"_admin">>]}),
-    {ok, Pid} = couch_replicator:async_replicate(Rep),
-    MonRef = erlang:monitor(process, Pid),
-    receive
-    {'DOWN', MonRef, process, Pid, Reason} ->
-        etap:is(Reason, normal, "Replication finished successfully")
-    after 900000 ->
-        etap:bail("Timeout waiting for replication to finish")
-    end.


[08/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_misc_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
deleted file mode 100644
index 3b2bbeb..0000000
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ /dev/null
@@ -1,306 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_misc_handlers).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
-    handle_all_dbs_req/1,handle_restart_req/1,
-    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
-    handle_task_status_req/1, handle_file_req/2]).
-
--export([increment_update_seq_req/2]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, send_error/4]).
-
-% httpd global handlers
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
-    send_json(Req, {[
-        {couchdb, WelcomeMessage},
-        {uuid, couch_server:get_uuid()},
-        {version, list_to_binary(couch_server:get_version())}
-        ] ++ case config:get("vendor") of
-        [] ->
-            [];
-        Properties ->
-            [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
-        end
-    });
-handle_welcome_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    {{Year,Month,Day},Time} = erlang:universaltime(),
-    OneYearFromNow = {{Year+1,Month,Day},Time},
-    CachingHeaders = [
-        % favicon should expire a year from now
-        {"Cache-Control", "public, max-age=31536000"},
-        {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
-    ],
-    couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-
-handle_favicon_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_file_req(#httpd{method='GET'}=Req, Document) ->
-    couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-
-handle_file_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    "/" ++ UrlPath = couch_httpd:path(Req),
-    case couch_httpd:partition(UrlPath) of
-    {_ActionKey, "/", RelativePath} ->
-        % GET /_utils/path or GET /_utils/
-        CachingHeaders =
-                [{"Cache-Control", "private, must-revalidate"}],
-        couch_httpd:serve_file(Req, RelativePath, DocumentRoot, CachingHeaders);
-    {_ActionKey, "", _RelativePath} ->
-        % GET /_utils
-        RedirectPath = couch_httpd:path(Req) ++ "/",
-        couch_httpd:send_redirect(Req, RedirectPath)
-    end;
-handle_utils_dir_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    {ok, DbNames} = couch_server:all_databases(),
-    send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    % convert the list of prop lists to a list of json objects
-    send_json(Req, [{Props} || Props <- couch_task_status:all()]);
-handle_task_status_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_restart_req(#httpd{method='GET', path_parts=[_, <<"token">>]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Token = case application:get_env(couch, instance_token) of
-        {ok, Tok} ->
-            Tok;
-        _ ->
-            Tok = erlang:phash2(make_ref()),
-            application:set_env(couch, instance_token, Tok),
-            Tok
-    end,
-    send_json(Req, 200, {[{token, Token}]});
-handle_restart_req(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Result = send_json(Req, 202, {[{ok, true}]}),
-    couch:restart(),
-    Result;
-handle_restart_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_uuids_req(#httpd{method='GET'}=Req) ->
-    Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
-    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
-    Etag = couch_httpd:make_etag(UUIDs),
-    couch_httpd:etag_respond(Req, Etag, fun() ->
-        CacheBustingHeaders = [
-            {"Date", couch_util:rfc1123_date()},
-            {"Cache-Control", "no-cache"},
-            % Past date, ON PURPOSE!
-            {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
-            {"Pragma", "no-cache"},
-            {"ETag", Etag}
-        ],
-        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
-    end);
-handle_uuids_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
-        case dict:is_key(Section, Acc) of
-        true ->
-            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
-        false ->
-            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
-        end
-    end, dict:new(), config:all()),
-    KVs = dict:fold(fun(Section, Values, Acc) ->
-        [{list_to_binary(Section), {Values}} | Acc]
-    end, [], Grouped),
-    send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    KVs = [{list_to_binary(Key), list_to_binary(Value)}
-            || {Key, Value} <- config:get(Section)],
-    send_json(Req, 200, {KVs});
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    Value ->
-        send_json(Req, 200, list_to_binary(Value))
-    end;
-% POST /_config/_reload - Flushes unpersisted config values from RAM
-handle_config_req(#httpd{method='POST', path_parts=[_, <<"_reload">>]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    ok = config:reload(),
-    send_json(Req, 200, {[{ok, true}]});
-% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
-      when (Method == 'PUT') or (Method == 'DELETE') ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
-    case config:get("httpd", "config_whitelist", null) of
-        null ->
-            % No whitelist; allow all changes.
-            handle_approved_config_req(Req, Persist);
-        WhitelistValue ->
-            % Provide a failsafe to protect against inadvertently locking
-            % oneself out of the config by supplying a syntactically incorrect
-            % Erlang term. To intentionally lock down the whitelist, supply a
-            % well-formed list which does not include the whitelist config
-            % variable itself.
-            FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
-
-            Whitelist = case couch_util:parse_term(WhitelistValue) of
-                {ok, Value} when is_list(Value) ->
-                    Value;
-                {ok, _NonListValue} ->
-                    FallbackWhitelist;
-                {error, _} ->
-                    [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
-                    ?LOG_ERROR("Only whitelisting ~s/~s due to error parsing: ~p",
-                               [WhitelistSection, WhitelistKey, WhitelistValue]),
-                    FallbackWhitelist
-            end,
-
-            IsRequestedKeyVal = fun(Element) ->
-                case Element of
-                    {A, B} ->
-                        % For readability, tuples may be used instead of binaries
-                        % in the whitelist.
-                        case {couch_util:to_binary(A), couch_util:to_binary(B)} of
-                            {Section, Key} ->
-                                true;
-                            {Section, <<"*">>} ->
-                                true;
-                            _Else ->
-                                false
-                        end;
-                    _Else ->
-                        false
-                end
-            end,
-
-            case lists:any(IsRequestedKeyVal, Whitelist) of
-                true ->
-                    % Allow modifying this whitelisted variable.
-                    handle_approved_config_req(Req, Persist);
-                _NotWhitelisted ->
-                    % Disallow modifying this non-whitelisted variable.
-                    send_error(Req, 400, <<"modification_not_allowed">>,
-                               ?l2b("This config variable is read-only"))
-            end
-    end;
-handle_config_req(Req) ->
-    send_method_not_allowed(Req, "GET,PUT,POST,DELETE").
-
-% PUT /_config/Section/Key
-% "value"
-handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req, Persist) ->
-    Value = case Section of
-    <<"admins">> ->
-        couch_passwords:hash_admin_password(couch_httpd:json_body(Req));
-    _ ->
-        couch_httpd:json_body(Req)
-    end,
-    OldValue = config:get(Section, Key, ""),
-    case config:set(Section, Key, ?b2l(Value), Persist) of
-    ok ->
-        send_json(Req, 200, list_to_binary(OldValue));
-    Error ->
-        throw(Error)
-    end;
-% DELETE /_config/Section/Key
-handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req, Persist) ->
-    case config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    OldValue ->
-        config:delete(Section, Key, Persist),
-        send_json(Req, 200, list_to_binary(OldValue))
-    end.
-
-
-% httpd db handlers
-
-increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {ok, NewSeq} = couch_db:increment_update_seq(Db),
-    send_json(Req, {[{ok, true},
-        {update_seq, NewSeq}
-    ]});
-increment_update_seq_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-% httpd log handlers
-
-handle_log_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
-    Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
-    Chunk = couch_log:read(Bytes, Offset),
-    {ok, Resp} = start_chunked_response(Req, 200, [
-        % send a plaintext response
-        {"Content-Type", "text/plain; charset=utf-8"},
-        {"Content-Length", integer_to_list(length(Chunk))}
-    ]),
-    send_chunk(Resp, Chunk),
-    last_chunk(Resp);
-handle_log_req(#httpd{method='POST'}=Req) ->
-    {PostBody} = couch_httpd:json_body_obj(Req),
-    Level = couch_util:get_value(<<"level">>, PostBody),
-    Message = ?b2l(couch_util:get_value(<<"message">>, PostBody)),
-    case Level of
-    <<"debug">> ->
-        ?LOG_DEBUG(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"info">> ->
-        ?LOG_INFO(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"error">> ->
-        ?LOG_ERROR(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    _ ->
-        send_json(Req, 400, {[{error, ?l2b(io_lib:format("Unrecognized log level '~s'", [Level]))}]})
-    end;
-handle_log_req(Req) ->
-    send_method_not_allowed(Req, "GET,POST").

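As the whitelist handling above shows, config_whitelist is parsed with couch_util:parse_term/1 and each {Section, Key} entry is normalized with couch_util:to_binary/1, with <<"*">> as the key covering a whole section. An illustrative parsed value (the section/key choices are examples only):

    % The Erlang term a config_whitelist string parses to. Keeping the
    % {httpd, config_whitelist} entry avoids locking yourself out, since
    % only whitelisted settings stay editable over HTTP.
    [{<<"httpd">>, <<"config_whitelist">>},
     {<<"log">>, <<"level">>},
     {<<"couchdb">>, <<"*">>}]
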
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_oauth.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_oauth.erl b/src/couch/src/couch_httpd_oauth.erl
deleted file mode 100644
index 8fd4c30..0000000
--- a/src/couch/src/couch_httpd_oauth.erl
+++ /dev/null
@@ -1,387 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_oauth).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_js_functions.hrl").
-
--export([oauth_authentication_handler/1, handle_oauth_req/1]).
-
--define(OAUTH_DDOC_ID, <<"_design/oauth">>).
--define(OAUTH_VIEW_NAME, <<"oauth_credentials">>).
-
--record(callback_params, {
-    consumer,
-    token,
-    token_secret,
-    url,
-    signature,
-    params,
-    username
-}).
-
-% OAuth auth handler using per-node user db
-oauth_authentication_handler(Req) ->
-    serve_oauth(Req, fun oauth_auth_callback/2, true).
-
-
-oauth_auth_callback(Req, #callback_params{token_secret = undefined}) ->
-    couch_httpd:send_error(
-         Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>);
-
-oauth_auth_callback(#httpd{mochi_req = MochiReq} = Req, CbParams) ->
-    Method = atom_to_list(MochiReq:get(method)),
-    #callback_params{
-        consumer = Consumer,
-        token_secret = TokenSecret,
-        url = Url,
-        signature = Sig,
-        params = Params,
-        username = User
-    } = CbParams,
-    case oauth:verify(Sig, Method, Url, Params, Consumer, TokenSecret) of
-    true ->
-        set_user_ctx(Req, User);
-    false ->
-        ?LOG_DEBUG("OAuth handler: signature verification failed for user `~p`~n"
-            "Received signature is `~p`~n"
-            "HTTP method is `~p`~n"
-            "URL is `~p`~n"
-            "Parameters are `~p`~n"
-            "Consumer is `~p`, token secret is `~p`~n"
-            "Expected signature was `~p`~n",
-            [User, Sig, Method, Url, Params, Consumer, TokenSecret,
-                oauth:sign(Method, Url, Params, Consumer, "", TokenSecret)]),
-        Req
-    end.
-
-
-% Look up the consumer key and get the roles to give the consumer
-set_user_ctx(_Req, undefined) ->
-    throw({bad_request, unknown_oauth_token});
-set_user_ctx(Req, Name) ->
-    case couch_auth_cache:get_user_creds(Name) of
-        nil ->
-            ?LOG_DEBUG("OAuth handler: user `~p` credentials not found", [Name]),
-            Req;
-        User ->
-            Roles = couch_util:get_value(<<"roles">>, User, []),
-            Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
-    end.
-
-% OAuth request_token
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token_secret = TokenSecret,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case oauth:verify(
-            Sig, atom_to_list(Method), Url, Params, Consumer, TokenSecret) of
-        true ->
-            ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
-        false ->
-            invalid_signature(Req)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
-    {ok, serve_oauth_authorize(Req)};
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token = Token,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case Token of
-        "requestkey" ->
-            case oauth:verify(
-                Sig, "GET", Url, Params, Consumer, "requestsecret") of
-            true ->
-                ok(Req,
-                    <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
-            false ->
-                invalid_signature(Req)
-            end;
-        _ ->
-            couch_httpd:send_error(
-                Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
-    couch_httpd:send_method_not_allowed(Req, "GET").
-
-invalid_signature(Req) ->
-    couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
-
-% This needs to be protected, i.e. the user must log in using HTTP Basic Auth or form-based login.
-serve_oauth_authorize(#httpd{method=Method}=Req1) ->
-    case Method of
-        'GET' ->
-            % Confirm with the User that they want to authenticate the Consumer
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "GET", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        'POST' ->
-            % If the User has confirmed, we direct the User back to the Consumer with a verification code
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "POST", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    %redirect(oauth_callback, oauth_token, oauth_verifier),
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        _ ->
-            couch_httpd:send_method_not_allowed(Req1, "GET,POST")
-    end.
-
-serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
-    % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
-    % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
-    % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
-    AuthHeader = case MochiReq:get_header_value("authorization") of
-        undefined ->
-            "";
-        Else ->
-            [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
-            case [string:to_lower(Head) | Tail] of
-                ["oauth", Rest] -> Rest;
-                _ -> ""
-            end
-    end,
-    HeaderParams = oauth:header_params_decode(AuthHeader),
-    %Realm = couch_util:get_value("realm", HeaderParams),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-requested-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                undefined ->
-                    MochiReq:get(raw_path);
-                VHostPath ->
-                    VHostPath
-            end;
-        RequestedPath0 ->
-           RequestedPath0
-    end,
-    {_, QueryString, _} = mochiweb_util:urlsplit_path(RequestedPath),
-
-    Params = proplists:delete("realm", HeaderParams) ++ mochiweb_util:parse_qs(QueryString),
-
-    ?LOG_DEBUG("OAuth Params: ~p", [Params]),
-    case couch_util:get_value("oauth_version", Params, "1.0") of
-        "1.0" ->
-            case couch_util:get_value("oauth_consumer_key", Params, undefined) of
-                undefined ->
-                    case FailSilently of
-                        true -> Req;
-                        false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
-                    end;
-                ConsumerKey ->
-                    Url = couch_httpd:absolute_uri(Req, RequestedPath),
-                    case get_callback_params(ConsumerKey, Params, Url) of
-                        {ok, CallbackParams} ->
-                            Fun(Req, CallbackParams);
-                        invalid_consumer_token_pair ->
-                            couch_httpd:send_error(
-                                Req, 400,
-                                <<"invalid_consumer_token_pair">>,
-                                <<"Invalid consumer and token pair.">>);
-                        {error, {Error, Reason}} ->
-                            couch_httpd:send_error(Req, 400, Error, Reason)
-                    end
-            end;
-        _ ->
-            couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
-    end.
-
-
-get_callback_params(ConsumerKey, Params, Url) ->
-    Token = couch_util:get_value("oauth_token", Params),
-    SigMethod = sig_method(Params),
-    CbParams0 = #callback_params{
-        token = Token,
-        signature = couch_util:get_value("oauth_signature", Params),
-        params = proplists:delete("oauth_signature", Params),
-        url = Url
-    },
-    case oauth_credentials_info(Token, ConsumerKey) of
-    nil ->
-        invalid_consumer_token_pair;
-    {error, _} = Err ->
-        Err;
-    {OauthCreds} ->
-        User = couch_util:get_value(<<"username">>, OauthCreds, []),
-        ConsumerSecret = ?b2l(couch_util:get_value(
-            <<"consumer_secret">>, OauthCreds, <<>>)),
-        TokenSecret = ?b2l(couch_util:get_value(
-            <<"token_secret">>, OauthCreds, <<>>)),
-        case (User =:= []) orelse (ConsumerSecret =:= []) orelse
-            (TokenSecret =:= []) of
-        true ->
-            invalid_consumer_token_pair;
-        false ->
-            CbParams = CbParams0#callback_params{
-                consumer = {ConsumerKey, ConsumerSecret, SigMethod},
-                token_secret = TokenSecret,
-                username = User
-            },
-            ?LOG_DEBUG("Got OAuth credentials, for ConsumerKey `~p` and "
-                "Token `~p`, from the views, User: `~p`, "
-                "ConsumerSecret: `~p`, TokenSecret: `~p`",
-                [ConsumerKey, Token, User, ConsumerSecret, TokenSecret]),
-            {ok, CbParams}
-        end
-    end.
-
-
-sig_method(Params) ->
-    sig_method_1(couch_util:get_value("oauth_signature_method", Params)).
-sig_method_1("PLAINTEXT") ->
-    plaintext;
-% sig_method_1("RSA-SHA1") ->
-%    rsa_sha1;
-sig_method_1("HMAC-SHA1") ->
-    hmac_sha1;
-sig_method_1(_) ->
-    undefined.
-
-
-ok(#httpd{mochi_req=MochiReq}, Body) ->
-    {ok, MochiReq:respond({200, [], Body})}.
-
-
-oauth_credentials_info(Token, ConsumerKey) ->
-    case use_auth_db() of
-    {ok, Db} ->
-        Result = case query_oauth_view(Db, [?l2b(ConsumerKey), ?l2b(Token)]) of
-        [] ->
-            nil;
-        [Creds] ->
-            Creds;
-        [_ | _] ->
-            Reason = iolist_to_binary(
-                io_lib:format("Found multiple OAuth credentials for the pair "
-                    " (consumer_key: `~p`, token: `~p`)", [ConsumerKey, Token])),
-            {error, {<<"oauth_token_consumer_key_pair">>, Reason}}
-        end,
-        couch_db:close(Db),
-        Result;
-    nil ->
-        {
-            case config:get("oauth_consumer_secrets", ConsumerKey) of
-            undefined -> [];
-            ConsumerSecret -> [{<<"consumer_secret">>, ?l2b(ConsumerSecret)}]
-            end
-            ++
-            case config:get("oauth_token_secrets", Token) of
-            undefined -> [];
-            TokenSecret -> [{<<"token_secret">>, ?l2b(TokenSecret)}]
-            end
-            ++
-            case config:get("oauth_token_users", Token) of
-            undefined -> [];
-            User -> [{<<"username">>, ?l2b(User)}]
-            end
-        }
-    end.
-
-
-use_auth_db() ->
-    case config:get("couch_httpd_oauth", "use_users_db", "false") of
-    "false" ->
-        nil;
-    "true" ->
-        AuthDb = open_auth_db(),
-        {ok, _AuthDb2} = ensure_oauth_views_exist(AuthDb)
-    end.
-
-
-open_auth_db() ->
-    DbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
-    DbOptions = [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}],
-    {ok, AuthDb} = couch_db:open_int(DbName, DbOptions),
-    AuthDb.
-
-
-ensure_oauth_views_exist(AuthDb) ->
-    case couch_db:open_doc(AuthDb, ?OAUTH_DDOC_ID, []) of
-    {ok, _DDoc} ->
-        {ok, AuthDb};
-    _ ->
-        {ok, DDoc} = get_oauth_ddoc(),
-        {ok, _Rev} = couch_db:update_doc(AuthDb, DDoc, []),
-        {ok, _AuthDb2} = couch_db:reopen(AuthDb)
-    end.
-
-
-get_oauth_ddoc() ->
-    Json = {[
-        {<<"_id">>, ?OAUTH_DDOC_ID},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>,
-            {[
-                {?OAUTH_VIEW_NAME,
-                    {[
-                        {<<"map">>, ?OAUTH_MAP_FUN}
-                    ]}
-                }
-            ]}
-        }
-    ]},
-    {ok, couch_doc:from_json_obj(Json)}.
-
-
-query_oauth_view(Db, Key) ->
-    ViewOptions = [
-        {start_key, Key},
-        {end_key, Key}
-    ],
-    Callback = fun({row, Row}, Acc) ->
-            {ok, [couch_util:get_value(value, Row) | Acc]};
-        (_, Acc) ->
-            {ok, Acc}
-    end,
-    {ok, Result} = couch_mrview:query_view(
-        Db, ?OAUTH_DDOC_ID, ?OAUTH_VIEW_NAME, ViewOptions, Callback, []),
-    Result.
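
For reference, when use_users_db is false the module above reads OAuth
credentials directly from three ini sections; a minimal sketch (the key,
secrets and user name are placeholder values):

    [oauth_consumer_secrets]
    consumer1 = consumersecret

    [oauth_token_secrets]
    token1 = tokensecret

    [oauth_token_users]
    token1 = joe

Verification then reduces to the same call pair used in the handler: the
expected signature is oauth:sign(Method, Url, Params, {"consumer1",
"consumersecret", hmac_sha1}, "", "tokensecret"), checked against the
received one with the matching oauth:verify/6 call.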

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_proxy.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_proxy.erl b/src/couch/src/couch_httpd_proxy.erl
deleted file mode 100644
index 7e9aed7..0000000
--- a/src/couch/src/couch_httpd_proxy.erl
+++ /dev/null
@@ -1,426 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_httpd_proxy).
-
--export([handle_proxy_req/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
-
--define(TIMEOUT, infinity).
--define(PKT_SIZE, 4096).
-
-
-handle_proxy_req(Req, ProxyDest) ->
-    Method = get_method(Req),
-    Url = get_url(Req, ProxyDest),
-    Version = get_version(Req),
-    Headers = get_headers(Req),
-    Body = get_body(Req),
-    Options = [
-        {http_vsn, Version},
-        {headers_as_is, true},
-        {response_format, binary},
-        {stream_to, {self(), once}}
-    ],
-    case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
-        {ibrowse_req_id, ReqId} ->
-            stream_response(Req, ProxyDest, ReqId);
-        {error, Reason} ->
-            throw({error, Reason})
-    end.
-    
-
-get_method(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(method) of
-        Method when is_atom(Method) ->
-            list_to_atom(string:to_lower(atom_to_list(Method)));
-        Method when is_list(Method) ->
-            list_to_atom(string:to_lower(Method));
-        Method when is_binary(Method) ->
-            list_to_atom(string:to_lower(?b2l(Method)))
-    end.
-
-
-get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
-    get_url(Req, ?b2l(ProxyDest));
-get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
-    BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
-        {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
-        _ -> ProxyDest
-    end,
-    ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
-    RequestedPath = MochiReq:get(raw_path),
-    case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
-        {[], ProxyPrefix, []} ->
-            BaseUrl;
-        {[], ProxyPrefix, [$/ | DestPath]} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        {[], ProxyPrefix, DestPath} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        _Else ->
-            throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
-    end.
-
-get_version(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(version).
-
-
-get_headers(#httpd{mochi_req=MochiReq}) ->
-    to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
-
-to_ibrowse_headers([], Acc) ->
-    lists:reverse(Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
-    to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
-        % This appears to make ibrowse too smart.
-        %"transfer-encoding" ->
-        %    to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
-        _ ->
-            to_ibrowse_headers(Rest, [{K, V} | Acc])
-    end.
-
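-% Note: ibrowse accepts the body as a binary, a zero-arity fun, or a
-% {Fun, State} pair; it calls Fun(State) repeatedly, expecting
-% {ok, Data, NewState} or eof (see the stream functions below).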
-get_body(#httpd{method='GET'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='HEAD'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='DELETE'}) ->
-    fun() -> eof end;
-get_body(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(body_length) of
-        undefined ->
-            <<>>;
-        {unknown_transfer_encoding, Unknown} ->
-            exit({unknown_transfer_encoding, Unknown});
-        chunked ->
-            {fun stream_chunked_body/1, {init, MochiReq, 0}};
-        0 ->
-            <<>>;
-        Length when is_integer(Length) andalso Length > 0 ->
-            {fun stream_length_body/1, {init, MochiReq, Length}};
-        Length ->
-            exit({invalid_body_length, Length})
-    end.
-
-
-remove_trailing_slash(Url) ->
-    rem_slash(lists:reverse(Url)).
-
-rem_slash([]) ->
-    [];
-rem_slash([$\s | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\t | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\r | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\n | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$/ | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash(RevUrl) ->
-    lists:reverse(RevUrl).
-
-
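-% The chunked body is re-streamed to ibrowse via a small state machine:
-% {init, MReq, 0} does the expect-continue dance, {stream, MReq, ChunkRem,
-% Buf, BufRem} buffers up to ?PKT_SIZE bytes before flushing upstream,
-% {trailers, MReq, Buf, BufRem} gathers trailer lines until the blank
-% line, and eof tells ibrowse the body is complete.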
-stream_chunked_body({init, MReq, 0}) ->
-    % First chunk, do expect-continue dance.
-    init_body_stream(MReq),
-    stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
-stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
-    % Finished a chunk; get the next length. If the next length
-    % is 0, it's time to try and read trailers.
-    {CRem, Data} = read_chunk_length(MReq),
-    case CRem of
-        0 ->
-            BodyData = lists:reverse(Buf, Data),
-            {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}}; 
-        _ ->
-            stream_chunked_body(
-                {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
-            )
-    end;
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
-    % Time to empty our buffers to the upstream socket.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
-    % Buffer some more data from the client.
-    Length = lists:min([CRem, BRem]),
-    Socket = MReq:get(socket),
-    NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
-        {ok, Data} when size(Data) == CRem ->
-            case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
-                {ok, <<"\r\n">>} ->
-                    {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
-                _ ->
-                    exit(normal)
-            end;
-        {ok, Data} ->
-            {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
-        _ ->
-            exit(normal)
-    end,
-    stream_chunked_body(NewState);
-stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
-    % Empty our buffers and send data upstream.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-stream_chunked_body({trailers, MReq, Buf, BRem}) ->
-    % Read another trailer into the buffer or stop on an
-    % empty line.
-    Socket = MReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, <<"\r\n">>} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            BodyData = lists:reverse(Buf, <<"\r\n">>),
-            {ok, BodyData, eof};
-        {ok, Footer} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
-            stream_chunked_body(NewState);
-        _ ->
-            exit(normal)
-    end;
-stream_chunked_body(eof) ->
-    % Tell ibrowse we're done sending data.
-    eof.
-
-
-stream_length_body({init, MochiReq, Length}) ->
-    % Do the expect-continue dance
-    init_body_stream(MochiReq),
-    stream_length_body({stream, MochiReq, Length});
-stream_length_body({stream, _MochiReq, 0}) ->
-    % Finished streaming.
-    eof;
-stream_length_body({stream, MochiReq, Length}) ->
-    BufLen = lists:min([Length, ?PKT_SIZE]),
-    case MochiReq:recv(BufLen) of
-        <<>> -> eof;
-        Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
-    end.
-
-
-init_body_stream(MochiReq) ->
-    Expect = case MochiReq:get_header_value("expect") of
-        undefined ->
-            undefined;
-        Value when is_list(Value) ->
-            string:to_lower(Value)
-    end,
-    case Expect of
-        "100-continue" ->
-            MochiReq:start_raw_response({100, gb_trees:empty()});
-        _Else ->
-            ok
-    end.
-
-
-read_chunk_length(MochiReq) ->
-    Socket = MochiReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, Header} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            Splitter = fun(C) ->
-                C =/= $\r andalso C =/= $\n andalso C =/= $\s
-            end,
-            {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
-            {mochihex:to_int(Hex), Header};
-        _ ->
-            exit(normal)
-    end.
-
-
-stream_response(Req, ProxyDest, ReqId) ->
-    receive
-        {ibrowse_async_headers, ReqId, "100", _} ->
-            % ibrowse doesn't handle 100 Continue responses which
-            % means we have to discard them so the proxy client
-            % doesn't get confused.
-            ibrowse:stream_next(ReqId),
-            stream_response(Req, ProxyDest, ReqId);
-        {ibrowse_async_headers, ReqId, Status, Headers} ->
-            {Source, Dest} = get_urls(Req, ProxyDest),
-            FixedHeaders = fix_headers(Source, Dest, Headers, []),
-            case body_length(FixedHeaders) of
-                chunked ->
-                    {ok, Resp} = couch_httpd:start_chunked_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_chunked_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                Length when is_integer(Length) ->
-                    {ok, Resp} = couch_httpd:start_response_length(
-                        Req, list_to_integer(Status), FixedHeaders, Length
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                _ ->
-                    {ok, Resp} = couch_httpd:start_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    % XXX: MochiWeb apparently doesn't look at the
-                    % response to see if it must force close the
-                    % connection. So we help it out here.
-                    erlang:put(mochiweb_request_force_close, true),
-                    {ok, Resp}
-            end
-    end.
-
-
-stream_chunked_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send_chunk(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_chunked_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            couch_httpd:last_chunk(Resp)
-    end.
-
-
-stream_length_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_length_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            ok
-    end.
-
-
-get_urls(Req, ProxyDest) ->
-    SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
-    Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
-    case (catch parse_url(ProxyDest)) of
-        Dest when is_record(Dest, url) ->
-            {Source, Dest};
-        _ ->
-            DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
-            {Source, parse_url(DestUrl)}
-    end.
-
-
-fix_headers(_, _, [], Acc) ->
-    lists:reverse(Acc);
-fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
-    Fixed = case string:to_lower(K) of
-        "location" -> rewrite_location(Source, Dest, V);
-        "content-location" -> rewrite_location(Source, Dest, V);
-        "uri" -> rewrite_location(Source, Dest, V);
-        "destination" -> rewrite_location(Source, Dest, V);
-        "set-cookie" -> rewrite_cookie(Source, Dest, V);
-        _ -> V
-    end,
-    fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
-
-
-rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
-    case (catch parse_url(Url)) of
-        #url{host=Host, port=Port, protocol=Proto} = Location ->
-            DestLoc = #url{
-                protocol=Source#url.protocol,
-                host=Source#url.host,
-                port=Source#url.port,
-                path=join_url_path(Source#url.path, Location#url.path)
-            },
-            url_to_url(DestLoc);
-        #url{} ->
-            Url;
-        _ ->
-            url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
-    end.
-
-
-rewrite_cookie(_Source, _Dest, Cookie) ->
-    Cookie.
-
-
-parse_url(Url) when is_binary(Url) ->
-    ibrowse_lib:parse_url(?b2l(Url));
-parse_url(Url) when is_list(Url) ->
-    ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
-
-
-join_url_path(Src, Dst) ->
-    Src2 = case lists:reverse(Src) of
-        "/" ++ RestSrc -> lists:reverse(RestSrc);
-        _ -> Src
-    end,
-    Dst2 = case Dst of
-        "/" ++ RestDst -> RestDst;
-        _ -> Dst
-    end,
-    Src2 ++ "/" ++ Dst2.
-
-
-url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
-    LPort = case {Proto, Port} of
-        {http, 80} -> "";
-        {https, 443} -> "";
-        _ -> ":" ++ integer_to_list(Port)
-    end,
-    LPath = case Path of
-        "/" ++ _RestPath -> Path;
-        _ -> "/" ++ Path
-    end,
-    HostPart = case Url#url.host_type of
-        ipv6_address ->
-            "[" ++ Host ++ "]";
-        _ ->
-            Host
-    end,
-    atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
-
-
-body_length(Headers) ->
-    case is_chunked(Headers) of
-        true -> chunked;
-        _ -> content_length(Headers)
-    end.
-
-
-is_chunked([]) ->
-    false;
-is_chunked([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "transfer-encoding" ->
-            string:to_lower(V) == "chunked";
-        _ ->
-            is_chunked(Rest)
-    end.
-
-content_length([]) ->
-    undefined;
-content_length([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            list_to_integer(V);
-        _ ->
-            content_length(Rest)
-    end.
-
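
For context, this handler was reachable by mapping a URL prefix to it in
the ini, following CouchDB's handler-configuration convention (the
_proxy prefix and target URL below are placeholders):

    [httpd_global_handlers]
    _proxy = {couch_httpd_proxy, handle_proxy_req, <<"http://127.0.0.1:5985">>}

A request to /_proxy/some/path is then forwarded to
http://127.0.0.1:5985/some/path, with Location-style response headers
rewritten on the way back by fix_headers/4.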

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_rewrite.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl
deleted file mode 100644
index abd6af5..0000000
--- a/src/couch/src/couch_httpd_rewrite.erl
+++ /dev/null
@@ -1,483 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(couch_httpd_rewrite).
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-%% @doc The HTTP rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should go in the rewrites member of the design doc.
-%% Example of a complete rule:
-%%
-%%  {
-%%      ....
-%%      "rewrites": [
-%%      {
-%%          "from": "",
-%%          "to": "index.html",
-%%          "method": "GET",
-%%          "query": {}
-%%      }
-%%      ]
-%%  }
-%%
-%%  from: the path rule used to bind the current URI to the rule. It
-%% uses pattern matching for that.
-%%
-%%  to: the rule used to rewrite the URL. It can contain variables filled
-%% from bindings discovered during pattern matching and from query args
-%% (URL args and the query member).
-%%
-%%  method: binds the request method to the rule; "*" by default.
-%%  query: query args you want to define; they can contain dynamic
-%% variables bound via the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string
-%% starting with ":" or "*", e.g.:
-%% /somepath/:var/*
-%%
-%% Such a path is converted to an Erlang list by splitting on "/". Each
-%% variable is converted to a binding term and "*" becomes the match-all
-%% term. Pattern matching is done by splitting the request URL on "/"
-%% into a list of tokens. A string pattern matches an equal token. The
-%% match-all pattern matches any number of tokens, but may only appear
-%% as the last pattern in a pathspec. If all tokens are matched and all
-%% patterns are used, the pathspec matches. It works like webmachine.
-%% Each bound token can be reused in the to rule and in the query member.
-%%
-%% Matching first binds the request method to a rule; by default all
-%% methods match a rule (method is "*" by default). Then the path is
-%% matched against each rule in turn. If no rule matches, a 404 error
-%% is returned.
-%%
-%% Once a rule is found, the request URL is rewritten using the "to" and
-%% "query" members. The bound tokens replace the variables, and a '*'
-%% in the rule carries the remaining part of the path, if any.
-%%
-%% Examples:
-%%
-%% Dispatch rule            URL             TO                  Tokens
-%%
-%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
-%% "to": "/some/"}                                              k = v
-%%
-%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*",         /a/b/c          /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo",     /a/b             /some/?k=b&foo=b    foo =:= b
-%% "to": "/some",
-%%  "query": {
-%%      "k": ":foo"
-%%  }}
-%%
-%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
-%% "to": "/some/:foo",
-%%  }}
-
-
-
-handle_rewrite_req(#httpd{
-        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
-        method=Method,
-        mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
-    % we are in a design handler
-    DesignId = <<"_design/", DesignName/binary>>,
-    Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
-    QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
-    MaxRewritesList = config:get("httpd", "rewrite_limit", "100"),
-    MaxRewrites = list_to_integer(MaxRewritesList),
-    case get(couch_rewrite_count) of
-        undefined ->
-            put(couch_rewrite_count, 1);
-        NumRewrites when NumRewrites < MaxRewrites ->
-            put(couch_rewrite_count, NumRewrites + 1);
-        _ ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>})
-    end,
-
-    #doc{body={Props}} = DDoc,
-
-    % get rules from ddoc
-    case couch_util:get_value(<<"rewrites">>, Props) of
-        undefined ->
-            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>);
-        Bin when is_binary(Bin) ->
-            couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
-                <<"Rewrite rules are a String. They must be a JSON Array.">>);
-        Rules ->
-            % create dispatch list from rules
-            DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-            Method1 = couch_util:to_binary(Method),
-
-            %% get raw path by matching url to a rule.
-            RawPath = case try_bind_path(DispatchList, Method1, 
-                    PathParts, QueryList) of
-                no_dispatch_path ->
-                    throw(not_found);
-                {NewPathParts, Bindings} ->
-                    Parts = [quote_plus(X) || X <- NewPathParts],
-
-                    % build the new path and re-encode query args,
-                    % converting them to JSON where needed
-                    Bindings1 = maybe_encode_bindings(Bindings),
-                    Path = binary_to_list(
-                        iolist_to_binary([
-                                string:join(Parts, [?SEPARATOR]),
-                                [["?", mochiweb_util:urlencode(Bindings1)] 
-                                    || Bindings1 =/= [] ]
-                            ])),
-                    
-                    % if the path is relative, detect it and rewrite it
-                    case mochiweb_util:safe_relative_path(Path) of
-                        undefined ->
-                            ?b2l(Prefix) ++ "/" ++ Path;
-                        P1 ->
-                            ?b2l(Prefix) ++ "/" ++ P1
-                    end
-
-                end,
-
-            % normalize final path (fix levels "." and "..")
-            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
-
-            % In order to do OAuth correctly, we have to save the
-            % requested path. We use default so chained rewriting
-            % won't replace the original header.
-            Headers = mochiweb_headers:default("x-couchdb-requested-path",
-                                             MochiReq:get(raw_path),
-                                             MochiReq:get(headers)),
-
-            ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
-
-            % build a new mochiweb request
-            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                             MochiReq:get(method),
-                                             RawPath1,
-                                             MochiReq:get(version),
-                                             Headers),
-
-            % cleanup: this forces mochiweb to reparse the raw URI.
-            MochiReq1:cleanup(),
-
-            #httpd{
-                db_url_handlers = DbUrlHandlers,
-                design_url_handlers = DesignUrlHandlers,
-                default_fun = DefaultFun,
-                url_handlers = UrlHandlers,
-                user_ctx = UserCtx
-            } = Req,
-            erlang:put(pre_rewrite_user_ctx, UserCtx),
-            couch_httpd:handle_request_int(MochiReq1, DefaultFun,
-                    UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
-        end.
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% no_dispatch_path is returned and the caller raises a 404 not_found error.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % we parse query args from the rule and fill
-                    % them in with binding vars where needed
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % drop bindings that are already present in
-                    % QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% Dynamically rewrite the query list given as the query member of a
-%% rewrite rule. Each value is replaced by a binding or an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-                    
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value. 
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 -> 
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method fits the rule method. If the
-%% rule method is '*', which is the default, any
-%% request method will bind. This allows rules that
-%% depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method ) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc Bind a path. Using the from rule, try to bind the variables
-%% given to the current URL by pattern matching.
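-%% For example, bind_path([<<"a">>, {bind, <<"foo">>}, ?MATCH_ALL],
-%% [<<"a">>, <<"b">>, <<"c">>], []) returns
-%% {ok, [<<"c">>], [{?MATCH_ALL, <<"c">>}, {{bind, <<"foo">>}, <<"b">>}]}.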
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
-    {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
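-%% e.g. normalize_path("/a/b/../c/./d") returns "/a/c/d".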
-normalize_path(Path)  ->
-    "/" ++ string:join(normalize_path1(string:tokens(Path,
-                "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform json rule in erlang for pattern matching
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to binding terms.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) -> 
-    lists:foldl(fun 
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-                
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.
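
Putting the rules above together, a design document's rewrites member
might look like this (a sketch assembled from the examples in the module
doc; the design doc id is a placeholder):

    {
        "_id": "_design/app",
        "rewrites": [
            {"from": "", "to": "index.html", "method": "GET", "query": {}},
            {"from": "/a/:foo/*", "to": "/some/:foo/*"}
        ]
    }

A GET on /db/_design/app/_rewrite/a/b/c is then dispatched to
/db/_design/app/some/b/c?foo=b.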

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_stats_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_stats_handlers.erl b/src/couch/src/couch_httpd_stats_handlers.erl
deleted file mode 100644
index b858830..0000000
--- a/src/couch/src/couch_httpd_stats_handlers.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_stats_handlers).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_stats_req/1]).
--import(couch_httpd, [
-    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
-    start_json_response/2, send_chunk/2, end_json_response/1,
-    start_chunked_response/3, send_error/4
-]).
-
-handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    flush(Req),
-    send_json(Req, couch_stats_aggregator:all(range(Req)));
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
-    throw({bad_request, <<"Stat names must have exactly to parts.">>});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
-    flush(Req),
-    Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
-        list_to_atom(binary_to_list(Key))}, range(Req)),
-    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
-    throw({bad_request, <<"Stat names must have exactly two parts.">>});
-
-handle_stats_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-range(Req) ->
-    case couch_util:get_value("range", couch_httpd:qs(Req)) of
-        undefined ->
-            0;
-        Value ->
-            list_to_integer(Value)
-    end.
-
-flush(Req) ->
-    case couch_util:get_value("flush", couch_httpd:qs(Req)) of
-        "true" ->
-            couch_stats_aggregator:collect_sample();
-        _Else ->
-            ok
-    end.
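
In use, the handler above answered GET /_stats for the full aggregate
and GET /_stats/Mod/Key for a single statistic, with the optional range
and flush query parameters feeding range/1 and flush/1; for example
(the stat name is illustrative):

    curl 'http://127.0.0.1:5984/_stats/httpd/requests?range=60&flush=true'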

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd_vhost.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
deleted file mode 100644
index 29fddfe..0000000
--- a/src/couch/src/couch_httpd_vhost.erl
+++ /dev/null
@@ -1,397 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_vhost).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, reload/0, get_state/0, dispatch_host/1]).
--export([urlsplit_netloc/2, redirect_to_vhost/2]).
--export([host/1, split_host_port/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, '*'}).
-
--record(vhosts_state, {
-        vhosts,
-        vhost_globals,
-        vhosts_fun}).
-
-%% @doc The vhost manager.
-%% This gen_server keeps the state of vhosts added to the ini and tries
-%% to match the Host header (or the forwarded one) against rules built
-%% from the vhost list.
-%%
-%% Declaration of vhosts takes place in the configuration file:
-%%
-%% [vhosts]
-%% example.com = /example
-%% *.example.com = /example
-%%
-%% The first line will rewrite the request to display the content of the
-%% example database. This rule works only if the Host header is
-%% 'example.com' and won't work for CNAMEs. The second rule, on the other
-%% hand, matches all CNAMEs to the example db, so www.example.com or
-%% db.example.com will work.
-%%
-%% The wildcard ('*') should always be the last in the CNAMEs:
-%%
-%%      "*.db.example.com = /"  will match all CNAMEs on top of
-%% db.example.com to the root of the machine.
-%%
-%%
-%% Rewriting Hosts to path
-%% -----------------------
-%%
-%% Like in the _rewrite handler, you can match some variables and use
-%% them to create the target path. Some examples:
-%%
-%%    [vhosts]
-%%    *.example.com = /*
-%%    :dbname.example.com = /:dbname
-%%    :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
-%%
-%% The first rule passes the wildcard as dbname, the second does the same
-%% but uses a variable name, and the third one allows you to use any app
-%% with :ddocname in any db with :dbname.
-%%
-%% You can also change the default function used to handle requests by
-%% changing the `redirect_vhost_handler` setting in the `httpd` section
-%% of the ini:
-%%
-%%    [httpd]
-%%    redirect_vhost_handler = {Module, Fun}
-%%
-%% The function takes 2 args: the mochiweb request object and the target
-%% path.
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc reload vhosts rules
-reload() ->
-    gen_server:call(?MODULE, reload).
-
-get_state() ->
-    gen_server:call(?MODULE, get_state).
-
-%% @doc Try to find a rule matching the current Host header. If a rule is
-%% found, the Mochiweb request is rewritten; otherwise the current request
-%% is returned.
-dispatch_host(MochiReq) ->
-    #vhosts_state{
-        vhost_globals = VHostGlobals,
-        vhosts = VHosts,
-        vhosts_fun=Fun} = get_state(),
-
-    {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
-    VPathParts =  string:tokens(VPath, "/"),
-
-    VHost = host(MochiReq),
-    {VHostParts, VhostPort} = split_host_port(VHost),
-    FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
-            VhostPort, VPathParts) of
-        no_vhost_matched -> MochiReq;
-        {VhostTarget, NewPath} ->
-            case vhost_global(VHostGlobals, MochiReq) of
-                true ->
-                    MochiReq;
-                _Else ->
-                    NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
-                                          Fragment}),
-                    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      NewPath1,
-                                      MochiReq:get(version),
-                                      MochiReq:get(headers)),
-                    Fun(MochiReq1, VhostTarget)
-            end
-    end,
-    FinalMochiReq.
-
-append_path("/"=_Target, "/"=_Path) ->
-    "/";
-append_path(Target, Path) ->
-    Target ++ Path.
-
-% default redirect vhost handler
-redirect_to_vhost(MochiReq, VhostTarget) ->
-    Path = MochiReq:get(raw_path),
-    Target = append_path(VhostTarget, Path),
-
-    ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]),
-
-    Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
-        MochiReq:get(headers)),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      Target,
-                                      MochiReq:get(version),
-                                      Headers),
-    % cleanup: this forces mochiweb to reparse the raw URI.
-    MochiReq1:cleanup(),
-    MochiReq1.
-
-%% Check whether the request path is one of the vhost_global_handlers;
-%% if so, it will not be rewritten, but will run as a normal couchdb
-%% request. Normally you'd use this for _uuids, _utils and a few of the
-%% others you want to keep available on vhosts. You can also use it to
-%% make databases 'global'.
-vhost_global( VhostGlobals, MochiReq) ->
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Front = case couch_httpd:partition(Path) of
-    {"", "", ""} ->
-        "/"; % Special case the root url handler
-    {FirstPart, _, _} ->
-        FirstPart
-    end,
-    [true] == [true||V <- VhostGlobals, V == Front].
-
-%% bind host
-%% first it tries to bind the port, then the hostname.
-try_bind_vhost([], _HostParts, _Port, _PathParts) ->
-    no_vhost_matched;
-try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
-    {{VHostParts, VPort, VPath}, Path} = VhostSpec,
-    case bind_port(VPort, Port) of
-        ok ->
-            case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
-                {ok, Bindings, Remainings} ->
-                    case bind_path(VPath, PathParts) of
-                        {ok, PathParts1} ->
-                            Path1 = make_target(Path, Bindings, Remainings, []),
-                            {make_path(Path1), make_path(PathParts1)};
-                        fail ->
-                            try_bind_vhost(Rest, HostParts, Port,
-                                PathParts)
-                    end;
-                fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
-            end;
-        fail ->  try_bind_vhost(Rest, HostParts, Port, PathParts)
-    end.
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-%% TODO: merge code with rewrite. But we need to make sure we are
-%% dealing with strings here.
-make_target([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined ->  "undefined";
-        P1 -> P1
-    end,
-    make_target(Rest, Bindings, Remaining, [P2|Acc]);
-make_target([P|Rest], Bindings, Remaining, Acc) ->
-    make_target(Rest, Bindings, Remaining, [P|Acc]).
-
-%% bind port
-bind_port(Port, Port) -> ok;
-bind_port('*', _) -> ok;
-bind_port(_,_) -> fail.
-
-%% bind vhost
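-%% e.g. the spec ":dbname.example.com" parses to
-%% [{bind, "dbname"}, "example", "com"]; both the spec and the Host parts
-%% are reversed before matching, so "mydb.example.com" binds
-%% {bind, "dbname"} to "mydb".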
-bind_vhost([],[], Bindings) -> {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) -> fail;
-bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
-bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) -> fail.
-
-%% bind path
-bind_path([], PathParts) ->
-    {ok, PathParts};
-bind_path(_VPathParts, []) ->
-    fail;
-bind_path([Path|VRest],[Path|Rest]) ->
-   bind_path(VRest, Rest);
-bind_path(_, _) ->
-    fail.
-
-% utilities
-
-
-%% create vhost list from ini
-
-host(MochiReq) ->
-    XHost = config:get("httpd", "x_forwarded_host",
-                             "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined -> [];
-                Value1 -> Value1
-            end;
-        Value -> Value
-    end.
-
-make_vhosts() ->
-    Vhosts = lists:foldl(fun
-                ({_, ""}, Acc) ->
-                    Acc;
-                ({Vhost, Path}, Acc) ->
-                    [{parse_vhost(Vhost), split_path(Path)}|Acc]
-            end, [], config:get("vhosts")),
-
-    lists:reverse(lists:usort(Vhosts)).
-
-
-parse_vhost(Vhost) ->
-    case urlsplit_netloc(Vhost, []) of
-        {[], Path} ->
-            {make_spec("*", []), '*', Path};
-        {HostPort, []} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, []};
-        {HostPort, Path} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, string:tokens(Path, "/")}
-    end.
-
-
-split_host_port(HostAsString) ->
-    case string:rchr(HostAsString, $:) of
-        0 ->
-            {split_host(HostAsString), '*'};
-        N ->
-            HostPart = string:substr(HostAsString, 1, N-1),
-            case (catch erlang:list_to_integer(string:substr(HostAsString,
-                            N+1, length(HostAsString)))) of
-                {'EXIT', _} ->
-                    {split_host(HostAsString), '*'};
-                Port ->
-                    {split_host(HostPart), Port}
-            end
-    end.
-
-split_host(HostAsString) ->
-    string:tokens(HostAsString, "\.").
-
-split_path(Path) ->
-    make_spec(string:tokens(Path, "/"), []).
-
-
-make_spec([], Acc) ->
-    lists:reverse(Acc);
-make_spec([""|R], Acc) ->
-    make_spec(R, Acc);
-make_spec(["*"|R], Acc) ->
-    make_spec(R, [?MATCH_ALL|Acc]);
-make_spec([P|R], Acc) ->
-    P1 = parse_var(P),
-    make_spec(R, [P1|Acc]).
-
-
-parse_var(P) ->
-    case P of
-        ":" ++ Var ->
-            {bind, Var};
-        _ -> P
-    end.
-
-
-% mochiweb doesn't export it.
-urlsplit_netloc("", Acc) ->
-    {lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
-    {lists:reverse(Acc), Rest};
-urlsplit_netloc([C | Rest], Acc) ->
-    urlsplit_netloc(Rest, [C | Acc]).
-
-make_path(Parts) ->
-     "/" ++ string:join(Parts,[?SEPARATOR]).
-
-init(_) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-
-    %% load configuration
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    State = #vhosts_state{
-        vhost_globals=VHostGlobals,
-        vhosts=VHosts,
-        vhosts_fun=Fun},
-    {ok, State}.
-
-handle_call(reload, _From, _State) ->
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    {reply, ok, #vhosts_state{
-            vhost_globals=VHostGlobals,
-            vhosts=VHosts,
-            vhosts_fun=Fun}};
-handle_call(get_state, _From, State) ->
-    {reply, State, State};
-handle_call(_Msg, _From, State) ->
-    {noreply, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("httpd", "vhost_global_handlers", _, _, _) ->
-    {ok, ?MODULE:reload()};
-handle_config_change("httpd", "redirect_vhost_handler", _, _, _) ->
-    {ok, ?MODULE:reload()};
-handle_config_change("vhosts", _, _, _, _) ->
-    {ok, ?MODULE:reload()};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-load_conf() ->
-    %% get vhost globals
-    VHostGlobals = re:split(config:get("httpd",
-            "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
-
-    %% build vhosts matching rules
-    VHosts = make_vhosts(),
-
-    %% build vhosts handler fun
-    DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
-    Fun = couch_httpd:make_arity_2_fun(config:get("httpd",
-            "redirect_vhost_handler", DefaultVHostFun)),
-
-    {VHostGlobals, VHosts, Fun}.
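
For reference, the binding rules above can be exercised in isolation. The
following standalone sketch is illustrative only: the module name is
hypothetical, the atom match_all stands in for this module's ?MATCH_ALL
macro, and a leading clause is included to accept an exactly-consumed spec
(the full module handles that case in clauses above this excerpt):

    -module(vhost_bind_demo).
    -export([demo/0]).

    bind_vhost([], [], Bindings) -> {ok, Bindings, []};
    bind_vhost([match_all], Rest, Bindings) -> {ok, Bindings, Rest};
    bind_vhost([], _HostParts, _Bindings) -> fail;
    bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
        bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
    bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
        bind_vhost(Rest, RestHost, Bindings);
    bind_vhost(_, _, _) -> fail.

    demo() ->
        %% A vhost spec of ":db.example.com" parses (via parse_vhost/1 and
        %% make_spec/2) to [{bind,"db"},"example","com"]; a request with
        %% Host: couchdb.example.com then binds "db" to "couchdb".
        Spec = [{bind, "db"}, "example", "com"],
        HostParts = ["couchdb", "example", "com"],
        {ok, [{{bind, "db"}, "couchdb"}], []} =
            bind_vhost(Spec, HostParts, []),
        ok.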

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_js_functions.hrl b/src/couch/src/couch_js_functions.hrl
deleted file mode 100644
index 2ecd851..0000000
--- a/src/couch/src/couch_js_functions.hrl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
-    function(newDoc, oldDoc, userCtx, secObj) {
-        if (newDoc._deleted === true) {
-            // allow deletes by admins and matching users
-            // without checking the other fields
-            if ((userCtx.roles.indexOf('_admin') !== -1) ||
-                (userCtx.name == oldDoc.name)) {
-                return;
-            } else {
-                throw({forbidden: 'Only admins may delete other user docs.'});
-            }
-        }
-
-        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
-            throw({forbidden : 'doc.type must be user'});
-        } // we only allow user docs for now
-
-        if (!newDoc.name) {
-            throw({forbidden: 'doc.name is required'});
-        }
-
-        if (!newDoc.roles) {
-            throw({forbidden: 'doc.roles must exist'});
-        }
-
-        if (!isArray(newDoc.roles)) {
-            throw({forbidden: 'doc.roles must be an array'});
-        }
-
-        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
-            throw({
-                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
-            });
-        }
-
-        if (oldDoc) { // validate all updates
-            if (oldDoc.name !== newDoc.name) {
-                throw({forbidden: 'Usernames can not be changed.'});
-            }
-        }
-
-        if (newDoc.password_sha && !newDoc.salt) {
-            throw({
-                forbidden: 'Users with password_sha must have a salt. ' +
-                    'See /_utils/script/couch.js for example code.'
-            });
-        }
-
-        var is_server_or_database_admin = function(userCtx, secObj) {
-            // see if the user is a server admin
-            if(userCtx.roles.indexOf('_admin') !== -1) {
-                return true; // a server admin
-            }
-
-            // see if the user is a database admin specified by name
-            if(secObj && secObj.admins && secObj.admins.names) {
-                if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
-                    return true; // database admin
-                }
-            }
-
-            // see if the user is a database admin specified by role
-            if(secObj && secObj.admins && secObj.admins.roles) {
-                var db_roles = secObj.admins.roles;
-                for(var idx = 0; idx < userCtx.roles.length; idx++) {
-                    var user_role = userCtx.roles[idx];
-                    if(db_roles.indexOf(user_role) !== -1) {
-                        return true; // role matches!
-                    }
-                }
-            }
-
-            return false; // default to no admin
-        };
-
-        if (!is_server_or_database_admin(userCtx, secObj)) {
-            if (oldDoc) { // validate non-admin updates
-                if (userCtx.name !== newDoc.name) {
-                    throw({
-                        forbidden: 'You may only update your own user document.'
-                    });
-                }
-                // validate role updates
-                var oldRoles = oldDoc.roles.sort();
-                var newRoles = newDoc.roles.sort();
-
-                if (oldRoles.length !== newRoles.length) {
-                    throw({forbidden: 'Only _admin may edit roles'});
-                }
-
-                for (var i = 0; i < oldRoles.length; i++) {
-                    if (oldRoles[i] !== newRoles[i]) {
-                        throw({forbidden: 'Only _admin may edit roles'});
-                    }
-                }
-            } else if (newDoc.roles.length > 0) {
-                throw({forbidden: 'Only _admin may set roles'});
-            }
-        }
-
-        // no system roles in users db
-        for (var i = 0; i < newDoc.roles.length; i++) {
-            if (newDoc.roles[i][0] === '_') {
-                throw({
-                    forbidden:
-                    'No system roles (starting with underscore) in users db.'
-                });
-            }
-        }
-
-        // no system names as names
-        if (newDoc.name[0] === '_') {
-            throw({forbidden: 'Username may not start with underscore.'});
-        }
-
-        var badUserNameChars = [':'];
-
-        for (var i = 0; i < badUserNameChars.length; i++) {
-            if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
-                throw({forbidden: 'Character `' + badUserNameChars[i] +
-                        '` is not allowed in usernames.'});
-            }
-        }
-    }
-">>).
-
-
--define(OAUTH_MAP_FUN, <<"
-    function(doc) {
-        if (doc.type === 'user' && doc.oauth && doc.oauth.consumer_keys) {
-            for (var consumer_key in doc.oauth.consumer_keys) {
-                for (var token in doc.oauth.tokens) {
-                    var obj = {
-                        'consumer_secret': doc.oauth.consumer_keys[consumer_key],
-                        'token_secret': doc.oauth.tokens[token],
-                        'username': doc.name
-                    };
-                    emit([consumer_key, token], obj);
-                }
-            }
-        }
-    }
-">>).


[47/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Make sure that we don't destroy the current CONFIG

It's also possible to provide a rebar.config, which rebar.config.script is
then responsible for mutating. This change makes sure we update the
provided CONFIG values in place rather than possibly dropping configuration
values we don't know about.


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/0052a9b2
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/0052a9b2
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/0052a9b2

Branch: refs/heads/1843-feature-bigcouch
Commit: 0052a9b26806d155c56ba539c118fb6cffd61b03
Parents: 52689b7
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 21:02:36 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:52 2014 -0600

----------------------------------------------------------------------
 rebar.config.script | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/0052a9b2/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index 0efa6fa..b541d02 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -37,11 +37,15 @@ MakeDep = fun({AppName, RepoName, Version}) ->
     {AppName, ".*", {git, Url, Version}}
 end,
 
-[
+AddConfig = [
     {deps_dir, "src"},
     {deps, lists:map(MakeDep, DepDescs)},
     {sub_dirs, ["rel"]},
     {lib_dirs, ["src/"]},
     {erl_opts, [debug_info]},
     {post_hooks, [{compile, "escript support/build_js.escript"}]}
-].
+],
+
+C = lists:foldl(fun({K, V}, CfgAcc) ->
+    lists:keystore(K, 1, CfgAcc, {K, V})
+end, CONFIG, AddConfig).
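
The merge semantics of that fold can be spelled out with a small example:
keys present in AddConfig replace matching entries in CONFIG, while entries
only present in CONFIG survive untouched. The plugins tuple below is a
hypothetical stand-in for such an unknown value:

    Config = [{erl_opts, []}, {plugins, [some_plugin]}],
    AddConfig = [{erl_opts, [debug_info]}],
    Merged = lists:foldl(fun({K, V}, CfgAcc) ->
        lists:keystore(K, 1, CfgAcc, {K, V})
    end, Config, AddConfig),
    %% Merged =:= [{erl_opts, [debug_info]}, {plugins, [some_plugin]}]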


[45/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Move to multiple repositories


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/52689b75
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/52689b75
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/52689b75

Branch: refs/heads/1843-feature-bigcouch
Commit: 52689b75d7a13dd1df4dee48098856cc54857e93
Parents: 26dbcc1
Author: Paul J. Davis <pa...@gmail.com>
Authored: Mon Feb 3 16:16:43 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:52 2014 -0600

----------------------------------------------------------------------
 .gitignore          |  1 +
 configure           |  2 ++
 rebar.config        | 42 ------------------------------------------
 rebar.config.script | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/52689b75/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index a43a06b..7b37cea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@ install.mk
 rel/*.config
 rel/dev*
 rel/tmpdata
+src/
 
 *.o
 *.so

http://git-wip-us.apache.org/repos/asf/couchdb/blob/52689b75/configure
----------------------------------------------------------------------
diff --git a/configure b/configure
index 4e59ca6..c95a988 100755
--- a/configure
+++ b/configure
@@ -80,3 +80,5 @@ cat > rel/dev$i.config << EOF
 {backend_port, `expr 10000 \* $i + 5986`}.
 EOF
 done
+
+rebar get-deps && rebar update-deps && cat rel/couchdb.config
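
For reference, the loop above emits one rel/dev$i.config per development
node, and the backend_port expression is plain arithmetic. Assuming three
dev nodes (the loop bounds sit outside this hunk), it expands to:

    %% backend_port = 10000 * I + 5986 for dev node I:
    [10000 * I + 5986 || I <- [1, 2, 3]].
    %% => [15986, 25986, 35986]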

http://git-wip-us.apache.org/repos/asf/couchdb/blob/52689b75/rebar.config
----------------------------------------------------------------------
diff --git a/rebar.config b/rebar.config
deleted file mode 100644
index 1372f26..0000000
--- a/rebar.config
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{require_otp_vsn, "R14B01|R14B03|R14B04|R15B02|R15B03|R16"}.
-
-{sub_dirs, [
-    "src/ibrowse",
-    "src/config",
-    "src/couch",
-    "src/couch_index",
-    "src/couch_mrview",
-    "src/mem3",
-
-    "src/chttpd",
-    "src/couch_replicator",
-    "src/ddoc_cache",
-    "src/ejson",
-    "src/ets_lru",
-    "src/fabric",
-    "src/mochiweb",
-    "src/oauth",
-    "src/rexi",
-    "src/snappy",
-    "src/twig",
-
-    "rel"
-]}.
-
-{lib_dirs, ["src/"]}.
-
-{erl_opts, [debug_info]}.
-
-{post_hooks, [{compile, "escript support/build_js.escript"}]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/52689b75/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
new file mode 100644
index 0000000..0efa6fa
--- /dev/null
+++ b/rebar.config.script
@@ -0,0 +1,47 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{require_otp_vsn, "R14B01|R14B03|R14B04|R15B02|R15B03|R16"}.
+
+DepDescs = [
+    {chttpd, "couchdb-chttpd", {branch, import}},
+    {config, "couchdb-config", {branch, import}},
+    {couch, "couchdb-couch", {branch, import}},
+    {couch_index, "couchdb-couch-index", {branch, import}},
+    {couch_mrview, "couchdb-couch-mrview", {branch, import}},
+    {couch_replicator, "couchdb-couch-replicator", {branch, import}},
+    {ddoc_cache, "couchdb-ddoc-cache", {branch, import}},
+    {ets_lru, "couchdb-ets-lru", {branch, import}},
+    {fabric, "couchdb-fabric", {branch, import}},
+    {ibrowse, "couchdb-ibrowse", {branch, import}},
+    {jiffy, "couchdb-jiffy", {branch, import}},
+    {mem3, "couchdb-mem3", {branch, import}},
+    {mochiweb, "couchdb-mochiweb", {branch, import}},
+    {oauth, "couchdb-oauth", {branch, import}},
+    {rexi, "couchdb-rexi", {branch, import}},
+    {snappy, "couchdb-snappy", {branch, import}},
+    {twig, "couchdb-twig", {branch, import}}
+],
+
+MakeDep = fun({AppName, RepoName, Version}) ->
+    Url = "https://git-wip-us.apache.org/repos/asf/" ++ RepoName ++ ".git",
+    {AppName, ".*", {git, Url, Version}}
+end,
+
+[
+    {deps_dir, "src"},
+    {deps, lists:map(MakeDep, DepDescs)},
+    {sub_dirs, ["rel"]},
+    {lib_dirs, ["src/"]},
+    {erl_opts, [debug_info]},
+    {post_hooks, [{compile, "escript support/build_js.escript"}]}
+].
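
To make the dependency mechanics concrete: each DepDescs entry is expanded
by MakeDep into a standard rebar dependency tuple. Taking the first entry
above as an example, MakeDep({chttpd, "couchdb-chttpd", {branch, import}})
evaluates to:

    {chttpd, ".*",
     {git, "https://git-wip-us.apache.org/repos/asf/couchdb-chttpd.git",
      {branch, import}}}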


[35/49] Remove src/mochiweb

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_multipart.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_multipart.erl b/src/mochiweb/src/mochiweb_multipart.erl
deleted file mode 100644
index 3069cf4..0000000
--- a/src/mochiweb/src/mochiweb_multipart.erl
+++ /dev/null
@@ -1,824 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Utilities for parsing multipart/form-data.
-
--module(mochiweb_multipart).
--author('bob@mochimedia.com').
-
--export([parse_form/1, parse_form/2]).
--export([parse_multipart_request/2]).
--export([parts_to_body/3, parts_to_multipart_body/4]).
--export([default_file_handler/2]).
-
--define(CHUNKSIZE, 4096).
-
--record(mp, {state, boundary, length, buffer, callback, req}).
-
-%% TODO: DOCUMENT THIS MODULE.
-%% @type key() = atom() | string() | binary().
-%% @type value() = atom() | iolist() | integer().
-%% @type header() = {key(), value()}.
-%% @type bodypart() = {Start::integer(), End::integer(), Body::iolist()}.
-%% @type formfile() = {Name::string(), ContentType::string(), Content::binary()}.
-%% @type request().
-%% @type file_handler() = (Filename::string(), ContentType::string()) -> file_handler_callback().
-%% @type file_handler_callback() = (binary() | eof) -> file_handler_callback() | term().
-
-%% @spec parts_to_body([bodypart()], ContentType::string(),
-%%                     Size::integer()) -> {[header()], iolist()}
-%% @doc Return {[header()], iolist()} representing the body for the given
-%%      parts, may be a single part or multipart.
-parts_to_body([{Start, End, Body}], ContentType, Size) ->
-    HeaderList = [{"Content-Type", ContentType},
-                  {"Content-Range",
-                   ["bytes ",
-                    mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
-                    "/", mochiweb_util:make_io(Size)]}],
-    {HeaderList, Body};
-parts_to_body(BodyList, ContentType, Size) when is_list(BodyList) ->
-    parts_to_multipart_body(BodyList, ContentType, Size,
-                            mochihex:to_hex(crypto:rand_bytes(8))).
-
-%% @spec parts_to_multipart_body([bodypart()], ContentType::string(),
-%%                               Size::integer(), Boundary::string()) ->
-%%           {[header()], iolist()}
-%% @doc Return {[header()], iolist()} representing the body for the given
-%%      parts, always a multipart response.
-parts_to_multipart_body(BodyList, ContentType, Size, Boundary) ->
-    HeaderList = [{"Content-Type",
-                   ["multipart/byteranges; ",
-                    "boundary=", Boundary]}],
-    MultiPartBody = multipart_body(BodyList, ContentType, Boundary, Size),
-
-    {HeaderList, MultiPartBody}.
-
-%% @spec multipart_body([bodypart()], ContentType::string(),
-%%                      Boundary::string(), Size::integer()) -> iolist()
-%% @doc Return the representation of a multipart body for the given [bodypart()].
-multipart_body([], _ContentType, Boundary, _Size) ->
-    ["--", Boundary, "--\r\n"];
-multipart_body([{Start, End, Body} | BodyList], ContentType, Boundary, Size) ->
-    ["--", Boundary, "\r\n",
-     "Content-Type: ", ContentType, "\r\n",
-     "Content-Range: ",
-         "bytes ", mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
-             "/", mochiweb_util:make_io(Size), "\r\n\r\n",
-     Body, "\r\n"
-     | multipart_body(BodyList, ContentType, Boundary, Size)].
-
-%% @spec parse_form(request()) -> [{string(), string() | formfile()}]
-%% @doc Parse a multipart form from the given request using the in-memory
-%%      default_file_handler/2.
-parse_form(Req) ->
-    parse_form(Req, fun default_file_handler/2).
-
-%% @spec parse_form(request(), F::file_handler()) -> [{string(), string() | term()}]
-%% @doc Parse a multipart form from the given request using the given file_handler().
-parse_form(Req, FileHandler) ->
-    Callback = fun (Next) -> parse_form_outer(Next, FileHandler, []) end,
-    {_, _, Res} = parse_multipart_request(Req, Callback),
-    Res.
-
-parse_form_outer(eof, _, Acc) ->
-    lists:reverse(Acc);
-parse_form_outer({headers, H}, FileHandler, State) ->
-    {"form-data", H1} = proplists:get_value("content-disposition", H),
-    Name = proplists:get_value("name", H1),
-    Filename = proplists:get_value("filename", H1),
-    case Filename of
-        undefined ->
-            fun (Next) ->
-                    parse_form_value(Next, {Name, []}, FileHandler, State)
-            end;
-        _ ->
-            ContentType = proplists:get_value("content-type", H),
-            Handler = FileHandler(Filename, ContentType),
-            fun (Next) ->
-                    parse_form_file(Next, {Name, Handler}, FileHandler, State)
-            end
-    end.
-
-parse_form_value(body_end, {Name, Acc}, FileHandler, State) ->
-    Value = binary_to_list(iolist_to_binary(lists:reverse(Acc))),
-    State1 = [{Name, Value} | State],
-    fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
-parse_form_value({body, Data}, {Name, Acc}, FileHandler, State) ->
-    Acc1 = [Data | Acc],
-    fun (Next) -> parse_form_value(Next, {Name, Acc1}, FileHandler, State) end.
-
-parse_form_file(body_end, {Name, Handler}, FileHandler, State) ->
-    Value = Handler(eof),
-    State1 = [{Name, Value} | State],
-    fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
-parse_form_file({body, Data}, {Name, Handler}, FileHandler, State) ->
-    H1 = Handler(Data),
-    fun (Next) -> parse_form_file(Next, {Name, H1}, FileHandler, State) end.
-
-default_file_handler(Filename, ContentType) ->
-    default_file_handler_1(Filename, ContentType, []).
-
-default_file_handler_1(Filename, ContentType, Acc) ->
-    fun(eof) ->
-            Value = iolist_to_binary(lists:reverse(Acc)),
-            {Filename, ContentType, Value};
-       (Next) ->
-            default_file_handler_1(Filename, ContentType, [Next | Acc])
-    end.
-
-parse_multipart_request(Req, Callback) ->
-    %% TODO: Support chunked?
-    Length = list_to_integer(Req:get_header_value("content-length")),
-    Boundary = iolist_to_binary(
-                 get_boundary(Req:get_header_value("content-type"))),
-    Prefix = <<"\r\n--", Boundary/binary>>,
-    BS = byte_size(Boundary),
-    Chunk = read_chunk(Req, Length),
-    Length1 = Length - byte_size(Chunk),
-    <<"--", Boundary:BS/binary, "\r\n", Rest/binary>> = Chunk,
-    feed_mp(headers, flash_multipart_hack(#mp{boundary=Prefix,
-                                              length=Length1,
-                                              buffer=Rest,
-                                              callback=Callback,
-                                              req=Req})).
-
-parse_headers(<<>>) ->
-    [];
-parse_headers(Binary) ->
-    parse_headers(Binary, []).
-
-parse_headers(Binary, Acc) ->
-    case find_in_binary(<<"\r\n">>, Binary) of
-        {exact, N} ->
-            <<Line:N/binary, "\r\n", Rest/binary>> = Binary,
-            parse_headers(Rest, [split_header(Line) | Acc]);
-        not_found ->
-            lists:reverse([split_header(Binary) | Acc])
-    end.
-
-split_header(Line) ->
-    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
-                                           binary_to_list(Line)),
-    {string:to_lower(string:strip(Name)),
-     mochiweb_util:parse_header(Value)}.
-
-read_chunk(Req, Length) when Length > 0 ->
-    case Length of
-        Length when Length < ?CHUNKSIZE ->
-            Req:recv(Length);
-        _ ->
-            Req:recv(?CHUNKSIZE)
-    end.
-
-read_more(State=#mp{length=Length, buffer=Buffer, req=Req}) ->
-    Data = read_chunk(Req, Length),
-    Buffer1 = <<Buffer/binary, Data/binary>>,
-    flash_multipart_hack(State#mp{length=Length - byte_size(Data),
-                                  buffer=Buffer1}).
-
-flash_multipart_hack(State=#mp{length=0, buffer=Buffer, boundary=Prefix}) ->
-    %% http://code.google.com/p/mochiweb/issues/detail?id=22
-    %% Flash doesn't terminate multipart with \r\n properly so we fix it up here
-    PrefixSize = size(Prefix),
-    case size(Buffer) - (2 + PrefixSize) of
-        Seek when Seek >= 0 ->
-            case Buffer of
-                <<_:Seek/binary, Prefix:PrefixSize/binary, "--">> ->
-                    Buffer1 = <<Buffer/binary, "\r\n">>,
-                    State#mp{buffer=Buffer1};
-                _ ->
-                    State
-            end;
-        _ ->
-            State
-    end;
-flash_multipart_hack(State) ->
-    State.
-
-feed_mp(headers, State=#mp{buffer=Buffer, callback=Callback}) ->
-    {State1, P} = case find_in_binary(<<"\r\n\r\n">>, Buffer) of
-                      {exact, N} ->
-                          {State, N};
-                      _ ->
-                          S1 = read_more(State),
-                          %% Assume headers must be less than ?CHUNKSIZE
-                          {exact, N} = find_in_binary(<<"\r\n\r\n">>,
-                                                      S1#mp.buffer),
-                          {S1, N}
-                  end,
-    <<Headers:P/binary, "\r\n\r\n", Rest/binary>> = State1#mp.buffer,
-    NextCallback = Callback({headers, parse_headers(Headers)}),
-    feed_mp(body, State1#mp{buffer=Rest,
-                            callback=NextCallback});
-feed_mp(body, State=#mp{boundary=Prefix, buffer=Buffer, callback=Callback}) ->
-    Boundary = find_boundary(Prefix, Buffer),
-    case Boundary of
-        {end_boundary, Start, Skip} ->
-            <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
-            C1 = Callback({body, Data}),
-            C2 = C1(body_end),
-            {State#mp.length, Rest, C2(eof)};
-        {next_boundary, Start, Skip} ->
-            <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
-            C1 = Callback({body, Data}),
-            feed_mp(headers, State#mp{callback=C1(body_end),
-                                      buffer=Rest});
-        {maybe, Start} ->
-            <<Data:Start/binary, Rest/binary>> = Buffer,
-            feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
-                                             buffer=Rest}));
-        not_found ->
-            {Data, Rest} = {Buffer, <<>>},
-            feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
-                                             buffer=Rest}))
-    end.
-
-get_boundary(ContentType) ->
-    {"multipart/form-data", Opts} = mochiweb_util:parse_header(ContentType),
-    case proplists:get_value("boundary", Opts) of
-        S when is_list(S) ->
-            S
-    end.
-
-find_in_binary(B, Data) when size(B) > 0 ->
-    case size(Data) - size(B) of
-        Last when Last < 0 ->
-            partial_find(B, Data, 0, size(Data));
-        Last ->
-            find_in_binary(B, size(B), Data, 0, Last)
-    end.
-
-find_in_binary(B, BS, D, N, Last) when N =< Last ->
-    case D of
-        <<_:N/binary, B:BS/binary, _/binary>> ->
-            {exact, N};
-        _ ->
-            find_in_binary(B, BS, D, 1 + N, Last)
-    end;
-find_in_binary(B, BS, D, N, Last) when N =:= 1 + Last ->
-    partial_find(B, D, N, BS - 1).
-
-partial_find(_B, _D, _N, 0) ->
-    not_found;
-partial_find(B, D, N, K) ->
-    <<B1:K/binary, _/binary>> = B,
-    case D of
-        <<_Skip:N/binary, B1:K/binary>> ->
-            {partial, N, K};
-        _ ->
-            partial_find(B, D, 1 + N, K - 1)
-    end.
-
-find_boundary(Prefix, Data) ->
-    case find_in_binary(Prefix, Data) of
-        {exact, Skip} ->
-            PrefixSkip = Skip + size(Prefix),
-            case Data of
-                <<_:PrefixSkip/binary, "\r\n", _/binary>> ->
-                    {next_boundary, Skip, size(Prefix) + 2};
-                <<_:PrefixSkip/binary, "--\r\n", _/binary>> ->
-                    {end_boundary, Skip, size(Prefix) + 4};
-                _ when size(Data) < PrefixSkip + 4 ->
-                    %% Underflow
-                    {maybe, Skip};
-                _ ->
-                    %% False positive
-                    not_found
-            end;
-        {partial, Skip, Length} when (Skip + Length) =:= size(Data) ->
-            %% Underflow
-            {maybe, Skip};
-        _ ->
-            not_found
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-ssl_cert_opts() ->
-    EbinDir = filename:dirname(code:which(?MODULE)),
-    CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
-    CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
-    KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
-    [{certfile, CertFile}, {keyfile, KeyFile}].
-
-with_socket_server(Transport, ServerFun, ClientFun) ->
-    ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
-    ServerOpts = case Transport of
-        plain ->
-            ServerOpts0;
-        ssl ->
-            ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
-    end,
-    {ok, Server} = mochiweb_socket_server:start(ServerOpts),
-    Port = mochiweb_socket_server:get(Server, port),
-    ClientOpts = [binary, {active, false}],
-    {ok, Client} = case Transport of
-        plain ->
-            gen_tcp:connect("127.0.0.1", Port, ClientOpts);
-        ssl ->
-            ClientOpts1 = [{ssl_imp, new} | ClientOpts],
-            {ok, SslSocket} = ssl:connect("127.0.0.1", Port, ClientOpts1),
-            {ok, {ssl, SslSocket}}
-    end,
-    Res = (catch ClientFun(Client)),
-    mochiweb_socket_server:stop(Server),
-    Res.
-
-fake_request(Socket, ContentType, Length) ->
-    mochiweb_request:new(Socket,
-                         'POST',
-                         "/multipart",
-                         {1,1},
-                         mochiweb_headers:make(
-                           [{"content-type", ContentType},
-                            {"content-length", Length}])).
-
-test_callback({body, <<>>}, Rest=[body_end | _]) ->
-    %% When expecting the body_end we might get an empty binary
-    fun (Next) -> test_callback(Next, Rest) end;
-test_callback({body, Got}, [{body, Expect} | Rest]) when Got =/= Expect ->
-    %% Partial response
-    GotSize = size(Got),
-    <<Got:GotSize/binary, Expect1/binary>> = Expect,
-    fun (Next) -> test_callback(Next, [{body, Expect1} | Rest]) end;
-test_callback(Got, [Expect | Rest]) ->
-    ?assertEqual(Got, Expect),
-    case Rest of
-        [] ->
-            ok;
-        _ ->
-            fun (Next) -> test_callback(Next, Rest) end
-    end.
-
-parse3_http_test() ->
-    parse3(plain).
-
-parse3_https_test() ->
-    parse3(ssl).
-
-parse3(Transport) ->
-    ContentType = "multipart/form-data; boundary=---------------------------7386909285754635891697677882",
-    BinContent = <<"-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"file\"; filename=\"test_file.txt\"\r\nContent-Type: text/plain\r\n\r\nWoo multiline text file\n\nLa la la\r\n-----------------------------7386909285754635891697677882--\r\n">>,
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "hidden"}]}}]},
-              {body, <<"multipart message">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "file"}, {"filename", "test_file.txt"}]}},
-                {"content-type", {"text/plain", []}}]},
-              {body, <<"Woo multiline text file\n\nLa la la">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse2_http_test() ->
-    parse2(plain).
-
-parse2_https_test() ->
-    parse2(ssl).
-
-parse2(Transport) ->
-    ContentType = "multipart/form-data; boundary=---------------------------6072231407570234361599764024",
-    BinContent = <<"-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"file\"; filename=\"\"\r\nContent-Type: application/octet-stream\r\n\r\n\r\n-----------------------------6072231407570234361599764024--\r\n">>,
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "hidden"}]}}]},
-              {body, <<"multipart message">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "file"}, {"filename", ""}]}},
-                {"content-type", {"application/octet-stream", []}}]},
-              {body, <<>>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse_form_http_test() ->
-    do_parse_form(plain).
-
-parse_form_https_test() ->
-    do_parse_form(ssl).
-
-do_parse_form(Transport) ->
-    ContentType = "multipart/form-data; boundary=AaB03x",
-    "AaB03x" = get_boundary(ContentType),
-    Content = mochiweb_util:join(
-                ["--AaB03x",
-                 "Content-Disposition: form-data; name=\"submit-name\"",
-                 "",
-                 "Larry",
-                 "--AaB03x",
-                 "Content-Disposition: form-data; name=\"files\";"
-                 ++ "filename=\"file1.txt\"",
-                 "Content-Type: text/plain",
-                 "",
-                 "... contents of file1.txt ...",
-                 "--AaB03x--",
-                 ""], "\r\n"),
-    BinContent = iolist_to_binary(Content),
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_form(Req),
-                        [{"submit-name", "Larry"},
-                         {"files", {"file1.txt", {"text/plain",[]},
-                                    <<"... contents of file1.txt ...">>}
-                         }] = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse_http_test() ->
-    do_parse(plain).
-
-parse_https_test() ->
-    do_parse(ssl).
-
-do_parse(Transport) ->
-    ContentType = "multipart/form-data; boundary=AaB03x",
-    "AaB03x" = get_boundary(ContentType),
-    Content = mochiweb_util:join(
-                ["--AaB03x",
-                 "Content-Disposition: form-data; name=\"submit-name\"",
-                 "",
-                 "Larry",
-                 "--AaB03x",
-                 "Content-Disposition: form-data; name=\"files\";"
-                 ++ "filename=\"file1.txt\"",
-                 "Content-Type: text/plain",
-                 "",
-                 "... contents of file1.txt ...",
-                 "--AaB03x--",
-                 ""], "\r\n"),
-    BinContent = iolist_to_binary(Content),
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "submit-name"}]}}]},
-              {body, <<"Larry">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
-                 {"content-type", {"text/plain", []}}]},
-              {body, <<"... contents of file1.txt ...">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse_partial_body_boundary_http_test() ->
-   parse_partial_body_boundary(plain).
-
-parse_partial_body_boundary_https_test() ->
-   parse_partial_body_boundary(ssl).
-
-parse_partial_body_boundary(Transport) ->
-    Boundary = string:copies("$", 2048),
-    ContentType = "multipart/form-data; boundary=" ++ Boundary,
-    ?assertEqual(Boundary, get_boundary(ContentType)),
-    Content = mochiweb_util:join(
-                ["--" ++ Boundary,
-                 "Content-Disposition: form-data; name=\"submit-name\"",
-                 "",
-                 "Larry",
-                 "--" ++ Boundary,
-                 "Content-Disposition: form-data; name=\"files\";"
-                 ++ "filename=\"file1.txt\"",
-                 "Content-Type: text/plain",
-                 "",
-                 "... contents of file1.txt ...",
-                 "--" ++ Boundary ++ "--",
-                 ""], "\r\n"),
-    BinContent = iolist_to_binary(Content),
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "submit-name"}]}}]},
-              {body, <<"Larry">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
-                {"content-type", {"text/plain", []}}
-               ]},
-              {body, <<"... contents of file1.txt ...">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse_large_header_http_test() ->
-    parse_large_header(plain).
-
-parse_large_header_https_test() ->
-    parse_large_header(ssl).
-
-parse_large_header(Transport) ->
-    ContentType = "multipart/form-data; boundary=AaB03x",
-    "AaB03x" = get_boundary(ContentType),
-    Content = mochiweb_util:join(
-                ["--AaB03x",
-                 "Content-Disposition: form-data; name=\"submit-name\"",
-                 "",
-                 "Larry",
-                 "--AaB03x",
-                 "Content-Disposition: form-data; name=\"files\";"
-                 ++ "filename=\"file1.txt\"",
-                 "Content-Type: text/plain",
-                 "x-large-header: " ++ string:copies("%", 4096),
-                 "",
-                 "... contents of file1.txt ...",
-                 "--AaB03x--",
-                 ""], "\r\n"),
-    BinContent = iolist_to_binary(Content),
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "submit-name"}]}}]},
-              {body, <<"Larry">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
-                {"content-type", {"text/plain", []}},
-                {"x-large-header", {string:copies("%", 4096), []}}
-               ]},
-              {body, <<"... contents of file1.txt ...">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-find_boundary_test() ->
-    B = <<"\r\n--X">>,
-    {next_boundary, 0, 7} = find_boundary(B, <<"\r\n--X\r\nRest">>),
-    {next_boundary, 1, 7} = find_boundary(B, <<"!\r\n--X\r\nRest">>),
-    {end_boundary, 0, 9} = find_boundary(B, <<"\r\n--X--\r\nRest">>),
-    {end_boundary, 1, 9} = find_boundary(B, <<"!\r\n--X--\r\nRest">>),
-    not_found = find_boundary(B, <<"--X\r\nRest">>),
-    {maybe, 0} = find_boundary(B, <<"\r\n--X\r">>),
-    {maybe, 1} = find_boundary(B, <<"!\r\n--X\r">>),
-    P = <<"\r\n-----------------------------16037454351082272548568224146">>,
-    B0 = <<55,212,131,77,206,23,216,198,35,87,252,118,252,8,25,211,132,229,
-          182,42,29,188,62,175,247,243,4,4,0,59, 13,10,45,45,45,45,45,45,45,
-          45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,
-          49,54,48,51,55,52,53,52,51,53,49>>,
-    {maybe, 30} = find_boundary(P, B0),
-    not_found = find_boundary(B, <<"\r\n--XJOPKE">>),
-    ok.
-
-find_in_binary_test() ->
-    {exact, 0} = find_in_binary(<<"foo">>, <<"foobarbaz">>),
-    {exact, 1} = find_in_binary(<<"oo">>, <<"foobarbaz">>),
-    {exact, 8} = find_in_binary(<<"z">>, <<"foobarbaz">>),
-    not_found = find_in_binary(<<"q">>, <<"foobarbaz">>),
-    {partial, 7, 2} = find_in_binary(<<"azul">>, <<"foobarbaz">>),
-    {exact, 0} = find_in_binary(<<"foobarbaz">>, <<"foobarbaz">>),
-    {partial, 0, 3} = find_in_binary(<<"foobar">>, <<"foo">>),
-    {partial, 1, 3} = find_in_binary(<<"foobar">>, <<"afoo">>),
-    ok.
-
-flash_parse_http_test() ->
-    flash_parse(plain).
-
-flash_parse_https_test() ->
-    flash_parse(ssl).
-
-flash_parse(Transport) ->
-    ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
-    "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
-    BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\nhello\n\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "Filename"}]}}]},
-              {body, <<"hello.txt">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "success_action_status"}]}}]},
-              {body, <<"201">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
-                {"content-type", {"application/octet-stream", []}}]},
-              {body, <<"hello\n">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "Upload"}]}}]},
-              {body, <<"Submit Query">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-flash_parse2_http_test() ->
-    flash_parse2(plain).
-
-flash_parse2_https_test() ->
-    flash_parse2(ssl).
-
-flash_parse2(Transport) ->
-    ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
-    "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
-    Chunk = iolist_to_binary(string:copies("%", 4096)),
-    BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n", Chunk/binary, "\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
-    Expect = [{headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "Filename"}]}}]},
-              {body, <<"hello.txt">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "success_action_status"}]}}]},
-              {body, <<"201">>},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
-                {"content-type", {"application/octet-stream", []}}]},
-              {body, Chunk},
-              body_end,
-              {headers,
-               [{"content-disposition",
-                 {"form-data", [{"name", "Upload"}]}}]},
-              {body, <<"Submit Query">>},
-              body_end,
-              eof],
-    TestCallback = fun (Next) -> test_callback(Next, Expect) end,
-    ServerFun = fun (Socket) ->
-                        ok = mochiweb_socket:send(Socket, BinContent),
-                        exit(normal)
-                end,
-    ClientFun = fun (Socket) ->
-                        Req = fake_request(Socket, ContentType,
-                                           byte_size(BinContent)),
-                        Res = parse_multipart_request(Req, TestCallback),
-                        {0, <<>>, ok} = Res,
-                        ok
-                end,
-    ok = with_socket_server(Transport, ServerFun, ClientFun),
-    ok.
-
-parse_headers_test() ->
-    ?assertEqual([], parse_headers(<<>>)).
-
-flash_multipart_hack_test() ->
-    Buffer = <<"prefix-">>,
-    Prefix = <<"prefix">>,
-    State = #mp{length=0, buffer=Buffer, boundary=Prefix},
-    ?assertEqual(State,
-                 flash_multipart_hack(State)).
-
-parts_to_body_single_test() ->
-    {HL, B} = parts_to_body([{0, 5, <<"01234">>}],
-                            "text/plain",
-                            10),
-    [{"Content-Range", Range},
-     {"Content-Type", Type}] = lists:sort(HL),
-    ?assertEqual(
-       <<"bytes 0-5/10">>,
-       iolist_to_binary(Range)),
-    ?assertEqual(
-       <<"text/plain">>,
-       iolist_to_binary(Type)),
-    ?assertEqual(
-       <<"01234">>,
-       iolist_to_binary(B)),
-    ok.
-
-parts_to_body_multi_test() ->
-    {[{"Content-Type", Type}],
-     _B} = parts_to_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
-                        "text/plain",
-                        10),
-    ?assertMatch(
-       <<"multipart/byteranges; boundary=", _/binary>>,
-       iolist_to_binary(Type)),
-    ok.
-
-parts_to_multipart_body_test() ->
-    {[{"Content-Type", V}], B} = parts_to_multipart_body(
-                                   [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
-                                   "text/plain",
-                                   10,
-                                   "BOUNDARY"),
-    MB = multipart_body(
-           [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
-           "text/plain",
-           "BOUNDARY",
-           10),
-    ?assertEqual(
-       <<"multipart/byteranges; boundary=BOUNDARY">>,
-       iolist_to_binary(V)),
-    ?assertEqual(
-       iolist_to_binary(MB),
-       iolist_to_binary(B)),
-    ok.
-
-multipart_body_test() ->
-    ?assertEqual(
-       <<"--BOUNDARY--\r\n">>,
-       iolist_to_binary(multipart_body([], "text/plain", "BOUNDARY", 0))),
-    ?assertEqual(
-       <<"--BOUNDARY\r\n"
-         "Content-Type: text/plain\r\n"
-         "Content-Range: bytes 0-5/10\r\n\r\n"
-         "01234\r\n"
-         "--BOUNDARY\r\n"
-         "Content-Type: text/plain\r\n"
-         "Content-Range: bytes 5-10/10\r\n\r\n"
-         "56789\r\n"
-         "--BOUNDARY--\r\n">>,
-       iolist_to_binary(multipart_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
-                                       "text/plain",
-                                       "BOUNDARY",
-                                       10))),
-    ok.
-
--endif.
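
As a usage sketch for the module removed above (hypothetical handler; Req
is a live mochiweb request whose body is multipart/form-data), parse_form/1
returns plain fields as name/value string pairs and, with the default
in-memory file handler, uploads as {Filename, ContentType, Content} tuples:

    handle_upload(Req) ->
        Pairs = mochiweb_multipart:parse_form(Req),
        %% Plain fields arrive as {"name", "value"} string pairs ...
        Name = proplists:get_value("submit-name", Pairs),
        %% ... and file fields as {Filename, ContentType, Content}, where
        %% ContentType is a parsed {Type, Params} tuple.
        {Filename, _ContentType, Content} =
            proplists:get_value("files", Pairs),
        ok = file:write_file(filename:join("/tmp", Filename), Content),
        Req:ok({"text/plain", io_lib:format("thanks, ~s~n", [Name])}).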

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_request.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_request.erl b/src/mochiweb/src/mochiweb_request.erl
deleted file mode 100644
index 980f5ad..0000000
--- a/src/mochiweb/src/mochiweb_request.erl
+++ /dev/null
@@ -1,788 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc MochiWeb HTTP Request abstraction.
-
--module(mochiweb_request, [Socket, Method, RawPath, Version, Headers]).
--author('bob@mochimedia.com').
-
--include_lib("kernel/include/file.hrl").
--include("internal.hrl").
-
--define(QUIP, "Any of you quaids got a smint?").
-
--export([get_header_value/1, get_primary_header_value/1, get/1, dump/0]).
--export([send/1, recv/1, recv/2, recv_body/0, recv_body/1, stream_body/3]).
--export([start_response/1, start_response_length/1, start_raw_response/1]).
--export([respond/1, ok/1]).
--export([not_found/0, not_found/1]).
--export([parse_post/0, parse_qs/0]).
--export([should_close/0, cleanup/0]).
--export([parse_cookie/0, get_cookie_value/1]).
--export([serve_file/2, serve_file/3]).
--export([accepted_encodings/1]).
--export([accepts_content_type/1]).
-
--define(SAVE_QS, mochiweb_request_qs).
--define(SAVE_PATH, mochiweb_request_path).
--define(SAVE_RECV, mochiweb_request_recv).
--define(SAVE_BODY, mochiweb_request_body).
--define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
--define(SAVE_POST, mochiweb_request_post).
--define(SAVE_COOKIE, mochiweb_request_cookie).
--define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
-
-%% @type iolist() = [iolist() | binary() | char()].
-%% @type iodata() = binary() | iolist().
-%% @type key() = atom() | string() | binary().
-%% @type value() = atom() | string() | binary() | integer().
-%% @type headers(). A mochiweb_headers structure.
-%% @type response(). A mochiweb_response parameterized module instance.
-%% @type ioheaders() = headers() | [{key(), value()}].
-
-% 5 minute default idle timeout
--define(IDLE_TIMEOUT, 300000).
-
-% Maximum recv_body() length of 1MB
--define(MAX_RECV_BODY, (1024*1024)).
-
-%% @spec get_header_value(K) -> undefined | Value
-%% @doc Get the value of a given request header.
-get_header_value(K) ->
-    mochiweb_headers:get_value(K, Headers).
-
-get_primary_header_value(K) ->
-    mochiweb_headers:get_primary_value(K, Headers).
-
-%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range
-
-%% @spec get(field()) -> term()
-%% @doc Return the internal representation of the given field. If
-%%      <code>socket</code> is requested on a HTTPS connection, then
-%%      an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
-%%      You can use <code>SslSocket</code> with the <code>ssl</code>
-%%      application, eg: <code>ssl:peercert(SslSocket)</code>.
-get(socket) ->
-    Socket;
-get(scheme) ->
-    case mochiweb_socket:type(Socket) of
-        plain ->
-            http;
-        ssl ->
-            https
-    end;
-get(method) ->
-    Method;
-get(raw_path) ->
-    RawPath;
-get(version) ->
-    Version;
-get(headers) ->
-    Headers;
-get(peer) ->
-    case mochiweb_socket:peername(Socket) of
-        {ok, {Addr={10, _, _, _}, _Port}} ->
-            case get_header_value("x-forwarded-for") of
-                undefined ->
-                    inet_parse:ntoa(Addr);
-                Hosts ->
-                    string:strip(lists:last(string:tokens(Hosts, ",")))
-            end;
-        {ok, {{127, 0, 0, 1}, _Port}} ->
-            case get_header_value("x-forwarded-for") of
-                undefined ->
-                    "127.0.0.1";
-                Hosts ->
-                    string:strip(lists:last(string:tokens(Hosts, ",")))
-            end;
-        {ok, {Addr, _Port}} ->
-            inet_parse:ntoa(Addr);
-        {error, enotconn} ->
-            exit(normal)
-    end;
-get(path) ->
-    case erlang:get(?SAVE_PATH) of
-        undefined ->
-            {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
-            Path = mochiweb_util:unquote(Path0),
-            put(?SAVE_PATH, Path),
-            Path;
-        Cached ->
-            Cached
-    end;
-get(body_length) ->
-    case erlang:get(?SAVE_BODY_LENGTH) of
-        undefined ->
-            BodyLength = body_length(),
-            put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
-            BodyLength;
-        {cached, Cached} ->
-            Cached
-    end;
-get(range) ->
-    case get_header_value(range) of
-        undefined ->
-            undefined;
-        RawRange ->
-            mochiweb_http:parse_range_request(RawRange)
-    end.
-
-%% @spec dump() -> {mochiweb_request, [{atom(), term()}]}
-%% @doc Dump the internal representation to a "human readable" set of terms
-%%      for debugging/inspection purposes.
-dump() ->
-    {?MODULE, [{method, Method},
-               {version, Version},
-               {raw_path, RawPath},
-               {headers, mochiweb_headers:to_list(Headers)}]}.
-
-%% @spec send(iodata()) -> ok
-%% @doc Send data over the socket.
-send(Data) ->
-    case mochiweb_socket:send(Socket, Data) of
-        ok ->
-            ok;
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec recv(integer()) -> binary()
-%% @doc Receive Length bytes from the client as a binary, with the default
-%%      idle timeout.
-recv(Length) ->
-    recv(Length, ?IDLE_TIMEOUT).
-
-%% @spec recv(integer(), integer()) -> binary()
-%% @doc Receive Length bytes from the client as a binary, with the given
-%%      Timeout in msec.
-recv(Length, Timeout) ->
-    case mochiweb_socket:recv(Socket, Length, Timeout) of
-        {ok, Data} ->
-            put(?SAVE_RECV, true),
-            Data;
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec body_length() -> undefined | chunked | unknown_transfer_encoding | integer()
-%% @doc  Infer body length from transfer-encoding and content-length headers.
-body_length() ->
-    case get_header_value("transfer-encoding") of
-        undefined ->
-            case get_header_value("content-length") of
-                undefined ->
-                    undefined;
-                Length ->
-                    list_to_integer(Length)
-            end;
-        "chunked" ->
-            chunked;
-        Unknown ->
-            {unknown_transfer_encoding, Unknown}
-    end.
-
-
-%% @spec recv_body() -> binary()
-%% @doc Receive the body of the HTTP request (defined by Content-Length).
-%%      Will only receive up to the default max-body length of 1MB.
-recv_body() ->
-    recv_body(?MAX_RECV_BODY).
-
-%% @spec recv_body(integer()) -> binary()
-%% @doc Receive the body of the HTTP request (defined by Content-Length).
-%%      Will receive up to MaxBody bytes.
-recv_body(MaxBody) ->
-    case erlang:get(?SAVE_BODY) of
-        undefined ->
-            % we could use a sane constant for max chunk size
-            Body = stream_body(?MAX_RECV_BODY, fun
-                ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
-                    iolist_to_binary(lists:reverse(BinAcc));
-                ({Length, Bin}, {LengthAcc, BinAcc}) ->
-                    NewLength = Length + LengthAcc,
-                    if NewLength > MaxBody ->
-                        exit({body_too_large, chunked});
-                    true ->
-                        {NewLength, [Bin | BinAcc]}
-                    end
-                end, {0, []}, MaxBody),
-            put(?SAVE_BODY, Body),
-            Body;
-        Cached -> Cached
-    end.
-
-stream_body(MaxChunkSize, ChunkFun, FunState) ->
-    stream_body(MaxChunkSize, ChunkFun, FunState, undefined).
-
-stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength) ->
-    Expect = case get_header_value("expect") of
-                 undefined ->
-                     undefined;
-                 Value when is_list(Value) ->
-                     string:to_lower(Value)
-             end,
-    case Expect of
-        "100-continue" ->
-            start_raw_response({100, gb_trees:empty()});
-        _Else ->
-            ok
-    end,
-    case body_length() of
-        undefined ->
-            undefined;
-        {unknown_transfer_encoding, Unknown} ->
-            exit({unknown_transfer_encoding, Unknown});
-        chunked ->
-            % For chunked bodies MaxBodyLength is not enforced here;
-            % MaxChunkSize bounds the size of a single chunk, and any
-            % overall limit is left to ChunkFun (see recv_body/1).
-            stream_chunked_body(MaxChunkSize, ChunkFun, FunState);
-        0 ->
-            <<>>;
-        Length when is_integer(Length) ->
-            case MaxBodyLength of
-            MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
-                exit({body_too_large, content_length});
-            _ ->
-                stream_unchunked_body(Length, ChunkFun, FunState)
-            end;
-        Length ->
-            exit({length_not_integer, Length})
-    end.
-
-
-%% @spec start_response({integer(), ioheaders()}) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders. The server will set header defaults such as Server
-%%      and Date if not present in ResponseHeaders.
-start_response({Code, ResponseHeaders}) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = mochiweb_headers:default_from_list(server_headers(),
-                                                    HResponse),
-    start_raw_response({Code, HResponse1}).
-
-%% @spec start_raw_response({integer(), headers()}) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders.
-start_raw_response({Code, ResponseHeaders}) ->
-    F = fun ({K, V}, Acc) ->
-                [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
-        end,
-    End = lists:foldl(F, [<<"\r\n">>],
-                      mochiweb_headers:to_list(ResponseHeaders)),
-    send([make_version(Version), make_code(Code), <<"\r\n">> | End]),
-    mochiweb:new_response({THIS, Code, ResponseHeaders}).
-
-
-%% @spec start_response_length({integer(), ioheaders(), integer()}) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders including a Content-Length of Length. The server
-%%      will set header defaults such as Server
-%%      and Date if not present in ResponseHeaders.
-start_response_length({Code, ResponseHeaders, Length}) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
-    start_response({Code, HResponse1}).
-
-%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}) -> response()
-%% @doc Start the HTTP response with start_response, and send Body to the
-%%      client (unless the request method is 'HEAD'). The Content-Length
-%%      header is set from the Body length, and the server inserts header
-%%      defaults.
-respond({Code, ResponseHeaders, {file, IoDevice}}) ->
-    Length = mochiweb_io:iodevice_size(IoDevice),
-    Response = start_response_length({Code, ResponseHeaders, Length}),
-    case Method of
-        'HEAD' ->
-            ok;
-        _ ->
-            mochiweb_io:iodevice_stream(fun send/1, IoDevice)
-    end,
-    Response;
-respond({Code, ResponseHeaders, chunked}) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = case Method of
-                     'HEAD' ->
-                         %% This is what Google does, http://www.google.com/
-                         %% is chunked but HEAD gets Content-Length: 0.
-                         %% The RFC is ambiguous so emulating Google is smart.
-                         mochiweb_headers:enter("Content-Length", "0",
-                                                HResponse);
-                     _ when Version >= {1, 1} ->
-                         %% Only use chunked encoding for HTTP/1.1
-                         mochiweb_headers:enter("Transfer-Encoding", "chunked",
-                                                HResponse);
-                     _ ->
-                         %% For pre-1.1 clients we send the data as-is
-                         %% without a Content-Length header and without
-                         %% chunk delimiters. Since the end of the document
-                         %% is now ambiguous we must force a close.
-                         put(?SAVE_FORCE_CLOSE, true),
-                         HResponse
-                 end,
-    start_response({Code, HResponse1});
-respond({Code, ResponseHeaders, Body}) ->
-    Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}),
-    case Method of
-        'HEAD' ->
-            ok;
-        _ ->
-            send(Body)
-    end,
-    Response.
-
-%% @spec not_found() -> response()
-%% @doc Alias for <code>not_found([])</code>.
-not_found() ->
-    not_found([]).
-
-%% @spec not_found(ExtraHeaders) -> response()
-%% @doc Alias for <code>respond({404, [{"Content-Type", "text/plain"}
-%% | ExtraHeaders], &lt;&lt;"Not found."&gt;&gt;})</code>.
-not_found(ExtraHeaders) ->
-    respond({404, [{"Content-Type", "text/plain"} | ExtraHeaders],
-             <<"Not found.">>}).
-
-%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}) ->
-%%           response()
-%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}).
-ok({ContentType, Body}) ->
-    ok({ContentType, [], Body});
-ok({ContentType, ResponseHeaders, Body}) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    case THIS:get(range) of
-        X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
-            %% http://code.google.com/p/mochiweb/issues/detail?id=54
-            %% Range header not supported when chunked, return 200 and provide
-            %% full response.
-            HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
-                                                HResponse),
-            respond({200, HResponse1, Body});
-        Ranges ->
-            {PartList, Size} = range_parts(Body, Ranges),
-            case PartList of
-                [] -> %% no valid ranges
-                    HResponse1 = mochiweb_headers:enter("Content-Type",
-                                                        ContentType,
-                                                        HResponse),
-                    %% could be 416, for now we'll just return 200
-                    respond({200, HResponse1, Body});
-                PartList ->
-                    {RangeHeaders, RangeBody} =
-                        mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
-                    HResponse1 = mochiweb_headers:enter_from_list(
-                                   [{"Accept-Ranges", "bytes"} |
-                                    RangeHeaders],
-                                   HResponse),
-                    respond({206, HResponse1, RangeBody})
-            end
-    end.
-
-%% @spec should_close() -> bool()
-%% @doc Return true if the connection must be closed. If false, using
-%%      Keep-Alive should be safe.
-should_close() ->
-    ForceClose = erlang:get(?SAVE_FORCE_CLOSE) =/= undefined,
-    DidNotRecv = erlang:get(?SAVE_RECV) =:= undefined,
-    ForceClose orelse Version < {1, 0}
-        %% Connection: close
-        orelse get_header_value("connection") =:= "close"
-        %% HTTP/1.0 keep-alive requires an explicit Connection: Keep-Alive
-        orelse (Version =:= {1, 0}
-                andalso get_header_value("connection") =/= "Keep-Alive")
-        %% unread data left on the socket, can't safely continue
-        orelse (DidNotRecv
-                andalso get_header_value("content-length") =/= undefined
-                andalso list_to_integer(get_header_value("content-length")) > 0)
-        orelse (DidNotRecv
-                andalso get_header_value("transfer-encoding") =:= "chunked").
-
-%% @spec cleanup() -> ok
-%% @doc Clean up any junk in the process dictionary, required before continuing
-%%      a Keep-Alive request.
-cleanup() ->
-    [erase(K) || K <- [?SAVE_QS,
-                       ?SAVE_PATH,
-                       ?SAVE_RECV,
-                       ?SAVE_BODY,
-                       ?SAVE_BODY_LENGTH,
-                       ?SAVE_POST,
-                       ?SAVE_COOKIE,
-                       ?SAVE_FORCE_CLOSE]],
-    ok.
-
-%% @spec parse_qs() -> [{Key::string(), Value::string()}]
-%% @doc Parse the query string of the URL.
-parse_qs() ->
-    case erlang:get(?SAVE_QS) of
-        undefined ->
-            {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
-            Parsed = mochiweb_util:parse_qs(QueryString),
-            put(?SAVE_QS, Parsed),
-            Parsed;
-        Cached ->
-            Cached
-    end.
-
-%% @spec get_cookie_value(Key::string) -> string() | undefined
-%% @doc Get the value of the given cookie.
-get_cookie_value(Key) ->
-    proplists:get_value(Key, parse_cookie()).
-
-%% @spec parse_cookie() -> [{Key::string(), Value::string()}]
-%% @doc Parse the cookie header.
-parse_cookie() ->
-    case erlang:get(?SAVE_COOKIE) of
-        undefined ->
-            Cookies = case get_header_value("cookie") of
-                          undefined ->
-                              [];
-                          Value ->
-                              mochiweb_cookies:parse_cookie(Value)
-                      end,
-            put(?SAVE_COOKIE, Cookies),
-            Cookies;
-        Cached ->
-            Cached
-    end.
-
-%% @spec parse_post() -> [{Key::string(), Value::string()}]
-%% @doc Parse an application/x-www-form-urlencoded form POST. This
-%%      has the side-effect of calling recv_body().
-parse_post() ->
-    case erlang:get(?SAVE_POST) of
-        undefined ->
-            Parsed = case recv_body() of
-                         undefined ->
-                             [];
-                         Binary ->
-                             case get_primary_header_value("content-type") of
-                                 "application/x-www-form-urlencoded" ++ _ ->
-                                     mochiweb_util:parse_qs(Binary);
-                                 _ ->
-                                     []
-                             end
-                     end,
-            put(?SAVE_POST, Parsed),
-            Parsed;
-        Cached ->
-            Cached
-    end.
-
-%% @spec stream_chunked_body(integer(), fun(), term()) -> term()
-%% @doc Fun is called for each chunk.
-%%      Used internally by stream_body.
-stream_chunked_body(MaxChunkSize, Fun, FunState) ->
-    case read_chunk_length() of
-        0 ->
-            Fun({0, read_chunk(0)}, FunState);
-        Length when Length > MaxChunkSize ->
-            NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState),
-            stream_chunked_body(MaxChunkSize, Fun, NewState);
-        Length ->
-            NewState = Fun({Length, read_chunk(Length)}, FunState),
-            stream_chunked_body(MaxChunkSize, Fun, NewState)
-    end.
-
-stream_unchunked_body(0, Fun, FunState) ->
-    Fun({0, <<>>}, FunState);
-stream_unchunked_body(Length, Fun, FunState) when Length > 0 ->
-    PktSize = case Length > ?RECBUF_SIZE of
-        true ->
-            ?RECBUF_SIZE;
-        false ->
-            Length
-    end,
-    Bin = recv(PktSize),
-    NewState = Fun({PktSize, Bin}, FunState),
-    stream_unchunked_body(Length - PktSize, Fun, NewState).
-
-%% @spec read_chunk_length() -> integer()
-%% @doc Read the length of the next HTTP chunk.
-read_chunk_length() ->
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
-        {ok, Header} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            Splitter = fun (C) ->
-                               C =/= $\r andalso C =/= $\n andalso C =/= $\s
-                       end,
-            {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
-            mochihex:to_int(Hex);
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec read_chunk(integer()) -> Chunk::binary() | [Footer::binary()]
-%% @doc Read in an HTTP chunk of the given length. If Length is 0, then read
-%%      the HTTP footers (as a list of binaries, since they're nominal).
-read_chunk(0) ->
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    F = fun (F1, Acc) ->
-                case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
-                    {ok, <<"\r\n">>} ->
-                        Acc;
-                    {ok, Footer} ->
-                        F1(F1, [Footer | Acc]);
-                    _ ->
-                        exit(normal)
-                end
-        end,
-    Footers = F(F, []),
-    mochiweb_socket:setopts(Socket, [{packet, raw}]),
-    put(?SAVE_RECV, true),
-    Footers;
-read_chunk(Length) ->
-    case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
-        {ok, <<Chunk:Length/binary, "\r\n">>} ->
-            Chunk;
-        _ ->
-            exit(normal)
-    end.
-
-read_sub_chunks(Length, MaxChunkSize, Fun, FunState) when Length > MaxChunkSize ->
-    Bin = recv(MaxChunkSize),
-    NewState = Fun({size(Bin), Bin}, FunState),
-    read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState);
-
-read_sub_chunks(Length, _MaxChunkSize, Fun, FunState) ->
-    Fun({Length, read_chunk(Length)}, FunState).
-
-%% @spec serve_file(Path, DocRoot) -> Response
-%% @doc Serve a file relative to DocRoot.
-serve_file(Path, DocRoot) ->
-    serve_file(Path, DocRoot, []).
-
-%% @spec serve_file(Path, DocRoot, ExtraHeaders) -> Response
-%% @doc Serve a file relative to DocRoot.
-serve_file(Path, DocRoot, ExtraHeaders) ->
-    case mochiweb_util:safe_relative_path(Path) of
-        undefined ->
-            not_found(ExtraHeaders);
-        RelPath ->
-            FullPath = filename:join([DocRoot, RelPath]),
-            case filelib:is_dir(FullPath) of
-                true ->
-                    maybe_redirect(RelPath, FullPath, ExtraHeaders);
-                false ->
-                    maybe_serve_file(FullPath, ExtraHeaders)
-            end
-    end.
-
-%% Internal API
-
-%% This has the same effect as the DirectoryIndex directive in httpd
-directory_index(FullPath) ->
-    filename:join([FullPath, "index.html"]).
-
-maybe_redirect([], FullPath, ExtraHeaders) ->
-    maybe_serve_file(directory_index(FullPath), ExtraHeaders);
-
-maybe_redirect(RelPath, FullPath, ExtraHeaders) ->
-    case string:right(RelPath, 1) of
-        "/" ->
-            maybe_serve_file(directory_index(FullPath), ExtraHeaders);
-        _   ->
-            Host = mochiweb_headers:get_value("host", Headers),
-            Location = "http://" ++ Host  ++ "/" ++ RelPath ++ "/",
-            LocationBin = list_to_binary(Location),
-            MoreHeaders = [{"Location", Location},
-                           {"Content-Type", "text/html"} | ExtraHeaders],
-            Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
-            "<html><head>"
-            "<title>301 Moved Permanently</title>"
-            "</head><body>"
-            "<h1>Moved Permanently</h1>"
-            "<p>The document has moved <a href=\"">>,
-            Bottom = <<">here</a>.</p></body></html>\n">>,
-            Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
-            respond({301, MoreHeaders, Body})
-    end.
-
-maybe_serve_file(File, ExtraHeaders) ->
-    case read_file_info(File) of
-        {ok, FileInfo} ->
-            LastModified = couch_util:rfc1123_date(FileInfo#file_info.mtime),
-            case get_header_value("if-modified-since") of
-                LastModified ->
-                    respond({304, ExtraHeaders, ""});
-                _ ->
-                    case file:open(File, [raw, binary]) of
-                        {ok, IoDevice} ->
-                            ContentType = mochiweb_util:guess_mime(File),
-                            Res = ok({ContentType,
-                                      [{"last-modified", LastModified}
-                                       | ExtraHeaders],
-                                      {file, IoDevice}}),
-                            file:close(IoDevice),
-                            Res;
-                        _ ->
-                            not_found(ExtraHeaders)
-                    end
-            end;
-        {error, _} ->
-            not_found(ExtraHeaders)
-    end.
-
-read_file_info(File) ->
-    try
-        file:read_file_info(File, [{time, universal}])
-    catch error:undef ->
-        case file:read_file_info(File) of
-            {ok, FileInfo} ->
-                {ok, FileInfo#file_info{
-                       atime=to_universal(FileInfo#file_info.atime),
-                       mtime=to_universal(FileInfo#file_info.mtime),
-                       ctime=to_universal(FileInfo#file_info.ctime)
-                      }};
-            Else ->
-                Else
-        end
-    end.
-
-to_universal(LocalTime) ->
-    erlang:localtime_to_universaltime(LocalTime).
-
-server_headers() ->
-    [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
-     {"Date", couch_util:rfc1123_date()}].
-
-make_code(X) when is_integer(X) ->
-    [integer_to_list(X), [" " | httpd_util:reason_phrase(X)]];
-make_code(Io) when is_list(Io); is_binary(Io) ->
-    Io.
-
-make_version({1, 0}) ->
-    <<"HTTP/1.0 ">>;
-make_version(_) ->
-    <<"HTTP/1.1 ">>.
-
-range_parts({file, IoDevice}, Ranges) ->
-    Size = mochiweb_io:iodevice_size(IoDevice),
-    F = fun (Spec, Acc) ->
-                case mochiweb_http:range_skip_length(Spec, Size) of
-                    invalid_range ->
-                        Acc;
-                    V ->
-                        [V | Acc]
-                end
-        end,
-    LocNums = lists:foldr(F, [], Ranges),
-    {ok, Data} = file:pread(IoDevice, LocNums),
-    Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
-                                   {Skip, Skip + Length - 1, PartialBody}
-                           end,
-                           LocNums, Data),
-    {Bodies, Size};
-range_parts(Body0, Ranges) ->
-    Body = iolist_to_binary(Body0),
-    Size = size(Body),
-    F = fun(Spec, Acc) ->
-                case mochiweb_http:range_skip_length(Spec, Size) of
-                    invalid_range ->
-                        Acc;
-                    {Skip, Length} ->
-                        <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
-                        [{Skip, Skip + Length - 1, PartialBody} | Acc]
-                end
-        end,
-    {lists:foldr(F, [], Ranges), Size}.
-
-%% @spec accepted_encodings([encoding()]) -> [encoding()] | bad_accept_encoding_value
-%% @type encoding() = string().
-%%
-%% @doc Returns a list of encodings accepted by a request. Encodings that are
-%%      not supported by the server are excluded from the returned list,
-%%      which is computed from the "Accept-Encoding" header and ordered
-%%      by descending Q value.
-%%
-%%      Section 14.3 of RFC 2616 (HTTP/1.1) describes the "Accept-Encoding"
-%%      header and the process of determining which server-supported
-%%      encodings can be used to encode the response body.
-%%
-%%      Examples
-%%
-%%      1) For a missing "Accept-Encoding" header:
-%%         accepted_encodings(["gzip", "identity"]) -> ["identity"]
-%%
-%%      2) For an "Accept-Encoding" header with value "gzip, deflate":
-%%         accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
-%%
-%%      3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
-%%         accepted_encodings(["gzip", "deflate", "identity"]) ->
-%%            ["deflate", "gzip", "identity"]
-%%
-accepted_encodings(SupportedEncodings) ->
-    AcceptEncodingHeader = case get_header_value("Accept-Encoding") of
-        undefined ->
-            "";
-        Value ->
-            Value
-    end,
-    case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
-        invalid_qvalue_string ->
-            bad_accept_encoding_value;
-        QList ->
-            mochiweb_util:pick_accepted_encodings(
-                QList, SupportedEncodings, "identity"
-            )
-    end.
-
-%% @spec accepts_content_type(string() | binary()) -> boolean() | bad_accept_header
-%%
-%% @doc Determines whether a request accepts a given media type by analyzing its
-%%      "Accept" header.
-%%
-%%      Examples
-%%
-%%      1) For a missing "Accept" header:
-%%         accepts_content_type("application/json") -> true
-%%
-%%      2) For an "Accept" header with value "text/plain, application/*":
-%%         accepts_content_type("application/json") -> true
-%%
-%%      3) For an "Accept" header with value "text/plain, */*; q=0.0":
-%%         accepts_content_type("application/json") -> false
-%%
-%%      4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
-%%         accepts_content_type("application/json") -> true
-%%
-%%      5) For an "Accept" header with value "text/*; q=0.0, */*":
-%%         accepts_content_type("text/plain") -> false
-%%
-accepts_content_type(ContentType) when is_binary(ContentType) ->
-    accepts_content_type(binary_to_list(ContentType));
-accepts_content_type(ContentType1) ->
-    ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
-    AcceptHeader = case get_header_value("Accept") of
-        undefined ->
-            "*/*";
-        Value ->
-            Value
-    end,
-    case mochiweb_util:parse_qvalues(AcceptHeader) of
-        invalid_qvalue_string ->
-            bad_accept_header;
-        QList ->
-            [MainType, _SubType] = string:tokens(ContentType, "/"),
-            SuperType = MainType ++ "/*",
-            lists:any(
-                fun({"*/*", Q}) when Q > 0.0 ->
-                        true;
-                    ({Type, Q}) when Q > 0.0 ->
-                        Type =:= ContentType orelse Type =:= SuperType;
-                    (_) ->
-                        false
-                end,
-                QList
-            ) andalso
-            (not lists:member({ContentType, 0.0}, QList)) andalso
-            (not lists:member({SuperType, 0.0}, QList))
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
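
For context, a minimal sketch of a handler built on the parameterized request API deleted above; the module name, port, and docroot are illustrative, not part of this commit:

    -module(example_handler).
    -export([start/0, loop/1]).

    %% Start a mochiweb listener; Req below is an instance of the
    %% parameterized mochiweb_request module, so its functions are
    %% invoked as Req:Fun(...).
    start() ->
        mochiweb_http:start([{port, 8080}, {loop, fun ?MODULE:loop/1}]).

    loop(Req) ->
        "/" ++ Path = Req:get(path),    % cached in the process dictionary
        case Req:get(method) of
            'GET' when Path =:= "hello" ->
                QS = Req:parse_qs(),
                Name = proplists:get_value("name", QS, "world"),
                Req:ok({"text/plain", [], ["Hello, ", Name, "\n"]});
            'GET' ->
                Req:serve_file(Path, "priv/www");    % docroot is illustrative
            _ ->
                Req:respond({405, [{"Allow", "GET"}], <<"Method not allowed.\n">>})
        end.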

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_request_tests.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_request_tests.erl b/src/mochiweb/src/mochiweb_request_tests.erl
deleted file mode 100644
index b61a583..0000000
--- a/src/mochiweb/src/mochiweb_request_tests.erl
+++ /dev/null
@@ -1,63 +0,0 @@
--module(mochiweb_request_tests).
-
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-accepts_content_type_test() ->
-    Req1 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "multipart/related"}])),
-    ?assertEqual(true, Req1:accepts_content_type("multipart/related")),
-
-    Req2 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html"}])),
-    ?assertEqual(false, Req2:accepts_content_type("multipart/related")),
-
-    Req3 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html, multipart/*"}])),
-    ?assertEqual(true, Req3:accepts_content_type("multipart/related")),
-
-    Req4 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0.0"}])),
-    ?assertEqual(false, Req4:accepts_content_type("multipart/related")),
-
-    Req5 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0"}])),
-    ?assertEqual(false, Req5:accepts_content_type("multipart/related")),
-
-    Req6 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html, */*; q=0.0"}])),
-    ?assertEqual(false, Req6:accepts_content_type("multipart/related")),
-
-    Req7 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "multipart/*; q=0.0, */*"}])),
-    ?assertEqual(false, Req7:accepts_content_type("multipart/related")),
-
-    Req8 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/*"}])),
-    ?assertEqual(true, Req8:accepts_content_type("multipart/related")),
-
-    Req9 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/related"}])),
-    ?assertEqual(true, Req9:accepts_content_type("multipart/related")),
-
-    Req10 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html; level=1"}])),
-    ?assertEqual(true, Req10:accepts_content_type("text/html;level=1")),
-
-    Req11 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html; level=1, text/html"}])),
-    ?assertEqual(true, Req11:accepts_content_type("text/html")),
-
-    Req12 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
-    ?assertEqual(false, Req12:accepts_content_type("text/html;level=1")),
-
-    Req13 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
-    ?assertEqual(false, Req13:accepts_content_type("text/html; level=1")),
-
-    Req14 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
-        mochiweb_headers:make([{"Accept", "text/html;level=1;q=0.1, text/html"}])),
-    ?assertEqual(true, Req14:accepts_content_type("text/html; level=1")).
-
--endif.
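
For reference, this suite was runnable from an Erlang shell with EUnit:

    1> eunit:test(mochiweb_request_tests, [verbose]).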

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_response.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_response.erl b/src/mochiweb/src/mochiweb_response.erl
deleted file mode 100644
index ab8ee61..0000000
--- a/src/mochiweb/src/mochiweb_response.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Response abstraction.
-
--module(mochiweb_response, [Request, Code, Headers]).
--author('bob@mochimedia.com').
-
--define(QUIP, "Any of you quaids got a smint?").
-
--export([get_header_value/1, get/1, dump/0]).
--export([send/1, write_chunk/1]).
-
-%% @spec get_header_value(string() | atom() | binary()) -> string() | undefined
-%% @doc Get the value of the given response header.
-get_header_value(K) ->
-    mochiweb_headers:get_value(K, Headers).
-
-%% @spec get(request | code | headers) -> term()
-%% @doc Return the internal representation of the given field.
-get(request) ->
-    Request;
-get(code) ->
-    Code;
-get(headers) ->
-    Headers.
-
-%% @spec dump() -> [{atom(), term()}]
-%% @doc Dump the internal representation to a "human readable" set of terms
-%%      for debugging/inspection purposes.
-dump() ->
-    [{request, Request:dump()},
-     {code, Code},
-     {headers, mochiweb_headers:to_list(Headers)}].
-
-%% @spec send(iodata()) -> ok
-%% @doc Send data over the socket if the method is not HEAD.
-send(Data) ->
-    case Request:get(method) of
-        'HEAD' ->
-            ok;
-        _ ->
-            Request:send(Data)
-    end.
-
-%% @spec write_chunk(iodata()) -> ok
-%% @doc Write a chunk of an HTTP chunked response. If Data is zero length,
-%%      then the chunked response will be finished.
-write_chunk(Data) ->
-    case Request:get(version) of
-        Version when Version >= {1, 1} ->
-            Length = iolist_size(Data),
-            send([io_lib:format("~.16b\r\n", [Length]), Data, <<"\r\n">>]);
-        _ ->
-            send(Data)
-    end.
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
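
A sketch (handler context assumed) of how write_chunk/1 pairs with the request module's respond/1: passing the atom chunked as the body returns this response object for incremental writes, and a zero-length chunk ends the stream.

    stream_numbers(Req) ->
        Resp = Req:respond({200, [{"Content-Type", "text/plain"}], chunked}),
        lists:foreach(fun(N) ->
                          Resp:write_chunk(io_lib:format("~b~n", [N]))
                      end, lists:seq(1, 5)),
        Resp:write_chunk(<<>>).    % zero-length chunk finishes the response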

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_skel.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_skel.erl b/src/mochiweb/src/mochiweb_skel.erl
deleted file mode 100644
index 76eefa6..0000000
--- a/src/mochiweb/src/mochiweb_skel.erl
+++ /dev/null
@@ -1,86 +0,0 @@
--module(mochiweb_skel).
--export([skelcopy/2]).
-
--include_lib("kernel/include/file.hrl").
-
-%% External API
-
-skelcopy(DestDir, Name) ->
-    ok = ensuredir(DestDir),
-    LDst = case length(filename:dirname(DestDir)) of
-               1 -> %% handle case when dirname returns "/"
-                   0;
-               N ->
-                   N + 1
-           end,
-    skelcopy(src(), DestDir, Name, LDst),
-    DestLink = filename:join([DestDir, Name, "deps", "mochiweb-src"]),
-    ok = filelib:ensure_dir(DestLink),
-    ok = file:make_symlink(
-           filename:join(filename:dirname(code:which(?MODULE)), ".."),
-           DestLink).
-
-%% Internal API
-
-src() ->
-    Dir = filename:dirname(code:which(?MODULE)),
-    filename:join(Dir, "../priv/skel").
-
-skel() ->
-    "skel".
-
-skelcopy(Src, DestDir, Name, LDst) ->
-    Dest = re:replace(filename:basename(Src), skel(), Name,
-                      [global, {return, list}]),
-    case file:read_file_info(Src) of
-        {ok, #file_info{type=directory, mode=Mode}} ->
-            Dir = DestDir ++ "/" ++ Dest,
-            EDst = lists:nthtail(LDst, Dir),
-            ok = ensuredir(Dir),
-            ok = file:write_file_info(Dir, #file_info{mode=Mode}),
-            case filename:basename(Src) of
-                "ebin" ->
-                    ok;
-                _ ->
-                    {ok, Files} = file:list_dir(Src),
-                    io:format("~s/~n", [EDst]),
-                    lists:foreach(fun ("." ++ _) -> ok;
-                                      (F) ->
-                                          skelcopy(filename:join(Src, F),
-                                                   Dir,
-                                                   Name,
-                                                   LDst)
-                                  end,
-                                  Files),
-                        ok
-            end;
-        {ok, #file_info{type=regular, mode=Mode}} ->
-            OutFile = filename:join(DestDir, Dest),
-            {ok, B} = file:read_file(Src),
-            S = re:replace(binary_to_list(B), skel(), Name,
-                           [{return, list}, global]),
-            ok = file:write_file(OutFile, list_to_binary(S)),
-            ok = file:write_file_info(OutFile, #file_info{mode=Mode}),
-            io:format("    ~s~n", [filename:basename(Src)]),
-            ok;
-        {ok, _} ->
-            io:format("ignored source file: ~p~n", [Src]),
-            ok
-    end.
-
-ensuredir(Dir) ->
-    case file:make_dir(Dir) of
-        ok ->
-            ok;
-        {error, eexist} ->
-            ok;
-        E ->
-            E
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
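
For reference, the skeleton copier was driven from a shell like this (destination directory and app name illustrative); it copies priv/skel with "skel" rewritten to the new name and symlinks mochiweb into deps:

    1> mochiweb_skel:skelcopy("/tmp", "my_app").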

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_socket.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_socket.erl b/src/mochiweb/src/mochiweb_socket.erl
deleted file mode 100644
index ad27204..0000000
--- a/src/mochiweb/src/mochiweb_socket.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-%% @copyright 2010 Mochi Media, Inc.
-
-%% @doc MochiWeb socket - wrapper for plain and ssl sockets.
-
--module(mochiweb_socket).
-
--export([listen/4, accept/1, after_accept/1, recv/3, send/2, close/1, port/1, peername/1,
-         setopts/2, type/1]).
-
--define(ACCEPT_TIMEOUT, 2000).
--define(SSL_ACCEPT_TIMEOUT, 30000).
-
-listen(Ssl, Port, Opts, SslOpts) ->
-    case Ssl of
-        true ->
-            case ssl:listen(Port, Opts ++ SslOpts) of
-                {ok, ListenSocket} ->
-                    {ok, {ssl, ListenSocket}};
-                {error, _} = Err ->
-                    Err
-            end;
-        false ->
-            gen_tcp:listen(Port, Opts)
-    end.
-
-accept({ssl, ListenSocket}) ->
-    % There's a bug in ssl:transport_accept/2 at the moment, which is the
-    % reason for the try...catch block. Should be fixed in OTP R14.
-    try ssl:transport_accept(ListenSocket, ?ACCEPT_TIMEOUT) of
-        {ok, Socket} ->
-            {ok, {ssl, Socket}};
-        {error, _} = Err ->
-            Err
-    catch
-        error:{badmatch, {error, Reason}} ->
-            {error, Reason}
-    end;
-accept(ListenSocket) ->
-    gen_tcp:accept(ListenSocket, ?ACCEPT_TIMEOUT).
-
-after_accept({ssl, Socket}) -> ssl:ssl_accept(Socket, ?SSL_ACCEPT_TIMEOUT);
-after_accept(_Socket) -> ok.
-
-recv({ssl, Socket}, Length, Timeout) ->
-    ssl:recv(Socket, Length, Timeout);
-recv(Socket, Length, Timeout) ->
-    gen_tcp:recv(Socket, Length, Timeout).
-
-send({ssl, Socket}, Data) ->
-    ssl:send(Socket, Data);
-send(Socket, Data) ->
-    gen_tcp:send(Socket, Data).
-
-close({ssl, Socket}) ->
-    ssl:close(Socket);
-close(Socket) ->
-    gen_tcp:close(Socket).
-
-port({ssl, Socket}) ->
-    case ssl:sockname(Socket) of
-        {ok, {_, Port}} ->
-            {ok, Port};
-        {error, _} = Err ->
-            Err
-    end;
-port(Socket) ->
-    inet:port(Socket).
-
-peername({ssl, Socket}) ->
-    ssl:peername(Socket);
-peername(Socket) ->
-    inet:peername(Socket).
-
-setopts({ssl, Socket}, Opts) ->
-    ssl:setopts(Socket, Opts);
-setopts(Socket, Opts) ->
-    inet:setopts(Socket, Opts).
-
-type({ssl, _}) ->
-    ssl;
-type(_) ->
-    plain.
-
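
A sketch (port, options, and timeout assumed) of what the wrapper buys: the same calls work whether the socket is a plain gen_tcp socket or the {ssl, SslSock} tuple produced by listen/4 with Ssl = true.

    echo_once(UseSsl, Port, SslOpts) ->
        Opts = [binary, {packet, raw}, {active, false}, {reuseaddr, true}],
        {ok, Listen} = mochiweb_socket:listen(UseSsl, Port, Opts, SslOpts),
        {ok, Sock} = mochiweb_socket:accept(Listen),
        ok = mochiweb_socket:after_accept(Sock),  % SSL handshake, if any
        {ok, Data} = mochiweb_socket:recv(Sock, 0, 5000),
        ok = mochiweb_socket:send(Sock, Data),
        mochiweb_socket:close(Sock).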


[04/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/config


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/49642148
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/49642148
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/49642148

Branch: refs/heads/1843-feature-bigcouch
Commit: 49642148ae7671c93b45cad994d0a6fb91450649
Parents: e41cfa4
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:38:29 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:39:07 2014 -0600

----------------------------------------------------------------------
 src/config/src/config.app.src      |  30 ----
 src/config/src/config.erl          | 257 --------------------------------
 src/config/src/config_app.erl      |  52 -------
 src/config/src/config_listener.erl |  59 --------
 src/config/src/config_sup.erl      |  50 -------
 src/config/src/config_util.erl     |  74 ---------
 src/config/src/config_writer.erl   |  79 ----------
 7 files changed, 601 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config.app.src
----------------------------------------------------------------------
diff --git a/src/config/src/config.app.src b/src/config/src/config.app.src
deleted file mode 100644
index 6eea351..0000000
--- a/src/config/src/config.app.src
+++ /dev/null
@@ -1,30 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, config, [
-    {description, "INI file configuration system for Apache CouchDB"},
-    {vsn, git},
-    {modules, [
-        config,
-        config_app,
-        config_listener,
-        config_sup,
-        config_util,
-        config_writer
-    ]},
-    {registered, [
-        config,
-        config_event
-    ]},
-    {applications, [kernel, stdlib]},
-    {mod, {config_app, []}}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config.erl b/src/config/src/config.erl
deleted file mode 100644
index f47639a..0000000
--- a/src/config/src/config.erl
+++ /dev/null
@@ -1,257 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Reads CouchDB's ini files and gets queried for configuration parameters.
-% This module is initialized with a list of ini files from which it
-% consecutively reads Key/Value pairs and saves them in an ets table. If
-% more than one ini file is specified, the last one is used to persist
-% changes made with set/3,4 back to that ini file.
-
--module(config).
--behaviour(gen_server).
-
--export([start_link/1, stop/0, reload/0]).
--export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
--export([listen_for_changes/2]).
--export([parse_ini_file/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--record(config, {
-    notify_funs=[],
-    ini_files=undefined,
-    write_filename=undefined
-}).
-
-
-start_link(IniFiles) ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-
-reload() ->
-    gen_server:call(?MODULE, reload).
-
-all() ->
-    lists:sort(gen_server:call(?MODULE, all, infinity)).
-
-
-get(Section) when is_binary(Section) ->
-    ?MODULE:get(binary_to_list(Section));
-get(Section) ->
-    Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
-    [{Key, Value} || [Key, Value] <- Matches].
-
-get(Section, Key) ->
-    ?MODULE:get(Section, Key, undefined).
-
-get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
-    ?MODULE:get(binary_to_list(Section), binary_to_list(Key), Default);
-get(Section, Key, Default) ->
-    case ets:lookup(?MODULE, {Section, Key}) of
-        [] -> Default;
-        [{_, Match}] -> Match
-    end.
-
-set(Section, Key, Value) ->
-    ?MODULE:set(Section, Key, Value, true).
-
-set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key)  ->
-    ?MODULE:set(binary_to_list(Section), binary_to_list(Key), Value, Persist);
-set(Section, Key, Value, Persist) ->
-    gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).
-
-
-delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
-    delete(binary_to_list(Section), binary_to_list(Key));
-delete(Section, Key) ->
-    delete(Section, Key, true).
-
-delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
-    delete(binary_to_list(Section), binary_to_list(Key), Persist);
-delete(Section, Key, Persist) ->
-    gen_server:call(?MODULE, {delete, Section, Key, Persist}).
-
-listen_for_changes(CallbackModule, InitialState) ->
-    config_listener:start(CallbackModule, InitialState).
-
-init(IniFiles) ->
-    ets:new(?MODULE, [named_table, set, protected]),
-    lists:foreach(fun(IniFile) ->
-        {ok, ParsedIniValues} = parse_ini_file(IniFile),
-        ets:insert(?MODULE, ParsedIniValues)
-    end, IniFiles),
-    WriteFile = case IniFiles of
-        [_|_] -> lists:last(IniFiles);
-        _ -> undefined
-    end,
-    debug_config(),
-    {ok, #config{ini_files=IniFiles, write_filename=WriteFile}}.
-
-
-terminate(_Reason, _State) ->
-    ok.
-
-
-handle_call(all, _From, Config) ->
-    Resp = lists:sort(ets:tab2list(?MODULE)),
-    {reply, Resp, Config};
-handle_call({set, Sec, Key, Val, Persist}, _From, Config) ->
-    true = ets:insert(?MODULE, {{Sec, Key}, Val}),
-    twig:log(notice, "~p: [~s] ~s set to ~s", [?MODULE, Sec, Key, Val]),
-    case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            config_writer:save_to_file({{Sec, Key}, Val}, FileName);
-        _ ->
-            ok
-    end,
-    Event = {config_change, Sec, Key, Val, Persist},
-    gen_event:sync_notify(config_event, Event),
-    {reply, ok, Config};
-handle_call({delete, Sec, Key, Persist}, _From, Config) ->
-    true = ets:delete(?MODULE, {Sec,Key}),
-    twig:log(notice, "~p: [~s] ~s deleted", [?MODULE, Sec, Key]),
-    case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            config_writer:save_to_file({{Sec, Key}, ""}, FileName);
-        _ ->
-            ok
-    end,
-    Event = {config_change, Sec, Key, deleted, Persist},
-    gen_event:sync_notify(config_event, Event),
-    {reply, ok, Config};
-handle_call(reload, _From, Config) ->
-    DiskKVs = lists:foldl(fun(IniFile, DiskKVs0) ->
-        {ok, ParsedIniValues} = parse_ini_file(IniFile),
-        lists:foldl(fun({K, V}, DiskKVs1) ->
-            dict:store(K, V, DiskKVs1)
-        end, DiskKVs0, ParsedIniValues)
-    end, dict:new(), Config#config.ini_files),
-    % Update ets with anything we just read
-    % from disk
-    dict:fold(fun(K, V, _) ->
-        ets:insert(?MODULE, {K, V})
-    end, nil, DiskKVs),
-    % And remove anything in ets that wasn't
-    % on disk.
-    ets:foldl(fun({K, _}, _) ->
-        case dict:is_key(K, DiskKVs) of
-            true ->
-                ok;
-            false ->
-                ets:delete(?MODULE, K)
-        end
-    end, nil, ?MODULE),
-    {reply, ok, Config}.
-
-
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(Info, State) ->
-    twig:log(error, "config:handle_info Info: ~p~n", [Info]),
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-parse_ini_file(IniFile) ->
-    IniFilename = config_util:abs_pathname(IniFile),
-    IniBin =
-    case file:read_file(IniFilename) of
-        {ok, IniBin0} ->
-            IniBin0;
-        {error, enoent} ->
-            Fmt = "Couldn't find server configuration file ~s.",
-            Msg = list_to_binary(io_lib:format(Fmt, [IniFilename])),
-            twig:log(error, "~s~n", [Msg]),
-            throw({startup_error, Msg})
-    end,
-
-    Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
-    {_, ParsedIniValues} =
-    lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
-            case string:strip(Line) of
-            "[" ++ Rest ->
-                case re:split(Rest, "\\]", [{return, list}]) of
-                [NewSectionName, ""] ->
-                    {NewSectionName, AccValues};
-                _Else -> % end bracket not at end, ignore this line
-                    {AccSectionName, AccValues}
-                end;
-            ";" ++ _Comment ->
-                {AccSectionName, AccValues};
-            Line2 ->
-                case re:split(Line2, "\s?=\s?", [{return, list}]) of
-                [Value] ->
-                    MultiLineValuePart = case re:run(Line, "^ \\S", []) of
-                    {match, _} ->
-                        true;
-                    _ ->
-                        false
-                    end,
-                    case {MultiLineValuePart, AccValues} of
-                    {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
-                        % remove comment
-                        case re:split(Value, " ;|\t;", [{return, list}]) of
-                        [[]] ->
-                            % empty line
-                            {AccSectionName, AccValues};
-                        [LineValue | _Rest] ->
-                            E = {{AccSectionName, ValueName},
-                                PrevValue ++ " " ++ LineValue},
-                            {AccSectionName, [E | AccValuesRest]}
-                        end;
-                    _ ->
-                        {AccSectionName, AccValues}
-                    end;
-                [""|_LineValues] -> % line begins with "=", ignore
-                    {AccSectionName, AccValues};
-                [ValueName|LineValues] -> % yeehaw, got a line!
-                    RemainingLine = config_util:implode(LineValues, "="),
-                    % removes comments
-                    case re:split(RemainingLine, " ;|\t;", [{return, list}]) of
-                    [[]] ->
-                        % empty line means delete this key
-                        ets:delete(?MODULE, {AccSectionName, ValueName}),
-                        {AccSectionName, AccValues};
-                    [LineValue | _Rest] ->
-                        {AccSectionName,
-                            [{{AccSectionName, ValueName}, LineValue} | AccValues]}
-                    end
-                end
-            end
-        end, {"", []}, Lines),
-    {ok, ParsedIniValues}.
-
-
-debug_config() ->
-    case ?MODULE:get("log", "level") of
-        "debug" ->
-            io:format("Configuration Settings:~n", []),
-            lists:foreach(fun({{Mod, Key}, Val}) ->
-                io:format("  [~s] ~s=~p~n", [Mod, Key, Val])
-            end, lists:sort(ets:tab2list(?MODULE)));
-        _ ->
-            ok
-    end.
-
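
A minimal sketch of the string-based API removed above, assuming the config application is already running; set/3 also persists to the last ini file handed to start_link/1, when there is one.

    demo() ->
        ok = config:set("httpd", "port", "5984"),
        "5984" = config:get("httpd", "port"),
        "8080" = config:get("httpd", "bind_port", "8080"),  % default on a miss
        ok = config:delete("httpd", "port"),
        undefined = config:get("httpd", "port").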

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config_app.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config_app.erl b/src/config/src/config_app.erl
deleted file mode 100644
index 5c5515a..0000000
--- a/src/config/src/config_app.erl
+++ /dev/null
@@ -1,52 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(config_app).
-
--behaviour(application).
-
-%% Application callbacks
--export([start/2, stop/1]).
-
-%% ===================================================================
-%% Application callbacks
-%% ===================================================================
-
-start(_StartType, _StartArgs) ->
-    config_sup:start_link(get_ini_files()).
-
-stop(_State) ->
-    ok.
-
-get_ini_files() ->
-    hd([L || L <- [command_line(), env(), default()], L =/= skip]).
-
-env() ->
-    case application:get_env(config, ini_files) of
-        undefined ->
-            skip;
-        {ok, IniFiles} ->
-            IniFiles
-    end.
-
-command_line() ->
-    case init:get_argument(couch_ini) of
-        error ->
-            skip;
-        {ok, [IniFiles]} ->
-            IniFiles
-    end.
-
-default() ->
-    Etc = filename:join(code:root_dir(), "etc"),
-    Default = [filename:join(Etc,"default.ini"), filename:join(Etc,"local.ini")],
-    lists:filter(fun filelib:is_file/1, Default).
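
A sketch of supplying ini files through the application environment (paths illustrative); per get_ini_files/0 above, an "erl ... -couch_ini a.ini b.ini" command line takes precedence, and the etc/ defaults are the fallback.

    start_config() ->
        _ = application:load(config),   % may already be loaded
        ok = application:set_env(config, ini_files,
                                 ["/etc/couchdb/default.ini",
                                  "/etc/couchdb/local.ini"]),
        ok = application:start(config).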

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config_listener.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config_listener.erl b/src/config/src/config_listener.erl
deleted file mode 100644
index 3335b45..0000000
--- a/src/config/src/config_listener.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(config_listener).
-
--behaviour(gen_event).
-
-%% Public interface
--export([start/2]).
--export([start/3]).
-
--export([behaviour_info/1]).
-
-%% Required gen_event interface
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, 
-    code_change/3]).
-
-behaviour_info(callbacks) ->
-    [{handle_config_change,5}];
-behaviour_info(_) ->
-    undefined.
-
-start(Module, State) ->
-    start(Module, Module, State).
-
-start(Module, Id, State) ->
-    gen_event:add_sup_handler(config_event, {?MODULE, Id}, {Module, State}).
-
-init({Module, State}) ->
-    {ok, {Module, State}}.
-
-handle_event({config_change, Sec, Key, Value, Persist}, {Module, State}) ->
-    case Module:handle_config_change(Sec, Key, Value, Persist, State) of
-        {ok, NewState} ->
-            {ok, {Module, NewState}};
-        remove_handler ->
-            remove_handler
-    end.
-
-handle_call(_Request, St) ->
-    {ok, ignored, St}.
-
-handle_info(_Info, St) ->
-    {ok, St}.
-
-terminate(_Reason, _St) ->
-    ok.
-
-code_change(_OldVsn, St, _Extra) ->
-    {ok, St}.
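
A sketch of a callback module for this behaviour (module name hypothetical); config:listen_for_changes/2 registers it with the config_event manager shown above.

    -module(my_config_watcher).
    -behaviour(config_listener).
    -export([start/0, handle_config_change/5]).

    start() ->
        config:listen_for_changes(?MODULE, nil).

    %% Called for every config change; Value is the atom 'deleted' when a
    %% key is removed. Return {ok, NewState} to keep listening, or
    %% remove_handler to detach.
    handle_config_change("httpd", "port", Value, _Persist, State) ->
        io:format("httpd port changed to ~p~n", [Value]),
        {ok, State};
    handle_config_change(_Sec, _Key, _Value, _Persist, State) ->
        {ok, State}.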

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config_sup.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config_sup.erl b/src/config/src/config_sup.erl
deleted file mode 100644
index a595b3c..0000000
--- a/src/config/src/config_sup.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(config_sup).
--behaviour(supervisor).
-
-%% API
--export([start_link/1]).
-
-%% Supervisor callbacks
--export([init/1]).
-
-%% ===================================================================
-%% API functions
-%% ===================================================================
-
-start_link(IniFiles) ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, IniFiles).
-
-%% ===================================================================
-%% Supervisor callbacks
-%% ===================================================================
-
-init(IniFiles) ->
-    Children = [
-        {config,
-            {config, start_link, [IniFiles]},
-            permanent,
-            5000,
-            worker,
-            [config]
-        },
-        {config_event,
-            {gen_event, start_link, [{local, config_event}]},
-            permanent,
-            5000,
-            worker,
-            dynamic
-        }
-    ],
-    {ok, {{one_for_one, 5, 10}, Children}}.
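
For reference, booting the pair directly (ini path illustrative) starts the config gen_server and the config_event gen_event manager under one_for_one:

    1> {ok, Pid} = config_sup:start_link(["/etc/couchdb/default.ini"]).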

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config_util.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config_util.erl b/src/config/src/config_util.erl
deleted file mode 100644
index d0f9ed4..0000000
--- a/src/config/src/config_util.erl
+++ /dev/null
@@ -1,74 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(config_util).
-
--export([abs_pathname/1]).
--export([abs_pathname/2]).
--export([implode/2]).
-
-% Given a pathname such as "../foo/bar/", gives back the fully qualified
-% absolute pathname.
-abs_pathname(" " ++ Filename) ->
-    % strip leading whitespace
-    abs_pathname(Filename);
-abs_pathname([$/ |_]=Filename) ->
-    Filename;
-abs_pathname(Filename) ->
-    {ok, Cwd} = file:get_cwd(),
-    {Filename2, Args} = separate_cmd_args(Filename, ""),
-    abs_pathname(Filename2, Cwd) ++ Args.
-
-abs_pathname(Filename, Dir) ->
-    Name = filename:absname(Filename, Dir ++ "/"),
-    OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
-    % If the filename is a dir (last char is a slash), put back the trailing slash
-    case string:right(Filename,1) of
-    "/" ->
-        OutFilename ++ "/";
-    "\\" ->
-        OutFilename ++ "/";
-    _Else->
-        OutFilename
-    end.
-
-implode(List, Sep) ->
-    implode(List, Sep, []).
-
-implode([], _Sep, Acc) ->
-    lists:flatten(lists:reverse(Acc));
-implode([H], Sep, Acc) ->
-    implode([], Sep, [H|Acc]);
-implode([H|T], Sep, Acc) ->
-    implode(T, Sep, [Sep,H|Acc]).
-
-% if this is an executable with arguments, separate out the arguments
-% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
-separate_cmd_args("", CmdAcc) ->
-    {lists:reverse(CmdAcc), ""};
-separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
-    separate_cmd_args(Rest, " \\" ++ CmdAcc);
-separate_cmd_args(" " ++ Rest, CmdAcc) ->
-    {lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char|Rest], CmdAcc) ->
-    separate_cmd_args(Rest, [Char | CmdAcc]).
-
-% takes a hierarchical list of dirs and removes the dots ".", double dots
-% ".." and the corresponding parent dirs.
-fix_path_list([], Acc) ->
-    lists:reverse(Acc);
-fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
-    fix_path_list(Rest, RestAcc);
-fix_path_list(["."|Rest], Acc) ->
-    fix_path_list(Rest, Acc);
-fix_path_list([Dir | Rest], Acc) ->
-    fix_path_list(Rest, [Dir | Acc]).
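
Expected behaviour of the helpers above, from a shell (paths illustrative):

    1> config_util:implode(["a", "b", "c"], "=").
    "a=b=c"
    2> config_util:abs_pathname("../etc/local.ini", "/opt/couchdb").
    "/opt/etc/local.ini"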

http://git-wip-us.apache.org/repos/asf/couchdb/blob/49642148/src/config/src/config_writer.erl
----------------------------------------------------------------------
diff --git a/src/config/src/config_writer.erl b/src/config/src/config_writer.erl
deleted file mode 100644
index 2812686..0000000
--- a/src/config/src/config_writer.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Saves a Key/Value pair to an ini file. The Key consists of a Section
-%%      and Option combination. If that combination is found in the ini file,
-%%      the new value replaces the old value. If only the Section is found,
-%%      the Option/Value combination is appended to the Section. If the
-%%      Section does not yet exist in the ini file, it is added and the
-%%      Option/Value pair is appended.
-%% @see config
-
--module(config_writer).
-
--export([save_to_file/2]).
-
-%% @spec save_to_file(
-%%           Config::{{Section::string(), Option::string()}, Value::string()},
-%%           File::filename()) -> ok
-%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
-save_to_file({{Section, Key}, Value}, File) ->
-    {ok, OldFileContents} = file:read_file(File),
-    Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),
-
-    SectionLine = "[" ++ Section ++ "]",
-    {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\.\_-]*\\]"]),
-
-    NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
-    NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
-    ok = file:write_file(File, NewFileContents).
-
-
-process_file_lines([Section|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_section_lines(Rest, [Section|SeenLines], Pattern, Key, Value);
-
-process_file_lines([Line|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_file_lines(Rest, [Line|SeenLines], Section, Pattern, Key, Value);
-
-process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
-    % Section wasn't found.  Append it with the option here.
-    [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].
-
-
-process_section_lines([Line|Rest], SeenLines, Pattern, Key, Value) ->
-    case re:run(Line, Pattern, [{capture, all_but_first}]) of
-    nomatch -> % Found nothing interesting. Move on.
-        process_section_lines(Rest, [Line|SeenLines], Pattern, Key, Value);
-    {match, []} -> % Found another section. Append the option here.
-        lists:reverse(Rest) ++
-        [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
-    {match, _} -> % Found the option itself. Replace it.
-        lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
-    end;
-
-process_section_lines([], SeenLines, _Pattern, Key, Value) ->
-    % Found end of file within the section. Append the option here.
-    [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
-
-
-reverse_and_add_newline([Line|Rest], Content) ->
-    reverse_and_add_newline(Rest, [Line, "\n", Content]);
-
-reverse_and_add_newline([], Content) ->
-    Content.
-
-
-strip_empty_lines(["" | Rest]) ->
-    strip_empty_lines(Rest);
-
-strip_empty_lines(All) ->
-    All.

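A quick sketch of the behaviour described in the @doc above (the ini file path and its contents are hypothetical; note the file must already exist, since save_to_file/2 reads it first):

    %% assuming /tmp/local.ini contains:
    %%   [couchdb]
    %%   max_dbs_open = 100
    %% this replaces the existing value in place:
    ok = config_writer:save_to_file({{"couchdb", "max_dbs_open"}, "500"}, "/tmp/local.ini").
    %% and this appends a new [log] section with its option:
    ok = config_writer:save_to_file({{"log", "level"}, "info"}, "/tmp/local.ini").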

[16/49] Remove src/couch_mrview

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
deleted file mode 100644
index 27baa4a..0000000
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ /dev/null
@@ -1,710 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_util).
-
--export([get_view/4]).
--export([ddoc_to_mrst/2, init_state/4, reset_index/3]).
--export([make_header/1]).
--export([index_file/2, compaction_file/2, open_file/1]).
--export([delete_files/2, delete_index_file/2, delete_compaction_file/2]).
--export([get_row_count/1, all_docs_reduce_to_count/1, reduce_to_count/1]).
--export([all_docs_key_opts/1, all_docs_key_opts/2, key_opts/1, key_opts/2]).
--export([fold/4, fold_reduce/4]).
--export([temp_view_to_ddoc/1]).
--export([calculate_data_size/2]).
--export([validate_args/1]).
--export([maybe_load_doc/3, maybe_load_doc/4]).
-
--define(MOD, couch_mrview_index).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-get_view(Db, DDoc, ViewName, Args0) ->
-    ArgCheck = fun(InitState) ->
-        Args1 = set_view_type(Args0, ViewName, InitState#mrst.views),
-        {ok, validate_args(Args1)}
-    end,
-    {ok, Pid, Args2} = couch_index_server:get_index(?MOD, Db, DDoc, ArgCheck),
-    DbUpdateSeq = couch_util:with_db(Db, fun(WDb) ->
-        couch_db:get_update_seq(WDb)
-    end),
-    MinSeq = case Args2#mrargs.stale of
-        ok -> 0; update_after -> 0; _ -> DbUpdateSeq
-    end,
-    {ok, State} = case couch_index:get_state(Pid, MinSeq) of
-        {ok, _} = Resp -> Resp;
-        Error -> throw(Error)
-    end,
-    Ref = erlang:monitor(process, State#mrst.fd),
-    if Args2#mrargs.stale == update_after ->
-        spawn(fun() -> catch couch_index:get_state(Pid, DbUpdateSeq) end);
-        true -> ok
-    end,
-    #mrst{language=Lang, views=Views} = State,
-    {Type, View, Args3} = extract_view(Lang, Args2, ViewName, Views),
-    check_range(Args3, view_cmp(View)),
-    Sig = view_sig(Db, State, View, Args3),
-    {ok, {Type, View, Ref}, Sig, Args3}.
-
-
-ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
-    MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
-        case couch_util:get_value(<<"map">>, MRFuns) of
-            MapSrc when is_binary(MapSrc) ->
-                RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
-                {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
-                View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
-                    {ok, View0} -> View0;
-                    error -> #mrview{def=MapSrc, options=ViewOpts}
-                end,
-                {MapNames, RedSrcs} = case RedSrc of
-                    null ->
-                        MNames = [Name | View#mrview.map_names],
-                        {MNames, View#mrview.reduce_funs};
-                    _ ->
-                        RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
-                        {View#mrview.map_names, RedFuns}
-                end,
-                View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
-                dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
-            undefined ->
-                DictBySrcAcc
-        end;
-        ({Name, Else}, DictBySrcAcc) ->
-            ?LOG_ERROR("design_doc_to_view_group ~s views ~p", [Name, Else]),
-            DictBySrcAcc
-    end,
-    {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
-    BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
-
-    NumViews = fun({_, View}, N) -> {View#mrview{id_num=N}, N+1} end,
-    {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
-
-    Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
-    {DesignOpts} = couch_util:get_value(<<"options">>, Fields, {[]}),
-    {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
-    Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
-
-    IdxState = #mrst{
-        db_name=DbName,
-        idx_name=Id,
-        lib=Lib,
-        views=Views,
-        language=Language,
-        design_opts=DesignOpts
-    },
-    SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
-    {ok, IdxState#mrst{sig=couch_util:md5(term_to_binary(SigInfo))}}.
-
-
-set_view_type(_Args, _ViewName, []) ->
-    throw({not_found, missing_named_view});
-set_view_type(Args, ViewName, [View | Rest]) ->
-    RedNames = [N || {N, _} <- View#mrview.reduce_funs],
-    case lists:member(ViewName, RedNames) of
-        true ->
-            case Args#mrargs.reduce of
-                false -> Args#mrargs{view_type=map};
-                _ -> Args#mrargs{view_type=red}
-            end;
-        false ->
-            case lists:member(ViewName, View#mrview.map_names) of
-                true -> Args#mrargs{view_type=map};
-                false -> set_view_type(Args, ViewName, Rest)
-            end
-    end.
-
-
-extract_view(_Lang, _Args, _ViewName, []) ->
-    throw({not_found, missing_named_view});
-extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
-    Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
-    case lists:member(Name, Names) of
-        true -> {map, View, Args};
-        _ -> extract_view(Lang, Args, Name, Rest)
-    end;
-extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
-    RedNames = [N || {N, _} <- View#mrview.reduce_funs],
-    case lists:member(Name, RedNames) of
-        true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
-        false -> extract_view(Lang, Args, Name, Rest)
-    end.
-
-
-view_sig(Db, State, View, #mrargs{include_docs=true}=Args) ->
-    BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs=false}),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    PurgeSeq = couch_db:get_purge_seq(Db),
-    Bin = term_to_binary({BaseSig, UpdateSeq, PurgeSeq}),
-    couch_index_util:hexsig(couch_util:md5(Bin));
-view_sig(Db, State, {_Nth, _Lang, View}, Args) ->
-    view_sig(Db, State, View, Args);
-view_sig(_Db, State, View, Args0) ->
-    Sig = State#mrst.sig,
-    UpdateSeq = View#mrview.update_seq,
-    PurgeSeq = View#mrview.purge_seq,
-    Args = Args0#mrargs{
-        preflight_fun=undefined,
-        extra=[]
-    },
-    Bin = term_to_binary({Sig, UpdateSeq, PurgeSeq, Args}),
-    couch_index_util:hexsig(couch_util:md5(Bin)).
-
-
-init_state(Db, Fd, #mrst{views=Views}=State, nil) ->
-    Header = #mrheader{
-        seq=0,
-        purge_seq=couch_db:get_purge_seq(Db),
-        id_btree_state=nil,
-        view_states=[{nil, 0, 0} || _ <- Views]
-    },
-    init_state(Db, Fd, State, Header);
-init_state(Db, Fd, State, Header) ->
-    #mrst{language=Lang, views=Views} = State,
-    #mrheader{
-        seq=Seq,
-        purge_seq=PurgeSeq,
-        id_btree_state=IdBtreeState,
-        view_states=ViewStates
-    } = Header,
-
-    StateUpdate = fun
-        ({_, _, _}=St) -> St;
-        (St) -> {St, 0, 0}
-    end,
-    ViewStates2 = lists:map(StateUpdate, ViewStates),
-
-    IdReduce = fun
-        (reduce, KVs) -> length(KVs);
-        (rereduce, Reds) -> lists:sum(Reds)
-    end,
-
-    IdBtOpts = [{reduce, IdReduce}, {compression, couch_db:compression(Db)}],
-    {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd, IdBtOpts),
-
-    OpenViewFun = fun(St, View) -> open_view(Db, Fd, Lang, St, View) end,
-    Views2 = lists:zipwith(OpenViewFun, ViewStates2, Views),
-
-    State#mrst{
-        fd=Fd,
-        fd_monitor=erlang:monitor(process, Fd),
-        update_seq=Seq,
-        purge_seq=PurgeSeq,
-        id_btree=IdBtree,
-        views=Views2
-    }.
-
-
-open_view(Db, Fd, Lang, {BTState, USeq, PSeq}, View) ->
-    FunSrcs = [FunSrc || {_Name, FunSrc} <- View#mrview.reduce_funs],
-    ReduceFun =
-        fun(reduce, KVs) ->
-            KVs2 = detuple_kvs(expand_dups(KVs, []), []),
-            {ok, Result} = couch_query_servers:reduce(Lang, FunSrcs, KVs2),
-            {length(KVs2), Result};
-        (rereduce, Reds) ->
-            Count = lists:sum([Count0 || {Count0, _} <- Reds]),
-            UsrReds = [UsrRedsList || {_, UsrRedsList} <- Reds],
-            {ok, Result} = couch_query_servers:rereduce(Lang, FunSrcs, UsrReds),
-            {Count, Result}
-        end,
-
-    Less = case couch_util:get_value(<<"collation">>, View#mrview.options) of
-        <<"raw">> -> fun(A, B) -> A < B end;
-        _ -> fun couch_ejson_compare:less_json_ids/2
-    end,
-
-    ViewBtOpts = [
-        {less, Less},
-        {reduce, ReduceFun},
-        {compression, couch_db:compression(Db)}
-    ],
-    {ok, Btree} = couch_btree:open(BTState, Fd, ViewBtOpts),
-    View#mrview{btree=Btree, update_seq=USeq, purge_seq=PSeq}.
-
-
-temp_view_to_ddoc({Props}) ->
-    Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
-    Options = couch_util:get_value(<<"options">>, Props, {[]}),
-    View0 = [{<<"map">>, couch_util:get_value(<<"map">>, Props)}],
-    View1 = View0 ++ case couch_util:get_value(<<"reduce">>, Props) of
-        RedSrc when is_binary(RedSrc) -> [{<<"reduce">>, RedSrc}];
-        _ -> []
-    end,
-    DDoc = {[
-        {<<"_id">>, couch_uuids:random()},
-        {<<"language">>, Language},
-        {<<"options">>, Options},
-        {<<"views">>, {[
-            {<<"temp">>, {View1}}
-        ]}}
-    ]},
-    couch_doc:from_json_obj(DDoc).
-
-
-get_row_count(#mrview{btree=Bt}) ->
-    {ok, {Count, _Reds}} = couch_btree:full_reduce(Bt),
-    {ok, Count}.
-
-
-all_docs_reduce_to_count(Reductions) ->
-    Reduce = fun couch_db_updater:btree_by_id_reduce/2,
-    {Count, _, _} = couch_btree:final_reduce(Reduce, Reductions),
-    Count.
-
-reduce_to_count(nil) ->
-    0;
-reduce_to_count(Reductions) ->
-    Reduce = fun
-        (reduce, KVs) ->
-            Counts = [
-                case V of {dups, Vals} -> length(Vals); _ -> 1 end
-                || {_,V} <- KVs
-            ],
-            {lists:sum(Counts), []};
-        (rereduce, Reds) ->
-            {lists:sum([Count0 || {Count0, _} <- Reds]), []}
-    end,
-    {Count, _} = couch_btree:final_reduce(Reduce, Reductions),
-    Count.
-
-
-fold(#mrview{btree=Bt}, Fun, Acc, Opts) ->
-    WrapperFun = fun(KV, Reds, Acc2) ->
-        fold_fun(Fun, expand_dups([KV], []), Reds, Acc2)
-    end,
-    {ok, _LastRed, _Acc} = couch_btree:fold(Bt, WrapperFun, Acc, Opts).
-
-
-fold_fun(_Fun, [], _, Acc) ->
-    {ok, Acc};
-fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
-    case Fun(KV, {KVReds, Reds}, Acc) of
-        {ok, Acc2} ->
-            fold_fun(Fun, Rest, {[KV|KVReds], Reds}, Acc2);
-        {stop, Acc2} ->
-            {stop, Acc2}
-    end.
-
-
-fold_reduce({NthRed, Lang, View}, Fun,  Acc, Options) ->
-    #mrview{
-        btree=Bt,
-        reduce_funs=RedFuns
-    } = View,
-    LPad = lists:duplicate(NthRed - 1, []),
-    RPad = lists:duplicate(length(RedFuns) - NthRed, []),
-    {_Name, FunSrc} = lists:nth(NthRed,RedFuns),
-
-    ReduceFun = fun
-        (reduce, KVs0) ->
-            KVs1 = detuple_kvs(expand_dups(KVs0, []), []),
-            {ok, Red} = couch_query_servers:reduce(Lang, [FunSrc], KVs1),
-            {0, LPad ++ Red ++ RPad};
-        (rereduce, Reds) ->
-            ExtractRed = fun({_, UReds0}) -> [lists:nth(NthRed, UReds0)] end,
-            UReds = lists:map(ExtractRed, Reds),
-            {ok, Red} = couch_query_servers:rereduce(Lang, [FunSrc], UReds),
-            {0, LPad ++ Red ++ RPad}
-    end,
-
-    WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) ->
-        {_, Reds} = couch_btree:final_reduce(ReduceFun, PartialReds),
-        Fun(GroupedKey, lists:nth(NthRed, Reds), Acc0)
-    end,
-
-    couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options).
-
-
-validate_args(Args) ->
-    Reduce = Args#mrargs.reduce,
-    case Reduce == undefined orelse is_boolean(Reduce) of
-        true -> ok;
-        _ -> mrverror(<<"Invalid `reduce` value.">>)
-    end,
-
-    case {Args#mrargs.view_type, Reduce} of
-        {map, true} -> mrverror(<<"Reduce is invalid for map-only views.">>);
-        _ -> ok
-    end,
-
-    case {Args#mrargs.view_type, Args#mrargs.group_level, Args#mrargs.keys} of
-        {red, exact, _} -> ok;
-        {red, _, KeyList} when is_list(KeyList) ->
-            Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
-            mrverror(Msg);
-        _ -> ok
-    end,
-
-    case Args#mrargs.keys of
-        Keys when is_list(Keys) -> ok;
-        undefined -> ok;
-        _ -> mrverror(<<"`keys` must be an array of strings.">>)
-    end,
-
-    case {Args#mrargs.keys, Args#mrargs.start_key} of
-        {undefined, _} -> ok;
-        {[], _} -> ok;
-        {[_|_], undefined} -> ok;
-        _ -> mrverror(<<"`start_key` is incompatible with `keys`">>)
-    end,
-
-    case Args#mrargs.start_key_docid of
-        undefined -> ok;
-        SKDocId0 when is_binary(SKDocId0) -> ok;
-        _ -> mrverror(<<"`start_key_docid` must be a string.">>)
-    end,
-
-    case {Args#mrargs.keys, Args#mrargs.end_key} of
-        {undefined, _} -> ok;
-        {[], _} -> ok;
-        {[_|_], undefined} -> ok;
-        _ -> mrverror(<<"`end_key` is incompatible with `keys`">>)
-    end,
-
-    case Args#mrargs.end_key_docid of
-        undefined -> ok;
-        EKDocId0 when is_binary(EKDocId0) -> ok;
-        _ -> mrverror(<<"`end_key_docid` must be a string.">>)
-    end,
-
-    case Args#mrargs.direction of
-        fwd -> ok;
-        rev -> ok;
-        _ -> mrverror(<<"Invalid direction.">>)
-    end,
-
-    case {Args#mrargs.limit >= 0, Args#mrargs.limit == undefined} of
-        {true, _} -> ok;
-        {_, true} -> ok;
-        _ -> mrverror(<<"`limit` must be a non-negative integer.">>)
-    end,
-
-    case Args#mrargs.skip < 0 of
-        true -> mrverror(<<"`skip` must be >= 0">>);
-        _ -> ok
-    end,
-
-    case {Args#mrargs.view_type, Args#mrargs.group_level} of
-        {red, exact} -> ok;
-        {_, 0} -> ok;
-        {red, Int} when is_integer(Int), Int >= 0 -> ok;
-        {red, _} -> mrverror(<<"`group_level` must be >= 0">>);
-        {map, _} -> mrverror(<<"Invalid use of grouping on a map view.">>)
-    end,
-
-    case Args#mrargs.stale of
-        ok -> ok;
-        update_after -> ok;
-        false -> ok;
-        _ -> mrverror(<<"Invalid value for `stale`.">>)
-    end,
-
-    case is_boolean(Args#mrargs.inclusive_end) of
-        true -> ok;
-        _ -> mrverror(<<"Invalid value for `inclusive_end`.">>)
-    end,
-
-    case {Args#mrargs.view_type, Args#mrargs.include_docs} of
-        {red, true} -> mrverror(<<"`include_docs` is invalid for reduce">>);
-        {_, ID} when is_boolean(ID) -> ok;
-        _ -> mrverror(<<"Invalid value for `include_docs`">>)
-    end,
-
-    case {Args#mrargs.view_type, Args#mrargs.conflicts} of
-        {_, undefined} -> ok;
-        {map, V} when is_boolean(V) -> ok;
-        {red, undefined} -> ok;
-        {map, _} -> mrverror(<<"Invalid value for `conflicts`.">>);
-        {red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
-    end,
-
-    SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
-        {fwd, undefined} -> <<>>;
-        {rev, undefined} -> <<255>>;
-        {_, SKDocId1} -> SKDocId1
-    end,
-
-    EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
-        {fwd, undefined} -> <<255>>;
-        {rev, undefined} -> <<>>;
-        {_, EKDocId1} -> EKDocId1
-    end,
-
-    Args#mrargs{
-        start_key_docid=SKDocId,
-        end_key_docid=EKDocId
-    }.
-
-
-check_range(#mrargs{start_key=undefined}, _Cmp) ->
-    ok;
-check_range(#mrargs{end_key=undefined}, _Cmp) ->
-    ok;
-check_range(#mrargs{start_key=K, end_key=K}, _Cmp) ->
-    ok;
-check_range(Args, Cmp) ->
-    #mrargs{
-        direction=Dir,
-        start_key=SK,
-        start_key_docid=SKD,
-        end_key=EK,
-        end_key_docid=EKD
-    } = Args,
-    case {Dir, Cmp({SK, SKD}, {EK, EKD})} of
-        {fwd, false} ->
-            throw({query_parse_error,
-                <<"No rows can match your key range, reverse your ",
-                    "start_key and end_key or set descending=true">>});
-        {rev, true} ->
-            throw({query_parse_error,
-                <<"No rows can match your key range, reverse your ",
-                    "start_key and end_key or set descending=false">>});
-        _ -> ok
-    end.
-
-
-view_cmp({_Nth, _Lang, View}) ->
-    view_cmp(View);
-view_cmp(View) ->
-    fun(A, B) -> couch_btree:less(View#mrview.btree, A, B) end.
-
-
-make_header(State) ->
-    #mrst{
-        update_seq=Seq,
-        purge_seq=PurgeSeq,
-        id_btree=IdBtree,
-        views=Views
-    } = State,
-    ViewStates = [
-        {
-            couch_btree:get_state(V#mrview.btree),
-            V#mrview.update_seq,
-            V#mrview.purge_seq
-        }
-        ||
-        V <- Views
-    ],
-    #mrheader{
-        seq=Seq,
-        purge_seq=PurgeSeq,
-        id_btree_state=couch_btree:get_state(IdBtree),
-        view_states=ViewStates
-    }.
-
-
-index_file(DbName, Sig) ->
-    FileName = couch_index_util:hexsig(Sig) ++ ".view",
-    couch_index_util:index_file(mrview, DbName, FileName).
-
-
-compaction_file(DbName, Sig) ->
-    FileName = couch_index_util:hexsig(Sig) ++ ".compact.view",
-    couch_index_util:index_file(mrview, DbName, FileName).
-
-
-open_file(FName) ->
-    case couch_file:open(FName) of
-        {ok, Fd} -> {ok, Fd};
-        {error, enoent} -> couch_file:open(FName, [create]);
-        Error -> Error
-    end.
-
-
-delete_files(DbName, Sig) ->
-    delete_index_file(DbName, Sig),
-    delete_compaction_file(DbName, Sig).
-
-
-delete_index_file(DbName, Sig) ->
-    delete_file(index_file(DbName, Sig)).
-
-
-delete_compaction_file(DbName, Sig) ->
-    delete_file(compaction_file(DbName, Sig)).
-    
-
-delete_file(FName) ->
-    case filelib:is_file(FName) of
-        true ->
-            RootDir = couch_index_util:root_dir(),
-            couch_file:delete(RootDir, FName);
-        _ ->
-            ok
-    end.
-
-
-reset_index(Db, Fd, #mrst{sig=Sig}=State) ->
-    ok = couch_file:truncate(Fd, 0),
-    ok = couch_file:write_header(Fd, {Sig, nil}),
-    init_state(Db, Fd, reset_state(State), nil).
-
-
-reset_state(State) ->
-    State#mrst{
-        fd=nil,
-        qserver=nil,
-        update_seq=0,
-        id_btree=nil,
-        views=[View#mrview{btree=nil} || View <- State#mrst.views]
-    }.
-
-
-all_docs_key_opts(Args) ->
-    all_docs_key_opts(Args, []).
-
-
-all_docs_key_opts(#mrargs{keys=undefined}=Args, Extra) ->
-    all_docs_key_opts(Args#mrargs{keys=[]}, Extra);
-all_docs_key_opts(#mrargs{keys=[], direction=Dir}=Args, Extra) ->
-    [[{dir, Dir}] ++ ad_skey_opts(Args) ++ ad_ekey_opts(Args) ++ Extra];
-all_docs_key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
-    lists:map(fun(K) ->
-        [{dir, Dir}]
-        ++ ad_skey_opts(Args#mrargs{start_key=K})
-        ++ ad_ekey_opts(Args#mrargs{end_key=K})
-        ++ Extra
-    end, Keys).
-
-
-ad_skey_opts(#mrargs{start_key=SKey}) when is_binary(SKey) ->
-    [{start_key, SKey}];
-ad_skey_opts(#mrargs{start_key_docid=SKeyDocId}) ->
-    [{start_key, SKeyDocId}].
-
-
-ad_ekey_opts(#mrargs{end_key=EKey}=Args) when is_binary(EKey) ->
-    Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
-    [{Type, EKey}];
-ad_ekey_opts(#mrargs{end_key_docid=EKeyDocId}=Args) ->
-    Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
-    [{Type, EKeyDocId}].
-
-
-key_opts(Args) ->
-    key_opts(Args, []).
-
-key_opts(#mrargs{keys=undefined, direction=Dir}=Args, Extra) ->
-    [[{dir, Dir}] ++ skey_opts(Args) ++ ekey_opts(Args) ++ Extra];
-key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
-    lists:map(fun(K) ->
-        [{dir, Dir}]
-        ++ skey_opts(Args#mrargs{start_key=K})
-        ++ ekey_opts(Args#mrargs{end_key=K})
-        ++ Extra
-    end, Keys).
-
-
-skey_opts(#mrargs{start_key=undefined}) ->
-    [];
-skey_opts(#mrargs{start_key=SKey, start_key_docid=SKeyDocId}) ->
-    [{start_key, {SKey, SKeyDocId}}].
-
-
-ekey_opts(#mrargs{end_key=undefined}) ->
-    [];
-ekey_opts(#mrargs{end_key=EKey, end_key_docid=EKeyDocId}=Args) ->
-    case Args#mrargs.inclusive_end of
-        true -> [{end_key, {EKey, EKeyDocId}}];
-        false -> [{end_key_gt, {EKey, reverse_key_default(EKeyDocId)}}]
-    end.
-
-
-reverse_key_default(<<>>) -> <<255>>;
-reverse_key_default(<<255>>) -> <<>>;
-reverse_key_default(Key) -> Key.
-
-
-calculate_data_size(IdBt, Views) ->
-    SumFun = fun(#mrview{btree=Bt}, Acc) ->
-        sum_btree_sizes(Acc, couch_btree:size(Bt))
-    end,
-    Size = lists:foldl(SumFun, couch_btree:size(IdBt), Views),
-    {ok, Size}.
-
-
-sum_btree_sizes(nil, _) ->
-    null;
-sum_btree_sizes(_, nil) ->
-    null;
-sum_btree_sizes(Size1, Size2) ->
-    Size1 + Size2.
-
-
-detuple_kvs([], Acc) ->
-    lists:reverse(Acc);
-detuple_kvs([KV | Rest], Acc) ->
-    {{Key,Id},Value} = KV,
-    NKV = [[Key, Id], Value],
-    detuple_kvs(Rest, [NKV | Acc]).
-
-
-expand_dups([], Acc) ->
-    lists:reverse(Acc);
-expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
-    Expanded = [{Key, Val} || Val <- Vals],
-    expand_dups(Rest, Expanded ++ Acc);
-expand_dups([KV | Rest], Acc) ->
-    expand_dups(Rest, [KV | Acc]).
-
-
-maybe_load_doc(_Db, _DI, #mrargs{include_docs=false}) ->
-    [];
-maybe_load_doc(Db, #doc_info{}=DI, #mrargs{conflicts=true}) ->
-    doc_row(couch_index_util:load_doc(Db, DI, [conflicts]));
-maybe_load_doc(Db, #doc_info{}=DI, _Args) ->
-    doc_row(couch_index_util:load_doc(Db, DI, [])).
-
-
-maybe_load_doc(_Db, _Id, _Val, #mrargs{include_docs=false}) ->
-    [];
-maybe_load_doc(Db, Id, Val, #mrargs{conflicts=true}) ->
-    doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), [conflicts]));
-maybe_load_doc(Db, Id, Val, _Args) ->
-    doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), [])).
-
-
-doc_row(null) ->
-    [{doc, null}];
-doc_row(Doc) ->
-    [{doc, couch_doc:to_json_obj(Doc, [])}].
-
-
-docid_rev(Id, {Props}) ->
-    DocId = couch_util:get_value(<<"_id">>, Props, Id),
-    Rev = case couch_util:get_value(<<"_rev">>, Props, nil) of
-        nil -> nil;
-        Rev0 -> couch_doc:parse_rev(Rev0)
-    end,
-    {DocId, Rev};
-docid_rev(Id, _) ->
-    {Id, nil}.
-
-
-index_of(Key, List) ->
-    index_of(Key, List, 1).
-
-
-index_of(_, [], _) ->
-    throw({error, missing_named_view});
-index_of(Key, [Key | _], Idx) ->
-    Idx;
-index_of(Key, [_ | Rest], Idx) ->
-    index_of(Key, Rest, Idx+1).
-
-
-mrverror(Mesg) ->
-    throw({query_parse_error, Mesg}).

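For orientation, a sketch of the EJSON design document that ddoc_to_mrst/2 above consumes (the id, view name, and function bodies are illustrative):

    DDoc = couch_doc:from_json_obj({[
        {<<"_id">>, <<"_design/bar">>},
        {<<"language">>, <<"javascript">>},
        {<<"views">>, {[
            {<<"baz">>, {[
                {<<"map">>, <<"function(doc) { emit(doc.val, doc.val); }">>},
                {<<"reduce">>, <<"_sum">>}
            ]}}
        ]}}
    ]}),
    {ok, MRSt} = couch_mrview_util:ddoc_to_mrst(<<"foo">>, DDoc).

Views sharing the same map source and options are merged into a single #mrview record, which is why the fold in ddoc_to_mrst keys its dict on {MapSrc, ViewOpts}.
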
http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/01-load.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/01-load.t b/src/couch_mrview/test/01-load.t
deleted file mode 100644
index a57c1a7..0000000
--- a/src/couch_mrview/test/01-load.t
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Test that we can load each module.
-
-main(_) ->
-    test_util:init_code_path(),
-    Modules = [
-        couch_mrview,
-        couch_mrview_compactor,
-        couch_mrview_http,
-        couch_mrview_index,
-        couch_mrview_updater,
-        couch_mrview_util
-    ],
-
-    etap:plan(length(Modules)),
-    lists:foreach(
-        fun(Module) ->
-            etap:loaded_ok(Module, lists:concat(["Loaded: ", Module]))
-        end, Modules),
-    etap:end_tests().

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/02-map-views.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/02-map-views.t b/src/couch_mrview/test/02-map-views.t
deleted file mode 100644
index bcf4ce1..0000000
--- a/src/couch_mrview/test/02-map-views.t
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(6, fun() -> test() end).
-
-test() ->
-    test_util:start_couch(),
-
-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
-
-    test_basic(Db),
-    test_range(Db),
-    test_rev_range(Db),
-    test_limit_and_skip(Db),
-    test_include_docs(Db),
-    test_empty_view(Db),
-
-    ok.
-
-
-test_basic(Db) ->
-    Result = run_query(Db, []),
-    Expect = {ok, [
-        {meta, [{total, 10}, {offset, 0}]},
-        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
-        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
-        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
-        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
-        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
-        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
-        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
-    ]},
-    etap:is(Result, Expect, "Simple view query worked.").
-
-
-test_range(Db) ->
-    Result = run_query(Db, [{start_key, 3}, {end_key, 5}]),
-    Expect = {ok, [
-        {meta, [{total, 10}, {offset, 2}]},
-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
-    ]},
-    etap:is(Result, Expect, "Query with range works.").
-
-
-test_rev_range(Db) ->
-    Result = run_query(Db, [
-        {direction, rev},
-        {start_key, 5}, {end_key, 3},
-        {inclusive_end, true}
-    ]),
-    Expect = {ok, [
-        {meta, [{total, 10}, {offset, 5}]},
-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
-        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
-        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
-    ]},
-    etap:is(Result, Expect, "Query with reversed range works.").
-
-
-test_limit_and_skip(Db) ->
-    Result = run_query(Db, [
-        {start_key, 2},
-        {limit, 3},
-        {skip, 3}
-    ]),
-    Expect = {ok, [
-        {meta, [{total, 10}, {offset, 4}]},
-        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
-        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
-        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
-    ]},
-    etap:is(Result, Expect, "Query with limit and skip works.").
-
-
-test_include_docs(Db) ->
-    Result = run_query(Db, [
-        {start_key, 8},
-        {end_key, 8},
-        {include_docs, true}
-    ]),
-    Doc = {[
-        {<<"_id">>,<<"8">>},
-        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
-        {<<"val">>,8}
-    ]},
-    Expect = {ok, [
-        {meta, [{total, 10}, {offset, 7}]},
-        {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
-    ]},
-    etap:is(Result, Expect, "Query with include docs works.").
-
-
-test_empty_view(Db) ->
-    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
-    Expect = {ok, [
-        {meta, [{total, 0}, {offset, 0}]}
-    ]},
-    etap:is(Result, Expect, "Empty views are correct.").
-
-
-run_query(Db, Opts) ->
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/03-red-views.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/03-red-views.t b/src/couch_mrview/test/03-red-views.t
deleted file mode 100644
index 0d11d51..0000000
--- a/src/couch_mrview/test/03-red-views.t
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(4, fun() -> test() end).
-
-test() ->
-    test_util:start_couch(),
-
-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, red),
-
-    test_basic(Db),
-    test_key_range(Db),
-    test_group_level(Db),
-    test_group_exact(Db),
-
-    ok.
-
-
-test_basic(Db) ->
-    Result = run_query(Db, []),
-    Expect = {ok, [
-        {meta, []},
-        {row, [{key, null}, {value, 55}]}
-    ]},
-    etap:is(Result, Expect, "Simple reduce view works.").
-
-
-test_key_range(Db) ->
-    Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
-    Expect = {ok, [
-        {meta, []},
-        {row, [{key, null}, {value, 6}]}
-    ]},
-    etap:is(Result, Expect, "Reduce with key range works.").
-
-
-test_group_level(Db) ->
-    Result = run_query(Db, [{group_level, 1}]),
-    Expect = {ok, [
-        {meta, []},
-        {row, [{key, [0]}, {value, 30}]},
-        {row, [{key, [1]}, {value, 25}]}
-    ]},
-    etap:is(Result, Expect, "Group level works.").
-
-test_group_exact(Db) ->
-    Result = run_query(Db, [{group_level, exact}]),
-    Expect = {ok, [
-        {meta, []},
-        {row, [{key, [0, 2]}, {value, 2}]},
-        {row, [{key, [0, 4]}, {value, 4}]},
-        {row, [{key, [0, 6]}, {value, 6}]},
-        {row, [{key, [0, 8]}, {value, 8}]},
-        {row, [{key, [0, 10]}, {value, 10}]},
-        {row, [{key, [1, 1]}, {value, 1}]},
-        {row, [{key, [1, 3]}, {value, 3}]},
-        {row, [{key, [1, 5]}, {value, 5}]},
-        {row, [{key, [1, 7]}, {value, 7}]},
-        {row, [{key, [1, 9]}, {value, 9}]}
-    ]},
-    etap:is(Result, Expect, "Group exact works.").
-
-
-run_query(Db, Opts) ->
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/04-index-info.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/04-index-info.t b/src/couch_mrview/test/04-index-info.t
deleted file mode 100644
index c86b168..0000000
--- a/src/couch_mrview/test/04-index-info.t
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(9, fun() -> test() end).
-
-sig() -> <<"276df562b152b3c4e5d34024f62672ed">>.
-
-test() ->
-    test_util:start_couch(),
-
-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
-
-    {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
-
-    etap:is(getval(signature, Info), sig(), "Signature is ok."),
-    etap:is(getval(language, Info), <<"javascript">>, "Language is ok."),
-    etap:is_greater(getval(disk_size, Info), 0, "Disk size is ok."),
-    etap:is_greater(getval(data_size, Info), 0, "Data size is ok."),
-    etap:is(getval(update_seq, Info), 11, "Update seq is ok."),
-    etap:is(getval(purge_seq, Info), 0, "Purge seq is ok."),
-    etap:is(getval(updater_running, Info), false, "No updater running."),
-    etap:is(getval(compact_running, Info), false, "No compaction running."),
-    etap:is(getval(waiting_clients, Info), 0, "No waiting clients."),
-
-    ok.
-
-getval(Key, PL) ->
-    {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
-    Val.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/05-collation.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/05-collation.t b/src/couch_mrview/test/05-collation.t
deleted file mode 100644
index 09878af..0000000
--- a/src/couch_mrview/test/05-collation.t
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(9, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-
-    {ok, Db0} = couch_mrview_test_util:new_db(<<"foo">>, map),
-    {ok, Db1} = couch_mrview_test_util:save_docs(Db0, docs()),
-
-    test_collated_fwd(Db1),
-    test_collated_rev(Db1),
-    test_range_collation(Db1),
-    test_inclusive_end(Db1),
-    test_uninclusive_end(Db1),
-    test_with_endkey_docid(Db1),
-
-    ok.
-
-test_collated_fwd(Db) ->
-    {ok, Results} = run_query(Db, []),
-    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ rows(),
-    etap:is(Results, Expect, "Values were collated correctly.").
-
-
-test_collated_rev(Db) ->
-    {ok, Results} = run_query(Db, [{direction, rev}]),
-    Expect = [{meta, [{total, 26}, {offset, 0}]}] ++ lists:reverse(rows()),
-    etap:is(Results, Expect, "Values were collated correctly descending.").
-
-
-test_range_collation(Db) ->
-    {_, Error} = lists:foldl(fun(V, {Count, Error}) ->
-        {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
-        Id = list_to_binary(integer_to_list(Count)),
-        Expect = [
-            {meta, [{total, 26}, {offset, Count}]},
-            {row, [{id, Id}, {key, V}, {value, 0}]}
-        ],
-        case Results == Expect of
-            true -> {Count+1, Error};
-            _ -> {Count+1, true}
-        end
-    end, {0, false}, vals()),
-    etap:is(Error, false, "Found each individual key correctly.").
-
-
-test_inclusive_end(Db) ->
-    Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
-    {ok, Rows0} = run_query(Db, Opts),
-    LastRow0 = lists:last(Rows0),
-    Expect0 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
-    etap:is(LastRow0, Expect0, "Inclusive end is correct."),
-
-    {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
-    LastRow1 = lists:last(Rows1),
-    Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
-    etap:is(LastRow1, Expect1,
-            "Inclusive end is correct with descending=true").
-
-test_uninclusive_end(Db) ->
-    Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
-    {ok, Rows0} = run_query(Db, Opts),
-    LastRow0 = lists:last(Rows0),
-    Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
-    etap:is(LastRow0, Expect0, "Uninclusive end is correct."),
-
-    {ok, Rows1} = run_query(Db, Opts ++ [{direction, rev}]),
-    LastRow1 = lists:last(Rows1),
-    Expect1 = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
-    etap:is(LastRow1, Expect1,
-            "Uninclusive end is correct with descending=true").
-
-
-test_with_endkey_docid(Db) ->
-    {ok, Rows0} = run_query(Db, [
-        {end_key, <<"b">>}, {end_key_docid, <<"10">>},
-        {inclusive_end, false}
-    ]),
-    Result0 = lists:last(Rows0),
-    Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
-    etap:is(Result0, Expect0, "Uninclusive end with endkey_docid set is ok."),
-
-    {ok, Rows1} = run_query(Db, [
-        {end_key, <<"b">>}, {end_key_docid, <<"11">>},
-        {inclusive_end, false}
-    ]),
-    Result1 = lists:last(Rows1),
-    Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
-    etap:is(Result1, Expect1, "Uninclusive end with endkey_docid set is ok.").
-
-
-run_query(Db, Opts) ->
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts).
-
-
-docs() ->
-    {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
-        Doc = couch_doc:from_json_obj({[
-            {<<"_id">>, list_to_binary(integer_to_list(Count))},
-            {<<"foo">>, V}
-        ]}),
-        {[Doc | Docs0], Count+1}
-    end, {[], 0}, vals()),
-    Docs.
-
-
-rows() ->
-    {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
-        Id = list_to_binary(integer_to_list(Count)),
-        Row = {row, [{id, Id}, {key, V}, {value, 0}]},
-        {[Row | Rows0], Count+1}
-    end, {[], 0}, vals()),
-    lists:reverse(Rows).
-
-
-vals() ->
-    [
-        null,
-        false,
-        true,
-
-        1,
-        2,
-        3.0,
-        4,
-
-        <<"a">>,
-        <<"A">>,
-        <<"aa">>,
-        <<"b">>,
-        <<"B">>,
-        <<"ba">>,
-        <<"bb">>,
-
-        [<<"a">>],
-        [<<"b">>],
-        [<<"b">>, <<"c">>],
-        [<<"b">>, <<"c">>, <<"a">>],
-        [<<"b">>, <<"d">>],
-        [<<"b">>, <<"d">>, <<"e">>],
-
-        {[{<<"a">>, 1}]},
-        {[{<<"a">>, 2}]},
-        {[{<<"b">>, 1}]},
-        {[{<<"b">>, 2}]},
-        {[{<<"b">>, 2}, {<<"a">>, 1}]},
-        {[{<<"b">>, 2}, {<<"c">>, 2}]}
-    ].

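Note that vals() above is listed in CouchDB's view collation order: null, then booleans, numbers, strings, arrays, and finally objects. A one-line sketch of checking a single comparison with the {Key, DocId} comparator referenced in couch_mrview_util (the doc ids are illustrative):

    true = couch_ejson_compare:less_json_ids({null, <<"1">>}, {<<"a">>, <<"1">>}).
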
http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/06-all-docs.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/06-all-docs.t b/src/couch_mrview/test/06-all-docs.t
deleted file mode 100644
index a3aafa0..0000000
--- a/src/couch_mrview/test/06-all-docs.t
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(6, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-
-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map),
-
-    test_basic(Db),
-    test_range(Db),
-    test_rev_range(Db),
-    test_limit_and_skip(Db),
-    test_include_docs(Db),
-    test_empty_view(Db),
-
-    ok.
-
-
-test_basic(Db) ->
-    Result = run_query(Db, []),
-    Expect = {ok, [
-        {meta, [{total, 11}, {offset, 0}]},
-        mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
-        mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
-        mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
-        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
-        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
-        mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
-        mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
-        mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
-    ]},
-    etap:is(Result, Expect, "Simple view query worked.").
-
-
-test_range(Db) ->
-    Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
-    Expect = {ok, [
-        {meta, [{total, 11}, {offset, 3}]},
-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
-    ]},
-    etap:is(Result, Expect, "Query with range works.").
-
-
-test_rev_range(Db) ->
-    Result = run_query(Db, [
-        {direction, rev},
-        {start_key, <<"5">>}, {end_key, <<"3">>},
-        {inclusive_end, true}
-    ]),
-    Expect = {ok, [
-        {meta, [{total, 11}, {offset, 5}]},
-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
-        mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
-        mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
-    ]},
-    etap:is(Result, Expect, "Query with reversed range works.").
-
-
-test_limit_and_skip(Db) ->
-    Result = run_query(Db, [
-        {start_key, <<"2">>},
-        {limit, 3},
-        {skip, 3}
-    ]),
-    Expect = {ok, [
-        {meta, [{total, 11}, {offset, 5}]},
-        mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
-        mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
-        mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
-    ]},
-    etap:is(Result, Expect, "Query with limit and skip works.").
-
-
-test_include_docs(Db) ->
-    Result = run_query(Db, [
-        {start_key, <<"8">>},
-        {end_key, <<"8">>},
-        {include_docs, true}
-    ]),
-    Doc = {[
-        {<<"_id">>,<<"8">>},
-        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
-        {<<"val">>, 8}
-    ]},
-    Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
-    Expect = {ok, [
-        {meta, [{total, 11}, {offset, 8}]},
-        {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
-    ]},
-    etap:is(Result, Expect, "Query with include docs works.").
-
-
-test_empty_view(Db) ->
-    Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
-    Expect = {ok, [
-        {meta, [{total, 0}, {offset, 0}]}
-    ]},
-    etap:is(Result, Expect, "Empty views are correct.").
-
-
-mk_row(Id, Rev) ->
-    {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
-
-
-run_query(Db, Opts) ->
-    couch_mrview:query_all_docs(Db, Opts).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/test/07-compact-swap.t
----------------------------------------------------------------------
diff --git a/src/couch_mrview/test/07-compact-swap.t b/src/couch_mrview/test/07-compact-swap.t
deleted file mode 100644
index c1fd043..0000000
--- a/src/couch_mrview/test/07-compact-swap.t
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-main(_) ->
-    test_util:run(1, fun() -> test() end).
-
-
-test() ->
-    test_util:start_couch(),
-    {ok, Db} = couch_mrview_test_util:init_db(<<"foo">>, map, 1000),
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
-    test_swap(Db),
-    ok.
-
-
-test_swap(Db) ->
-    {ok, QPid} = start_query(Db),    
-    {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]),
-    receive
-        {'DOWN', MonRef, process, _, _} -> ok
-    after 1000 ->
-        throw(compaction_failed)
-    end,
-    QPid ! {self(), continue},
-    receive
-        {QPid, Count} ->
-            etap:is(Count, 1000, "View finished successfully.")
-    after 1000 ->
-        throw("query failed")
-    end.
-
-
-start_query(Db) ->
-    Self = self(),
-    Pid = spawn(fun() ->
-        CB = fun
-            (_, wait) -> receive {Self, continue} -> {ok, 0} end;
-            ({row, _}, Count) -> {ok, Count+1};
-            (_, Count) -> {ok, Count}
-        end,
-        {ok, Result} = 
-        couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
-        Self ! {self(), Result}
-    end),
-    {ok, Pid}.


[48/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Improved configure scripts

This provides better help output and argument parsing for the
'./configure' script. It implements a method for pulling config values
into rebar.config.script files so that we can control the build a bit more.

As an example, there's now a '-c' option to build couchjs with cURL
bindings.

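For example, with this change a development build linked against cURL might be configured and built as follows (prefix and user are illustrative):

    ./configure -c -u couchdb -p /opt/couchdb
    make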

Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/a204d0a8
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/a204d0a8
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/a204d0a8

Branch: refs/heads/1843-feature-bigcouch
Commit: a204d0a891281b0c2b9b9382312dc78b7f9f7a66
Parents: 0052a9b
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 21:44:44 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:52 2014 -0600

----------------------------------------------------------------------
 .gitignore          |   1 +
 Makefile            |   8 +++-
 configure           | 117 +++++++++++++++++++++++++++++++----------------
 rebar.config.script |   7 ++-
 4 files changed, 91 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/a204d0a8/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 7b37cea..902035d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+config.erl
 install.mk
 rel/*.config
 rel/dev*

http://git-wip-us.apache.org/repos/asf/couchdb/blob/a204d0a8/Makefile
----------------------------------------------------------------------
diff --git a/Makefile b/Makefile
index 8651369..812849d 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,13 @@
 
 all:  compile
 
-compile:
+config.erl:
+	@echo "Apache CouchDB has not been configured."
+	@echo "Try \"./configure -h\" for help."
+	@echo
+	@false
+
+compile: config.erl
 	@echo "==> couchjs (compile)"
 	@rebar compile
 

http://git-wip-us.apache.org/repos/asf/couchdb/blob/a204d0a8/configure
----------------------------------------------------------------------
diff --git a/configure b/configure
index c95a988..7036d7c 100755
--- a/configure
+++ b/configure
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/sh -e
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
 # use this file except in compliance with the License. You may obtain a copy of
 # the License at
@@ -13,42 +13,73 @@
 
 PREFIX="/opt/couchdb"
 COUCHDB_USER=`whoami`
-ABSPATH="$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"
-if test ! -n "$DIRPATH"; then DIRPATH=`dirname "$ABSPATH"`; fi
-
-while [ $# -gt 0 ]
-do
-  case $1
-  in
-    -p)
-      PREFIX=$2
-      shift 2
-    ;;
-    -t)
-      TEMPLATE=$2
-      shift 2
-    ;;
-    -d)
-      DATA=$2
-      shift 2
-    ;;
-    -v)
-      VIEW=$2
-      shift 2
-    ;;
-    -u)
-      COUCHDB_USER=$2
-      shift 2
-    ;;
-    *)
-      echo "usage: $0 [-p {prefix} -t {template} -d {data_dir} -v {view_dir} -u {user}]"
-      exit
-    ;;
-  esac
-done
+WITH_CURL="false"
+
+rootdir="$(cd "${0%/*}" 2>/dev/null; echo "$PWD")"
+basename=`basename $0`
+
+
+display_help () {
+    cat << EOF
+Usage: $basename [OPTION]
+
+The $basename command is responsible for generating the build
+system for Apache CouchDB.
+
+Options:
+
+  -h            display a short help message and exit
+  -u USER       set the username to run as (defaults to $COUCHDB_USER)
+  -p DIRECTORY  set the prefix for installation (defaults to $PREFIX)
+  -d DIRECTORY  specify the data directory (defaults to $PREFIX/var/lib)
+  -v DIRECTORY  specify the view directory (defaults to $PREFIX/var/lib)
+  -c            request that couchjs is linked to cURL (default false)
+
+EOF
+}
+
+
+display_error () {
+    if test -n "$1"; then
+        echo $1 >&2
+    fi
+    echo >&2
+    echo "Try \"$basename -h\" for more information." >&2
+    false
+}
+
+
+parse_opts () {
+    set +e
+    options=`getopt hu:p:d:v:c $@`
+    if test ! $? -eq 0; then
+        display_error
+    fi
+    set -e
+    eval set -- $options
+    while [ $# -gt 0 ]; do
+        case "$1" in
+            -h) shift; display_help; exit;;
+            -u) shift; COUCHDB_USER=$1; shift;;
+            -p) shift; PREFIX=$1; shift;;
+            -d) shift; DATA=$1; shift;;
+            -v) shift; VIEW=$1; shift;;
+            -c) shift; WITH_CURL="true";;
+            --) shift; break;;
+            *) display_error "Unknown option: $1" >&2;;
+        esac
+    done
+    if test ! -n "$DATA"; then
+        DATA="$PREFIX/var/lib";
+    fi
+    if test ! -n "$VIEW"; then
+        VIEW="$PREFIX/var/lib";
+    fi
+}
+
+
+parse_opts $@
 
-if test ! -n "$DATA"; then DATA="$PREFIX/var/lib"; fi
-if test ! -n "$VIEW"; then VIEW="$PREFIX/var/lib"; fi
 
 echo "==> configuring couchdb in rel/couchdb.config"
 cat > rel/couchdb.config << EOF
@@ -69,16 +100,22 @@ view_dir = $VIEW
 user = $COUCHDB_USER
 EOF
 
+cat > $rootdir/config.erl << EOF
+{with_curl, $WITH_CURL}.
+EOF
+
 # finally, a few config files for local development nodes
 for i in 1 2 3; do
 cat > rel/dev$i.config << EOF
-{prefix, "$DIRPATH/rel/dev$i"}.
-{data_dir, "$DIRPATH/rel/tmpdata/dev$i"}.
-{view_dir, "$DIRPATH/rel/tmpdata/dev$i"}.
+{prefix, "$rootdir/rel/dev$i"}.
+{data_dir, "$rootdir/rel/tmpdata/dev$i"}.
+{view_dir, "$rootdir/rel/tmpdata/dev$i"}.
 {node_name, "-name dev$i@127.0.0.1"}.
 {cluster_port, `expr 10000 \* $i + 5984`}.
 {backend_port, `expr 10000 \* $i + 5986`}.
 EOF
 done
 
-rebar get-deps && rebar update-deps && cat rel/couchdb.config
+
+echo "==> updating dependencies"
+rebar get-deps update-deps

http://git-wip-us.apache.org/repos/asf/couchdb/blob/a204d0a8/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index b541d02..e572495 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -10,7 +10,11 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
-{require_otp_vsn, "R14B01|R14B03|R14B04|R15B02|R15B03|R16"}.
+% Set the path to the configuration environment generated
+% by `./configure`.
+ConfigureEnv = filename:join(filename:dirname(SCRIPT), "config.erl"),
+os:putenv("COUCHDB_CONFIG", ConfigureEnv).
+
 
 DepDescs = [
     {chttpd, "couchdb-chttpd", {branch, import}},
@@ -38,6 +42,7 @@ MakeDep = fun({AppName, RepoName, Version}) ->
 end,
 
 AddConfig = [
+    {require_otp_vsn, "R14B01|R14B03|R14B04|R15B02|R15B03|R16"},
     {deps_dir, "src"},
     {deps, lists:map(MakeDep, DepDescs)},
     {sub_dirs, ["rel"]},

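A dependent rebar.config.script can then read the generated settings back; a minimal sketch using only stdlib calls (variable names are illustrative):

    {ok, Config} = file:consult(os:getenv("COUCHDB_CONFIG")),
    WithCurl = proplists:get_value(with_curl, Config, false).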

[15/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/couch_index


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/de4ff66d
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/de4ff66d
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/de4ff66d

Branch: refs/heads/1843-feature-bigcouch
Commit: de4ff66d4eb56ae6eeba4508a220e55a6fdf92c0
Parents: ed98610
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:39:38 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:39:41 2014 -0600

----------------------------------------------------------------------
 src/couch_index/src/couch_index.app.src       |  22 --
 src/couch_index/src/couch_index.erl           | 364 ---------------------
 src/couch_index/src/couch_index_api.erl       |  54 ---
 src/couch_index/src/couch_index_compactor.erl | 114 -------
 src/couch_index/src/couch_index_server.erl    | 266 ---------------
 src/couch_index/src/couch_index_updater.erl   | 211 ------------
 src/couch_index/src/couch_index_util.erl      |  81 -----
 7 files changed, 1112 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index.app.src
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index.app.src b/src/couch_index/src/couch_index.app.src
deleted file mode 100644
index 594589d..0000000
--- a/src/couch_index/src/couch_index.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_index, [
-    {description, "CouchDB Secondary Index Manager"},
-    {vsn, git},
-    {modules, [
-        couch_index,
-        couch_index_server
-    ]},
-    {registered, [couch_index_server]},
-    {applications, [kernel, stdlib]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
deleted file mode 100644
index 3253a32..0000000
--- a/src/couch_index/src/couch_index.erl
+++ /dev/null
@@ -1,364 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index).
--behaviour(gen_server).
--behaviour(config_listener).
-
-%% API
--export([start_link/1, stop/1, get_state/2, get_info/1]).
--export([trigger_update/2]).
--export([compact/1, compact/2]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--record(st, {
-    mod,
-    idx_state,
-    updater,
-    compactor,
-    waiters=[],
-    commit_delay,
-    committed=true,
-    shutdown=false
-}).
-
-
-start_link({Module, IdxState}) ->
-    proc_lib:start_link(?MODULE, init, [{Module, IdxState}]).
-
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-
-get_state(Pid, RequestSeq) ->
-    gen_server:call(Pid, {get_state, RequestSeq}, infinity).
-
-
-get_info(Pid) ->
-    gen_server:call(Pid, get_info).
-
-
-trigger_update(Pid, UpdateSeq) ->
-    gen_server:cast(Pid, {trigger_update, UpdateSeq}).
-
-
-compact(Pid) ->
-    compact(Pid, []).
-
-
-compact(Pid, Options) ->
-    {ok, CPid} = gen_server:call(Pid, compact),
-    case lists:member(monitor, Options) of
-        true -> {ok, erlang:monitor(process, CPid)};
-        false -> ok
-    end.
-
-
-init({Mod, IdxState}) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    DbName = Mod:get(db_name, IdxState),
-    Resp = couch_util:with_db(DbName, fun(Db) ->
-        case Mod:open(Db, IdxState) of
-            {ok, IdxSt} ->
-                couch_db:monitor(Db),
-                {ok, IdxSt};
-            Error ->
-                Error
-        end
-    end),
-    case Resp of
-        {ok, NewIdxState} ->
-            {ok, UPid} = couch_index_updater:start_link(self(), Mod),
-            {ok, CPid} = couch_index_compactor:start_link(self(), Mod),
-            Delay = config:get("query_server_config", "commit_freq", "5"),
-            MsDelay = 1000 * list_to_integer(Delay),
-            State = #st{
-                mod=Mod,
-                idx_state=NewIdxState,
-                updater=UPid,
-                compactor=CPid,
-                commit_delay=MsDelay
-            },
-            Args = [
-                Mod:get(db_name, IdxState),
-                Mod:get(idx_name, IdxState),
-                couch_index_util:hexsig(Mod:get(signature, IdxState))
-            ],
-            ?LOG_INFO("Opening index for db: ~s idx: ~s sig: ~p", Args),
-            proc_lib:init_ack({ok, self()}),
-            gen_server:enter_loop(?MODULE, [], State);
-        Other ->
-            proc_lib:init_ack(Other)
-    end.
-
-
-terminate(Reason, State) ->
-    #st{mod=Mod, idx_state=IdxState}=State,
-    Mod:close(IdxState),
-    send_all(State#st.waiters, Reason),
-    couch_util:shutdown_sync(State#st.updater),
-    couch_util:shutdown_sync(State#st.compactor),
-    Args = [
-        Mod:get(db_name, IdxState),
-        Mod:get(idx_name, IdxState),
-        couch_index_util:hexsig(Mod:get(signature, IdxState)),
-        Reason
-    ],
-    ?LOG_INFO("Closing index for db: ~s idx: ~s sig: ~p~nreason: ~p", Args),
-    ok.
-
-
-handle_call({get_state, ReqSeq}, From, State) ->
-    #st{
-        mod=Mod,
-        idx_state=IdxState,
-        waiters=Waiters
-    } = State,
-    IdxSeq = Mod:get(update_seq, IdxState),
-    case ReqSeq =< IdxSeq of
-        true ->
-            {reply, {ok, IdxState}, State};
-        _ -> % View update required
-            couch_index_updater:run(State#st.updater, IdxState),
-            Waiters2 = [{From, ReqSeq} | Waiters],
-            {noreply, State#st{waiters=Waiters2}, infinity}
-    end;
-handle_call(get_info, _From, State) ->
-    #st{mod=Mod} = State,
-    {ok, Info0} = Mod:get(info, State#st.idx_state),
-    IsUpdating = couch_index_updater:is_running(State#st.updater),
-    IsCompacting = couch_index_compactor:is_running(State#st.compactor),
-    Info = Info0 ++ [
-        {updater_running, IsUpdating},
-        {compact_running, IsCompacting},
-        {waiting_commit, State#st.committed == false},
-        {waiting_clients, length(State#st.waiters)}
-    ],
-    {reply, {ok, Info}, State};
-handle_call(reset, _From, State) ->
-    #st{
-        mod=Mod,
-        idx_state=IdxState
-    } = State,
-    {ok, NewIdxState} = Mod:reset(IdxState),
-    {reply, {ok, NewIdxState}, State#st{idx_state=NewIdxState}};
-handle_call(compact, _From, State) ->
-    Resp = couch_index_compactor:run(State#st.compactor, State#st.idx_state),
-    {reply, Resp, State};
-handle_call(get_compactor_pid, _From, State) ->
-    {reply, {ok, State#st.compactor}, State};
-handle_call({compacted, NewIdxState}, _From, State) ->
-    #st{
-        mod=Mod,
-        idx_state=OldIdxState,
-        updater=Updater,
-        commit_delay=Delay
-    } = State,
-    assert_signature_match(Mod, OldIdxState, NewIdxState),
-    NewSeq = Mod:get(update_seq, NewIdxState),
-    OldSeq = Mod:get(update_seq, OldIdxState),
-    % For indices that require swapping files, we have to make sure we're
-    % up to date with the current index. Otherwise indexes could roll back
-    % (perhaps considerably) to previous points in history.
-    case NewSeq >= OldSeq of
-        true ->
-            {ok, NewIdxState1} = Mod:swap_compacted(OldIdxState, NewIdxState),
-            % Restart the indexer if it's running.
-            case couch_index_updater:is_running(Updater) of
-                true -> ok = couch_index_updater:restart(Updater, NewIdxState1);
-                false -> ok
-            end,
-            case State#st.committed of
-                true -> erlang:send_after(Delay, self(), commit);
-                false -> ok
-            end,
-            {reply, ok, State#st{
-                idx_state=NewIdxState1,
-                committed=false
-            }};
-        _ ->
-            {reply, recompact, State}
-    end.
-
-
-handle_cast({config_change, NewDelay}, State) ->
-    MsDelay = 1000 * list_to_integer(NewDelay),
-    {noreply, State#st{commit_delay=MsDelay}};
-handle_cast({trigger_update, UpdateSeq}, State) ->
-    #st{
-        mod=Mod,
-        idx_state=IdxState
-    } = State,
-    case UpdateSeq =< Mod:get(update_seq, IdxState) of
-        true ->
-            {noreply, State};
-        false ->
-            couch_index_updater:run(State#st.updater, IdxState),
-            {noreply, State}
-    end;
-handle_cast({updated, NewIdxState}, State) ->
-    {noreply, NewState} = handle_cast({new_state, NewIdxState}, State),
-    case NewState#st.shutdown andalso (NewState#st.waiters =:= []) of
-        true ->
-            {stop, normal, NewState};
-        false ->
-            maybe_restart_updater(NewState),
-            {noreply, NewState}
-    end;
-handle_cast({new_state, NewIdxState}, State) ->
-    #st{
-        mod=Mod,
-        idx_state=OldIdxState,
-        commit_delay=Delay
-    } = State,
-    assert_signature_match(Mod, OldIdxState, NewIdxState),
-    CurrSeq = Mod:get(update_seq, NewIdxState),
-    Args = [
-        Mod:get(db_name, NewIdxState),
-        Mod:get(idx_name, NewIdxState),
-        CurrSeq
-    ],
-    ?LOG_DEBUG("Updated index for db: ~s idx: ~s seq: ~B", Args),
-    Rest = send_replies(State#st.waiters, CurrSeq, NewIdxState),
-    case State#st.committed of
-        true -> erlang:send_after(Delay, self(), commit);
-        false -> ok
-    end,
-    {noreply, State#st{
-        idx_state=NewIdxState,
-        waiters=Rest,
-        committed=false
-    }};
-handle_cast({update_error, Error}, State) ->
-    send_all(State#st.waiters, Error),
-    {noreply, State#st{waiters=[]}};
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(delete, State) ->
-    #st{mod=Mod, idx_state=IdxState} = State,
-    ok = Mod:delete(IdxState),
-    {stop, normal, State};
-handle_cast(ddoc_updated, State) ->
-    #st{mod = Mod, idx_state = IdxState, waiters = Waiters} = State,
-    DbName = Mod:get(db_name, IdxState),
-    DDocId = Mod:get(idx_name, IdxState),
-    Shutdown = couch_util:with_db(DbName, fun(Db) ->
-        case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-            {not_found, deleted} ->
-                true;
-            {ok, DDoc} ->
-                {ok, NewIdxState} = Mod:init(Db, DDoc),
-                Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
-        end
-    end),
-    case Shutdown of
-        true ->
-            case Waiters of
-                [] ->
-                    {stop, normal, State};
-                _ ->
-                    {noreply, State#st{shutdown = true}}
-            end;
-        false ->
-            {noreply, State#st{shutdown = false}}
-    end;
-handle_cast(_Mesg, State) ->
-    {stop, unhandled_cast, State}.
-
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(commit, #st{committed=true}=State) ->
-    {noreply, State};
-handle_info(commit, State) ->
-    #st{mod=Mod, idx_state=IdxState, commit_delay=Delay} = State,
-    DbName = Mod:get(db_name, IdxState),
-    GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
-    CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
-    case CommittedSeq >= Mod:get(update_seq, IdxState) of
-        true ->
-            % Commit the updates
-            ok = Mod:commit(IdxState),
-            {noreply, State#st{committed=true}};
-        _ ->
-            % We can't commit the header because the database seq that's
-            % fully committed to disk is still behind us. If we committed
-            % now and the database lost those changes, our view could be
-            % forever out of sync with the database. A crash before we
-            % commit these changes is no big deal, though; we would only
-            % lose the incremental changes made since the last commit.
-            erlang:send_after(Delay, self(), commit),
-            {noreply, State}
-    end;
-handle_info({'DOWN', _, _, _Pid, _}, #st{mod=Mod, idx_state=IdxState}=State) ->
-    Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
-    ?LOG_INFO("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
-    catch send_all(State#st.waiters, shutdown),
-    {stop, normal, State#st{waiters=[]}}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("query_server_config", "commit_freq", Val, _, _) ->
-    {ok, gen_server:cast(?MODULE, {config_update, Val})};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-maybe_restart_updater(#st{waiters=[]}) ->
-    ok;
-maybe_restart_updater(#st{mod=Mod, idx_state=IdxState}=State) ->
-    couch_util:with_db(Mod:get(db_name, IdxState), fun(Db) ->
-        UpdateSeq = couch_db:get_update_seq(Db),
-        CommittedSeq = couch_db:get_committed_update_seq(Db),
-        CanUpdate = UpdateSeq > CommittedSeq,
-        UOpts = Mod:get(update_options, IdxState),
-        case CanUpdate and lists:member(committed_only, UOpts) of
-            true -> couch_db:ensure_full_commit(Db);
-            false -> ok
-        end
-    end),
-    couch_index_updater:run(State#st.updater, IdxState).
-
-
-send_all(Waiters, Reply) ->
-    [gen_server:reply(From, Reply) || {From, _} <- Waiters].
-
-
-send_replies(Waiters, UpdateSeq, IdxState) ->
-    Pred = fun({_, S}) -> S =< UpdateSeq end,
-    {ToSend, Remaining} = lists:partition(Pred, Waiters),
-    [gen_server:reply(From, {ok, IdxState}) || {From, _} <- ToSend],
-    Remaining.
-
-assert_signature_match(Mod, OldIdxState, NewIdxState) ->
-    case {Mod:get(signature, OldIdxState), Mod:get(signature, NewIdxState)} of
-        {Sig, Sig} -> ok;
-        _ -> erlang:error(signature_mismatch)
-    end.
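
A hedged usage sketch of the public API above (all bindings here are
illustrative; MyIdxMod stands in for any module implementing the index
callbacks):

    %% Open an index, wait until it has indexed through RequestSeq, then
    %% start a compaction and monitor the compactor worker.
    {ok, Pid} = couch_index:start_link({MyIdxMod, IdxState}),
    {ok, _UpToDateIdxState} = couch_index:get_state(Pid, RequestSeq),
    {ok, MRef} = couch_index:compact(Pid, [monitor]),
    receive
        {'DOWN', MRef, process, _CompactorPid, _Reason} -> ok
    end.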

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index_api.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index_api.erl b/src/couch_index/src/couch_index_api.erl
deleted file mode 100644
index 9d3a67c..0000000
--- a/src/couch_index/src/couch_index_api.erl
+++ /dev/null
@@ -1,54 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_api).
-
-get(Field, State) ->
-    ok.
-
-init(Db, Ddoc) ->
-    ok.
-
-open(Db, State) ->
-    ok.
-
-close(State) ->
-    ok.
-
-delete(State) ->
-    ok.
-
-reset(State) ->
-    ok.
-
-
-start_update(State, PurgedState, NumChanges) ->
-    {ok, State}.
-
-purge(Db, PurgeSeq, PurgedIdRevs, State) ->
-    ok.
-
-process_doc(Doc, Seq, State) ->
-    ok.
-
-finish_update(State) ->
-    {ok, State}.
-
-commit(State) ->
-    ok.
-
-
-compact(Parent, State, Opts) ->
-    ok.
-
-swap_compacted(OldState, NewState) ->
-    ok.
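
Since the module above is a documentation stub rather than a formal
behaviour (no -callback attributes), here is a purely hypothetical shape of
a backend implementing two of the entry points; the module name, record,
and md5-based signature are invented for illustration:

    -module(my_index_backend).
    -export([get/2, init/2]).

    -include_lib("couch/include/couch_db.hrl").

    -record(ist, {db_name, idx_name, sig, update_seq = 0}).

    %% get/2 exposes named fields of the otherwise opaque index state.
    get(db_name, #ist{db_name = DbName}) -> DbName;
    get(idx_name, #ist{idx_name = IdxName}) -> IdxName;
    get(signature, #ist{sig = Sig}) -> Sig;
    get(update_seq, #ist{update_seq = Seq}) -> Seq.

    %% init/2 derives a fresh state (and signature) from the design doc.
    init(Db, #doc{id = DDocId} = DDoc) ->
        Sig = erlang:md5(term_to_binary(DDoc#doc.body)),
        {ok, #ist{db_name = couch_db:name(Db),
                  idx_name = DDocId,
                  sig = Sig}}.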

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index_compactor.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index_compactor.erl b/src/couch_index/src/couch_index_compactor.erl
deleted file mode 100644
index 10c3e14..0000000
--- a/src/couch_index/src/couch_index_compactor.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_compactor).
--behaviour(gen_server).
-
-
-%% API
--export([start_link/2, run/2, cancel/1, is_running/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
--record(st, {
-    idx,
-    mod,
-    pid
-}).
-
-
-start_link(Index, Module) ->
-    gen_server:start_link(?MODULE, {Index, Module}, []).
-
-
-run(Pid, IdxState) ->
-    gen_server:call(Pid, {compact, IdxState}).
-
-
-cancel(Pid) ->
-    gen_server:call(Pid, cancel).
-
-
-is_running(Pid) ->
-    gen_server:call(Pid, is_running).
-
-
-init({Index, Module}) ->
-    process_flag(trap_exit, true),
-    {ok, #st{idx=Index, mod=Module}}.
-
-
-terminate(_Reason, State) ->
-    couch_util:shutdown_sync(State#st.pid),
-    ok.
-
-
-handle_call({compact, _}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
-    {reply, {ok, Pid}, State};
-handle_call({compact, IdxState}, _From, #st{idx=Idx}=State) ->
-    Pid = spawn_link(fun() -> compact(Idx, State#st.mod, IdxState) end),
-    {reply, {ok, Pid}, State#st{pid=Pid}};
-handle_call(cancel, _From, #st{pid=undefined}=State) ->
-    {reply, ok, State};
-handle_call(cancel, _From, #st{pid=Pid}=State) ->
-    unlink(Pid),
-    exit(Pid, kill),
-    {reply, ok, State#st{pid=undefined}};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
-    {reply, true, State};
-handle_call(is_running, _From, State) ->
-    {reply, false, State}.
-
-
-handle_cast(_Mesg, State) ->
-    {stop, unknown_cast, State}.
-
-
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
-    {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', _Pid, normal}, State) ->
-    {noreply, State};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
-    {stop, normal, State};
-handle_info(_Mesg, State) ->
-    {stop, unknown_info, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-compact(Parent, Mod, IdxState) ->
-    compact(Parent, Mod, IdxState, []).
-
-compact(Idx, Mod, IdxState, Opts) ->
-    DbName = Mod:get(db_name, IdxState),
-    Args = [DbName, Mod:get(idx_name, IdxState)],
-    ?LOG_INFO("Compaction started for db: ~s idx: ~s", Args),
-    {ok, NewIdxState} = couch_util:with_db(DbName, fun(Db) ->
-        Mod:compact(Db, IdxState, Opts)
-    end),
-    ok = Mod:commit(NewIdxState),
-    case gen_server:call(Idx, {compacted, NewIdxState}) of
-        recompact ->
-            ?LOG_INFO("Compaction restarting for db: ~s idx: ~s", Args),
-            compact(Idx, Mod, NewIdxState, [recompact]);
-        _ ->
-            ?LOG_INFO("Compaction finished for db: ~s idx: ~s", Args),
-            ok
-    end.
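
One behavior worth noting above: run/2 is idempotent while a compaction is
in flight, since the {compact, _} clause that sees an existing pid returns
that pid instead of spawning a second worker. Illustrative check (bindings
assumed):

    %% Assuming the first compaction is still running, both calls return
    %% the same worker pid.
    {ok, P1} = couch_index_compactor:run(CompactorPid, IdxState),
    {ok, P2} = couch_index_compactor:run(CompactorPid, IdxState),
    true = (P1 =:= P2).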

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index_server.erl b/src/couch_index/src/couch_index_server.erl
deleted file mode 100644
index 3d8a797..0000000
--- a/src/couch_index/src/couch_index_server.erl
+++ /dev/null
@@ -1,266 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_server).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, validate/2, get_index/4, get_index/3, get_index/2]).
--export([update_notify/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(BY_SIG, couchdb_indexes_by_sig).
--define(BY_PID, couchdb_indexes_by_pid).
--define(BY_DB, couchdb_indexes_by_db).
-
-
--record(st, {root_dir}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-validate(DbName, DDoc) ->
-    LoadModFun = fun
-        ({ModNameList, "true"}) ->
-            try
-                [list_to_existing_atom(ModNameList)]
-            catch error:badarg ->
-                []
-            end;
-        ({_ModNameList, _Enabled}) ->
-            []
-    end,
-    ValidateFun = fun
-        (ModName, ok) ->
-            try
-                ModName:validate(DbName, DDoc)
-            catch Type:Reason ->
-                {Type, Reason}
-            end;
-        (_ModName, Error) ->
-            Error
-    end,
-    EnabledIndexers = lists:flatmap(LoadModFun, config:get("indexers")),
-    lists:foldl(ValidateFun, ok, EnabledIndexers).
-
-
-get_index(Module, <<"shards/", _/binary>>=DbName, DDoc) ->
-    {Pid, Ref} = spawn_monitor(fun() ->
-        exit(fabric:open_doc(mem3:dbname(DbName), DDoc, []))
-    end),
-    receive {'DOWN', Ref, process, Pid, {ok, Doc}} ->
-        get_index(Module, DbName, Doc, nil);
-    {'DOWN', Ref, process, Pid, Error} ->
-        Error
-    after 61000 ->
-        erlang:demonitor(Ref, [flush]),
-        {error, timeout}
-    end;
-
-get_index(Module, DbName, DDoc) ->
-    get_index(Module, DbName, DDoc, nil).
-
-
-get_index(Module, DbName, DDoc, Fun) when is_binary(DbName) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        get_index(Module, Db, DDoc, Fun)
-    end);
-get_index(Module, Db, DDoc, Fun) when is_binary(DDoc) ->
-    case couch_db:open_doc(Db, DDoc, [ejson_body]) of
-        {ok, Doc} -> get_index(Module, Db, Doc, Fun);
-        Error -> Error
-    end;
-get_index(Module, Db, DDoc, Fun) when is_function(Fun, 1) ->
-    {ok, InitState} = Module:init(Db, DDoc),
-    {ok, FunResp} = Fun(InitState),
-    {ok, Pid} = get_index(Module, InitState),
-    {ok, Pid, FunResp};
-get_index(Module, Db, DDoc, _Fun) ->
-    {ok, InitState} = Module:init(Db, DDoc),
-    get_index(Module, InitState).
-
-
-get_index(Module, IdxState) ->
-    DbName = Module:get(db_name, IdxState),
-    Sig = Module:get(signature, IdxState),
-    case ets:lookup(?BY_SIG, {DbName, Sig}) of
-        [{_, Pid}] when is_pid(Pid) ->
-            {ok, Pid};
-        _ ->
-            Args = {Module, IdxState, DbName, Sig},
-            gen_server:call(?MODULE, {get_index, Args}, infinity)
-    end.
-
-
-init([]) ->
-    process_flag(trap_exit, true),
-    ok = config:listen_for_changes(?MODULE, nil),
-    ets:new(?BY_SIG, [protected, set, named_table]),
-    ets:new(?BY_PID, [private, set, named_table]),
-    ets:new(?BY_DB, [protected, bag, named_table]),
-    couch_db_update_notifier:start_link(fun ?MODULE:update_notify/1),
-    RootDir = couch_index_util:root_dir(),
-    % Warn that 'view_index_dir' is deprecated when 'index_dir' is unset
-    case config:get("couchdb", "index_dir") of
-        undefined ->
-            Msg = "Deprecation warning: 'view_index_dir' is now 'index_dir'",
-            ?LOG_ERROR(Msg, []);
-        _ -> ok
-    end,
-    couch_file:init_delete_dir(RootDir),
-    {ok, #st{root_dir=RootDir}}.
-
-
-terminate(_Reason, _State) ->
-    Pids = [Pid || {Pid, _} <- ets:tab2list(?BY_PID)],
-    lists:map(fun couch_util:shutdown_sync/1, Pids),
-    ok.
-
-
-handle_call({get_index, {_Mod, _IdxState, DbName, Sig}=Args}, From, State) ->
-    case ets:lookup(?BY_SIG, {DbName, Sig}) of
-        [] ->
-            spawn_link(fun() -> new_index(Args) end),
-            ets:insert(?BY_SIG, {{DbName, Sig}, [From]}),
-            {noreply, State};
-        [{_, Waiters}] when is_list(Waiters) ->
-            ets:insert(?BY_SIG, {{DbName, Sig}, [From | Waiters]}),
-            {noreply, State};
-        [{_, Pid}] when is_pid(Pid) ->
-            {reply, {ok, Pid}, State}
-    end;
-handle_call({async_open, {DbName, DDocId, Sig}, {ok, Pid}}, _From, State) ->
-    [{_, Waiters}] = ets:lookup(?BY_SIG, {DbName, Sig}),
-    [gen_server:reply(From, {ok, Pid}) || From <- Waiters],
-    link(Pid),
-    add_to_ets(DbName, Sig, DDocId, Pid),
-    {reply, ok, State};
-handle_call({async_error, {DbName, _DDocId, Sig}, Error}, _From, State) ->
-    [{_, Waiters}] = ets:lookup(?BY_SIG, {DbName, Sig}),
-    [gen_server:reply(From, Error) || From <- Waiters],
-    ets:delete(?BY_SIG, {DbName, Sig}),
-    {reply, ok, State};
-handle_call({reset_indexes, DbName}, _From, State) ->
-    reset_indexes(DbName, State#st.root_dir),
-    {reply, ok, State}.
-
-
-handle_cast({reset_indexes, DbName}, State) ->
-    reset_indexes(DbName, State#st.root_dir),
-    {noreply, State}.
-
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, State#st.root_dir),
-    {noreply, State};
-handle_info({'EXIT', Pid, Reason}, Server) ->
-    case ets:lookup(?BY_PID, Pid) of
-        [{Pid, {DbName, Sig}}] ->
-            [{DbName, {DDocId, Sig}}] =
-                ets:match_object(?BY_DB, {DbName, {'$1', Sig}}),
-            rem_from_ets(DbName, Sig, DDocId, Pid);
-        [] when Reason /= normal ->
-            exit(Reason);
-        _Else ->
-            ok
-    end,
-    {noreply, Server};
-handle_info(Msg, State) ->
-    twig:log(warn, "~p did not expect ~p", [?MODULE, Msg]),
-    {noreply, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("couchdb", "index_dir", RootDir, _, RootDir) ->
-    {ok, RootDir};
-handle_config_change("couchdb", "view_index_dir", RootDir, _, RootDir) ->
-    {ok, RootDir};
-handle_config_change("couchdb", "index_dir", _, _, _) ->
-    exit(whereis(couch_index_server), config_change),
-    remove_handler;
-handle_config_change("couchdb", "view_index_dir", _, _, _) ->
-    exit(whereis(couch_index_server), config_change),
-    remove_handler;
-handle_config_change(_, _, _, _, RootDir) ->
-    {ok, RootDir}.
-
-new_index({Mod, IdxState, DbName, Sig}) ->
-    DDocId = Mod:get(idx_name, IdxState),
-    case couch_index:start_link({Mod, IdxState}) of
-        {ok, Pid} ->
-            ok = gen_server:call(
-                ?MODULE, {async_open, {DbName, DDocId, Sig}, {ok, Pid}}),
-            unlink(Pid);
-        Error ->
-            ok = gen_server:call(
-                ?MODULE, {async_error, {DbName, DDocId, Sig}, Error})
-    end.
-
-
-reset_indexes(DbName, Root) ->
-    % Shut down all the updaters and clear their files; the db has changed.
-    Fun = fun({_, {DDocId, Sig}}) ->
-        [{_, Pid}] = ets:lookup(?BY_SIG, {DbName, Sig}),
-        MRef = erlang:monitor(process, Pid),
-        gen_server:cast(Pid, delete),
-        receive {'DOWN', MRef, _, _, _} -> ok end,
-        rem_from_ets(DbName, Sig, DDocId, Pid)
-    end,
-    lists:foreach(Fun, ets:lookup(?BY_DB, DbName)),
-    Path = couch_index_util:index_dir("", DbName),
-    couch_file:nuke_dir(Root, Path).
-
-
-add_to_ets(DbName, Sig, DDocId, Pid) ->
-    ets:insert(?BY_SIG, {{DbName, Sig}, Pid}),
-    ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
-    ets:insert(?BY_DB, {DbName, {DDocId, Sig}}).
-
-
-rem_from_ets(DbName, Sig, DDocId, Pid) ->
-    ets:delete(?BY_SIG, {DbName, Sig}),
-    ets:delete(?BY_PID, Pid),
-    ets:delete_object(?BY_DB, {DbName, {DDocId, Sig}}).
-
-
-update_notify({deleted, DbName}) ->
-    gen_server:cast(?MODULE, {reset_indexes, DbName});
-update_notify({created, DbName}) ->
-    gen_server:cast(?MODULE, {reset_indexes, DbName});
-update_notify({ddoc_updated, {DbName, DDocId}}) ->
-    lists:foreach(
-        fun({_DbName, {_DDocId, Sig}}) ->
-            case ets:lookup(?BY_SIG, {DbName, Sig}) of
-                [{_, IndexPid}] ->
-                    (catch gen_server:cast(IndexPid, ddoc_updated));
-                [] ->
-                    ok
-            end
-        end,
-        ets:match_object(?BY_DB, {DbName, {DDocId, '$1'}}));
-update_notify(_) ->
-    ok.
-
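
The three ets tables above form one registry: ?BY_SIG is the primary
{DbName, Sig} -> Pid map, ?BY_PID its inverse (used for 'EXIT' cleanup),
and ?BY_DB a bag keyed by database name for whole-db sweeps. get_index/2
pairs the protected ?BY_SIG table with a gen_server call in a
double-checked lookup; a generic sketch of that pattern, with hypothetical
table and server names:

    %% Fast path reads the ets cache directly; a miss serializes through
    %% the server, which re-checks the table before starting anything.
    lookup_or_start(Key, Args) ->
        case ets:lookup(my_cache, Key) of
            [{_, Pid}] when is_pid(Pid) ->
                {ok, Pid};
            _ ->
                gen_server:call(my_server, {start, Key, Args}, infinity)
        end.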

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index_updater.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
deleted file mode 100644
index ab68dc5..0000000
--- a/src/couch_index/src/couch_index_updater.erl
+++ /dev/null
@@ -1,211 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_updater).
--behaviour(gen_server).
-
-
-%% API
--export([start_link/2, run/2, is_running/1, update/2, restart/2]).
-
-%% for upgrades
--export([update/3]).
-
-%% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(st, {
-    idx,
-    mod,
-    pid=nil
-}).
-
-
-start_link(Index, Module) ->
-    gen_server:start_link(?MODULE, {Index, Module}, []).
-
-
-run(Pid, IdxState) ->
-    gen_server:call(Pid, {update, IdxState}).
-
-
-is_running(Pid) ->
-    gen_server:call(Pid, is_running).
-
-
-update(Mod, State) ->
-    update(nil, Mod, State).
-
-
-restart(Pid, IdxState) ->
-    gen_server:call(Pid, {restart, IdxState}).
-
-
-init({Index, Module}) ->
-    process_flag(trap_exit, true),
-    {ok, #st{idx=Index, mod=Module}}.
-
-
-terminate(_Reason, State) ->
-    couch_util:shutdown_sync(State#st.pid),
-    ok.
-
-
-handle_call({update, _IdxState}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
-    {reply, ok, State};
-handle_call({update, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
-    Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
-    ?LOG_INFO("Starting index update for db: ~s idx: ~s", Args),
-    Pid = spawn_link(?MODULE, update, [Idx, Mod, IdxState]),
-    {reply, ok, State#st{pid=Pid}};
-handle_call({restart, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
-    Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
-    ?LOG_INFO("Restarting index update for db: ~s idx: ~s", Args),
-    case is_pid(State#st.pid) of
-        true -> couch_util:shutdown_sync(State#st.pid);
-        _ -> ok
-    end,
-    Pid = spawn_link(?MODULE, update, [Idx, State#st.mod, IdxState]),
-    {reply, ok, State#st{pid=Pid}};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
-    {reply, true, State};
-handle_call(is_running, _From, State) ->
-    {reply, false, State}.
-
-
-handle_cast(_Mesg, State) ->
-    {stop, unknown_cast, State}.
-
-
-handle_info({'EXIT', _, {updated, Pid, IdxState}}, #st{pid=Pid}=State) ->
-    Mod = State#st.mod,
-    Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
-    ?LOG_INFO("Index update finished for db: ~s idx: ~s", Args),
-    ok = gen_server:cast(State#st.idx, {updated, IdxState}),
-    {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', _, {reset, Pid}}, #st{idx=Idx, pid=Pid}=State) ->
-    {ok, NewIdxState} = gen_server:call(State#st.idx, reset),
-    Pid2 = spawn_link(?MODULE, update, [Idx, State#st.mod, NewIdxState]),
-    {noreply, State#st{pid=Pid2}};
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
-    {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, {{nocatch, Error}, _Trace}}, State) ->
-    handle_info({'EXIT', Pid, Error}, State);
-handle_info({'EXIT', Pid, Error}, #st{pid=Pid}=State) ->
-    ok = gen_server:cast(State#st.idx, {update_error, Error}),
-    {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
-    {stop, normal, State};
-handle_info({'EXIT', _Pid, normal}, State) ->
-    {noreply, State};
-handle_info(_Mesg, State) ->
-    {stop, unknown_info, State}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-update(Idx, Mod, IdxState) ->
-    DbName = Mod:get(db_name, IdxState),
-    CurrSeq = Mod:get(update_seq, IdxState),
-    UpdateOpts = Mod:get(update_options, IdxState),
-    CommittedOnly = lists:member(committed_only, UpdateOpts),
-    IncludeDesign = lists:member(include_design, UpdateOpts),
-    DocOpts = case lists:member(local_seq, UpdateOpts) of
-        true -> [conflicts, deleted_conflicts, local_seq];
-        _ -> [conflicts, deleted_conflicts]
-    end,
-
-    couch_util:with_db(DbName, fun(Db) ->
-        DbUpdateSeq = couch_db:get_update_seq(Db),
-        DbCommittedSeq = couch_db:get_committed_update_seq(Db),
-
-        PurgedIdxState = case purge_index(Db, Mod, IdxState) of
-            {ok, IdxState0} -> IdxState0;
-            reset -> exit({reset, self()})
-        end,
-
-        NumChanges = couch_db:count_changes_since(Db, CurrSeq),
-
-        GetSeq = fun
-            (#full_doc_info{update_seq=Seq}) -> Seq;
-            (#doc_info{high_seq=Seq}) -> Seq
-        end,
-
-        GetInfo = fun
-            (#full_doc_info{id=Id, update_seq=Seq, deleted=Del}=FDI) ->
-                {Id, Seq, Del, couch_doc:to_doc_info(FDI)};
-            (#doc_info{id=Id, high_seq=Seq, revs=[RI|_]}=DI) ->
-                {Id, Seq, RI#rev_info.deleted, DI}
-        end,
-
-        LoadDoc = fun(DI) ->
-            {DocId, Seq, Deleted, DocInfo} = GetInfo(DI),
-
-            case {IncludeDesign, DocId} of
-                {false, <<"_design/", _/binary>>} ->
-                    {nil, Seq};
-                _ when Deleted ->
-                    {#doc{id=DocId, deleted=true}, Seq};
-                _ ->
-                    {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
-                    {Doc, Seq}
-            end
-        end,
-
-        Proc = fun(DocInfo, _, {IdxStateAcc, _}) ->
-            case CommittedOnly and (GetSeq(DocInfo) > DbCommittedSeq) of
-                true ->
-                    {stop, {IdxStateAcc, false}};
-                false ->
-                    {Doc, Seq} = LoadDoc(DocInfo),
-                    {ok, NewSt} = Mod:process_doc(Doc, Seq, IdxStateAcc),
-                    garbage_collect(),
-                    {ok, {NewSt, true}}
-            end
-        end,
-
-        {ok, InitIdxState} = Mod:start_update(Idx, PurgedIdxState, NumChanges),
-        Acc0 = {InitIdxState, true},
-        {ok, _, Acc} = couch_db:enum_docs_since(Db, CurrSeq, Proc, Acc0, []),
-        {ProcIdxSt, SendLast} = Acc,
-
-        % If we didn't bail due to hitting the last committed seq we need
-        % to send our last update_seq through.
-        {ok, LastIdxSt} = case SendLast of
-            true ->
-                Mod:process_doc(nil, DbUpdateSeq, ProcIdxSt);
-            _ ->
-                {ok, ProcIdxSt}
-        end,
-
-        {ok, FinalIdxState} = Mod:finish_update(LastIdxSt),
-        exit({updated, self(), FinalIdxState})
-    end).
-
-
-purge_index(Db, Mod, IdxState) ->
-    DbPurgeSeq = couch_db:get_purge_seq(Db),
-    IdxPurgeSeq = Mod:get(purge_seq, IdxState),
-    if
-        DbPurgeSeq == IdxPurgeSeq ->
-            {ok, IdxState};
-        DbPurgeSeq == IdxPurgeSeq + 1 ->
-            {ok, PurgedIdRevs} = couch_db:get_last_purged(Db),
-            Mod:purge(Db, DbPurgeSeq, PurgedIdRevs, IdxState);
-        true ->
-            reset
-    end.
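
The update/3 worker above never returns normally: it reports its result
through its exit reason ({updated, self(), FinalIdxState}, or {reset,
self()} on a purge mismatch), and the trapping owner converts the reason
back into ordinary messages in handle_info/2. A stripped-down sketch of
that exit-tuple protocol, with do_update/0 standing in for the real work:

    %% The parent traps exits and pattern matches the worker's exit reason.
    parent() ->
        process_flag(trap_exit, true),
        Pid = spawn_link(fun worker/0),
        receive
            {'EXIT', Pid, {updated, Pid, Result}} -> {ok, Result};
            {'EXIT', Pid, Reason} -> {error, Reason}
        end.

    worker() ->
        Result = do_update(),  %% hypothetical work function
        exit({updated, self(), Result}).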

http://git-wip-us.apache.org/repos/asf/couchdb/blob/de4ff66d/src/couch_index/src/couch_index_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl
deleted file mode 100644
index cf1ff75..0000000
--- a/src/couch_index/src/couch_index_util.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_index_util).
-
--export([root_dir/0, index_dir/2, index_file/3]).
--export([load_doc/3, sort_lib/1, hexsig/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-
-root_dir() ->
-    case config:get("couchdb", "index_dir") of
-        undefined -> config:get("couchdb", "view_index_dir");
-        Value -> Value
-    end.
-
-
-index_dir(Module, DbName) when is_binary(DbName) ->
-    DbDir = "." ++ binary_to_list(DbName) ++ "_design",
-    filename:join([root_dir(), DbDir, Module]);
-index_dir(Module, #db{}=Db) ->
-    index_dir(Module, couch_db:name(Db)).
-
-
-index_file(Module, DbName, FileName) ->
-    filename:join(index_dir(Module, DbName), FileName).
-
-
-load_doc(Db, #doc_info{}=DI, Opts) ->
-    Deleted = lists:member(deleted, Opts),
-    case (catch couch_db:open_doc(Db, DI, Opts)) of
-        {ok, #doc{deleted=false}=Doc} -> Doc;
-        {ok, #doc{deleted=true}=Doc} when Deleted -> Doc;
-        _Else -> null
-    end;
-load_doc(Db, {DocId, Rev}, Opts) ->
-    case (catch load_doc(Db, DocId, Rev, Opts)) of
-        #doc{deleted=false} = Doc -> Doc;
-        _ -> null
-    end.
-
-
-load_doc(Db, DocId, Rev, Options) ->
-    case Rev of
-        nil -> % open most recent rev
-            case (catch couch_db:open_doc(Db, DocId, Options)) of
-                {ok, Doc} -> Doc;
-                _Error -> null
-            end;
-        _ -> % open a specific rev (deletions come back as stubs)
-            case (catch couch_db:open_doc_revs(Db, DocId, [Rev], Options)) of
-                {ok, [{ok, Doc}]} -> Doc;
-                {ok, [{{not_found, missing}, Rev}]} -> null;
-                {ok, [_Else]} -> null
-            end
-    end.
-
-
-sort_lib({Lib}) ->
-    sort_lib(Lib, []).
-sort_lib([], LAcc) ->
-    lists:keysort(1, LAcc);
-sort_lib([{LName, {LObj}}|Rest], LAcc) ->
-    LSorted = sort_lib(LObj, []), % descend into nested object
-    sort_lib(Rest, [{LName, LSorted}|LAcc]);
-sort_lib([{LName, LCode}|Rest], LAcc) ->
-    sort_lib(Rest, [{LName, LCode}|LAcc]).
-
-
-hexsig(Sig) ->
-    couch_util:to_hex(binary_to_list(Sig)).
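
For concreteness: sort_lib/1 above canonicalizes a nested {Props} ejson
object by key-sorting every level, presumably so that downstream signature
hashing does not depend on key order. An illustrative shell session
(values invented):

    1> couch_index_util:sort_lib({[{<<"b">>, <<"f2">>},
                                   {<<"a">>, {[{<<"y">>, <<"f1">>},
                                               {<<"x">>, <<"f0">>}]}}]}).
    [{<<"a">>,[{<<"x">>,<<"f0">>},{<<"y">>,<<"f1">>}]},{<<"b">>,<<"f2">>}]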


[41/49] Remove src/snappy

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy.cc
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy.cc b/src/snappy/c_src/google-snappy/snappy.cc
deleted file mode 100644
index 4d4eb42..0000000
--- a/src/snappy/c_src/google-snappy/snappy.cc
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Copyright 2005 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "snappy.h"
-#include "snappy-internal.h"
-#include "snappy-sinksource.h"
-
-#include <stdio.h>
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-
-namespace snappy {
-
-// Any hash function will produce a valid compressed bitstream, but a good
-// hash function reduces the number of collisions and thus yields better
-// compression for compressible input, and more speed for incompressible
-// input. Of course, it doesn't hurt if the hash function is reasonably fast
-// either, as it gets called a lot.
-static inline uint32 HashBytes(uint32 bytes, int shift) {
-  uint32 kMul = 0x1e35a7bd;
-  return (bytes * kMul) >> shift;
-}
-static inline uint32 Hash(const char* p, int shift) {
-  return HashBytes(UNALIGNED_LOAD32(p), shift);
-}
-
-size_t MaxCompressedLength(size_t source_len) {
-  // Compressed data can be defined as:
-  //    compressed := item* literal*
-  //    item       := literal* copy
-  //
-  // The trailing literal sequence has a space blowup of at most 62/60
-  // since a literal of length 60 needs one tag byte + one extra byte
-  // for length information.
-  //
-  // Item blowup is trickier to measure.  Suppose the "copy" op copies
-  // 4 bytes of data.  Because of a special check in the encoding code,
-  // we produce a 4-byte copy only if the offset is < 65536.  Therefore
-  // the copy op takes 3 bytes to encode, and this type of item leads
-  // to at most the 62/60 blowup for representing literals.
-  //
-  // Suppose the "copy" op copies 5 bytes of data.  If the offset is big
-  // enough, it will take 5 bytes to encode the copy op.  Therefore the
-  // worst case here is a one-byte literal followed by a five-byte copy.
-  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
-  //
-  // This last factor dominates the blowup, so the final estimate is:
-  return 32 + source_len + source_len/6;
-}
-
-enum {
-  LITERAL = 0,
-  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
-  COPY_2_BYTE_OFFSET = 2,
-  COPY_4_BYTE_OFFSET = 3
-};
-
-// Copy "len" bytes from "src" to "op", one byte at a time.  Used for
-// handling COPY operations where the input and output regions may
-// overlap.  For example, suppose:
-//    src    == "ab"
-//    op     == src + 2
-//    len    == 20
-// After IncrementalCopy(src, op, len), the result will have
-// eleven copies of "ab"
-//    ababababababababababab
-// Note that this does not match the semantics of either memcpy()
-// or memmove().
-static inline void IncrementalCopy(const char* src, char* op, int len) {
-  DCHECK_GT(len, 0);
-  do {
-    *op++ = *src++;
-  } while (--len > 0);
-}
-
-// Equivalent to IncrementalCopy except that it can write up to ten extra
-// bytes after the end of the copy, and that it is faster.
-//
-// The main part of this loop is a simple copy of eight bytes at a time until
-// we've copied (at least) the requested amount of bytes.  However, if op and
-// src are less than eight bytes apart (indicating a repeating pattern of
-// length < 8), we first need to expand the pattern in order to get the correct
-// results. For instance, if the buffer looks like this, with the eight-byte
-// <src> and <op> patterns marked as intervals:
-//
-//    abxxxxxxxxxxxx
-//    [------]           src
-//      [------]         op
-//
-// a single eight-byte copy from <src> to <op> will repeat the pattern once,
-// after which we can move <op> two bytes without moving <src>:
-//
-//    ababxxxxxxxxxx
-//    [------]           src
-//        [------]       op
-//
-// and repeat the exercise until the two no longer overlap.
-//
-// This allows us to do very well in the special case of one single byte
-// repeated many times, without taking a big hit for more general cases.
-//
-// The worst case of extra writing past the end of the match occurs when
-// op - src == 1 and len == 1; the last copy will read from byte positions
-// [0..7] and write to [4..11], whereas it was only supposed to write to
-// position 1. Thus, ten excess bytes.
-
-namespace {
-
-const int kMaxIncrementCopyOverflow = 10;
-
-}  // namespace
-
-static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
-  while (op - src < 8) {
-    UnalignedCopy64(src, op);
-    len -= op - src;
-    op += op - src;
-  }
-  while (len > 0) {
-    UnalignedCopy64(src, op);
-    src += 8;
-    op += 8;
-    len -= 8;
-  }
-}
-
-static inline char* EmitLiteral(char* op,
-                                const char* literal,
-                                int len,
-                                bool allow_fast_path) {
-  int n = len - 1;      // Zero-length literals are disallowed
-  if (n < 60) {
-    // Fits in tag byte
-    *op++ = LITERAL | (n << 2);
-
-    // The vast majority of copies are below 16 bytes, for which a
-    // call to memcpy is overkill. This fast path can sometimes
-    // copy up to 15 bytes too much, but that is okay in the
-    // main loop, since we have a bit to go on for both sides:
-    //
-    //   - The input will always have kInputMarginBytes = 15 extra
-    //     available bytes, as long as we're in the main loop, and
-    //     if not, allow_fast_path = false.
-    //   - The output will always have 32 spare bytes (see
-    //     MaxCompressedLength).
-    if (allow_fast_path && len <= 16) {
-      UnalignedCopy64(literal, op);
-      UnalignedCopy64(literal + 8, op + 8);
-      return op + len;
-    }
-  } else {
-    // Encode in upcoming bytes
-    char* base = op;
-    int count = 0;
-    op++;
-    while (n > 0) {
-      *op++ = n & 0xff;
-      n >>= 8;
-      count++;
-    }
-    assert(count >= 1);
-    assert(count <= 4);
-    *base = LITERAL | ((59+count) << 2);
-  }
-  memcpy(op, literal, len);
-  return op + len;
-}
-
-static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
-  DCHECK_LE(len, 64);
-  DCHECK_GE(len, 4);
-  DCHECK_LT(offset, 65536);
-
-  if ((len < 12) && (offset < 2048)) {
-    size_t len_minus_4 = len - 4;
-    assert(len_minus_4 < 8);            // Must fit in 3 bits
-    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
-    *op++ = offset & 0xff;
-  } else {
-    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
-    LittleEndian::Store16(op, offset);
-    op += 2;
-  }
-  return op;
-}
-
-static inline char* EmitCopy(char* op, size_t offset, int len) {
-  // Emit 64-byte copies, but make sure to keep at least four bytes reserved
-  while (len >= 68) {
-    op = EmitCopyLessThan64(op, offset, 64);
-    len -= 64;
-  }
-
-  // Emit an extra 60-byte copy if we have too much data to fit in one copy
-  if (len > 64) {
-    op = EmitCopyLessThan64(op, offset, 60);
-    len -= 60;
-  }
-
-  // Emit remainder
-  op = EmitCopyLessThan64(op, offset, len);
-  return op;
-}
-
-
-bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
-  uint32 v = 0;
-  const char* limit = start + n;
-  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
-    *result = v;
-    return true;
-  } else {
-    return false;
-  }
-}
-
-namespace internal {
-uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
-  // Use smaller hash table when input.size() is smaller, since we
-  // fill the table, incurring O(hash table size) overhead for
-  // compression, and if the input is short, we won't need that
-  // many hash table entries anyway.
-  assert(kMaxHashTableSize >= 256);
-  size_t htsize = 256;
-  while (htsize < kMaxHashTableSize && htsize < input_size) {
-    htsize <<= 1;
-  }
-  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
-  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
-
-  uint16* table;
-  if (htsize <= ARRAYSIZE(small_table_)) {
-    table = small_table_;
-  } else {
-    if (large_table_ == NULL) {
-      large_table_ = new uint16[kMaxHashTableSize];
-    }
-    table = large_table_;
-  }
-
-  *table_size = htsize;
-  memset(table, 0, htsize * sizeof(*table));
-  return table;
-}
-}  // end namespace internal
-
-// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
-// equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
-// empirically found that overlapping loads such as
-//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
-// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
-//
-// We have different versions for 64- and 32-bit; ideally we would avoid the
-// two functions and just inline the UNALIGNED_LOAD64 call into
-// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
-// enough to avoid loading the value multiple times then. For 64-bit, the load
-// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
-// done at GetUint32AtOffset() time.
-
-#ifdef ARCH_K8
-
-typedef uint64 EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
-  return UNALIGNED_LOAD64(ptr);
-}
-
-static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
-  DCHECK_GE(offset, 0);
-  DCHECK_LE(offset, 4);
-  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
-}
-
-#else
-
-typedef const char* EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
-  return ptr;
-}
-
-static inline uint32 GetUint32AtOffset(const char* v, int offset) {
-  DCHECK_GE(offset, 0);
-  DCHECK_LE(offset, 4);
-  return UNALIGNED_LOAD32(v + offset);
-}
-
-#endif
-
-// Flat array compression that does not emit the "uncompressed length"
-// prefix. Compresses "input" string to the "*op" buffer.
-//
-// REQUIRES: "input" is at most "kBlockSize" bytes long.
-// REQUIRES: "op" points to an array of memory that is at least
-// "MaxCompressedLength(input.size())" in size.
-// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-// REQUIRES: "table_size" is a power of two
-//
-// Returns an "end" pointer into "op" buffer.
-// "end - op" is the compressed size of "input".
-namespace internal {
-char* CompressFragment(const char* input,
-                       size_t input_size,
-                       char* op,
-                       uint16* table,
-                       const int table_size) {
-  // "ip" is the input pointer, and "op" is the output pointer.
-  const char* ip = input;
-  CHECK_LE(input_size, kBlockSize);
-  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
-  const int shift = 32 - Bits::Log2Floor(table_size);
-  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
-  const char* ip_end = input + input_size;
-  const char* base_ip = ip;
-  // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
-  // [next_emit, ip_end) after the main loop.
-  const char* next_emit = ip;
-
-  const size_t kInputMarginBytes = 15;
-  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
-    const char* ip_limit = input + input_size - kInputMarginBytes;
-
-    for (uint32 next_hash = Hash(++ip, shift); ; ) {
-      DCHECK_LT(next_emit, ip);
-      // The body of this loop calls EmitLiteral once and then EmitCopy one or
-      // more times.  (The exception is that when we're close to exhausting
-      // the input we goto emit_remainder.)
-      //
-      // In the first iteration of this loop we're just starting, so
-      // there's nothing to copy, so calling EmitLiteral once is
-      // necessary.  And we only start a new iteration when the
-      // current iteration has determined that a call to EmitLiteral will
-      // precede the next call to EmitCopy (if any).
-      //
-      // Step 1: Scan forward in the input looking for a 4-byte-long match.
-      // If we get close to exhausting the input then goto emit_remainder.
-      //
-      // Heuristic match skipping: If 32 bytes are scanned with no matches
-      // found, start looking only at every other byte. If 32 more bytes are
-      // scanned, look at every third byte, etc.. When a match is found,
-      // immediately go back to looking at every byte. This is a small loss
-      // (~5% performance, ~0.1% density) for compressible data due to more
-      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
-      // win since the compressor quickly "realizes" the data is incompressible
-      // and doesn't bother looking for matches everywhere.
-      //
-      // The "skip" variable keeps track of how many bytes there are since the
-      // last match; dividing it by 32 (i.e. right-shifting by five) gives the
-      // number of bytes to move ahead for each iteration.
-      uint32 skip = 32;
-
-      const char* next_ip = ip;
-      const char* candidate;
-      do {
-        ip = next_ip;
-        uint32 hash = next_hash;
-        DCHECK_EQ(hash, Hash(ip, shift));
-        uint32 bytes_between_hash_lookups = skip++ >> 5;
-        next_ip = ip + bytes_between_hash_lookups;
-        if (PREDICT_FALSE(next_ip > ip_limit)) {
-          goto emit_remainder;
-        }
-        next_hash = Hash(next_ip, shift);
-        candidate = base_ip + table[hash];
-        DCHECK_GE(candidate, base_ip);
-        DCHECK_LT(candidate, ip);
-
-        table[hash] = ip - base_ip;
-      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
-                            UNALIGNED_LOAD32(candidate)));
-
-      // Step 2: A 4-byte match has been found.  We'll later see if more
-      // than 4 bytes match.  But, prior to the match, input
-      // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
-      DCHECK_LE(next_emit + 16, ip_end);
-      op = EmitLiteral(op, next_emit, ip - next_emit, true);
-
-      // Step 3: Call EmitCopy, and then see if another EmitCopy could
-      // be our next move.  Repeat until we find no match for the
-      // input immediately after what was consumed by the last EmitCopy call.
-      //
-      // If we exit this loop normally then we need to call EmitLiteral next,
-      // though we don't yet know how big the literal will be.  We handle that
-      // by proceeding to the next iteration of the main loop.  We also can exit
-      // this loop via goto if we get close to exhausting the input.
-      EightBytesReference input_bytes;
-      uint32 candidate_bytes = 0;
-
-      do {
-        // We have a 4-byte match at ip, and no need to emit any
-        // "literal bytes" prior to ip.
-        const char* base = ip;
-        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
-        ip += matched;
-        size_t offset = base - candidate;
-        DCHECK_EQ(0, memcmp(base, candidate, matched));
-        op = EmitCopy(op, offset, matched);
-        // We could immediately start working at ip now, but to improve
-        // compression we first update table[Hash(ip - 1, ...)].
-        const char* insert_tail = ip - 1;
-        next_emit = ip;
-        if (PREDICT_FALSE(ip >= ip_limit)) {
-          goto emit_remainder;
-        }
-        input_bytes = GetEightBytesAt(insert_tail);
-        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
-        table[prev_hash] = ip - base_ip - 1;
-        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
-        candidate = base_ip + table[cur_hash];
-        candidate_bytes = UNALIGNED_LOAD32(candidate);
-        table[cur_hash] = ip - base_ip;
-      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
-      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
-      ++ip;
-    }
-  }
-
- emit_remainder:
-  // Emit the remaining bytes as a literal
-  if (next_emit < ip_end) {
-    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
-  }
-
-  return op;
-}
-}  // end namespace internal
-
-// Signature of output types needed by decompression code.
-// The decompression code is templatized on a type that obeys this
-// signature so that we do not pay virtual function call overhead in
-// the middle of a tight decompression loop.
-//
-// class DecompressionWriter {
-//  public:
-//   // Called before decompression
-//   void SetExpectedLength(size_t length);
-//
-//   // Called after decompression
-//   bool CheckLength() const;
-//
-//   // Called repeatedly during decompression
-//   bool Append(const char* ip, size_t length);
-//   bool AppendFromSelf(uint32 offset, size_t length);
-//
-//   // The difference between TryFastAppend and Append is that TryFastAppend
-//   // is allowed to read up to <available> bytes from the input buffer,
-//   // whereas Append is allowed to read <length>.
-//   //
-//   // Also, TryFastAppend is allowed to return false, declining the append,
-//   // without it being a fatal error -- just "return false" would be
-//   // a perfectly legal implementation of TryFastAppend. The intention
-//   // is for TryFastAppend to allow a fast path in the common case of
-//   // a small append.
-//   //
-//   // NOTE(user): TryFastAppend must always decline (return false)
-//   // if <length> is 61 or more, as in this case the literal length is not
-//   // decoded fully. In practice, this should not be a big problem,
-//   // as it is unlikely that one would implement a fast path accepting
-//   // this much data.
-//   bool TryFastAppend(const char* ip, size_t available, size_t length);
-// };
-
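
[Editor's note] A minimal sketch of a type satisfying the Writer signature
above, collecting output into a std::string; the name StringWriter and the
whole class are illustrative, not part of the original source (which provides
SnappyArrayWriter and SnappyDecompressionValidator below). Assumes <string>.

    class StringWriter {
     public:
      explicit StringWriter(std::string* s) : s_(s), expected_(0) {}
      void SetExpectedLength(size_t length) {
        expected_ = length;
        s_->reserve(length);
      }
      bool CheckLength() const { return s_->size() == expected_; }
      bool Append(const char* ip, size_t length) {
        s_->append(ip, length);
        return s_->size() <= expected_;
      }
      bool AppendFromSelf(uint32 offset, size_t length) {
        if (offset == 0 || offset > s_->size()) return false;
        // Byte-by-byte copy handles the overlapping case (offset < length).
        for (size_t i = 0; i < length; i++) {
          s_->push_back((*s_)[s_->size() - offset]);
        }
        return s_->size() <= expected_;
      }
      // Always decline the fast path; plain Append() is used instead.
      bool TryFastAppend(const char* ip, size_t available, size_t length) {
        return false;
      }
     private:
      std::string* s_;
      size_t expected_;
    };
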
-// -----------------------------------------------------------------------
-// Lookup table for decompression code.  Generated by ComputeTable() below.
-// -----------------------------------------------------------------------
-
-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
-static const uint32 wordmask[] = {
-  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
-// Data stored per entry in lookup table:
-//      Range   Bits-used       Description
-//      ------------------------------------
-//      1..64   0..7            Literal/copy length encoded in opcode byte
-//      0..7    8..10           Copy offset encoded in opcode byte / 256
-//      0..4    11..13          Extra bytes after opcode
-//
-// We use eight bits for the length even though 7 would have sufficed,
-// for efficiency reasons:
-//      (1) Extracting a byte is faster than a bit-field
-//      (2) It properly aligns copy offset so we do not need a <<8
-static const uint16 char_table[256] = {
-  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
-  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
-  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
-  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
-  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
-  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
-  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
-  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
-  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
-  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
-  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
-  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
-  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
-  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
-  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
-  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
-  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
-  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
-  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
-  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
-  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
-  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
-  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
-  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
-  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
-  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
-  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
-  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
-  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
-  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
-  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
-  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
-};
-
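[Editor's note] A worked decode of one entry, using the field layout
documented above; the arithmetic can be checked by hand:

    // Tag byte 0x01 has low bits 01 (COPY_1_BYTE_OFFSET) and selects
    // char_table[0x01] == 0x0804.  Splitting the fields:
    //   length      = 0x0804 & 0xff       == 4   (copy length)
    //   copy_offset = (0x0804 >> 8) & 0x7 == 0   (offset / 256)
    //   extra       = 0x0804 >> 11        == 1   (one trailing offset byte)
    // i.e. a 4-byte copy whose offset is the single trailer byte plus 0*256,
    // exactly MakeEntry(1, 4, 0) from the debug code below.
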
-// In debug mode, allow optional computation of the table at startup.
-// Also, check that the decompression table is correct.
-#ifndef NDEBUG
-DEFINE_bool(snappy_dump_decompression_table, false,
-            "If true, we print the decompression table at startup.");
-
-static uint16 MakeEntry(unsigned int extra,
-                        unsigned int len,
-                        unsigned int copy_offset) {
-  // Check that all of the fields fit within the allocated space
-  DCHECK_EQ(extra,       extra & 0x7);          // At most 3 bits
-  DCHECK_EQ(copy_offset, copy_offset & 0x7);    // At most 3 bits
-  DCHECK_EQ(len,         len & 0x7f);           // At most 7 bits
-  return len | (copy_offset << 8) | (extra << 11);
-}
-
-static void ComputeTable() {
-  uint16 dst[256];
-
-  // Place invalid entries in all places to detect missing initialization
-  int assigned = 0;
-  for (int i = 0; i < 256; i++) {
-    dst[i] = 0xffff;
-  }
-
-  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
-  for (unsigned int len = 1; len <= 60; len++) {
-    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
-    assigned++;
-  }
-
-  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
-  // encode the number of bytes of length info that follow the opcode.
-  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
-    // We set the length field in the lookup table to 1 because extra
-    // bytes encode len-1.
-    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
-    assigned++;
-  }
-
-  // COPY_1_BYTE_OFFSET.
-  //
-  // The tag byte in the compressed data stores len-4 in 3 bits, and
-  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
-  //
-  // This format is used for length in range [4..11] and offset in
-  // range [0..2047]
-  for (unsigned int len = 4; len < 12; len++) {
-    for (unsigned int offset = 0; offset < 2048; offset += 256) {
-      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
-        MakeEntry(1, len, offset>>8);
-      assigned++;
-    }
-  }
-
-  // COPY_2_BYTE_OFFSET.
-  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
-    assigned++;
-  }
-
-  // COPY_4_BYTE_OFFSET.
-// Tag contains len-1 in top 6 bits, and offset in next four bytes.
-  for (unsigned int len = 1; len <= 64; len++) {
-    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
-    assigned++;
-  }
-
-  // Check that each entry was initialized exactly once.
-  CHECK_EQ(assigned, 256);
-  for (int i = 0; i < 256; i++) {
-    CHECK_NE(dst[i], 0xffff);
-  }
-
-  if (FLAGS_snappy_dump_decompression_table) {
-    printf("static const uint16 char_table[256] = {\n  ");
-    for (int i = 0; i < 256; i++) {
-      printf("0x%04x%s",
-             dst[i],
-             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
-    }
-    printf("};\n");
-  }
-
-  // Check that the computed table matches the recorded table
-  for (int i = 0; i < 256; i++) {
-    CHECK_EQ(dst[i], char_table[i]);
-  }
-}
-#endif /* !NDEBUG */
-
-// Helper class for decompression
-class SnappyDecompressor {
- private:
-  Source*       reader_;         // Underlying source of bytes to decompress
-  const char*   ip_;             // Points to next buffered byte
-  const char*   ip_limit_;       // Points just past buffered bytes
-  uint32        peeked_;         // Bytes peeked from reader (need to skip)
-  bool          eof_;            // Hit end of input without an error?
-  char          scratch_[5];     // Temporary buffer used by RefillTag()
-
-  // Ensure that all of the tag metadata for the next tag is available
-  // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
-  // if (ip_limit_ - ip_ < 5).
-  //
-  // Returns true on success, false on error or end of input.
-  bool RefillTag();
-
- public:
-  explicit SnappyDecompressor(Source* reader)
-      : reader_(reader),
-        ip_(NULL),
-        ip_limit_(NULL),
-        peeked_(0),
-        eof_(false) {
-  }
-
-  ~SnappyDecompressor() {
-    // Advance past any bytes we peeked at from the reader
-    reader_->Skip(peeked_);
-  }
-
-  // Returns true iff we have hit the end of the input without an error.
-  bool eof() const {
-    return eof_;
-  }
-
-  // Read the uncompressed length stored at the start of the compressed data.
-  // On success, stores the length in *result and returns true.
-  // On failure, returns false.
-  bool ReadUncompressedLength(uint32* result) {
-    DCHECK(ip_ == NULL);       // Must not have read anything yet
-    // Length is encoded in 1..5 bytes
-    *result = 0;
-    uint32 shift = 0;
-    while (true) {
-      if (shift >= 32) return false;
-      size_t n;
-      const char* ip = reader_->Peek(&n);
-      if (n == 0) return false;
-      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
-      reader_->Skip(1);
-      *result |= static_cast<uint32>(c & 0x7f) << shift;
-      if (c < 128) {
-        break;
-      }
-      shift += 7;
-    }
-    return true;
-  }
-
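[Editor's note] The length prefix read above is a little-endian base-128
varint; a worked example, not from the original source:

    // Input bytes: 0xAC 0x02
    //   byte 0: c = 0xAC, c & 0x7f = 44, high bit set  -> keep reading
    //   byte 1: c = 0x02, contributes 2 << 7 = 256, high bit clear -> stop
    //   *result = 44 | 256 = 300
    // So a stream whose uncompressed size is 300 bytes begins with AC 02.
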
-  // Process all items found in the input, writing them to *writer.
-  // Returns when the input is exhausted or the writer signals an error.
-  template <class Writer>
-  void DecompressAllTags(Writer* writer) {
-    const char* ip = ip_;
-
-    // We could have put this refill fragment only at the beginning of the loop.
-    // However, duplicating it at the end of each branch gives the compiler more
-    // scope to optimize the <ip_limit_ - ip> expression based on the local
-    // context, which overall increases speed.
-    #define MAYBE_REFILL() \
-        if (ip_limit_ - ip < 5) { \
-          ip_ = ip; \
-          if (!RefillTag()) return; \
-          ip = ip_; \
-        }
-
-    MAYBE_REFILL();
-    for ( ;; ) {
-      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
-
-      if ((c & 0x3) == LITERAL) {
-        size_t literal_length = (c >> 2) + 1u;
-        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
-          DCHECK_LT(literal_length, 61);
-          ip += literal_length;
-          MAYBE_REFILL();
-          continue;
-        }
-        if (PREDICT_FALSE(literal_length >= 61)) {
-          // Long literal.
-          const size_t literal_length_length = literal_length - 60;
-          literal_length =
-              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
-          ip += literal_length_length;
-        }
-
-        size_t avail = ip_limit_ - ip;
-        while (avail < literal_length) {
-          if (!writer->Append(ip, avail)) return;
-          literal_length -= avail;
-          reader_->Skip(peeked_);
-          size_t n;
-          ip = reader_->Peek(&n);
-          avail = n;
-          peeked_ = avail;
-          if (avail == 0) return;  // Premature end of input
-          ip_limit_ = ip + avail;
-        }
-        if (!writer->Append(ip, literal_length)) {
-          return;
-        }
-        ip += literal_length;
-        MAYBE_REFILL();
-      } else {
-        const uint32 entry = char_table[c];
-        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
-        const uint32 length = entry & 0xff;
-        ip += entry >> 11;
-
-        // copy_offset/256 is encoded in bits 8..10.  By just fetching
-        // those bits, we get copy_offset (since the bit-field starts at
-        // bit 8).
-        const uint32 copy_offset = entry & 0x700;
-        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
-          return;
-        }
-        MAYBE_REFILL();
-      }
-    }
-
-#undef MAYBE_REFILL
-  }
-};
-
-bool SnappyDecompressor::RefillTag() {
-  const char* ip = ip_;
-  if (ip == ip_limit_) {
-    // Fetch a new fragment from the reader
-    reader_->Skip(peeked_);   // All peeked bytes are used up
-    size_t n;
-    ip = reader_->Peek(&n);
-    peeked_ = n;
-    if (n == 0) {
-      eof_ = true;
-      return false;
-    }
-    ip_limit_ = ip + n;
-  }
-
-  // Read the tag character
-  DCHECK_LT(ip, ip_limit_);
-  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
-  const uint32 entry = char_table[c];
-  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
-  DCHECK_LE(needed, sizeof(scratch_));
-
-  // Read more bytes from reader if needed
-  uint32 nbuf = ip_limit_ - ip;
-  if (nbuf < needed) {
-    // Stitch together bytes from ip and reader to form the word
-    // contents.  We store the needed bytes in "scratch_".  They
-    // will be consumed immediately by the caller since we do not
-    // read more than we need.
-    memmove(scratch_, ip, nbuf);
-    reader_->Skip(peeked_);  // All peeked bytes are used up
-    peeked_ = 0;
-    while (nbuf < needed) {
-      size_t length;
-      const char* src = reader_->Peek(&length);
-      if (length == 0) return false;
-      uint32 to_add = min<uint32>(needed - nbuf, length);
-      memcpy(scratch_ + nbuf, src, to_add);
-      nbuf += to_add;
-      reader_->Skip(to_add);
-    }
-    DCHECK_EQ(nbuf, needed);
-    ip_ = scratch_;
-    ip_limit_ = scratch_ + needed;
-  } else if (nbuf < 5) {
-    // Have enough bytes, but move into scratch_ so that we do not
-    // read past end of input
-    memmove(scratch_, ip, nbuf);
-    reader_->Skip(peeked_);  // All peeked bytes are used up
-    peeked_ = 0;
-    ip_ = scratch_;
-    ip_limit_ = scratch_ + nbuf;
-  } else {
-    // Pass pointer to buffer returned by reader_.
-    ip_ = ip;
-  }
-  return true;
-}
-
-template <typename Writer>
-static bool InternalUncompress(Source* r,
-                               Writer* writer,
-                               uint32 max_len) {
-  // Read the uncompressed length from the front of the compressed input
-  SnappyDecompressor decompressor(r);
-  uint32 uncompressed_len = 0;
-  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
-  return InternalUncompressAllTags(
-      &decompressor, writer, uncompressed_len, max_len);
-}
-
-template <typename Writer>
-static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
-                                      Writer* writer,
-                                      uint32 uncompressed_len,
-                                      uint32 max_len) {
-  // Protect against possible DoS attack
-  if (static_cast<uint64>(uncompressed_len) > max_len) {
-    return false;
-  }
-
-  writer->SetExpectedLength(uncompressed_len);
-
-  // Process the entire input
-  decompressor->DecompressAllTags(writer);
-  return (decompressor->eof() && writer->CheckLength());
-}
-
-bool GetUncompressedLength(Source* source, uint32* result) {
-  SnappyDecompressor decompressor(source);
-  return decompressor.ReadUncompressedLength(result);
-}
-
-size_t Compress(Source* reader, Sink* writer) {
-  size_t written = 0;
-  size_t N = reader->Available();
-  char ulength[Varint::kMax32];
-  char* p = Varint::Encode32(ulength, N);
-  writer->Append(ulength, p-ulength);
-  written += (p - ulength);
-
-  internal::WorkingMemory wmem;
-  char* scratch = NULL;
-  char* scratch_output = NULL;
-
-  while (N > 0) {
-    // Get next block to compress (without copying if possible)
-    size_t fragment_size;
-    const char* fragment = reader->Peek(&fragment_size);
-    DCHECK_NE(fragment_size, 0) << ": premature end of input";
-    const size_t num_to_read = min(N, kBlockSize);
-    size_t bytes_read = fragment_size;
-
-    size_t pending_advance = 0;
-    if (bytes_read >= num_to_read) {
-      // Buffer returned by reader is large enough
-      pending_advance = num_to_read;
-      fragment_size = num_to_read;
-    } else {
-      // Read into scratch buffer
-      if (scratch == NULL) {
-        // If this is the last iteration, we want to allocate N bytes
-        // of space, otherwise the max possible kBlockSize space.
-        // num_to_read contains exactly the correct value.
-        scratch = new char[num_to_read];
-      }
-      memcpy(scratch, fragment, bytes_read);
-      reader->Skip(bytes_read);
-
-      while (bytes_read < num_to_read) {
-        fragment = reader->Peek(&fragment_size);
-        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
-        memcpy(scratch + bytes_read, fragment, n);
-        bytes_read += n;
-        reader->Skip(n);
-      }
-      DCHECK_EQ(bytes_read, num_to_read);
-      fragment = scratch;
-      fragment_size = num_to_read;
-    }
-    DCHECK_EQ(fragment_size, num_to_read);
-
-    // Get encoding table for compression
-    int table_size;
-    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
-
-    // Compress input_fragment and append to dest
-    const int max_output = MaxCompressedLength(num_to_read);
-
-    // Need a scratch buffer for the output, in case the byte sink doesn't
-    // have room for us directly.
-    if (scratch_output == NULL) {
-      scratch_output = new char[max_output];
-    } else {
-      // Since we encode kBlockSize regions followed by a region
-      // which is <= kBlockSize in length, a previously allocated
-      // scratch_output[] region is big enough for this iteration.
-    }
-    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
-    char* end = internal::CompressFragment(fragment, fragment_size,
-                                           dest, table, table_size);
-    writer->Append(dest, end - dest);
-    written += (end - dest);
-
-    N -= num_to_read;
-    reader->Skip(pending_advance);
-  }
-
-  delete[] scratch;
-  delete[] scratch_output;
-
-  return written;
-}
-
-// -----------------------------------------------------------------------
-// Flat array interfaces
-// -----------------------------------------------------------------------
-
-// A type that writes to a flat array.
-// Note that this is not a "ByteSink", but a type that matches the
-// Writer template argument to SnappyDecompressor::DecompressAllTags().
-class SnappyArrayWriter {
- private:
-  char* base_;
-  char* op_;
-  char* op_limit_;
-
- public:
-  inline explicit SnappyArrayWriter(char* dst)
-      : base_(dst),
-        op_(dst) {
-  }
-
-  inline void SetExpectedLength(size_t len) {
-    op_limit_ = op_ + len;
-  }
-
-  inline bool CheckLength() const {
-    return op_ == op_limit_;
-  }
-
-  inline bool Append(const char* ip, size_t len) {
-    char* op = op_;
-    const size_t space_left = op_limit_ - op;
-    if (space_left < len) {
-      return false;
-    }
-    memcpy(op, ip, len);
-    op_ = op + len;
-    return true;
-  }
-
-  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
-    char* op = op_;
-    const size_t space_left = op_limit_ - op;
-    if (len <= 16 && available >= 16 && space_left >= 16) {
-      // Fast path, used for the majority (about 95%) of invocations.
-      UnalignedCopy64(ip, op);
-      UnalignedCopy64(ip + 8, op + 8);
-      op_ = op + len;
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  inline bool AppendFromSelf(size_t offset, size_t len) {
-    char* op = op_;
-    const size_t space_left = op_limit_ - op;
-
-    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
-      return false;
-    }
-    if (len <= 16 && offset >= 8 && space_left >= 16) {
-      // Fast path, used for the majority (70-80%) of dynamic invocations.
-      UnalignedCopy64(op - offset, op);
-      UnalignedCopy64(op - offset + 8, op + 8);
-    } else {
-      if (space_left >= len + kMaxIncrementCopyOverflow) {
-        IncrementalCopyFastPath(op - offset, op, len);
-      } else {
-        if (space_left < len) {
-          return false;
-        }
-        IncrementalCopy(op - offset, op, len);
-      }
-    }
-
-    op_ = op + len;
-    return true;
-  }
-};
-
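[Editor's note] The copy fast paths above lean on IncrementalCopy to make
overlapping copies come out right: when offset < len the copy reads bytes it
has just written, replicating a pattern. A hedged sketch of the reference
semantics (ReferenceCopy is an illustrative name, not in the original source):

    static void ReferenceCopy(char* op, size_t offset, size_t len) {
      const char* src = op - offset;
      for (size_t i = 0; i < len; i++) {
        op[i] = src[i];  // may read a byte written earlier in this loop
      }
    }
    // With output "ab" already emitted: offset=1, len=4 appends "bbbb";
    // offset=2, len=4 appends "abab".
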
-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
-  ByteArraySource reader(compressed, n);
-  return RawUncompress(&reader, uncompressed);
-}
-
-bool RawUncompress(Source* compressed, char* uncompressed) {
-  SnappyArrayWriter output(uncompressed);
-  return InternalUncompress(compressed, &output, kuint32max);
-}
-
-bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
-  size_t ulength;
-  if (!GetUncompressedLength(compressed, n, &ulength)) {
-    return false;
-  }
-  // Protect against possible DoS attack
-  if ((static_cast<uint64>(ulength) + uncompressed->size()) >
-      uncompressed->max_size()) {
-    return false;
-  }
-  STLStringResizeUninitialized(uncompressed, ulength);
-  return RawUncompress(compressed, n, string_as_array(uncompressed));
-}
-
-
-// A Writer that drops everything on the floor and just does validation
-class SnappyDecompressionValidator {
- private:
-  size_t expected_;
-  size_t produced_;
-
- public:
-  inline SnappyDecompressionValidator() : produced_(0) { }
-  inline void SetExpectedLength(size_t len) {
-    expected_ = len;
-  }
-  inline bool CheckLength() const {
-    return expected_ == produced_;
-  }
-  inline bool Append(const char* ip, size_t len) {
-    produced_ += len;
-    return produced_ <= expected_;
-  }
-  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
-    return false;
-  }
-  inline bool AppendFromSelf(size_t offset, size_t len) {
-    if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
-    produced_ += len;
-    return produced_ <= expected_;
-  }
-};
-
-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
-  ByteArraySource reader(compressed, n);
-  SnappyDecompressionValidator writer;
-  return InternalUncompress(&reader, &writer, kuint32max);
-}
-
-void RawCompress(const char* input,
-                 size_t input_length,
-                 char* compressed,
-                 size_t* compressed_length) {
-  ByteArraySource reader(input, input_length);
-  UncheckedByteArraySink writer(compressed);
-  Compress(&reader, &writer);
-
-  // Compute how many bytes were added
-  *compressed_length = (writer.CurrentDestination() - compressed);
-}
-
-size_t Compress(const char* input, size_t input_length, string* compressed) {
-  // Pre-grow the buffer to the max length of the compressed output
-  compressed->resize(MaxCompressedLength(input_length));
-
-  size_t compressed_length;
-  RawCompress(input, input_length, string_as_array(compressed),
-              &compressed_length);
-  compressed->resize(compressed_length);
-  return compressed_length;
-}
-
-
-} // end namespace snappy
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy.h
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy.h b/src/snappy/c_src/google-snappy/snappy.h
deleted file mode 100644
index 8c2075f..0000000
--- a/src/snappy/c_src/google-snappy/snappy.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2005 and onwards Google Inc.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// A light-weight compression algorithm.  It is designed for speed of
-// compression and decompression, rather than for the utmost in space
-// savings.
-//
-// If you want better compression ratios on data with long repeated
-// sequences, or on data that is similar to other data, while still
-// compressing fast, consider first applying BMDiff and then compressing
-// the output of BMDiff with Snappy.
-
-#ifndef UTIL_SNAPPY_SNAPPY_H__
-#define UTIL_SNAPPY_SNAPPY_H__
-
-#include <stddef.h>
-#include <string>
-
-#include "snappy-stubs-public.h"
-
-namespace snappy {
-  class Source;
-  class Sink;
-
-  // ------------------------------------------------------------------------
-  // Generic compression/decompression routines.
-  // ------------------------------------------------------------------------
-
-  // Compress the bytes read from "*source" and append to "*sink". Return the
-  // number of bytes written.
-  size_t Compress(Source* source, Sink* sink);
-
-  bool GetUncompressedLength(Source* source, uint32* result);
-
-  // ------------------------------------------------------------------------
-  // Higher-level string based routines (should be sufficient for most users)
-  // ------------------------------------------------------------------------
-
-  // Sets "*output" to the compressed version of "input[0,input_length-1]".
-  // Original contents of *output are lost.
-  //
-  // REQUIRES: "input[]" is not an alias of "*output".
-  size_t Compress(const char* input, size_t input_length, string* output);
-
-  // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
-  // Original contents of "*uncompressed" are lost.
-  //
-  // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
-  //
-  // returns false if the message is corrupted and could not be decompressed
-  bool Uncompress(const char* compressed, size_t compressed_length,
-                  string* uncompressed);
-
-
-  // ------------------------------------------------------------------------
-  // Lower-level character array based routines.  May be useful for
-  // efficiency reasons in certain circumstances.
-  // ------------------------------------------------------------------------
-
-  // REQUIRES: "compressed" must point to an area of memory that is at
-  // least "MaxCompressedLength(input_length)" bytes in length.
-  //
-  // Takes the data stored in "input[0..input_length-1]" and stores
-  // it in the array pointed to by "compressed".
-  //
-  // "*compressed_length" is set to the length of the compressed output.
-  //
-  // Example:
-  //    char* output = new char[snappy::MaxCompressedLength(input_length)];
-  //    size_t output_length;
-  //    RawCompress(input, input_length, output, &output_length);
-  //    ... Process(output, output_length) ...
-  //    delete [] output;
-  void RawCompress(const char* input,
-                   size_t input_length,
-                   char* compressed,
-                   size_t* compressed_length);
-
-  // Given data in "compressed[0..compressed_length-1]" generated by
-  // calling the Snappy::Compress routine, this routine
-  // stores the uncompressed data to
-  //    uncompressed[0..GetUncompressedLength(compressed)-1]
-  // returns false if the message is corrupted and could not be decompressed
-  bool RawUncompress(const char* compressed, size_t compressed_length,
-                     char* uncompressed);
-
-  // Given data from the byte source 'compressed' generated by calling
-  // the Snappy::Compress routine, this routine stores the uncompressed
-  // data to
-  //    uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
-  // returns false if the message is corrupted and could not be decompressed
-  bool RawUncompress(Source* compressed, char* uncompressed);
-
-  // Returns the maximal size of the compressed representation of
-  // input data that is "source_bytes" bytes in length.
-  size_t MaxCompressedLength(size_t source_bytes);
-
-  // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
-  // On success, returns true and stores the length of the uncompressed
-  // data in *result.  Returns false on parsing error.
-  // This operation takes O(1) time.
-  bool GetUncompressedLength(const char* compressed, size_t compressed_length,
-                             size_t* result);
-
-  // Returns true iff the contents of "compressed[]" can be uncompressed
-  // successfully.  Does not return the uncompressed data.  Takes
-  // time proportional to compressed_length, but is usually at least
-  // a factor of four faster than actual decompression.
-  bool IsValidCompressedBuffer(const char* compressed,
-                               size_t compressed_length);
-
-  // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
-  //
-  // New Compression code chops up the input into blocks of at most
-  // the following size.  This ensures that back-references in the
-  // output never cross kBlockSize block boundaries.  This can be
-  // helpful in implementing blocked decompression.  However the
-  // decompression code should not rely on this guarantee since older
-  // compression code may not obey it.
-  static const int kBlockLog = 15;
-  static const size_t kBlockSize = 1 << kBlockLog;
-
-  static const int kMaxHashTableBits = 14;
-  static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
-
-}  // end namespace snappy
-
-
-#endif  // UTIL_SNAPPY_SNAPPY_H__
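
[Editor's note] A minimal round trip through the public API declared above;
a hedged sketch assuming only this header and <string>:

    #include <string>
    #include "snappy.h"

    bool RoundTrip(const std::string& input, std::string* output) {
      std::string compressed;
      snappy::Compress(input.data(), input.size(), &compressed);
      // Cheap integrity check before committing to a full decompression.
      if (!snappy::IsValidCompressedBuffer(compressed.data(),
                                           compressed.size())) {
        return false;
      }
      return snappy::Uncompress(compressed.data(), compressed.size(), output);
    }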

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/snappy_nif.cc
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/snappy_nif.cc b/src/snappy/c_src/snappy_nif.cc
deleted file mode 100644
index ae28d91..0000000
--- a/src/snappy/c_src/snappy_nif.cc
+++ /dev/null
@@ -1,265 +0,0 @@
-/**
- * Copyright 2011,  Filipe David Manana  <fd...@apache.org>
- * Web:  http://github.com/fdmanana/snappy-erlang-nif
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- **/
-
-#include <iostream>
-#include <cstring>
-
-#include "erl_nif_compat.h"
-#include "google-snappy/snappy.h"
-#include "google-snappy/snappy-sinksource.h"
-
-#ifdef OTP_R13B03
-#error OTP R13B03 not supported. Upgrade to R13B04 or later.
-#endif
-
-#ifdef __cplusplus
-#define BEGIN_C extern "C" {
-#define END_C }
-#else
-#define BEGIN_C
-#define END_C
-#endif
-
-#define SC_PTR(c) reinterpret_cast<char *>(c)
-
-class SnappyNifSink : public snappy::Sink
-{
-    public:
-        SnappyNifSink(ErlNifEnv* e);
-        ~SnappyNifSink();
-        
-        void Append(const char* data, size_t n);
-        char* GetAppendBuffer(size_t len, char* scratch);
-        ErlNifBinary& getBin();
-
-    private:
-        ErlNifEnv* env;
-        ErlNifBinary bin;
-        size_t length;
-};
-
-SnappyNifSink::SnappyNifSink(ErlNifEnv* e) : env(e), length(0)
-{
-    if(!enif_alloc_binary_compat(env, 0, &bin)) {
-        env = NULL;
-        throw std::bad_alloc();
-    }
-}
-
-SnappyNifSink::~SnappyNifSink()
-{
-    if(env != NULL) {
-        enif_release_binary_compat(env, &bin);
-    }
-}
-
-void
-SnappyNifSink::Append(const char *data, size_t n)
-{
-    if(data != (SC_PTR(bin.data) + length)) {
-        memcpy(bin.data + length, data, n);
-    }
-    length += n;
-}
-
-char*
-SnappyNifSink::GetAppendBuffer(size_t len, char* scratch)
-{
-    size_t sz;
-    
-    if((length + len) > bin.size) {
-        sz = (len * 4) < 8192 ? 8192 : (len * 4);
-
-        if(!enif_realloc_binary_compat(env, &bin, bin.size + sz)) {
-            throw std::bad_alloc();
-        }
-    }
-
-    return SC_PTR(bin.data) + length;
-}
-
-ErlNifBinary&
-SnappyNifSink::getBin()
-{
-    if(bin.size > length) {
-        if(!enif_realloc_binary_compat(env, &bin, length)) {
-            throw std::bad_alloc();
-        }
-    }
-    return bin;
-}
-
-
-static inline ERL_NIF_TERM
-make_atom(ErlNifEnv* env, const char* name)
-{
-    ERL_NIF_TERM ret;
-    if(enif_make_existing_atom_compat(env, name, &ret, ERL_NIF_LATIN1)) {
-        return ret;
-    }
-    return enif_make_atom(env, name);
-}
-
-
-static inline ERL_NIF_TERM
-make_ok(ErlNifEnv* env, ERL_NIF_TERM mesg)
-{
-    ERL_NIF_TERM ok = make_atom(env, "ok");
-    return enif_make_tuple2(env, ok, mesg);   
-}
-
-
-static inline ERL_NIF_TERM
-make_error(ErlNifEnv* env, const char* mesg)
-{
-    ERL_NIF_TERM error = make_atom(env, "error");
-    return enif_make_tuple2(env, error, make_atom(env, mesg));
-}
-
-
-BEGIN_C
-
-
-ERL_NIF_TERM
-snappy_compress(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ErlNifBinary input;
-
-    if(!enif_inspect_iolist_as_binary(env, argv[0], &input)) {
-        return enif_make_badarg(env);
-    }
-
-    try {
-        snappy::ByteArraySource source(SC_PTR(input.data), input.size);
-        SnappyNifSink sink(env);
-        snappy::Compress(&source, &sink);
-        return make_ok(env, enif_make_binary(env, &sink.getBin()));
-    } catch(std::bad_alloc& e) {
-        return make_error(env, "insufficient_memory");
-    } catch(...) {
-        return make_error(env, "unknown");
-    }
-}
-
-
-ERL_NIF_TERM
-snappy_decompress(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ErlNifBinary bin;
-    ErlNifBinary ret;
-    size_t len;
-
-    if(!enif_inspect_iolist_as_binary(env, argv[0], &bin)) {
-        return enif_make_badarg(env);
-    }
-
-    try {
-        if(!snappy::GetUncompressedLength(SC_PTR(bin.data), bin.size, &len)) {
-            return make_error(env, "data_not_compressed");
-        }
-
-        if(!enif_alloc_binary_compat(env, len, &ret)) {
-            return make_error(env, "insufficient_memory");
-        }
-
-        if(!snappy::RawUncompress(SC_PTR(bin.data), bin.size,
-                                            SC_PTR(ret.data))) {
-            return make_error(env, "corrupted_data");
-        }
-
-        return make_ok(env, enif_make_binary(env, &ret));
-    } catch(...) {
-        return make_error(env, "unknown");
-    }
-}
-
-
-ERL_NIF_TERM
-snappy_uncompressed_length(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ErlNifBinary bin;
-    size_t len;
-
-    if(!enif_inspect_iolist_as_binary(env, argv[0], &bin)) {
-        return enif_make_badarg(env);
-    }
-
-    try {
-        if(!snappy::GetUncompressedLength(SC_PTR(bin.data), bin.size, &len)) {
-            return make_error(env, "data_not_compressed");
-        }
-        return make_ok(env, enif_make_ulong(env, len));
-    } catch(...) {
-        return make_error(env, "unknown");
-    }
-}
-
-
-ERL_NIF_TERM
-snappy_is_valid(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ErlNifBinary bin;
-
-    if (!enif_inspect_iolist_as_binary(env, argv[0], &bin)) {
-        return enif_make_badarg(env);
-    }
-
-    try {
-        if(snappy::IsValidCompressedBuffer(SC_PTR(bin.data), bin.size)) {
-            return make_atom(env, "true");
-        } else {
-            return make_atom(env, "false");
-        }
-    } catch(...) {
-        return make_error(env, "unknown");
-    }
-}
-
-
-int
-on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-
-int
-on_reload(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-
-int
-on_upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-
-static ErlNifFunc nif_functions[] = {
-    {"compress", 1, snappy_compress},
-    {"decompress", 1, snappy_decompress},
-    {"uncompressed_length", 1, snappy_uncompressed_length},
-    {"is_valid", 1, snappy_is_valid}
-};
-
-
-ERL_NIF_INIT(snappy, nif_functions, &on_load, &on_reload, &on_upgrade, NULL);
-
-
-END_C
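
[Editor's note] SnappyNifSink above exists so snappy::Compress can write
straight into the result binary. For contrast, a minimal Sink that merely
collects into a std::string; a sketch assuming only the snappy::Sink
interface from snappy-sinksource.h, whose default GetAppendBuffer() returns
the caller's scratch buffer:

    #include <string>
    #include "google-snappy/snappy-sinksource.h"

    class StringSink : public snappy::Sink {
     public:
      explicit StringSink(std::string* out) : out_(out) {}
      virtual void Append(const char* data, size_t n) {
        out_->append(data, n);  // always copies; no zero-copy fast path
      }
      // GetAppendBuffer() is inherited: it returns the scratch buffer, so
      // Append() above always receives bytes it must copy.
     private:
      std::string* out_;
    };

Used as snappy::Compress(&source, &sink), this trades SnappyNifSink's
in-place writes into the binary for simplicity.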

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/src/snappy.app.src
----------------------------------------------------------------------
diff --git a/src/snappy/src/snappy.app.src b/src/snappy/src/snappy.app.src
deleted file mode 100644
index 9f3f9a3..0000000
--- a/src/snappy/src/snappy.app.src
+++ /dev/null
@@ -1,12 +0,0 @@
-{application, snappy,
- [
-  {description, "snappy compressor/decompressor Erlang NIF wrapper"},
-  {vsn, git},
-  {registered, []},
-  {applications, [
-                  kernel,
-                  stdlib
-                 ]},
-  {env, []},
-  {modules, [snappy]}
- ]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/src/snappy.erl
----------------------------------------------------------------------
diff --git a/src/snappy/src/snappy.erl b/src/snappy/src/snappy.erl
deleted file mode 100644
index 7d3d36a..0000000
--- a/src/snappy/src/snappy.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-%% Copyright 2011,  Filipe David Manana  <fd...@apache.org>
-%% Web:  http://github.com/fdmanana/snappy-erlang-nif
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%  http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
--module(snappy).
-
--export([compress/1, decompress/1]).
--export([uncompressed_length/1, is_valid/1]).
-
--on_load(init/0).
-
-
-init() ->
-    SoName = case code:priv_dir(?MODULE) of
-    {error, bad_name} ->
-        case filelib:is_dir(filename:join(["..", "priv"])) of
-        true ->
-            filename:join(["..", "priv", "snappy_nif"]);
-        false ->
-            filename:join(["priv", "snappy_nif"])
-        end;
-    Dir ->
-        filename:join(Dir, "snappy_nif")
-    end,
-    (catch erlang:load_nif(SoName, 0)),
-    case erlang:system_info(otp_release) of
-    "R13B03" -> true;
-    _ -> ok
-    end.
-
-
-compress(_IoList) ->
-    exit(snappy_nif_not_loaded).
-
-
-decompress(_IoList) ->
-    exit(snappy_nif_not_loaded).
-
-
-uncompressed_length(_IoList) ->
-    exit(snappy_nif_not_loaded).
-
-
-is_valid(_IoList) ->
-    exit(snappy_nif_not_loaded).


[23/49] Remove src/ejson

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/src/mochijson2.erl
----------------------------------------------------------------------
diff --git a/src/ejson/src/mochijson2.erl b/src/ejson/src/mochijson2.erl
deleted file mode 100644
index 954a07d..0000000
--- a/src/ejson/src/mochijson2.erl
+++ /dev/null
@@ -1,849 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
-%%      with binaries as strings, arrays as lists (without an {array, _}
-%%      wrapper), and it only knows how to decode UTF-8 (and ASCII).
-%%
-%%      JSON terms are decoded as follows (javascript -> erlang):
-%%      <ul>
-%%          <li>{"key": "value"} ->
-%%              {struct, [{&lt;&lt;"key">>, &lt;&lt;"value">>}]}</li>
-%%          <li>["array", 123, 12.34, true, false, null] ->
-%%              [&lt;&lt;"array">>, 123, 12.34, true, false, null]
-%%          </li>
-%%      </ul>
-%%      <ul>
-%%          <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
-%%          <li>Objects decode to {struct, PropList}</li>
-%%          <li>Numbers decode to integer or float</li>
-%%          <li>true, false, null decode to their respective terms.</li>
-%%      </ul>
-%%      The encoder will accept the same format that the decoder will produce,
-%%      but will also allow additional cases for leniency:
-%%      <ul>
-%%          <li>atoms other than true, false, null will be considered UTF-8
-%%              strings (even as a proplist key)
-%%          </li>
-%%          <li>{json, IoList} will insert IoList directly into the output
-%%              with no validation
-%%          </li>
-%%          <li>{array, Array} will be encoded as Array
-%%              (legacy mochijson style)
-%%          </li>
-%%          <li>A non-empty raw proplist will be encoded as an object as long
-%%              as the first pair does not have an atom key of json, struct,
-%%              or array
-%%          </li>
-%%      </ul>
-
--module(mochijson2).
--author('bob@mochimedia.com').
--export([encoder/1, encode/1]).
--export([decoder/1, decode/1]).
-
-% This is a macro to placate syntax highlighters.
--define(Q, $\").
--define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
-                                 column=N+S#decoder.column}).
--define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
-                              column=1+S#decoder.column}).
--define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
-                               column=1,
-                               line=1+S#decoder.line}).
--define(INC_CHAR(S, C),
-        case C of
-            $\n ->
-                S#decoder{column=1,
-                          line=1+S#decoder.line,
-                          offset=1+S#decoder.offset};
-            _ ->
-                S#decoder{column=1+S#decoder.column,
-                          offset=1+S#decoder.offset}
-        end).
--define(IS_WHITESPACE(C),
-        (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
-
-%% @type iolist() = [char() | binary() | iolist()]
-%% @type iodata() = iolist() | binary()
-%% @type json_string() = atom | binary()
-%% @type json_number() = integer() | float()
-%% @type json_array() = [json_term()]
-%% @type json_object() = {struct, [{json_string(), json_term()}]}
-%% @type json_iolist() = {json, iolist()}
-%% @type json_term() = json_string() | json_number() | json_array() |
-%%                     json_object() | json_iolist()
-
--record(encoder, {handler=null,
-                  utf8=false}).
-
--record(decoder, {object_hook=null,
-                  offset=0,
-                  line=1,
-                  column=1,
-                  state=null}).
-
-%% @spec encoder([encoder_option()]) -> function()
-%% @doc Create an encoder/1 with the given options.
-%% @type encoder_option() = handler_option() | utf8_option()
-%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
-encoder(Options) ->
-    State = parse_encoder_options(Options, #encoder{}),
-    fun (O) -> json_encode(O, State) end.
-
-%% @spec encode(json_term()) -> iolist()
-%% @doc Encode the given as JSON to an iolist.
-encode(Any) ->
-    json_encode(Any, #encoder{}).
-
-%% @spec decoder([decoder_option()]) -> function()
-%% @doc Create a decoder/1 with the given options.
-decoder(Options) ->
-    State = parse_decoder_options(Options, #decoder{}),
-    fun (O) -> json_decode(O, State) end.
-
-%% @spec decode(iolist()) -> json_term()
-%% @doc Decode the given iolist to Erlang terms.
-decode(S) ->
-    json_decode(S, #decoder{}).
-
-%% Internal API
-
-parse_encoder_options([], State) ->
-    State;
-parse_encoder_options([{handler, Handler} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{handler=Handler});
-parse_encoder_options([{utf8, Switch} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{utf8=Switch}).
-
-parse_decoder_options([], State) ->
-    State;
-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
-    parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
-
-json_encode(true, _State) ->
-    <<"true">>;
-json_encode(false, _State) ->
-    <<"false">>;
-json_encode(null, _State) ->
-    <<"null">>;
-json_encode(I, _State) when is_integer(I) ->
-    integer_to_list(I);
-json_encode(F, _State) when is_float(F) ->
-    mochinum:digits(F);
-json_encode(S, State) when is_binary(S); is_atom(S) ->
-    json_encode_string(S, State);
-json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
-                                             K =/= array andalso
-                                             K =/= json) ->
-    json_encode_proplist(Props, State);
-json_encode({struct, Props}, State) when is_list(Props) ->
-    json_encode_proplist(Props, State);
-json_encode(Array, State) when is_list(Array) ->
-    json_encode_array(Array, State);
-json_encode({array, Array}, State) when is_list(Array) ->
-    json_encode_array(Array, State);
-json_encode({json, IoList}, _State) ->
-    IoList;
-json_encode(Bad, #encoder{handler=null}) ->
-    exit({json_encode, {bad_term, Bad}});
-json_encode(Bad, State=#encoder{handler=Handler}) ->
-    json_encode(Handler(Bad), State).
-
-json_encode_array([], _State) ->
-    <<"[]">>;
-json_encode_array(L, State) ->
-    F = fun (O, Acc) ->
-                [$,, json_encode(O, State) | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "[", L),
-    lists:reverse([$\] | Acc1]).
-
-json_encode_proplist([], _State) ->
-    <<"{}">>;
-json_encode_proplist(Props, State) ->
-    F = fun ({K, V}, Acc) ->
-                KS = json_encode_string(K, State),
-                VS = json_encode(V, State),
-                [$,, VS, $:, KS | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "{", Props),
-    lists:reverse([$\} | Acc1]).
-
-json_encode_string(A, State) when is_atom(A) ->
-    L = atom_to_list(A),
-    case json_string_is_safe(L) of
-        true ->
-            [?Q, L, ?Q];
-        false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
-    end;
-json_encode_string(B, State) when is_binary(B) ->
-    case json_bin_is_safe(B) of
-        true ->
-            [?Q, B, ?Q];
-        false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
-    end;
-json_encode_string(I, _State) when is_integer(I) ->
-    [?Q, integer_to_list(I), ?Q];
-json_encode_string(L, State) when is_list(L) ->
-    case json_string_is_safe(L) of
-        true ->
-            [?Q, L, ?Q];
-        false ->
-            json_encode_string_unicode(L, State, [?Q])
-    end.
-
-json_string_is_safe([]) ->
-    true;
-json_string_is_safe([C | Rest]) ->
-    case C of
-        ?Q ->
-            false;
-        $\\ ->
-            false;
-        $\b ->
-            false;
-        $\f ->
-            false;
-        $\n ->
-            false;
-        $\r ->
-            false;
-        $\t ->
-            false;
-        C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
-            false;
-        C when C < 16#7f ->
-            json_string_is_safe(Rest);
-        _ ->
-            false
-    end.
-
-json_bin_is_safe(<<>>) ->
-    true;
-json_bin_is_safe(<<C, Rest/binary>>) ->
-    case C of
-        ?Q ->
-            false;
-        $\\ ->
-            false;
-        $\b ->
-            false;
-        $\f ->
-            false;
-        $\n ->
-            false;
-        $\r ->
-            false;
-        $\t ->
-            false;
-        C when C >= 0, C < $\s; C >= 16#7f ->
-            false;
-        C when C < 16#7f ->
-            json_bin_is_safe(Rest)
-    end.
-
-json_encode_string_unicode([], _State, Acc) ->
-    lists:reverse([$\" | Acc]);
-json_encode_string_unicode([C | Cs], State, Acc) ->
-    Acc1 = case C of
-               ?Q ->
-                   [?Q, $\\ | Acc];
-               %% Escaping solidus is only useful when trying to protect
-               %% against "</script>" injection attacks which are only
-               %% possible when JSON is inserted into a HTML document
-               %% in-line. mochijson2 does not protect you from this, so
-               %% if you do insert directly into HTML then you need to
-               %% uncomment the following case or escape the output of encode.
-               %%
-               %% $/ ->
-               %%    [$/, $\\ | Acc];
-               %%
-               $\\ ->
-                   [$\\, $\\ | Acc];
-               $\b ->
-                   [$b, $\\ | Acc];
-               $\f ->
-                   [$f, $\\ | Acc];
-               $\n ->
-                   [$n, $\\ | Acc];
-               $\r ->
-                   [$r, $\\ | Acc];
-               $\t ->
-                   [$t, $\\ | Acc];
-               C when C >= 0, C < $\s ->
-                   [unihex(C) | Acc];
-               C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
-                   [xmerl_ucs:to_utf8(C) | Acc];
-               C when  C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
-                   [unihex(C) | Acc];
-               C when C < 16#7f ->
-                   [C | Acc];
-               _ ->
-                   exit({json_encode, {bad_char, C}})
-           end,
-    json_encode_string_unicode(Cs, State, Acc1).
-
-hexdigit(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdigit(C) when C =< 15 ->
-    C + $a - 10.
-
-unihex(C) when C < 16#10000 ->
-    <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
-    Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
-    [$\\, $u | Digits];
-unihex(C) when C =< 16#10FFFF ->
-    N = C - 16#10000,
-    S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
-    S2 = 16#dc00 bor (N band 16#3ff),
-    [unihex(S1), unihex(S2)].
-
-json_decode(L, S) when is_list(L) ->
-    json_decode(iolist_to_binary(L), S);
-json_decode(B, S) ->
-    {Res, S1} = decode1(B, S),
-    {eof, _} = tokenize(B, S1#decoder{state=trim}),
-    Res.
-
-decode1(B, S=#decoder{state=null}) ->
-    case tokenize(B, S#decoder{state=any}) of
-        {{const, C}, S1} ->
-            {C, S1};
-        {start_array, S1} ->
-            decode_array(B, S1);
-        {start_object, S1} ->
-            decode_object(B, S1)
-    end.
-
-make_object(V, #decoder{object_hook=null}) ->
-    V;
-make_object(V, #decoder{object_hook=Hook}) ->
-    Hook(V).
-
-decode_object(B, S) ->
-    decode_object(B, S#decoder{state=key}, []).
-
-decode_object(B, S=#decoder{state=key}, Acc) ->
-    case tokenize(B, S) of
-        {end_object, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, S1#decoder{state=null}};
-        {{const, K}, S1} ->
-            {colon, S2} = tokenize(B, S1),
-            {V, S3} = decode1(B, S2#decoder{state=null}),
-            decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
-    end;
-decode_object(B, S=#decoder{state=comma}, Acc) ->
-    case tokenize(B, S) of
-        {end_object, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, S1#decoder{state=null}};
-        {comma, S1} ->
-            decode_object(B, S1#decoder{state=key}, Acc)
-    end.
-
-decode_array(B, S) ->
-    decode_array(B, S#decoder{state=any}, []).
-
-decode_array(B, S=#decoder{state=any}, Acc) ->
-    case tokenize(B, S) of
-        {end_array, S1} ->
-            {lists:reverse(Acc), S1#decoder{state=null}};
-        {start_array, S1} ->
-            {Array, S2} = decode_array(B, S1),
-            decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
-        {start_object, S1} ->
-            {Array, S2} = decode_object(B, S1),
-            decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
-        {{const, Const}, S1} ->
-            decode_array(B, S1#decoder{state=comma}, [Const | Acc])
-    end;
-decode_array(B, S=#decoder{state=comma}, Acc) ->
-    case tokenize(B, S) of
-        {end_array, S1} ->
-            {lists:reverse(Acc), S1#decoder{state=null}};
-        {comma, S1} ->
-            decode_array(B, S1#decoder{state=any}, Acc)
-    end.
-
-tokenize_string(B, S=#decoder{offset=O}) ->
-    case tokenize_string_fast(B, O) of
-        {escape, O1} ->
-            Length = O1 - O,
-            S1 = ?ADV_COL(S, Length),
-            <<_:O/binary, Head:Length/binary, _/binary>> = B,
-            tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
-        O1 ->
-            Length = O1 - O,
-            <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
-            {{const, String}, ?ADV_COL(S, Length + 1)}
-    end.
-
-tokenize_string_fast(B, O) ->
-    case B of
-        <<_:O/binary, ?Q, _/binary>> ->
-            O;
-        <<_:O/binary, $\\, _/binary>> ->
-            {escape, O};
-        <<_:O/binary, C1, _/binary>> when C1 < 128 ->
-            tokenize_string_fast(B, 1 + O);
-        <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            tokenize_string_fast(B, 2 + O);
-        <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            tokenize_string_fast(B, 3 + O);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            tokenize_string_fast(B, 4 + O);
-        _ ->
-            throw(invalid_utf8)
-    end.
-
-tokenize_string(B, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, ?Q, _/binary>> ->
-            {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
-        <<_:O/binary, "\\\"", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
-        <<_:O/binary, "\\\\", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
-        <<_:O/binary, "\\/", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
-        <<_:O/binary, "\\b", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
-        <<_:O/binary, "\\f", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
-        <<_:O/binary, "\\n", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
-        <<_:O/binary, "\\r", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
-        <<_:O/binary, "\\t", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
-        <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
-            C = erlang:list_to_integer([C3, C2, C1, C0], 16),
-            if C > 16#D7FF, C < 16#DC00 ->
-                %% coalesce UTF-16 surrogate pair
-                <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
-                D = erlang:list_to_integer([D3,D2,D1,D0], 16),
-                [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
-                    D:16/big-unsigned-integer>>),
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
-                tokenize_string(B, ?ADV_COL(S, 12), Acc1);
-            true ->
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
-                tokenize_string(B, ?ADV_COL(S, 6), Acc1)
-            end;
-        <<_:O/binary, C1, _/binary>> when C1 < 128 ->
-            tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
-        <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
-        <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
-        _ ->
-            throw(invalid_utf8)
-    end.
-
-tokenize_number(B, S) ->
-    case tokenize_number(B, sign, S, []) of
-        {{int, Int}, S1} ->
-            {{const, list_to_integer(Int)}, S1};
-        {{float, Float}, S1} ->
-            {{const, list_to_float(Float)}, S1}
-    end.
-
-tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
-    case B of
-        <<_:O/binary, $-, _/binary>> ->
-            tokenize_number(B, int, ?INC_COL(S), [$-]);
-        _ ->
-            tokenize_number(B, int, S, [])
-    end;
-tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, $0, _/binary>> ->
-            tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
-        <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
-            tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
-    end;
-tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
-        _ ->
-            tokenize_number(B, frac, S, Acc)
-    end;
-tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
-            tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
-        <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
-            tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
-        _ ->
-            {{int, lists:reverse(Acc)}, S}
-    end;
-tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
-        <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
-            tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
-        _ ->
-            {{float, lists:reverse(Acc)}, S}
-    end;
-tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
-            tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
-        _ ->
-            tokenize_number(B, eint, S, Acc)
-    end;
-tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
-    end;
-tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
-        _ ->
-            {{float, lists:reverse(Acc)}, S}
-    end.
-
-tokenize(B, S=#decoder{offset=O}) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
-            tokenize(B, ?INC_CHAR(S, C));
-        <<_:O/binary, "{", _/binary>> ->
-            {start_object, ?INC_COL(S)};
-        <<_:O/binary, "}", _/binary>> ->
-            {end_object, ?INC_COL(S)};
-        <<_:O/binary, "[", _/binary>> ->
-            {start_array, ?INC_COL(S)};
-        <<_:O/binary, "]", _/binary>> ->
-            {end_array, ?INC_COL(S)};
-        <<_:O/binary, ",", _/binary>> ->
-            {comma, ?INC_COL(S)};
-        <<_:O/binary, ":", _/binary>> ->
-            {colon, ?INC_COL(S)};
-        <<_:O/binary, "null", _/binary>> ->
-            {{const, null}, ?ADV_COL(S, 4)};
-        <<_:O/binary, "true", _/binary>> ->
-            {{const, true}, ?ADV_COL(S, 4)};
-        <<_:O/binary, "false", _/binary>> ->
-            {{const, false}, ?ADV_COL(S, 5)};
-        <<_:O/binary, "\"", _/binary>> ->
-            tokenize_string(B, ?INC_COL(S));
-        <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
-                                         orelse C =:= $- ->
-            tokenize_number(B, S);
-        <<_:O/binary>> ->
-            trim = S#decoder.state,
-            {eof, S}
-    end.
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-%% testing constructs borrowed from the Yaws JSON implementation.
-
-%% Create an object from a list of Key/Value pairs.
-
-obj_new() ->
-    {struct, []}.
-
-is_obj({struct, Props}) ->
-    F = fun ({K, _}) when is_binary(K) -> true end,
-    lists:all(F, Props).
-
-obj_from_list(Props) ->
-    Obj = {struct, Props},
-    ?assert(is_obj(Obj)),
-    Obj.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as Erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-
-equiv({struct, Props1}, {struct, Props2}) ->
-    equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
-    equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
-equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-
-equiv_object(Props1, Props2) ->
-    L1 = lists:keysort(1, Props1),
-    L2 = lists:keysort(1, Props2),
-    Pairs = lists:zip(L1, L2),
-    true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
-                             equiv(K1, K2) and equiv(V1, V2)
-                     end, Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-
-equiv_list([], []) ->
-    true;
-equiv_list([V1 | L1], [V2 | L2]) ->
-    equiv(V1, V2) andalso equiv_list(L1, L2).
-
-decode_test() ->
-    [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
-    <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
-
-e2j_vec_test() ->
-    test_one(e2j_test_vec(utf8), 1).
-
-test_one([], _N) ->
-    %% io:format("~p tests passed~n", [N-1]),
-    ok;
-test_one([{E, J} | Rest], N) ->
-    %% io:format("[~p] ~p ~p~n", [N, E, J]),
-    true = equiv(E, decode(J)),
-    true = equiv(E, decode(encode(E))),
-    test_one(Rest, 1+N).
-
-e2j_test_vec(utf8) ->
-    [
-     {1, "1"},
-     {3.1416, "3.14160"}, %% text representation may truncate or trail zeroes
-     {-1, "-1"},
-     {-3.1416, "-3.14160"},
-     {12.0e10, "1.20000e+11"},
-     {1.234E+10, "1.23400e+10"},
-     {-1.234E-10, "-1.23400e-10"},
-     {10.0, "1.0e+01"},
-     {123.456, "1.23456E+2"},
-     {10.0, "1e1"},
-     {<<"foo">>, "\"foo\""},
-     {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
-     {<<"">>, "\"\""},
-     {<<"\n\n\n">>, "\"\\n\\n\\n\""},
-     {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
-     {obj_new(), "{}"},
-     {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
-     {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
-      "{\"foo\":\"bar\",\"baz\":123}"},
-     {[], "[]"},
-     {[[]], "[[]]"},
-     {[1, <<"foo">>], "[1,\"foo\"]"},
-
-     %% json array in a json object
-     {obj_from_list([{<<"foo">>, [123]}]),
-      "{\"foo\":[123]}"},
-
-     %% json object in a json object
-     {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
-      "{\"foo\":{\"bar\":true}}"},
-
-     %% fold evaluation order
-     {obj_from_list([{<<"foo">>, []},
-                     {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
-                     {<<"alice">>, <<"bob">>}]),
-      "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
-     %% json object in a json array
-     {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
-      "[-123,\"foo\",{\"bar\":[]},null]"}
-    ].
-
-%% test utf8 encoding
-encoder_utf8_test() ->
-    %% safe conversion case (default)
-    [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
-        encode(<<1,"\321\202\320\265\321\201\321\202">>),
-
-    %% raw utf8 output (optional)
-    Enc = mochijson2:encoder([{utf8, true}]),
-    [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
-        Enc(<<1,"\321\202\320\265\321\201\321\202">>).
-
-input_validation_test() ->
-    Good = [
-        {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
-        {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
-        {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
-    ],
-    lists:foreach(fun({CodePoint, UTF8}) ->
-        Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
-        Expect = decode(UTF8)
-    end, Good),
-
-    Bad = [
-        %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
-        <<?Q, 16#80, ?Q>>,
-        %% missing continuations, last byte in each should be 80-BF
-        <<?Q, 16#C2, 16#7F, ?Q>>,
-        <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
-        <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
-        %% we don't support code points > 10FFFF per RFC 3629
-        <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
-        %% escape characters trigger a different code path
-        <<?Q, $\\, $\n, 16#80, ?Q>>
-    ],
-    lists:foreach(
-      fun(X) ->
-              ok = try decode(X) catch invalid_utf8 -> ok end,
-              %% could be {ucs,{bad_utf8_character_code}} or
-              %%          {json_encode,{bad_char,_}}
-              {'EXIT', _} = (catch encode(X))
-      end, Bad).
-
-inline_json_test() ->
-    ?assertEqual(<<"\"iodata iodata\"">>,
-                 iolist_to_binary(
-                   encode({json, [<<"\"iodata">>, " iodata\""]}))),
-    ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
-                 decode(
-                   encode({struct,
-                           [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
-    ok.
-
-big_unicode_test() ->
-    UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
-    ?assertEqual(
-       <<"\"\\ud834\\udd20\"">>,
-       iolist_to_binary(encode(UTF8Seq))),
-    ?assertEqual(
-       UTF8Seq,
-       decode(iolist_to_binary(encode(UTF8Seq)))),
-    ok.
-
-custom_decoder_test() ->
-    ?assertEqual(
-       {struct, [{<<"key">>, <<"value">>}]},
-       (decoder([]))("{\"key\": \"value\"}")),
-    F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
-    ?assertEqual(
-       win,
-       (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
-    ok.
-
-atom_test() ->
-    %% JSON native atoms
-    [begin
-         ?assertEqual(A, decode(atom_to_list(A))),
-         ?assertEqual(iolist_to_binary(atom_to_list(A)),
-                      iolist_to_binary(encode(A)))
-     end || A <- [true, false, null]],
-    %% Atom to string
-    ?assertEqual(
-       <<"\"foo\"">>,
-       iolist_to_binary(encode(foo))),
-    ?assertEqual(
-       <<"\"\\ud834\\udd20\"">>,
-       iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
-    ok.
-
-key_encode_test() ->
-    %% Some forms are accepted as keys that would not be strings in other
-    %% cases
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{foo, 1}]}))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{"foo", 1}]}))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{foo, 1}]))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{<<"foo">>, 1}]))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{"foo", 1}]))),
-    ?assertEqual(
-       <<"{\"\\ud834\\udd20\":1}">>,
-       iolist_to_binary(
-         encode({struct, [{[16#0001d120], 1}]}))),
-    ?assertEqual(
-       <<"{\"1\":1}">>,
-       iolist_to_binary(encode({struct, [{1, 1}]}))),
-    ok.
-
-unsafe_chars_test() ->
-    Chars = "\"\\\b\f\n\r\t",
-    [begin
-         ?assertEqual(false, json_string_is_safe([C])),
-         ?assertEqual(false, json_bin_is_safe(<<C>>)),
-         ?assertEqual(<<C>>, decode(encode(<<C>>)))
-     end || C <- Chars],
-    ?assertEqual(
-       false,
-       json_string_is_safe([16#0001d120])),
-    ?assertEqual(
-       false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
-    ?assertEqual(
-       [16#0001d120],
-       xmerl_ucs:from_utf8(
-         binary_to_list(
-           decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
-    ?assertEqual(
-       false,
-       json_string_is_safe([16#110000])),
-    ?assertEqual(
-       false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
-    %% solidus can be escaped but isn't unsafe by default
-    ?assertEqual(
-       <<"/">>,
-       decode(<<"\"\\/\"">>)),
-    ok.
-
-int_test() ->
-    ?assertEqual(0, decode("0")),
-    ?assertEqual(1, decode("1")),
-    ?assertEqual(11, decode("11")),
-    ok.
-
-large_int_test() ->
-    ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
-        iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
-    ?assertEqual(<<"2147483649214748364921474836492147483649">>,
-        iolist_to_binary(encode(2147483649214748364921474836492147483649))),
-    ok.
-
-float_test() ->
-    ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
-    ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
-    ok.
-
-handler_test() ->
-    ?assertEqual(
-       {'EXIT',{json_encode,{bad_term,{}}}},
-       catch encode({})),
-    F = fun ({}) -> [] end,
-    ?assertEqual(
-       <<"[]">>,
-       iolist_to_binary((encoder([{handler, F}]))({}))),
-    ok.
-
--endif.

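The tokenizer above decodes \uXXXX escapes, coalescing UTF-16 surrogate pairs
into a single code point, and the number tokenizer normalizes shorthand
exponents such as "1e1" to "1.0e1" before calling list_to_float/1. A minimal
sketch of the resulting behaviour, assuming a mochijson2-compatible module
with decode/1 on the code path (outputs match decode_test and e2j_test_vec
above):

    %% U+1D715 arrives as the surrogate pair \ud835\udf15 and decodes
    %% to its 4-byte UTF-8 sequence:
    1> mochijson2:decode(<<"\"\\ud835\\udf15\"">>).
    <<240,157,156,149>>
    %% "1e1" is tokenized as "1.0e1" and returned as a float:
    2> mochijson2:decode(<<"1e1">>).
    10.0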
http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/src/mochinum.erl
----------------------------------------------------------------------
diff --git a/src/ejson/src/mochinum.erl b/src/ejson/src/mochinum.erl
deleted file mode 100644
index c52b15c..0000000
--- a/src/ejson/src/mochinum.erl
+++ /dev/null
@@ -1,354 +0,0 @@
-%% @copyright 2007 Mochi Media, Inc.
-%% @author Bob Ippolito <bo...@mochimedia.com>
-
-%% @doc Useful numeric algorithms for floats that cover some deficiencies
-%% in the math module. More interesting is digits/1, which implements
-%% the algorithm from:
-%% http://www.cs.indiana.edu/~burger/fp/index.html
-%% See also "Printing Floating-Point Numbers Quickly and Accurately"
-%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
-%% Design and Implementation.
-
--module(mochinum).
--author("Bob Ippolito <bo...@mochimedia.com>").
--export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
-
-%% IEEE 754 Float exponent bias
--define(FLOAT_BIAS, 1022).
--define(MIN_EXP, -1074).
--define(BIG_POW, 4503599627370496).
-
-%% External API
-
-%% @spec digits(number()) -> string()
-%% @doc  Returns a string that accurately represents the given integer or float
-%%       using a conservative amount of digits. Great for generating
-%%       human-readable output, or compact ASCII serializations for floats.
-digits(N) when is_integer(N) ->
-    integer_to_list(N);
-digits(0.0) ->
-    "0.0";
-digits(Float) ->
-    {Frac1, Exp1} = frexp_int(Float),
-    [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
-    {Place, Digits} = transform_digits(Place0, Digits0),
-    R = insert_decimal(Place, Digits),
-    case Float < 0 of
-        true ->
-            [$- | R];
-        _ ->
-            R
-    end.
-
-%% @spec frexp(F::float()) -> {Frac::float(), Exp::integer()}
-%% @doc  Return the fractional and exponent part of an IEEE 754 double,
-%%       equivalent to the libc function of the same name.
-%%       F = Frac * pow(2, Exp).
-frexp(F) ->
-    frexp1(unpack(F)).
-
-%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
-%% @doc  Moderately efficient way to exponentiate integers.
-%%       int_pow(10, 2) = 100.
-int_pow(_X, 0) ->
-    1;
-int_pow(X, N) when N > 0 ->
-    int_pow(X, N, 1).
-
-%% @spec int_ceil(F::float()) -> integer()
-%% @doc  Return the ceiling of F as an integer. The ceiling is defined as
-%%       F when F == trunc(F);
-%%       trunc(F) when F &lt; 0;
-%%       trunc(F) + 1 when F &gt; 0.
-int_ceil(X) ->
-    T = trunc(X),
-    case (X - T) of
-        Pos when Pos > 0 -> T + 1;
-        _ -> T
-    end.
-
-
-%% Internal API
-
-int_pow(X, N, R) when N < 2 ->
-    R * X;
-int_pow(X, N, R) ->
-    int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
-
-insert_decimal(0, S) ->
-    "0." ++ S;
-insert_decimal(Place, S) when Place > 0 ->
-    L = length(S),
-    case Place - L of
-         0 ->
-            S ++ ".0";
-        N when N < 0 ->
-            {S0, S1} = lists:split(L + N, S),
-            S0 ++ "." ++ S1;
-        N when N < 6 ->
-            %% More places than digits
-            S ++ lists:duplicate(N, $0) ++ ".0";
-        _ ->
-            insert_decimal_exp(Place, S)
-    end;
-insert_decimal(Place, S) when Place > -6 ->
-    "0." ++ lists:duplicate(abs(Place), $0) ++ S;
-insert_decimal(Place, S) ->
-    insert_decimal_exp(Place, S).
-
-insert_decimal_exp(Place, S) ->
-    [C | S0] = S,
-    S1 = case S0 of
-             [] ->
-                 "0";
-             _ ->
-                 S0
-         end,
-    Exp = case Place < 0 of
-              true ->
-                  "e-";
-              false ->
-                  "e+"
-          end,
-    [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
-
-
-digits1(Float, Exp, Frac) ->
-    Round = ((Frac band 1) =:= 0),
-    case Exp >= 0 of
-        true ->
-            BExp = 1 bsl Exp,
-            case (Frac =/= ?BIG_POW) of
-                true ->
-                    scale((Frac * BExp * 2), 2, BExp, BExp,
-                          Round, Round, Float);
-                false ->
-                    scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
-                          Round, Round, Float)
-            end;
-        false ->
-            case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
-                true ->
-                    scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
-                          Round, Round, Float);
-                false ->
-                    scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
-                          Round, Round, Float)
-            end
-    end.
-
-scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
-    Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
-    %% Note that the scheme implementation uses a 326 element look-up table
-    %% for int_pow(10, N) where we do not.
-    case Est >= 0 of
-        true ->
-            fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
-                  LowOk, HighOk);
-        false ->
-            Scale = int_pow(10, -Est),
-            fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
-                  LowOk, HighOk)
-    end.
-
-fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
-    TooLow = case HighOk of
-                 true ->
-                     (R + MPlus) >= S;
-                 false ->
-                     (R + MPlus) > S
-             end,
-    case TooLow of
-        true ->
-            [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
-        false ->
-            [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
-    end.
-
-generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
-    D = R0 div S,
-    R = R0 rem S,
-    TC1 = case LowOk of
-              true ->
-                  R =< MMinus;
-              false ->
-                  R < MMinus
-          end,
-    TC2 = case HighOk of
-              true ->
-                  (R + MPlus) >= S;
-              false ->
-                  (R + MPlus) > S
-          end,
-    case TC1 of
-        false ->
-            case TC2 of
-                false ->
-                    [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
-                                  LowOk, HighOk)];
-                true ->
-                    [D + 1]
-            end;
-        true ->
-            case TC2 of
-                false ->
-                    [D];
-                true ->
-                    case R * 2 < S of
-                        true ->
-                            [D];
-                        false ->
-                            [D + 1]
-                    end
-            end
-    end.
-
-unpack(Float) ->
-    <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
-    {Sign, Exp, Frac}.
-
-frexp1({_Sign, 0, 0}) ->
-    {0.0, 0};
-frexp1({Sign, 0, Frac}) ->
-    Exp = log2floor(Frac),
-    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
-    {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
-frexp1({Sign, Exp, Frac}) ->
-    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
-    {Frac1, Exp - ?FLOAT_BIAS}.
-
-log2floor(Int) ->
-    log2floor(Int, 0).
-
-log2floor(0, N) ->
-    N;
-log2floor(Int, N) ->
-    log2floor(Int bsr 1, 1 + N).
-
-
-transform_digits(Place, [0 | Rest]) ->
-    transform_digits(Place, Rest);
-transform_digits(Place, Digits) ->
-    {Place, [$0 + D || D <- Digits]}.
-
-
-frexp_int(F) ->
-    case unpack(F) of
-        {_Sign, 0, Frac} ->
-            {Frac, ?MIN_EXP};
-        {_Sign, Exp, Frac} ->
-            {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
-    end.
-
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-int_ceil_test() ->
-    ?assertEqual(1, int_ceil(0.0001)),
-    ?assertEqual(0, int_ceil(0.0)),
-    ?assertEqual(1, int_ceil(0.99)),
-    ?assertEqual(1, int_ceil(1.0)),
-    ?assertEqual(-1, int_ceil(-1.5)),
-    ?assertEqual(-2, int_ceil(-2.0)),
-    ok.
-
-int_pow_test() ->
-    ?assertEqual(1, int_pow(1, 1)),
-    ?assertEqual(1, int_pow(1, 0)),
-    ?assertEqual(1, int_pow(10, 0)),
-    ?assertEqual(10, int_pow(10, 1)),
-    ?assertEqual(100, int_pow(10, 2)),
-    ?assertEqual(1000, int_pow(10, 3)),
-    ok.
-
-digits_test() ->
-    ?assertEqual("0",
-                 digits(0)),
-    ?assertEqual("0.0",
-                 digits(0.0)),
-    ?assertEqual("1.0",
-                 digits(1.0)),
-    ?assertEqual("-1.0",
-                 digits(-1.0)),
-    ?assertEqual("0.1",
-                 digits(0.1)),
-    ?assertEqual("0.01",
-                 digits(0.01)),
-    ?assertEqual("0.001",
-                 digits(0.001)),
-    ?assertEqual("1.0e+6",
-                 digits(1000000.0)),
-    ?assertEqual("0.5",
-                 digits(0.5)),
-    ?assertEqual("4503599627370496.0",
-                 digits(4503599627370496.0)),
-    %% small denormalized number
-    %% 4.94065645841246544177e-324 =:= 5.0e-324
-    <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
-    ?assertEqual("5.0e-324",
-                 digits(SmallDenorm)),
-    ?assertEqual(SmallDenorm,
-                 list_to_float(digits(SmallDenorm))),
-    %% large denormalized number
-    %% 2.22507385850720088902e-308
-    <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
-    ?assertEqual("2.225073858507201e-308",
-                 digits(BigDenorm)),
-    ?assertEqual(BigDenorm,
-                 list_to_float(digits(BigDenorm))),
-    %% small normalized number
-    %% 2.22507385850720138309e-308
-    <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
-    ?assertEqual("2.2250738585072014e-308",
-                 digits(SmallNorm)),
-    ?assertEqual(SmallNorm,
-                 list_to_float(digits(SmallNorm))),
-    %% large normalized number
-    %% 1.79769313486231570815e+308
-    <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
-    ?assertEqual("1.7976931348623157e+308",
-                 digits(LargeNorm)),
-    ?assertEqual(LargeNorm,
-                 list_to_float(digits(LargeNorm))),
-    %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
-    ?assertEqual("5.0e-324",
-                 digits(math:pow(2, -1074))),
-    ok.
-
-frexp_test() ->
-    %% zero
-    ?assertEqual({0.0, 0}, frexp(0.0)),
-    %% one
-    ?assertEqual({0.5, 1}, frexp(1.0)),
-    %% negative one
-    ?assertEqual({-0.5, 1}, frexp(-1.0)),
-    %% small denormalized number
-    %% 4.94065645841246544177e-324
-    <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
-    ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
-    %% large denormalized number
-    %% 2.22507385850720088902e-308
-    <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
-    ?assertEqual(
-       {0.99999999999999978, -1022},
-       frexp(BigDenorm)),
-    %% small normalized number
-    %% 2.22507385850720138309e-308
-    <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
-    ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
-    %% large normalized number
-    %% 1.79769313486231570815e+308
-    <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
-    ?assertEqual(
-        {0.99999999999999989, 1024},
-        frexp(LargeNorm)),
-    %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
-    ?assertEqual(
-       {0.5, -1073},
-       frexp(math:pow(2, -1074))),
-    ok.
-
--endif.

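digits/1 above implements the shortest-round-trip printing algorithm from the
Burger/Dybvig paper cited in the module header, and frexp/1 mirrors the libc
function. A few illustrative shell calls, with outputs taken from the deleted
test cases:

    1> mochinum:digits(0.1).
    "0.1"
    2> mochinum:digits(1000000.0).
    "1.0e+6"
    3> mochinum:frexp(1.0).
    {0.5,1}
    %% Frac * 2^Exp reconstructs the input:
    4> 0.5 * math:pow(2, 1).
    1.0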

[43/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/twig


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/26dbcc16
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/26dbcc16
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/26dbcc16

Branch: refs/heads/1843-feature-bigcouch
Commit: 26dbcc160ee6b019fe6513d02edbe8821af4af18
Parents: 350a7ef
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:42:54 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:42:54 2014 -0600

----------------------------------------------------------------------
 src/twig/README.md                  |  11 --
 src/twig/src/trunc_io.erl           | 215 -------------------------------
 src/twig/src/twig.app.src           |  20 ---
 src/twig/src/twig.erl               |  53 --------
 src/twig/src/twig_app.erl           |  21 ---
 src/twig/src/twig_event_handler.erl | 162 -----------------------
 src/twig/src/twig_int.hrl           |  24 ----
 src/twig/src/twig_monitor.erl       |  46 -------
 src/twig/src/twig_sup.erl           |  24 ----
 src/twig/src/twig_util.erl          |  80 ------------
 10 files changed, 656 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/README.md
----------------------------------------------------------------------
diff --git a/src/twig/README.md b/src/twig/README.md
deleted file mode 100644
index 97852ef..0000000
--- a/src/twig/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Twig is a SASL-compliant Erlang/OTP logger.  It installs a gen_event handler in the error_logger event manager, where it consumes standard OTP reports and messages as well as events generated by twig:log/2,3,4.  Log messages are written to a syslog server over UDP using the format specified in RFC 5424.
-
-Twig's behavior is controlled using the application environment:
-
-* host (undefined): the hostname of the syslog server
-* port (514): the port of the syslog server
-* facility (local2): syslog facility to be used
-* level (info): logging threshold. Messages "above" this threshold (in syslog parlance) will be discarded. Acceptable values are debug, info, notice, warn, err, crit, alert, and emerg.
-* appid ("twig"): inserted as the APPID in the syslog message
-* max_term_size (8192): raw data size below which we format normally
-* max_message_size (16000): approx. max size of truncated string

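Since twig reads all of these settings from its application environment, a
deployment would typically carry them in a sys.config stanza. A sketch; the
host and appid values below are illustrative, not defaults:

    {twig, [
        {host, "syslog.example.com"},  %% example value; default is undefined
        {port, 514},
        {facility, local2},
        {level, info},
        {appid, "couchdb"}             %% example value; default is "twig"
    ]}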
http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/trunc_io.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/trunc_io.erl b/src/twig/src/trunc_io.erl
deleted file mode 100644
index cfa6c97..0000000
--- a/src/twig/src/trunc_io.erl
+++ /dev/null
@@ -1,215 +0,0 @@
-%% ``The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with your Erlang distribution. If not, it can be
-%% retrieved via the world wide web at http://www.erlang.org/.
-%% 
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%% 
-%% The Initial Developer of the Original Code is Corelatus AB.
-%% Portions created by Corelatus are Copyright 2003, Corelatus
-%% AB. All Rights Reserved.''
-%%
-%% Module to print out terms for logging. Limits by length rather than depth.
-%%
-%% The resulting string may be slightly larger than the limit; the intention
-%% is to provide predictable CPU and memory consumption for formatting
-%% terms, not produce precise string lengths.
-%%
-%% Typical use:
-%%
-%%   trunc_io:print(Term, 500).
-%%
--module(trunc_io).
--author('matthias@corelatus.se').
-%% And thanks to Chris Newcombe for a bug fix 
--export([print/2, fprint/2, safe/2]).               % interface functions
--export([perf/0, perf/3, perf1/0, test/0, test/2]). % testing functions
--version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
-
-
-%% Returns a flattened list containing the ASCII representation of the given
-%% term.
-fprint(T, Max) -> 
-    {L, _} = print(T, Max),
-    lists:flatten(L).
-
-%% Same as print, but never crashes. 
-%%
-%% This is a tradeoff. Print might conceivably crash if it's asked to
-%% print something it doesn't understand, for example some new data
-%% type in a future version of Erlang. If print crashes, we fall back
-%% to io_lib to format the term, but then the formatting is
-%% depth-limited instead of length-limited, so you might run out
-%% of memory printing it. Out of the frying pan and into the fire.
-%% 
-safe(What, Len) ->
-    case catch print(What, Len) of
-	{L, Used} when is_list(L) -> {L, Used};
-	_ -> {"unable to print" ++ io_lib:write(What, 99)}
-    end.	     
-
-%% Returns {List, Length}
-print(_, Max) when Max < 0 -> {"...", 3};
-print(Tuple, Max) when is_tuple(Tuple) ->
-    {TC, Len} = tuple_contents(Tuple, Max-2),
-    {[${, TC, $}], Len + 2};
-
-%% We assume atoms, floats, funs, integers, PIDs, ports and refs never need 
-%% to be truncated. This isn't strictly true, someone could make an 
-%% arbitrarily long bignum. Let's assume that won't happen unless someone
-%% is being malicious.
-%%
-print(Atom, _Max) when is_atom(Atom) ->
-    L = atom_to_list(Atom),
-    {L, length(L)};
-
-print(<<>>, _Max) ->
-    {"<<>>", 4};
-
-print(Binary, Max) when is_binary(Binary) ->
-    B = binary_to_list(Binary, 1, lists:min([Max, size(Binary)])),
-    {L, Len} = alist_start(B, Max-4),
-    {["<<", L, ">>"], Len};
-
-print(Float, _Max) when is_float(Float) ->
-    L = float_to_list(Float),
-    {L, length(L)};
-
-print(Fun, _Max) when is_function(Fun) ->
-    L = erlang:fun_to_list(Fun),
-    {L, length(L)};
-
-print(Integer, _Max) when is_integer(Integer) ->
-    L = integer_to_list(Integer),
-    {L, length(L)};
-
-print(Pid, _Max) when is_pid(Pid) ->
-    L = pid_to_list(Pid),
-    {L, length(L)};
-
-print(Ref, _Max) when is_reference(Ref) ->
-    L = erlang:ref_to_list(Ref),
-    {L, length(L)};
-
-print(Port, _Max) when is_port(Port) ->
-    L = erlang:port_to_list(Port),
-    {L, length(L)};
-
-print(List, Max) when is_list(List) ->
-    alist_start(List, Max).
-
-%% Returns {List, Length}
-tuple_contents(Tuple, Max) ->
-    L = tuple_to_list(Tuple),
-    list_body(L, Max).
-
-%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
-%% Returns {List, Length}
-list_body([], _) -> {[], 0};
-list_body(_, Max) when Max < 4 -> {"...", 3};
-list_body([H|T], Max) -> 
-    {List, Len} = print(H, Max),
-    {Final, FLen} = list_bodyc(T, Max - Len),
-    {[List|Final], FLen + Len};
-list_body(X, Max) ->  %% improper list
-    {List, Len} = print(X, Max - 1),
-    {[$|,List], Len + 1}.
-
-list_bodyc([], _) -> {[], 0};
-list_bodyc(_, Max) when Max < 4 -> {"...", 3};
-list_bodyc([H|T], Max) -> 
-    {List, Len} = print(H, Max),
-    {Final, FLen} = list_bodyc(T, Max - Len - 1),
-    {[$,, List|Final], FLen + Len + 1};
-list_bodyc(X,Max) ->  %% improper list
-    {List, Len} = print(X, Max - 1),
-    {[$|,List], Len + 1}.
-
-%% The head of a list we hope is ASCII. Examples:
-%%
-%% [65,66,67] -> "ABC"
-%% [65,0,67] -> "A"[0,67]
-%% [0,65,66] -> [0,65,66]
-%% [65,b,66] -> "A"[b,66]
-%%
-alist_start([], _) -> {"[]", 2};
-alist_start(_, Max) when Max < 4 -> {"...", 3};
-alist_start([H|T], Max) when H >= 16#20, H =< 16#7e ->  % definitely printable
-    {L, Len} = alist([H|T], Max-1),
-    {[$\"|L], Len + 1};
-alist_start([H|T], Max) when H == 9; H == 10; H == 13 ->   % show as space
-    {L, Len} = alist(T, Max-1),
-    {[$ |L], Len + 1};
-alist_start(L, Max) ->
-    {R, Len} = list_body(L, Max-2),
-    {[$[, R, $]], Len + 2}.
-
-alist([], _) -> {"\"", 1};
-alist(_, Max) when Max < 5 -> {"...\"", 4};
-alist([H|T], Max) when H >= 16#20, H =< 16#7e ->     % definitely printable
-    {L, Len} = alist(T, Max-1),
-    {[H|L], Len + 1};
-alist([H|T], Max) when H == 9; H == 10; H == 13 ->   % show as space
-    {L, Len} = alist(T, Max-1),
-    {[$ |L], Len + 1};
-alist(L, Max) ->
-    {R, Len} = list_body(L, Max-3),
-    {[$\", $[, R, $]], Len + 3}.
-
-
-%%--------------------
-%% The start of a test suite. So far, it only checks for not crashing.
-test() ->
-    test(trunc_io, print).
-
-test(Mod, Func) ->
-    Simple_items = [atom, 1234, 1234.0, {tuple}, [], [list], "string", self(),
-		    <<1,2,3>>, make_ref(), fun() -> ok end],
-    F = fun(A) ->
-		Mod:Func(A, 100),
-		Mod:Func(A, 2),
-		Mod:Func(A, 20)
-	end,
-
-    G = fun(A) ->
-		case catch F(A) of
-		    {'EXIT', _} -> exit({failed, A});
-		    _ -> ok
-		end
-	end,
-    
-    lists:foreach(G, Simple_items),
-    
-    Tuples = [ {1,2,3,a,b,c}, {"abc", def, 1234},
-	       {{{{a},b,c,{d},e}},f}],
-    
-    Lists = [ [1,2,3,4,5,6,7], lists:seq(1,1000),
-	      [{a}, {a,b}, {a, [b,c]}, "def"], [a|b], [$a|$b] ],
-    
-    
-    lists:foreach(G, Tuples),
-    lists:foreach(G, Lists).
-    
-perf() ->
-    {New, _} = timer:tc(trunc_io, perf, [trunc_io, print, 1000]),
-    {Old, _} = timer:tc(trunc_io, perf, [io_lib, write, 1000]),
-    io:fwrite("New code took ~p us, old code ~p\n", [New, Old]).
-
-perf(M, F, Reps) when Reps > 0 ->
-    test(M,F),
-    perf(M,F,Reps-1);
-perf(_,_,_) ->
-    done.    
-
-%% Performance test. Needs a particularly large term I saved as a binary...
-perf1() ->
-    {ok, Bin} = file:read_file("bin"),
-    A = binary_to_term(Bin),
-    {N, _} = timer:tc(trunc_io, print, [A, 1500]),
-    {M, _} = timer:tc(io_lib, write, [A]),
-    {N, M}.
-

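The {IoList, Length} contract of print/2 makes it cheap to cap log payloads
without flattening. A minimal usage sketch (the 50-character budget is
arbitrary, and as the header comment notes the result may slightly exceed
it):

    %% Format a large term under a rough 50-character budget:
    {IoList, Length} = trunc_io:print(lists:seq(1, 1000), 50),
    io:format("~s (~p chars)~n", [IoList, Length]).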
http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig.app.src
----------------------------------------------------------------------
diff --git a/src/twig/src/twig.app.src b/src/twig/src/twig.app.src
deleted file mode 100644
index 751375e..0000000
--- a/src/twig/src/twig.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, twig, [
-    {description, "Logger"},
-    {vsn, git},
-    {registered, []},
-    {applications, [kernel, stdlib]},
-    {mod, {twig_app, []}},
-    {env, []}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig.erl b/src/twig/src/twig.erl
deleted file mode 100644
index e62cd33..0000000
--- a/src/twig/src/twig.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig).
-
--export([log/2, log/3, log/4, set_level/1]).
-
--include("twig_int.hrl").
-
-set_level(LevelAtom) ->
-    application:set_env(twig, level, twig_util:level(LevelAtom)).
-
-log(LevelAtom, String) ->
-    log(LevelAtom, String, [], []).
-
-log(LevelAtom, Format, Data) ->
-    log(LevelAtom, Format, Data, []).
-
-log(LevelAtom, Format, Data, _Options) ->
-    %% TODO do something useful with options
-    Level = twig_util:level(LevelAtom),
-    case application:get_env(twig, level) of
-        {ok, Threshold} when Level =< Threshold ->
-            send_message(Level, Format, Data);
-        undefined when Level =< ?LEVEL_INFO ->
-            send_message(Level, Format, Data);
-        _ ->
-            ok
-    end.
-
-%% internal
-
-send_message(Level, Format, Data) ->
-    gen_event:sync_notify(error_logger, format(Level, Format, Data)).
-
-format(Level, Format, Data) ->
-    %% TODO truncate large messages
-    #twig{
-        level = Level,
-        msg = iolist_to_binary(twig_util:format(Format, Data)),
-        msgid = erlang:get(nonce),
-        pid = self()
-    }.
-

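log/4 above forwards a message only when its numeric level is at or below the
configured threshold (lower numbers are more severe; see twig_int.hrl below).
A usage sketch with a placeholder database name:

    DbName = <<"mydb">>,                          %% illustrative binding
    ok = twig:set_level(info),
    twig:log(err, "failed to open ~s", [DbName]), %% sent: err (3) =< info (6)
    twig:log(debug, "open options: ~p", [[]]).    %% dropped: debug (7) > 6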
http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_app.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_app.erl b/src/twig/src/twig_app.erl
deleted file mode 100644
index e16ad58..0000000
--- a/src/twig/src/twig_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_StartType, _StartArgs) ->
-    twig_sup:start_link().
-
-stop(_State) ->
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_event_handler.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_event_handler.erl b/src/twig/src/twig_event_handler.erl
deleted file mode 100644
index d7b691b..0000000
--- a/src/twig/src/twig_event_handler.erl
+++ /dev/null
@@ -1,162 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig_event_handler).
-
--behaviour(gen_event).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
-        code_change/3]).
-
--import(twig_util, [get_env/2]).
-
--record(state, {
-    socket,
-    host,
-    port,
-    hostname,
-    os_pid,
-    appid,
-    facility,
-    level
-}).
-
--include("twig_int.hrl").
-
-init([]) ->
-    {ok, Socket} = gen_udp:open(0),
-    {ok, ok, State} = handle_call(load_config, #state{socket=Socket}),
-    {ok, State}.
-
-handle_event(#twig{level=Level, msgid=MsgId, msg=Msg, pid=Pid}, State) ->
-    write(Level, MsgId, Msg, Pid, State),
-    {ok, State};
-
-% OTP standard events
-handle_event({Class, _GL, {Pid, Format, Args}}, #state{level=Max} = State) ->
-    case otp_event_level(Class, Format) of
-        undefined ->
-            {ok, State};
-        Level when Level > Max ->
-            {ok, State};
-        Level ->
-            {MsgId, Msg} = message(Format, Args),
-            write(Level, MsgId, Msg, Pid, State),
-            {ok, State}
-    end;
-
-handle_event(_Event, State) ->
-    {ok, State}.
-
-handle_call({set_level, Level}, State) ->
-    {ok, ok, State#state{level = Level}};
-
-handle_call(load_config, State) ->
-    Host = case inet:getaddr(get_env(host, undefined), inet) of
-    {ok, Address} ->
-        Address;
-    {error, _} ->
-        undefined
-    end,
-    NewState = State#state{
-        host = Host,
-        port = get_env(port, 514),
-        hostname = net_adm:localhost(),
-        os_pid = os:getpid(),
-        appid = get_env(appid, "twig"),
-        facility = twig_util:facility(get_env(facility, local2)),
-        level = twig_util:level(get_env(level, info))
-    },
-    {ok, ok, NewState};
-
-handle_call(_Call, State) ->
-    {ok, ignored, State}.
-
-handle_info(_Info, State) ->
-    {ok, State}.
-
-terminate(_Reason, State) ->
-    gen_udp:close(State#state.socket).
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-write(Level, undefined, Msg, Pid, State) ->
-    write(Level, "--------", Msg, Pid, State);
-write(Level, MsgId, Msg, Pid, State) when is_list(Msg); is_binary(Msg) ->
-    #state{facility=Facil, appid=App, hostname=Hostname, host=Host, port=Port,
-        socket=Socket} = State,
-    Pre = io_lib:format("<~B>~B ~s ~s ~s ~p ~s - ", [Facil bor Level,
-        ?SYSLOG_VERSION, twig_util:iso8601_timestamp(), Hostname, App, Pid,
-        MsgId]),
-    send(Socket, Host, Port, [Pre, Msg, $\n]).
-
-send(_, undefined, _, Packet) ->
-    io:put_chars(Packet);
-send(Socket, Host, Port, Packet) ->
-    gen_udp:send(Socket, Host, Port, Packet).
-
-message(crash_report, Report) ->
-    Msg = case erts_debug:flat_size(Report) > get_env(max_term_size, 8192) of
-        true ->
-            MaxString = get_env(max_message_size, 16000),
-            ["*Truncated* - ", trunc_io:print(Report, MaxString)];
-        false ->
-            proc_lib:format(Report)
-    end,
-    {crash_report, Msg};
-message(supervisor_report, Report) ->
-    Name = get_value(supervisor, Report),
-    Error = get_value(errorContext, Report),
-    Reason = get_value(reason, Report),
-    Offender = get_value(offender, Report),
-    ChildPid = get_value(pid, Offender),
-    ChildName = get_value(name, Offender),
-    case get_value(mfa, Offender) of
-        undefined ->
-            {M,F,_} = get_value(mfargs, Offender);
-        {M,F,_} ->
-            ok
-    end,
-    {supervisor_report, twig_util:format("~p ~p (~p) child: ~p [~p] ~p:~p",
-            [Name, Error, Reason, ChildName, ChildPid, M, F])};
-message(Type, Report) when Type == std_error;
-                           Type == std_info;
-                           Type == std_warning;
-                           Type == progress_report;
-                           Type == progress ->
-    {Type, twig_util:format("~2048.0p", [Report])};
-message(Format, Args) when is_list(Format) ->
-    {msg, twig_util:format(Format, Args)};
-message(Format, Args) ->
-    {unknown, twig_util:format("~2048.0p ~2048.0p", [Format, Args])}.
-
-otp_event_level(_, crash_report) ->         ?LEVEL_CRIT;
-otp_event_level(_, supervisor_report) ->    ?LEVEL_WARN;
-otp_event_level(_, supervisor) ->           ?LEVEL_WARN;
-otp_event_level(_, progress_report) ->      ?LEVEL_DEBUG;
-otp_event_level(_, progress) ->             ?LEVEL_DEBUG;
-otp_event_level(error, _) ->                ?LEVEL_ERR;
-otp_event_level(warning_msg, _) ->          ?LEVEL_WARN;
-otp_event_level(info_msg, _) ->             ?LEVEL_NOTICE;
-otp_event_level(error_report, _) ->         ?LEVEL_ERR;
-otp_event_level(warning_report, _) ->       ?LEVEL_WARN;
-otp_event_level(info_report, _) ->          ?LEVEL_NOTICE;
-otp_event_level(_, _) ->                    ?LEVEL_DEBUG.
-
-get_value(Key, Props) ->
-    case lists:keyfind(Key, 1, Props) of
-        {Key, Value} ->
-            Value;
-        false ->
-            undefined
-    end.

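write/5 above frames each message per RFC 5424: the PRI is the facility OR'ed
with the severity, followed by the version, an ISO 8601 timestamp, hostname,
APPID, pid, and msgid. With the defaults (facility local2 = 144, level info =
6, so PRI = 150), a datagram would look roughly like the line below; the
hostname, pid, and message body are illustrative:

    <150>1 2014-02-04T23:42:54.123456Z myhost twig <0.123.0> -------- - compaction finished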
http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_int.hrl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_int.hrl b/src/twig/src/twig_int.hrl
deleted file mode 100644
index 81ef8b5..0000000
--- a/src/twig/src/twig_int.hrl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(SYSLOG_VERSION, 1).
-
--define(LEVEL_DEBUG, 7).
--define(LEVEL_INFO, 6).
--define(LEVEL_NOTICE, 5).
--define(LEVEL_WARN, 4).
--define(LEVEL_ERR, 3).
--define(LEVEL_CRIT, 2).
--define(LEVEL_ALERT, 1).
--define(LEVEL_EMERG, 0).
-
--record(twig, {level, msgid, msg, pid}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_monitor.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_monitor.erl b/src/twig/src/twig_monitor.erl
deleted file mode 100644
index 81b04bd..0000000
--- a/src/twig/src/twig_monitor.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig_monitor).
-
--behaviour(gen_server).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-        code_change/3]).
-
--export([start_link/0]).
-
-start_link() ->
-    gen_server:start_link(?MODULE, [], []).
-
-init(_) ->
-    ok = gen_event:add_sup_handler(error_logger, twig_event_handler, []),
-    {ok, nil}.
-
-handle_call(_Call, _From, State) ->
-    {reply, ignored, State}.
-
-handle_cast(_Cast, State) ->
-    {noreply, State}.
-
-handle_info({gen_event_EXIT, twig_event_handler, Reason} = Msg, State) ->
-    io:format("~p~n", [Msg]),
-    {stop, Reason, State};
-
-handle_info(_Msg, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_, State, _) ->
-    {ok, State}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_sup.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_sup.erl b/src/twig/src/twig_sup.erl
deleted file mode 100644
index f778f0d..0000000
--- a/src/twig/src/twig_sup.erl
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init([]) ->
-    {ok, { {one_for_one, 5, 10}, [?CHILD(twig_monitor, worker)]} }.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/26dbcc16/src/twig/src/twig_util.erl
----------------------------------------------------------------------
diff --git a/src/twig/src/twig_util.erl b/src/twig/src/twig_util.erl
deleted file mode 100644
index f5442fd..0000000
--- a/src/twig/src/twig_util.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(twig_util).
-
--export([format/2, get_env/2, level/1, facility/1, iso8601_timestamp/0]).
-
-level(debug) ->     7;
-level(info) ->      6;
-level(notice) ->    5;
-level(warn) ->      4;
-level(warning) ->   4;
-level(err) ->       3;
-level(error) ->     3;
-level(crit) ->      2;
-level(alert) ->     1;
-level(emerg) ->     0;
-level(panic) ->     0;
-
-level(I) when is_integer(I), I >= 0, I =< 7 ->
-    I;
-level(_BadLevel) ->
-    3.
-
-facility(kern)     -> (0 bsl 3) ; % kernel messages
-facility(user)     -> (1 bsl 3) ; % random user-level messages
-facility(mail)     -> (2 bsl 3) ; % mail system
-facility(daemon)   -> (3 bsl 3) ; % system daemons
-facility(auth)     -> (4 bsl 3) ; % security/authorization messages
-facility(syslog)   -> (5 bsl 3) ; % messages generated internally by syslogd
-facility(lpr)      -> (6 bsl 3) ; % line printer subsystem
-facility(news)     -> (7 bsl 3) ; % network news subsystem
-facility(uucp)     -> (8 bsl 3) ; % UUCP subsystem
-facility(cron)     -> (9 bsl 3) ; % clock daemon
-facility(authpriv) -> (10 bsl 3); % security/authorization messages (private)
-facility(ftp)      -> (11 bsl 3); % ftp daemon
-
-facility(local0)   -> (16 bsl 3);
-facility(local1)   -> (17 bsl 3);
-facility(local2)   -> (18 bsl 3);
-facility(local3)   -> (19 bsl 3);
-facility(local4)   -> (20 bsl 3);
-facility(local5)   -> (21 bsl 3);
-facility(local6)   -> (22 bsl 3);
-facility(local7)   -> (23 bsl 3).
-
-
-iso8601_timestamp() ->
-    {_,_,Micro} = Now = os:timestamp(),
-    {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
-    Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
-    io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
-format(Format, Data) ->
-    MaxTermSize = get_env(max_term_size, 8192),
-    case erts_debug:flat_size(Data) > MaxTermSize of
-        true ->
-            MaxString = get_env(max_message_size, 16000),
-            {Truncated, _} = trunc_io:print(Data, MaxString),
-            ["*Truncated* ", Format, " - ", Truncated];
-        false ->
-            io_lib:format(Format, Data)
-    end.
-
-get_env(Key, Default) ->
-    case application:get_env(twig, Key) of
-        {ok, Value} ->
-            Value;
-        undefined ->
-            Default
-    end.

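The helpers above supply the arithmetic behind that framing: level/1 maps
atoms to syslog severities and facility/1 shifts the facility code left three
bits, per RFC 5424. A few shell calls; the timestamp reflects the current
clock, so the value shown is illustrative:

    1> twig_util:level(warn).
    4
    2> twig_util:facility(daemon).
    24
    3> lists:flatten(twig_util:iso8601_timestamp()).
    "2014-02-04T23:42:54.123456Z"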

[17/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/couch_mrview


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/2acbbd31
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/2acbbd31
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/2acbbd31

Branch: refs/heads/1843-feature-bigcouch
Commit: 2acbbd31352ef327ce7177a21f46a1f0a1efd4c5
Parents: de4ff66
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:39:56 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:39:56 2014 -0600

----------------------------------------------------------------------
 src/couch_mrview/include/couch_mrview.hrl       |  82 ---
 src/couch_mrview/src/couch_mrview.app.src       |  28 -
 src/couch_mrview/src/couch_mrview.erl           | 436 ------------
 src/couch_mrview/src/couch_mrview_cleanup.erl   |  47 --
 src/couch_mrview/src/couch_mrview_compactor.erl | 183 -----
 src/couch_mrview/src/couch_mrview_http.erl      | 339 ---------
 src/couch_mrview/src/couch_mrview_index.erl     | 140 ----
 src/couch_mrview/src/couch_mrview_show.erl      | 363 ----------
 src/couch_mrview/src/couch_mrview_test_util.erl |  91 ---
 src/couch_mrview/src/couch_mrview_updater.erl   | 322 ---------
 src/couch_mrview/src/couch_mrview_util.erl      | 710 -------------------
 src/couch_mrview/test/01-load.t                 |  34 -
 src/couch_mrview/test/02-map-views.t            | 120 ----
 src/couch_mrview/test/03-red-views.t            |  78 --
 src/couch_mrview/test/04-index-info.t           |  43 --
 src/couch_mrview/test/05-collation.t            | 164 -----
 src/couch_mrview/test/06-all-docs.t             | 127 ----
 src/couch_mrview/test/07-compact-swap.t         |  57 --
 18 files changed, 3364 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/include/couch_mrview.hrl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/include/couch_mrview.hrl b/src/couch_mrview/include/couch_mrview.hrl
deleted file mode 100644
index 6a0dfd0..0000000
--- a/src/couch_mrview/include/couch_mrview.hrl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(mrst, {
-    sig=nil,
-    fd=nil,
-    fd_monitor,
-    db_name,
-    idx_name,
-    language,
-    design_opts=[],
-    lib,
-    views,
-    id_btree=nil,
-    update_seq=0,
-    purge_seq=0,
-
-    first_build,
-    partial_resp_pid,
-    doc_acc,
-    doc_queue,
-    write_queue,
-    qserver=nil
-}).
-
-
--record(mrview, {
-    id_num,
-    update_seq=0,
-    purge_seq=0,
-    map_names=[],
-    reduce_funs=[],
-    def,
-    btree=nil,
-    options=[]
-}).
-
-
--record(mrheader, {
-    seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-}).
-
-
--record(mrargs, {
-    view_type,
-    reduce,
-
-    preflight_fun,
-
-    start_key,
-    start_key_docid,
-    end_key,
-    end_key_docid,
-    keys,
-
-    direction = fwd,
-    limit = 16#10000000,
-    skip = 0,
-    group_level = 0,
-    stale = false,
-    multi_get = false,
-    inclusive_end = true,
-    include_docs = false,
-    update_seq=false,
-    conflicts,
-    callback,
-    list,
-    sorted = true,
-    extra = []
-}).

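The #mrargs record above carries every supported view-query option, from key
ranges to include_docs. Callers usually do not build it by hand; the API
modules accept a proplist and convert it, as query_all_docs/4 below does via
to_mrargs/1. A hedged sketch of a call in that style (Db is a placeholder for
an open database handle, and the design document and view names are
examples):

    {ok, Acc} = couch_mrview:query_view(Db, <<"_design/foo">>, <<"bar">>,
        [{limit, 10}, {include_docs, true}]).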
http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview.app.src
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview.app.src b/src/couch_mrview/src/couch_mrview.app.src
deleted file mode 100644
index 99c52f0..0000000
--- a/src/couch_mrview/src/couch_mrview.app.src
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_mrview, [
-    {description, "CouchDB Map/Reduce Views"},
-    {vsn, git},
-    {modules, [
-        couch_mrview,
-        couch_mrview_compactor,
-        couch_mrview_http,
-        couch_mrview_index,
-        couch_mrview_show,
-        couch_mrview_test_util,
-        couch_mrview_updater,
-        couch_mrview_util
-    ]},
-    {registered, []},
-    {applications, [kernel, stdlib, couch_index]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
deleted file mode 100644
index 29b86d7..0000000
--- a/src/couch_mrview/src/couch_mrview.erl
+++ /dev/null
@@ -1,436 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview).
-
--export([validate/2]).
--export([query_all_docs/2, query_all_docs/4]).
--export([query_view/3, query_view/4, query_view/6]).
--export([get_info/2]).
--export([trigger_update/2, trigger_update/3]).
--export([compact/2, compact/3, cancel_compaction/2]).
--export([cleanup/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--record(mracc, {
-    db,
-    meta_sent=false,
-    total_rows,
-    offset,
-    limit,
-    skip,
-    group_level,
-    doc_info,
-    callback,
-    user_acc,
-    last_go=ok,
-    reduce_fun,
-    update_seq,
-    args
-}).
-
-
-validate(DbName, DDoc) ->
-    GetName = fun
-        (#mrview{map_names = [Name | _]}) -> Name;
-        (#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
-        (_) -> null
-    end,
-    ValidateView = fun(Proc, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
-        couch_query_servers:try_compile(Proc, map, GetName(View), MapSrc),
-        lists:foreach(fun
-            ({_RedName, <<"_", _/binary>>}) ->
-                ok;
-            ({RedName, RedSrc}) ->
-                couch_query_servers:try_compile(Proc, reduce, RedName, RedSrc)
-        end, Reds)
-    end,
-    {ok, #mrst{language=Lang, views=Views}}
-            = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
-    try Views =/= [] andalso couch_query_servers:get_os_process(Lang) of
-        false ->
-            ok;
-        Proc ->
-            try
-                lists:foreach(fun(V) -> ValidateView(Proc, V) end, Views)
-            catch Error ->
-                Error
-            after
-                couch_query_servers:ret_os_process(Proc)
-            end
-    catch {unknown_query_language, _Lang} ->
-        %% Allow users to save ddocs written in unknown languages
-        ok
-    end.
-
-
-query_all_docs(Db, Args) ->
-    query_all_docs(Db, Args, fun default_cb/2, []).
-
-
-query_all_docs(Db, Args, Callback, Acc) when is_list(Args) ->
-    query_all_docs(Db, to_mrargs(Args), Callback, Acc);
-query_all_docs(Db, Args0, Callback, Acc) ->
-    Sig = couch_util:with_db(Db, fun(WDb) ->
-        {ok, Info} = couch_db:get_db_info(WDb),
-        couch_index_util:hexsig(couch_util:md5(term_to_binary(Info)))
-    end),
-    Args1 = Args0#mrargs{view_type=map},
-    Args2 = couch_mrview_util:validate_args(Args1),
-    {ok, Acc1} = case Args2#mrargs.preflight_fun of
-        PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
-        _ -> {ok, Acc}
-    end,
-    all_docs_fold(Db, Args2, Callback, Acc1).
-
-
-query_view(Db, DDoc, VName) ->
-    query_view(Db, DDoc, VName, #mrargs{}).
-
-
-query_view(Db, DDoc, VName, Args) when is_list(Args) ->
-    query_view(Db, DDoc, VName, to_mrargs(Args), fun default_cb/2, []);
-query_view(Db, DDoc, VName, Args) ->
-    query_view(Db, DDoc, VName, Args, fun default_cb/2, []).
-
-
-query_view(Db, DDoc, VName, Args, Callback, Acc) when is_list(Args) ->
-    query_view(Db, DDoc, VName, to_mrargs(Args), Callback, Acc);
-query_view(Db, DDoc, VName, Args0, Callback, Acc0) ->
-    {ok, VInfo, Sig, Args} = couch_mrview_util:get_view(Db, DDoc, VName, Args0),
-    {ok, Acc1} = case Args#mrargs.preflight_fun of
-        PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc0);
-        _ -> {ok, Acc0}
-    end,
-    query_view(Db, VInfo, Args, Callback, Acc1).
-
-
-query_view(Db, {Type, View, Ref}, Args, Callback, Acc) ->
-    try
-        case Type of
-            map -> map_fold(Db, View, Args, Callback, Acc);
-            red -> red_fold(Db, View, Args, Callback, Acc)
-        end
-    after
-        erlang:demonitor(Ref, [flush])
-    end.
-
-
-get_info(Db, DDoc) ->
-    {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
-    couch_index:get_info(Pid).
-
-
-trigger_update(Db, DDoc) ->
-    trigger_update(Db, DDoc, couch_db:get_update_seq(Db)).
-
-
-trigger_update(Db, DDoc, UpdateSeq) ->
-    {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
-    couch_index:trigger_update(Pid, UpdateSeq).
-
-
-compact(Db, DDoc) ->
-    compact(Db, DDoc, []).
-
-
-compact(Db, DDoc, Opts) ->
-    {ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
-    couch_index:compact(Pid, Opts).
-
-
-cancel_compaction(Db, DDoc) ->
-    {ok, IPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
-    {ok, CPid} = couch_index:get_compactor_pid(IPid),
-    ok = couch_index_compactor:cancel(CPid),
-
-    % Clean up the compaction file if it exists
-    {ok, #mrst{sig=Sig, db_name=DbName}} = couch_index:get_state(IPid, 0),
-    couch_mrview_util:delete_compaction_file(DbName, Sig),
-    ok.
-
-
-cleanup(Db) ->
-    couch_mrview_cleanup:run(Db).
-
-
-all_docs_fold(Db, #mrargs{keys=undefined}=Args, Callback, UAcc) ->
-    {ok, Info} = couch_db:get_db_info(Db),
-    Total = couch_util:get_value(doc_count, Info),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    Acc = #mracc{
-        db=Db,
-        total_rows=Total,
-        limit=Args#mrargs.limit,
-        skip=Args#mrargs.skip,
-        callback=Callback,
-        user_acc=UAcc,
-        reduce_fun=fun couch_mrview_util:all_docs_reduce_to_count/1,
-        update_seq=UpdateSeq,
-        args=Args
-    },
-    [Opts] = couch_mrview_util:all_docs_key_opts(Args),
-    {ok, Offset, FinalAcc} = couch_db:enum_docs(Db, fun map_fold/3, Acc, Opts),
-    finish_fold(FinalAcc, [{total, Total}, {offset, Offset}]);
-all_docs_fold(Db, #mrargs{direction=Dir, keys=Keys0}=Args, Callback, UAcc) ->
-    {ok, Info} = couch_db:get_db_info(Db),
-    Total = couch_util:get_value(doc_count, Info),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    Acc = #mracc{
-        db=Db,
-        total_rows=Total,
-        limit=Args#mrargs.limit,
-        skip=Args#mrargs.skip,
-        callback=Callback,
-        user_acc=UAcc,
-        reduce_fun=fun couch_mrview_util:all_docs_reduce_to_count/1,
-        update_seq=UpdateSeq,
-        args=Args
-    },
-    % Backwards compatibility hack. The old _all_docs iterated keys
-    % in reverse when descending=true was passed. Here we'll just
-    % reverse the list instead.
-    Keys = if Dir =:= fwd -> Keys0; true -> lists:reverse(Keys0) end,
-
-    FoldFun = fun(Key, Acc0) ->
-        DocInfo = (catch couch_db:get_doc_info(Db, Key)),
-        {Doc, Acc1} = case DocInfo of
-            {ok, #doc_info{id=Id, revs=[RevInfo | _RestRevs]}=DI} ->
-                Rev = couch_doc:rev_to_str(RevInfo#rev_info.rev),
-                Props = [{rev, Rev}] ++ case RevInfo#rev_info.deleted of
-                    true -> [{deleted, true}];
-                    false -> []
-                end,
-                {{{Id, Id}, {Props}}, Acc0#mracc{doc_info=DI}};
-            not_found ->
-                {{{Key, error}, not_found}, Acc0}
-        end,
-        {_, Acc2} = map_fold(Doc, {[], [{0, 0, 0}]}, Acc1),
-        Acc2
-    end,
-    FinalAcc = lists:foldl(FoldFun, Acc, Keys),
-    finish_fold(FinalAcc, [{total, Total}]).
-
-
-map_fold(Db, View, Args, Callback, UAcc) ->
-    {ok, Total} = couch_mrview_util:get_row_count(View),
-    Acc = #mracc{
-        db=Db,
-        total_rows=Total,
-        limit=Args#mrargs.limit,
-        skip=Args#mrargs.skip,
-        callback=Callback,
-        user_acc=UAcc,
-        reduce_fun=fun couch_mrview_util:reduce_to_count/1,
-        update_seq=View#mrview.update_seq,
-        args=Args
-    },
-    OptList = couch_mrview_util:key_opts(Args),
-    {Reds, Acc2} = lists:foldl(fun(Opts, {_, Acc0}) ->
-        {ok, R, A} = couch_mrview_util:fold(View, fun map_fold/3, Acc0, Opts),
-        {R, A}
-    end, {nil, Acc}, OptList),
-    Offset = couch_mrview_util:reduce_to_count(Reds),
-    finish_fold(Acc2, [{total, Total}, {offset, Offset}]).
-
-
-map_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
-    % matches for _all_docs and translates #full_doc_info{} -> KV pair
-    case couch_doc:to_doc_info(FullDocInfo) of
-        #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]} = DI ->
-            Value = {[{rev, couch_doc:rev_to_str(Rev)}]},
-            map_fold({{Id, Id}, Value}, OffsetReds, Acc#mracc{doc_info=DI});
-        #doc_info{revs=[#rev_info{deleted=true}|_]} ->
-            {ok, Acc}
-    end;
-map_fold(_KV, _Offset, #mracc{skip=N}=Acc) when N > 0 ->
-    {ok, Acc#mracc{skip=N-1, last_go=ok}};
-map_fold(KV, OffsetReds, #mracc{offset=undefined}=Acc) ->
-    #mracc{
-        total_rows=Total,
-        callback=Callback,
-        user_acc=UAcc0,
-        reduce_fun=Reduce,
-        update_seq=UpdateSeq,
-        args=Args
-    } = Acc,
-    Offset = Reduce(OffsetReds),
-    Meta = make_meta(Args, UpdateSeq, [{total, Total}, {offset, Offset}]),
-    {Go, UAcc1} = Callback(Meta, UAcc0),
-    Acc1 = Acc#mracc{meta_sent=true, offset=Offset, user_acc=UAcc1, last_go=Go},
-    case Go of
-        ok -> map_fold(KV, OffsetReds, Acc1);
-        stop -> {stop, Acc1}
-    end;
-map_fold(_KV, _Offset, #mracc{limit=0}=Acc) ->
-    {stop, Acc};
-map_fold({{Key, Id}, Val}, _Offset, Acc) ->
-    #mracc{
-        db=Db,
-        limit=Limit,
-        doc_info=DI,
-        callback=Callback,
-        user_acc=UAcc0,
-        args=Args
-    } = Acc,
-    Doc = case DI of
-        #doc_info{} -> couch_mrview_util:maybe_load_doc(Db, DI, Args);
-        _ -> couch_mrview_util:maybe_load_doc(Db, Id, Val, Args)
-    end,
-    Row = [{id, Id}, {key, Key}, {value, Val}] ++ Doc,
-    {Go, UAcc1} = Callback({row, Row}, UAcc0),
-    {Go, Acc#mracc{
-        limit=Limit-1,
-        doc_info=undefined,
-        user_acc=UAcc1,
-        last_go=Go
-    }}.
-
-
-red_fold(Db, {_Nth, _Lang, View}=RedView, Args, Callback, UAcc) ->
-    Acc = #mracc{
-        db=Db,
-        total_rows=null,
-        limit=Args#mrargs.limit,
-        skip=Args#mrargs.skip,
-        group_level=Args#mrargs.group_level,
-        callback=Callback,
-        user_acc=UAcc,
-        update_seq=View#mrview.update_seq,
-        args=Args
-    },
-    GroupFun = group_rows_fun(Args#mrargs.group_level),
-    OptList = couch_mrview_util:key_opts(Args, [{key_group_fun, GroupFun}]),
-    Acc2 = lists:foldl(fun(Opts, Acc0) ->
-        {ok, Acc1} =
-            couch_mrview_util:fold_reduce(RedView, fun red_fold/3,  Acc0, Opts),
-        Acc1
-    end, Acc, OptList),
-    finish_fold(Acc2, []).
-
-red_fold(_Key, _Red, #mracc{skip=N}=Acc) when N > 0 ->
-    {ok, Acc#mracc{skip=N-1, last_go=ok}};
-red_fold(Key, Red, #mracc{meta_sent=false}=Acc) ->
-    #mracc{
-        args=Args,
-        callback=Callback,
-        user_acc=UAcc0,
-        update_seq=UpdateSeq
-    } = Acc,
-    Meta = make_meta(Args, UpdateSeq, []),
-    {Go, UAcc1} = Callback(Meta, UAcc0),
-    Acc1 = Acc#mracc{user_acc=UAcc1, meta_sent=true, last_go=Go},
-    case Go of
-        ok -> red_fold(Key, Red, Acc1);
-        _ -> {Go, Acc1}
-    end;
-red_fold(_Key, _Red, #mracc{limit=0} = Acc) ->
-    {stop, Acc};
-red_fold(_Key, Red, #mracc{group_level=0} = Acc) ->
-    #mracc{
-        limit=Limit,
-        callback=Callback,
-        user_acc=UAcc0
-    } = Acc,
-    Row = [{key, null}, {value, Red}],
-    {Go, UAcc1} = Callback({row, Row}, UAcc0),
-    {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(Key, Red, #mracc{group_level=exact} = Acc) ->
-    #mracc{
-        limit=Limit,
-        callback=Callback,
-        user_acc=UAcc0
-    } = Acc,
-    Row = [{key, Key}, {value, Red}],
-    {Go, UAcc1} = Callback({row, Row}, UAcc0),
-    {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0, is_list(K) ->
-    #mracc{
-        limit=Limit,
-        callback=Callback,
-        user_acc=UAcc0
-    } = Acc,
-    Row = [{key, lists:sublist(K, I)}, {value, Red}],
-    {Go, UAcc1} = Callback({row, Row}, UAcc0),
-    {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0 ->
-    #mracc{
-        limit=Limit,
-        callback=Callback,
-        user_acc=UAcc0
-    } = Acc,
-    Row = [{key, K}, {value, Red}],
-    {Go, UAcc1} = Callback({row, Row}, UAcc0),
-    {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}}.
-
-
-finish_fold(#mracc{last_go=ok, update_seq=UpdateSeq}=Acc,  ExtraMeta) ->
-    #mracc{callback=Callback, user_acc=UAcc, args=Args}=Acc,
-    % Possibly send meta info
-    Meta = make_meta(Args, UpdateSeq, ExtraMeta),
-    {Go, UAcc1} = case Acc#mracc.meta_sent of
-        false -> Callback(Meta, UAcc);
-        _ -> {ok, Acc#mracc.user_acc}
-    end,
-    % Notify callback that the fold is complete.
-    {_, UAcc2} = case Go of
-        ok -> Callback(complete, UAcc1);
-        _ -> {ok, UAcc1}
-    end,
-    {ok, UAcc2};
-finish_fold(#mracc{user_acc=UAcc}, _ExtraMeta) ->
-    {ok, UAcc}.
-
-
-make_meta(Args, UpdateSeq, Base) ->
-    case Args#mrargs.update_seq of
-        true -> {meta, Base ++ [{update_seq, UpdateSeq}]};
-        _ -> {meta, Base}
-    end.
-
-
-group_rows_fun(exact) ->
-    fun({Key1,_}, {Key2,_}) -> Key1 == Key2 end;
-group_rows_fun(0) ->
-    fun(_A, _B) -> true end;
-group_rows_fun(GroupLevel) when is_integer(GroupLevel) ->
-    fun({[_|_] = Key1,_}, {[_|_] = Key2,_}) ->
-        lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
-    ({Key1,_}, {Key2,_}) ->
-        Key1 == Key2
-    end.
-
-
-default_cb(complete, Acc) ->
-    {ok, lists:reverse(Acc)};
-default_cb({final, Info}, []) ->
-    {ok, [Info]};
-default_cb({final, _}, Acc) ->
-    {ok, Acc};
-default_cb(Row, Acc) ->
-    {ok, [Row | Acc]}.
-
-
-to_mrargs(KeyList) ->
-    lists:foldl(fun({Key, Value}, Acc) ->
-        Index = lookup_index(couch_util:to_existing_atom(Key)),
-        setelement(Index, Acc, Value)
-    end, #mrargs{}, KeyList).
-
-
-lookup_index(Key) ->
-    Index = lists:zip(
-        record_info(fields, mrargs), lists:seq(2, record_info(size, mrargs))
-    ),
-    couch_util:get_value(Key, Index).
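For orientation, the exports above take query options either as an #mrargs
record or as a property list that to_mrargs/1 folds into one via
lookup_index/1. A hedged usage sketch (Db is an open db handle, the view name
is made up, and whether DDoc is a #doc{} or a design-doc id is decided by
couch_mrview_util:get_view/4, which is outside this hunk):

    %% Collect every row of the view through default_cb/2.
    {ok, Rows} = couch_mrview:query_view(Db, DDoc, <<"baz">>,
        [{limit, 100}, {include_docs, true}]),
    %% Rows starts with a {meta, [...]} entry (total/offset) followed by
    %% one {row, [...]} per result, in index order.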

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_cleanup.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_cleanup.erl b/src/couch_mrview/src/couch_mrview_cleanup.erl
deleted file mode 100644
index d6b69b4..0000000
--- a/src/couch_mrview/src/couch_mrview_cleanup.erl
+++ /dev/null
@@ -1,47 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_cleanup).
-
--export([run/1]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-run(Db) ->
-    RootDir = couch_index_util:root_dir(),
-    DbName = couch_db:name(Db),
-
-    {ok, DesignDocs} = couch_db:get_design_docs(Db),
-    SigFiles = lists:foldl(fun(DDocInfo, SFAcc) ->
-        {ok, DDoc} = couch_db:open_doc_int(Db, DDocInfo, [ejson_body]),
-        {ok, InitState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
-        Sig = InitState#mrst.sig,
-        IFName = couch_mrview_util:index_file(DbName, Sig),
-        CFName = couch_mrview_util:compaction_file(DbName, Sig),
-        [IFName, CFName | SFAcc]
-    end, [], [DD || DD <- DesignDocs, DD#full_doc_info.deleted == false]),
-
-    IdxDir = couch_index_util:index_dir(mrview, DbName),
-    DiskFiles = filelib:wildcard(filename:join(IdxDir, "*")),
-
-    % We need to delete files that have no ddoc.
-    ToDelete = DiskFiles -- SigFiles,
-
-    lists:foreach(fun(FN) ->
-        ?LOG_DEBUG("Deleting stale view file: ~s", [FN]),
-        couch_file:delete(RootDir, FN, false)
-    end, ToDelete),
-
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_compactor.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
deleted file mode 100644
index b45d92b..0000000
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ /dev/null
@@ -1,183 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_compactor).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--export([compact/3, swap_compacted/2]).
-
--record(acc, {
-   btree = nil,
-   last_id = nil,
-   kvs = [],
-   kvs_size = 0,
-   changes = 0,
-   total_changes
-}).
-
-
-compact(_Db, State, Opts) ->
-    case lists:member(recompact, Opts) of
-        false -> compact(State);
-        true -> recompact(State)
-    end.
-
-compact(State) ->
-    #mrst{
-        db_name=DbName,
-        idx_name=IdxName,
-        sig=Sig,
-        update_seq=Seq,
-        id_btree=IdBtree,
-        views=Views
-    } = State,
-
-    EmptyState = couch_util:with_db(DbName, fun(Db) ->
-        CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
-        {ok, Fd} = couch_mrview_util:open_file(CompactFName),
-        couch_mrview_util:reset_index(Db, Fd, State)
-    end),
-
-    #mrst{
-        id_btree = EmptyIdBtree,
-        views = EmptyViews
-    } = EmptyState,
-
-    {ok, Count} = couch_btree:full_reduce(IdBtree),
-    TotalChanges = lists:foldl(
-        fun(View, Acc) ->
-            {ok, Kvs} = couch_mrview_util:get_row_count(View),
-            Acc + Kvs
-        end,
-        Count, Views),
-    couch_task_status:add_task([
-        {type, view_compaction},
-        {database, DbName},
-        {design_document, IdxName},
-        {progress, 0}
-    ]),
-
-    BufferSize0 = config:get(
-        "view_compaction", "keyvalue_buffer_size", "2097152"
-    ),
-    BufferSize = list_to_integer(BufferSize0),
-
-    FoldFun = fun({DocId, ViewIdKeys} = KV, Acc) ->
-        #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc,
-        NewKvs = case Kvs of
-            [{DocId, OldViewIdKeys} | Rest] ->
-                ?LOG_ERROR("Dupes of ~s in ~s ~s", [DocId, DbName, IdxName]),
-                [{DocId, ViewIdKeys ++ OldViewIdKeys} | Rest];
-            _ ->
-                [KV | Kvs]
-        end,
-        KvsSize2 = KvsSize + ?term_size(KV),
-        case KvsSize2 >= BufferSize of
-            true ->
-                {ok, Bt2} = couch_btree:add(Bt, lists:reverse(NewKvs)),
-                Acc2 = update_task(Acc, length(NewKvs)),
-                {ok, Acc2#acc{
-                    btree = Bt2, kvs = [], kvs_size = 0, last_id = DocId}};
-            _ ->
-                {ok, Acc#acc{
-                    kvs = NewKvs, kvs_size = KvsSize2, last_id = DocId}}
-        end
-    end,
-
-    InitAcc = #acc{total_changes = TotalChanges, btree = EmptyIdBtree},
-    {ok, _, FinalAcc} = couch_btree:foldl(IdBtree, FoldFun, InitAcc),
-    #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
-    {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
-    FinalAcc2 = update_task(FinalAcc, length(Uncopied)),
-
-    {NewViews, _} = lists:mapfoldl(fun({View, EmptyView}, Acc) ->
-        compact_view(View, EmptyView, BufferSize, Acc)
-    end, FinalAcc2, lists:zip(Views, EmptyViews)),
-
-    unlink(EmptyState#mrst.fd),
-    {ok, EmptyState#mrst{
-        id_btree=NewIdBtree,
-        views=NewViews,
-        update_seq=Seq
-    }}.
-
-
-recompact(State) ->
-    link(State#mrst.fd),
-    {Pid, Ref} = erlang:spawn_monitor(fun() ->
-        couch_index_updater:update(couch_mrview_index, State)
-    end),
-    receive
-        {'DOWN', Ref, _, _, {updated, Pid, State2}} ->
-            unlink(State#mrst.fd),
-            {ok, State2}
-    end.
-
-
-%% @spec compact_view(View, EmptyView, BufferSize, Acc) -> {CompactView, NewAcc}
-compact_view(#mrview{id_num=VID}=View, EmptyView, BufferSize, Acc0) ->
-    Fun = fun(KV, #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc) ->
-        KvsSize2 = KvsSize + ?term_size(KV),
-        if KvsSize2 >= BufferSize ->
-            {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV | Kvs])),
-            Acc2 = update_task(VID, Acc, 1 + length(Kvs)),
-            {ok, Acc2#acc{btree = Bt2, kvs = [], kvs_size = 0}};
-        true ->
-            {ok, Acc#acc{kvs = [KV | Kvs], kvs_size = KvsSize2}}
-        end
-    end,
-
-    InitAcc = Acc0#acc{kvs = [], kvs_size = 0, btree = EmptyView#mrview.btree},
-    {ok, _, FinalAcc} = couch_btree:foldl(View#mrview.btree, Fun, InitAcc),
-    #acc{btree = Bt3, kvs = Uncopied} = FinalAcc,
-    {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
-    FinalAcc2 = update_task(VID, FinalAcc, length(Uncopied)),
-    {EmptyView#mrview{btree=NewBt}, FinalAcc2}.
-
-
-update_task(Acc, ChangesInc) ->
-    update_task(null, Acc, ChangesInc).
-
-
-update_task(VID, #acc{changes=Changes, total_changes=Total}=Acc, ChangesInc) ->
-    Phase = if is_integer(VID) -> view; true -> ids end,
-    Changes2 = Changes + ChangesInc,
-    couch_task_status:update([
-        {phase, Phase},
-        {view, VID},
-        {changes_done, Changes2},
-        {total_changes, Total},
-        {progress, (Changes2 * 100) div Total}
-    ]),
-    Acc#acc{changes = Changes2}.
-
-
-swap_compacted(OldState, NewState) ->
-    #mrst{
-        sig=Sig,
-        db_name=DbName
-    } = NewState,
-
-    link(NewState#mrst.fd),
-
-    RootDir = couch_index_util:root_dir(),
-    IndexFName = couch_mrview_util:index_file(DbName, Sig),
-    CompactFName = couch_mrview_util:compaction_file(DbName, Sig),
-    ok = couch_file:delete(RootDir, IndexFName),
-    ok = file:rename(CompactFName, IndexFName),
-
-    unlink(OldState#mrst.fd),
-    erlang:demonitor(OldState#mrst.fd_monitor, [flush]),
-    
-    {ok, NewState}.
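The copy loops above buffer key/values until a batch crosses the
view_compaction/keyvalue_buffer_size threshold (2097152 bytes by default). A
hedged sketch of raising it, assuming config:set/3 is available alongside the
config:get/3 used above:

    %% Double the compaction copy buffer to 4 MiB; the value stays a
    %% string because compact/1 parses it with list_to_integer/1.
    ok = config:set("view_compaction", "keyvalue_buffer_size", "4194304").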

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_http.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
deleted file mode 100644
index cf81b33..0000000
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ /dev/null
@@ -1,339 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_http).
-
--export([
-    handle_all_docs_req/2,
-    handle_view_req/3,
-    handle_temp_view_req/2,
-    handle_info_req/3,
-    handle_compact_req/3,
-    handle_cleanup_req/2,
-    parse_qs/2
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
--record(vacc, {
-    db,
-    req,
-    resp,
-    prepend,
-    etag
-}).
-
-
-handle_all_docs_req(#httpd{method='GET'}=Req, Db) ->
-    all_docs_req(Req, Db, undefined);
-handle_all_docs_req(#httpd{method='POST'}=Req, Db) ->
-    Keys = get_view_keys(couch_httpd:json_body_obj(Req)),
-    all_docs_req(Req, Db, Keys);
-handle_all_docs_req(Req, _Db) ->
-    couch_httpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-
-handle_view_req(#httpd{method='GET'}=Req, Db, DDoc) ->
-    [_, _, _, _, ViewName] = Req#httpd.path_parts,
-    couch_stats_collector:increment({httpd, view_reads}),
-    design_doc_view(Req, Db, DDoc, ViewName, undefined);
-handle_view_req(#httpd{method='POST'}=Req, Db, DDoc) ->
-    [_, _, _, _, ViewName] = Req#httpd.path_parts,
-    Keys = get_view_keys(couch_httpd:json_body_obj(Req)),
-    couch_stats_collector:increment({httpd, view_reads}),
-    design_doc_view(Req, Db, DDoc, ViewName, Keys);
-handle_view_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-
-handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_db:check_is_admin(Db),
-    {Body} = couch_httpd:json_body_obj(Req),
-    DDoc = couch_mrview_util:temp_view_to_ddoc({Body}),
-    Keys = get_view_keys({Body}),
-    couch_stats_collector:increment({httpd, temporary_view_reads}),
-    design_doc_view(Req, Db, DDoc, <<"temp">>, Keys);
-handle_temp_view_req(Req, _Db) ->
-    couch_httpd:send_method_not_allowed(Req, "POST").
-
-
-handle_info_req(#httpd{method='GET'}=Req, Db, DDoc) ->
-    [_, _, Name, _] = Req#httpd.path_parts,
-    {ok, Info} = couch_mrview:get_info(Db, DDoc),
-    couch_httpd:send_json(Req, 200, {[
-        {name, Name},
-        {view_index, {Info}}
-    ]});
-handle_info_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_method_not_allowed(Req, "GET").
-
-
-handle_compact_req(#httpd{method='POST'}=Req, Db, DDoc) ->
-    ok = couch_db:check_is_admin(Db),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_mrview:compact(Db, DDoc),
-    couch_httpd:send_json(Req, 202, {[{ok, true}]});
-handle_compact_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_method_not_allowed(Req, "POST").
-
-
-handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
-    ok = couch_db:check_is_admin(Db),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_mrview:cleanup(Db),
-    couch_httpd:send_json(Req, 202, {[{ok, true}]});
-handle_cleanup_req(Req, _Db) ->
-    couch_httpd:send_method_not_allowed(Req, "POST").
-
-
-all_docs_req(Req, Db, Keys) ->
-    case couch_db:is_system_db(Db) of
-    true ->
-        case (catch couch_db:check_is_admin(Db)) of
-        ok ->
-            do_all_docs_req(Req, Db, Keys);
-        _ ->
-            throw({forbidden, <<"Only admins can access _all_docs",
-                " of system databases.">>})
-        end;
-    false ->
-        do_all_docs_req(Req, Db, Keys)
-    end.
-
-
-do_all_docs_req(Req, Db, Keys) ->
-    Args0 = parse_qs(Req, Keys),
-    ETagFun = fun(Sig, Acc0) ->
-        ETag = couch_httpd:make_etag(Sig),
-        case couch_httpd:etag_match(Req, ETag) of
-            true -> throw({etag_match, ETag});
-            false -> {ok, Acc0#vacc{etag=ETag}}
-        end
-    end,
-    Args = Args0#mrargs{preflight_fun=ETagFun},
-    {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
-        VAcc0 = #vacc{db=Db, req=Req},
-        couch_mrview:query_all_docs(Db, Args, fun view_cb/2, VAcc0)
-    end),
-    case is_record(Resp, vacc) of
-        true -> {ok, Resp#vacc.resp};
-        _ -> {ok, Resp}
-    end.
-
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
-    Args0 = parse_qs(Req, Keys),
-    ETagFun = fun(Sig, Acc0) ->
-        ETag = couch_httpd:make_etag(Sig),
-        case couch_httpd:etag_match(Req, ETag) of
-            true -> throw({etag_match, ETag});
-            false -> {ok, Acc0#vacc{etag=ETag}}
-        end
-    end,
-    Args = Args0#mrargs{preflight_fun=ETagFun},
-    {ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
-        VAcc0 = #vacc{db=Db, req=Req},
-        couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, VAcc0)
-    end),
-    case is_record(Resp, vacc) of
-        true -> {ok, Resp#vacc.resp};
-        _ -> {ok, Resp}
-    end.
-
-
-view_cb({meta, Meta}, #vacc{resp=undefined}=Acc) ->
-    Headers = [{"ETag", Acc#vacc.etag}],
-    {ok, Resp} = couch_httpd:start_json_response(Acc#vacc.req, 200, Headers),
-    % Map function starting
-    Parts = case couch_util:get_value(total, Meta) of
-        undefined -> [];
-        Total -> [io_lib:format("\"total_rows\":~p", [Total])]
-    end ++ case couch_util:get_value(offset, Meta) of
-        undefined -> [];
-        Offset -> [io_lib:format("\"offset\":~p", [Offset])]
-    end ++ case couch_util:get_value(update_seq, Meta) of
-        undefined -> [];
-        UpdateSeq -> [io_lib:format("\"update_seq\":~p", [UpdateSeq])]
-    end ++ ["\"rows\":["],
-    Chunk = lists:flatten("{" ++ string:join(Parts, ",") ++ "\r\n"),
-    couch_httpd:send_chunk(Resp, Chunk),
-    {ok, Acc#vacc{resp=Resp, prepend=""}};
-view_cb({row, Row}, #vacc{resp=undefined}=Acc) ->
-    % Reduce function starting
-    Headers = [{"ETag", Acc#vacc.etag}],
-    {ok, Resp} = couch_httpd:start_json_response(Acc#vacc.req, 200, Headers),
-    couch_httpd:send_chunk(Resp, ["{\"rows\":[\r\n", row_to_json(Row)]),
-    {ok, #vacc{resp=Resp, prepend=",\r\n"}};
-view_cb({row, Row}, Acc) ->
-    % Adding another row
-    couch_httpd:send_chunk(Acc#vacc.resp, [Acc#vacc.prepend, row_to_json(Row)]),
-    {ok, Acc#vacc{prepend=",\r\n"}};
-view_cb(complete, #vacc{resp=undefined}=Acc) ->
-    % Nothing in view
-    {ok, Resp} = couch_httpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
-    {ok, Acc#vacc{resp=Resp}};
-view_cb(complete, Acc) ->
-    % Finish view output
-    couch_httpd:send_chunk(Acc#vacc.resp, "\r\n]}"),
-    couch_httpd:end_json_response(Acc#vacc.resp),
-    {ok, Acc}.
-
-
-row_to_json(Row) ->
-    Id = couch_util:get_value(id, Row),
-    row_to_json(Id, Row).
-
-
-row_to_json(error, Row) ->
-    % Special case for _all_docs request with KEYS to
-    % match prior behavior.
-    Key = couch_util:get_value(key, Row),
-    Val = couch_util:get_value(value, Row),
-    Obj = {[{key, Key}, {error, Val}]},
-    ?JSON_ENCODE(Obj);
-row_to_json(Id0, Row) ->
-    Id = case Id0 of
-        undefined -> [];
-        Id0 -> [{id, Id0}]
-    end,
-    Key = couch_util:get_value(key, Row, null),
-    Val = couch_util:get_value(value, Row),
-    Doc = case couch_util:get_value(doc, Row) of
-        undefined -> [];
-        Doc0 -> [{doc, Doc0}]
-    end,
-    Obj = {Id ++ [{key, Key}, {value, Val}] ++ Doc},
-    ?JSON_ENCODE(Obj).
-
-
-get_view_keys({Props}) ->
-    case couch_util:get_value(<<"keys">>, Props) of
-        undefined ->
-            ?LOG_DEBUG("POST with no keys member.", []),
-            undefined;
-        Keys when is_list(Keys) ->
-            Keys;
-        _ ->
-            throw({bad_request, "`keys` member must be an array."})
-    end.
-
-
-parse_qs(Req, Keys) ->
-    Args = #mrargs{keys=Keys},
-    lists:foldl(fun({K, V}, Acc) ->
-        parse_qs(K, V, Acc)
-    end, Args, couch_httpd:qs(Req)).
-
-
-parse_qs(Key, Val, Args) ->
-    case Key of
-        "" ->
-            Args;
-        "reduce" ->
-            Args#mrargs{reduce=parse_boolean(Val)};
-        "key" ->
-            JsonKey = ?JSON_DECODE(Val),
-            Args#mrargs{start_key=JsonKey, end_key=JsonKey};
-        "keys" ->
-            Args#mrargs{keys=?JSON_DECODE(Val)};
-        "startkey" ->
-            Args#mrargs{start_key=?JSON_DECODE(Val)};
-        "start_key" ->
-            Args#mrargs{start_key=?JSON_DECODE(Val)};
-        "startkey_docid" ->
-            Args#mrargs{start_key_docid=list_to_binary(Val)};
-        "start_key_doc_id" ->
-            Args#mrargs{start_key_docid=list_to_binary(Val)};
-        "endkey" ->
-            Args#mrargs{end_key=?JSON_DECODE(Val)};
-        "end_key" ->
-            Args#mrargs{end_key=?JSON_DECODE(Val)};
-        "endkey_docid" ->
-            Args#mrargs{end_key_docid=list_to_binary(Val)};
-        "end_key_doc_id" ->
-            Args#mrargs{end_key_docid=list_to_binary(Val)};
-        "limit" ->
-            Args#mrargs{limit=parse_pos_int(Val)};
-        "count" ->
-            throw({query_parse_error, <<"QS param `count` is not `limit`">>});
-        "stale" when Val == "ok" ->
-            Args#mrargs{stale=ok};
-        "stale" when Val == "update_after" ->
-            Args#mrargs{stale=update_after};
-        "stale" ->
-            throw({query_parse_error, <<"Invalid value for `stale`.">>});
-        "descending" ->
-            case parse_boolean(Val) of
-                true -> Args#mrargs{direction=rev};
-                _ -> Args#mrargs{direction=fwd}
-            end;
-        "skip" ->
-            Args#mrargs{skip=parse_pos_int(Val)};
-        "group" ->
-            case parse_boolean(Val) of
-                true -> Args#mrargs{group_level=exact};
-                _ -> Args#mrargs{group_level=0}
-            end;
-        "group_level" ->
-            Args#mrargs{group_level=parse_pos_int(Val)};
-        "inclusive_end" ->
-            Args#mrargs{inclusive_end=parse_boolean(Val)};
-        "include_docs" ->
-            Args#mrargs{include_docs=parse_boolean(Val)};
-        "update_seq" ->
-            Args#mrargs{update_seq=parse_boolean(Val)};
-        "conflicts" ->
-            Args#mrargs{conflicts=parse_boolean(Val)};
-        "list" ->
-            Args#mrargs{list=list_to_binary(Val)};
-        "callback" ->
-            Args#mrargs{callback=list_to_binary(Val)};
-        _ ->
-            BKey = list_to_binary(Key),
-            BVal = list_to_binary(Val),
-            Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
-    end.
-
-
-parse_boolean(Val) ->
-    case string:to_lower(Val) of
-    "true" -> true;
-    "false" -> false;
-    _ ->
-        Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
-        throw({query_parse_error, ?l2b(Msg)})
-    end.
-
-
-parse_int(Val) ->
-    case (catch list_to_integer(Val)) of
-    IntVal when is_integer(IntVal) ->
-        IntVal;
-    _ ->
-        Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
-        throw({query_parse_error, ?l2b(Msg)})
-    end.
-
-
-parse_pos_int(Val) ->
-    case parse_int(Val) of
-    IntVal when IntVal >= 0 ->
-        IntVal;
-    _ ->
-        Fmt = "Invalid value for positive integer: ~p",
-        Msg = io_lib:format(Fmt, [Val]),
-        throw({query_parse_error, ?l2b(Msg)})
-    end.
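Taken together, parse_qs/2 walks couch_httpd:qs(Req) and maps each parameter
onto an #mrargs field through the big case above. An illustrative match,
assuming Req is an #httpd{} for a GET carrying
?limit=10&descending=true&include_docs=true (fields not named in the pattern
keep their record defaults, so the match succeeds):

    #mrargs{limit = 10, direction = rev, include_docs = true} =
        couch_mrview_http:parse_qs(Req, undefined).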

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_index.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
deleted file mode 100644
index ffcbf5d..0000000
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ /dev/null
@@ -1,140 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index).
-
-
--export([get/2]).
--export([init/2, open/2, close/1, reset/1, delete/1]).
--export([start_update/3, purge/4, process_doc/3, finish_update/1, commit/1]).
--export([compact/3, swap_compacted/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-get(Property, State) ->
-    case Property of
-        db_name ->
-            State#mrst.db_name;
-        idx_name ->
-            State#mrst.idx_name;
-        signature ->
-            State#mrst.sig;
-        update_seq ->
-            State#mrst.update_seq;
-        purge_seq ->
-            State#mrst.purge_seq;
-        update_options ->
-            Opts = State#mrst.design_opts,
-            IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
-            LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
-            if IncDesign -> [include_design]; true -> [] end
-                ++ if LocalSeq -> [local_seq]; true -> [] end;
-        info ->
-            #mrst{
-                fd = Fd,
-                sig = Sig,
-                id_btree = Btree,
-                language = Lang,
-                update_seq = UpdateSeq,
-                purge_seq = PurgeSeq,
-                views = Views
-            } = State,
-            {ok, Size} = couch_file:bytes(Fd),
-            {ok, DataSize} = couch_mrview_util:calculate_data_size(Btree,Views),
-            {ok, [
-                {signature, list_to_binary(couch_index_util:hexsig(Sig))},
-                {language, Lang},
-                {disk_size, Size},
-                {data_size, DataSize},
-                {update_seq, UpdateSeq},
-                {purge_seq, PurgeSeq}
-            ]};
-        Other ->
-            throw({unknown_index_property, Other})
-    end.
-
-
-init(Db, DDoc) ->
-    couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc).
-
-
-open(Db, State) ->
-    #mrst{
-        db_name=DbName,
-        sig=Sig
-    } = State,
-    IndexFName = couch_mrview_util:index_file(DbName, Sig),
-    case couch_mrview_util:open_file(IndexFName) of
-        {ok, Fd} ->
-            case (catch couch_file:read_header(Fd)) of
-                {ok, {Sig, Header}} ->
-                    % Matching view signatures.
-                    NewSt = couch_mrview_util:init_state(Db, Fd, State, Header),
-                    {ok, NewSt#mrst{fd_monitor=erlang:monitor(process, Fd)}};
-                _ ->
-                    NewSt = couch_mrview_util:reset_index(Db, Fd, State),
-                    {ok, NewSt#mrst{fd_monitor=erlang:monitor(process, Fd)}}
-            end;
-        {error, Reason} = Error ->
-            ?LOG_ERROR("Failed to open view file '~s': ~s",
-                       [IndexFName, file:format_error(Reason)]),
-            Error
-    end.
-
-
-close(State) ->
-    erlang:demonitor(State#mrst.fd_monitor, [flush]),
-    couch_file:close(State#mrst.fd).
-
-
-delete(#mrst{db_name=DbName, sig=Sig}=State) ->
-    couch_file:close(State#mrst.fd),
-    catch couch_mrview_util:delete_files(DbName, Sig).
-
-
-reset(State) ->
-    couch_util:with_db(State#mrst.db_name, fun(Db) ->
-        NewState = couch_mrview_util:reset_index(Db, State#mrst.fd, State),
-        {ok, NewState}
-    end).
-
-
-start_update(PartialDest, State, NumChanges) ->
-    couch_mrview_updater:start_update(PartialDest, State, NumChanges).
-
-
-purge(Db, PurgeSeq, PurgedIdRevs, State) ->
-    couch_mrview_updater:purge(Db, PurgeSeq, PurgedIdRevs, State).
-
-
-process_doc(Doc, Seq, State) ->
-    couch_mrview_updater:process_doc(Doc, Seq, State).
-
-
-finish_update(State) ->
-    couch_mrview_updater:finish_update(State).
-
-
-commit(State) ->
-    Header = {State#mrst.sig, couch_mrview_util:make_header(State)},
-    couch_file:write_header(State#mrst.fd, Header).
-
-
-compact(Db, State, Opts) ->
-    couch_mrview_compactor:compact(Db, State, Opts).
-
-
-swap_compacted(OldState, NewState) ->
-    couch_mrview_compactor:swap_compacted(OldState, NewState).
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_show.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_show.erl b/src/couch_mrview/src/couch_mrview_show.erl
deleted file mode 100644
index 1be96d5..0000000
--- a/src/couch_mrview/src/couch_mrview_show.erl
+++ /dev/null
@@ -1,363 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_show).
-
--export([
-    handle_doc_show_req/3,
-    handle_doc_update_req/3,
-    handle_view_list_req/3
-]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--record(lacc, {
-    db,
-    req,
-    resp,
-    qserver,
-    lname,
-    etag,
-    code,
-    headers
-}).
-
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc and then passes it to the query server.
-% then it sends the response from the query server to the http client.
-
-maybe_open_doc(Db, DocId) ->
-    case catch couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) of
-        #doc{} = Doc -> Doc;
-        {not_found, _} -> nil
-    end.
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId]
-    }=Req, Db, DDoc) ->
-
-    % open the doc
-    Doc = maybe_open_doc(Db, DocId),
-
-    % we don't handle revs here b/c they are an internal api
-    % returns 404 if there is no doc with DocId
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName, DocId|Rest]
-    }=Req, Db, DDoc) ->
-
-    DocParts = [DocId|Rest],
-    DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
-    % open the doc
-    Doc = maybe_open_doc(Db, DocId1),
-
-    % we don't handle revs here b/c they are an internal api
-    % pass 404 docs to the show function
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
-        path_parts=[_, _, _, _, ShowName]
-    }=Req, Db, DDoc) ->
-    % with no docid the doc is nil
-    handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
-    handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
-    % get responder for ddoc/showname
-    CurrentEtag = show_etag(Req, Doc, DDoc, []),
-    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
-        JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
-        JsonDoc = couch_query_servers:json_doc(Doc),
-        [<<"resp">>, ExternalResp] =
-            couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
-                [JsonDoc, JsonReq]),
-        JsonResp = apply_etag(ExternalResp, CurrentEtag),
-        couch_httpd_external:send_external_response(Req, JsonResp)
-    end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
-    Accept = couch_httpd:header_value(Req, "Accept"),
-    DocPart = case Doc of
-        nil -> nil;
-        Doc -> couch_httpd:doc_etag(Doc)
-    end,
-    couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
-        {UserCtx#user_ctx.name, UserCtx#user_ctx.roles}, More}).
-
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-%     % anything but GET
-%     send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-% This call is creating a new doc using an _update function to
-% modify the provided request body.
-% /db/_design/foo/_update/bar
-handle_doc_update_req(#httpd{
-        path_parts=[_, _, _, _, UpdateName]
-    }=Req, Db, DDoc) ->
-    send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-% /db/_design/foo/_update/bar/docid
-handle_doc_update_req(#httpd{
-        path_parts=[_, _, _, _, UpdateName | DocIdParts]
-    }=Req, Db, DDoc) ->
-    DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
-    Doc = maybe_open_doc(Db, DocId),
-    send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
-
-handle_doc_update_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
-    JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
-    JsonDoc = couch_query_servers:json_doc(Doc),
-    Cmd = [<<"updates">>, UpdateName],
-    UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
-    JsonResp = case UpdateResp of
-        [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
-            case couch_httpd:header_value(
-                    Req, "X-Couch-Full-Commit", "false") of
-                "true" ->
-                    Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
-                _ ->
-                    Options = [{user_ctx, Req#httpd.user_ctx}]
-            end,
-            NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
-            couch_doc:validate_docid(NewDoc#doc.id),
-            {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
-            NewRevStr = couch_doc:rev_to_str(NewRev),
-            {[
-                {<<"code">>, 201},
-                {<<"headers">>, {[
-                    {<<"X-Couch-Update-NewRev">>, NewRevStr},
-                    {<<"X-Couch-Id">>, NewDoc#doc.id}
-                ]}}
-                | JsonResp0]};
-        [<<"up">>, _Other, {JsonResp0}] ->
-            {[{<<"code">>, 200} | JsonResp0]}
-    end,
-    % todo set location field
-    couch_httpd_external:send_external_response(Req, JsonResp).
-
-
-handle_view_list_req(#httpd{method=Method}=Req, Db, DDoc)
-    when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
-    case Req#httpd.path_parts of
-        [_, _, _DName, _, LName, VName] ->
-            % Same design doc for view and list
-            handle_view_list(Req, Db, DDoc, LName, DDoc, VName, undefined);
-        [_, _, _, _, LName, DName, VName] ->
-            % Different design docs for view and list
-            VDocId = <<"_design/", DName/binary>>,
-            {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
-            handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, undefined);
-        _ ->
-            couch_httpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
-    end;
-handle_view_list_req(#httpd{method='POST'}=Req, Db, DDoc) ->
-    {Props} = couch_httpd:json_body_obj(Req),
-    Keys = proplists:get_value(<<"keys">>, Props),
-    case Req#httpd.path_parts of
-        [_, _, _DName, _, LName, VName] ->
-            handle_view_list(Req, Db, DDoc, LName, DDoc, VName, Keys);
-        [_, _, _, _, LName, DName, VName] ->
-            % Different design docs for view and list
-            VDocId = <<"_design/", DName/binary>>,
-            {ok, VDDoc} = couch_db:open_doc(Db, VDocId, [ejson_body]),
-            handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys);
-        _ ->
-            couch_httpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
-    end;
-handle_view_list_req(Req, _Db, _DDoc) ->
-    couch_httpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-
-handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys) ->
-    Args0 = couch_mrview_http:parse_qs(Req, Keys),
-    ETagFun = fun(BaseSig, Acc0) ->
-        UserCtx = Req#httpd.user_ctx,
-        Name = UserCtx#user_ctx.name,
-        Roles = UserCtx#user_ctx.roles,
-        Accept = couch_httpd:header_value(Req, "Accept"),
-        Parts = {couch_httpd:doc_etag(DDoc), Accept, {Name, Roles}},
-        ETag = couch_httpd:make_etag({BaseSig, Parts}),
-        case couch_httpd:etag_match(Req, ETag) of
-            true -> throw({etag_match, ETag});
-            false -> {ok, Acc0#lacc{etag=ETag}}
-        end
-    end,
-    Args = Args0#mrargs{preflight_fun=ETagFun},
-    couch_httpd:etag_maybe(Req, fun() ->
-        couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
-            Acc = #lacc{db=Db, req=Req, qserver=QServer, lname=LName},
-            couch_mrview:query_view(Db, VDDoc, VName, Args, fun list_cb/2, Acc)
-        end)
-    end).
-
-
-list_cb({meta, Meta}, #lacc{code=undefined} = Acc) ->
-    MetaProps = case couch_util:get_value(total, Meta) of
-        undefined -> [];
-        Total -> [{total_rows, Total}]
-    end ++ case couch_util:get_value(offset, Meta) of
-        undefined -> [];
-        Offset -> [{offset, Offset}]
-    end ++ case couch_util:get_value(update_seq, Meta) of
-        undefined -> [];
-        UpdateSeq -> [{update_seq, UpdateSeq}]
-    end,
-    start_list_resp({MetaProps}, Acc);
-list_cb({row, Row}, #lacc{code=undefined} = Acc) ->
-    {ok, NewAcc} = start_list_resp({[]}, Acc),
-    send_list_row(Row, NewAcc);
-list_cb({row, Row}, Acc) ->
-    send_list_row(Row, Acc);
-list_cb(complete, Acc) ->
-    #lacc{qserver = {Proc, _}, resp = Resp0} = Acc,
-    if Resp0 =:= nil ->
-        {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
-    true ->
-        Resp = Resp0
-    end,
-    case couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
-        [<<"end">>, Data, Headers] ->
-            Acc2 = fixup_headers(Headers, Acc#lacc{resp=Resp}),
-            #lacc{resp = Resp2} = send_non_empty_chunk(Acc2, Data);
-        [<<"end">>, Data] ->
-            #lacc{resp = Resp2} = send_non_empty_chunk(Acc#lacc{resp=Resp}, Data)
-    end,
-    couch_httpd:last_chunk(Resp2),
-    {ok, Resp2}.
-
-start_list_resp(Head, Acc) ->
-    #lacc{db=Db, req=Req, qserver=QServer, lname=LName} = Acc,
-    JsonReq = couch_httpd_external:json_req_obj(Req, Db),
-
-    [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
-        [<<"lists">>, LName], [Head, JsonReq]),
-    Acc2 = send_non_empty_chunk(fixup_headers(JsonResp, Acc), Chunk),
-    {ok, Acc2}.
-
-fixup_headers(Headers, #lacc{etag=ETag} = Acc) ->
-    Headers2 = apply_etag(Headers, ETag),
-    #extern_resp_args{
-        code = Code,
-        ctype = CType,
-        headers = ExtHeaders
-    } = couch_httpd_external:parse_external_response(Headers2),
-    Headers3 = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
-    Acc#lacc{code=Code, headers=Headers3}.
-
-send_list_row(Row, #lacc{qserver = {Proc, _}, resp = Resp} = Acc) ->
-    RowObj = case couch_util:get_value(id, Row) of
-        undefined -> [];
-        Id -> [{id, Id}]
-    end ++ case couch_util:get_value(key, Row) of
-        undefined -> [];
-        Key -> [{key, Key}]
-    end ++ case couch_util:get_value(value, Row) of
-        undefined -> [];
-        Val -> [{value, Val}]
-    end ++ case couch_util:get_value(doc, Row) of
-        undefined -> [];
-        Doc -> [{doc, Doc}]
-    end,
-    try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, {RowObj}]) of
-    [<<"chunks">>, Chunk, Headers] ->
-        Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
-        {ok, Acc2};
-    [<<"chunks">>, Chunk] ->
-        Acc2 = send_non_empty_chunk(Acc, Chunk),
-        {ok, Acc2};
-    [<<"end">>, Chunk, Headers] ->
-        Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
-        #lacc{resp = Resp2} = Acc2,
-        couch_httpd:last_chunk(Resp2),
-        {stop, Acc2};
-    [<<"end">>, Chunk] ->
-        Acc2 = send_non_empty_chunk(Acc, Chunk),
-        #lacc{resp = Resp2} = Acc2,
-        couch_httpd:last_chunk(Resp2),
-        {stop, Acc2}
-    catch Error ->
-        case Resp of
-            undefined ->
-                {Code, _, _} = couch_httpd:error_info(Error),
-                #lacc{req=Req, headers=Headers} = Acc,
-                {ok, Resp2} = couch_httpd:start_chunked_response(Req, Code, Headers),
-                Acc2 = Acc#lacc{resp=Resp2, code=Code};
-            _ -> Resp2 = Resp, Acc2 = Acc
-        end,
-        couch_httpd:send_chunked_error(Resp2, Error),
-        {stop, Acc2}
-    end.
-
-send_non_empty_chunk(Acc, []) ->
-    Acc;
-send_non_empty_chunk(#lacc{resp=undefined} = Acc, Chunk) ->
-    #lacc{req=Req, code=Code, headers=Headers} = Acc,
-    {ok, Resp} = couch_httpd:start_chunked_response(Req, Code, Headers),
-    send_non_empty_chunk(Acc#lacc{resp = Resp}, Chunk);
-send_non_empty_chunk(#lacc{resp=Resp} = Acc, Chunk) ->
-    couch_httpd:send_chunk(Resp, Chunk),
-    Acc.
-
-
-apply_etag({ExternalResponse}, CurrentEtag) ->
-    % Here we embark on the delicate task of replacing or creating the
-    % headers on the JsonResponse object. We need to control the Etag and
-    % Vary headers. If the external function controls the Etag, we'd have to
-    % run it to check for a match, which sort of defeats the purpose.
-    case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
-    nil ->
-        % no JSON headers
-        % add our Etag and Vary headers to the response
-        {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
-    JsonHeaders ->
-        {[case Field of
-        {<<"headers">>, JsonHeaders} -> % add our headers
-            JsonHeadersEtagged = json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
-            JsonHeadersVaried = json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
-            {<<"headers">>, JsonHeadersVaried};
-        _ -> % skip non-header fields
-            Field
-        end || Field <- ExternalResponse]}
-    end.
-
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
-    json_apply_field(H, L, []).
-
-
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
-    % drop matching keys
-    json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
-    % something else is next, leave it alone.
-    json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
-    % end of list, add ours
-    {[{Key, NewValue}|Acc]}.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_test_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_test_util.erl b/src/couch_mrview/src/couch_mrview_test_util.erl
deleted file mode 100644
index 4109fd6..0000000
--- a/src/couch_mrview/src/couch_mrview_test_util.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_test_util).
-
--compile(export_all).
-
--include_lib("couch/include/couch_db.hrl").
--define(ADMIN, #user_ctx{roles=[<<"_admin">>]}).
-
-
-init_db(Name, Type) ->
-    init_db(Name, Type, 10).
-
-
-init_db(Name, Type, Count) ->
-    {ok, Db} = new_db(Name, Type),
-    Docs = make_docs(Count),
-    save_docs(Db, Docs).
-
-
-new_db(Name, Type) ->
-    couch_server:delete(Name, [{user_ctx, ?ADMIN}]),
-    {ok, Db} = couch_db:create(Name, [{user_ctx, ?ADMIN}]),
-    save_docs(Db, [ddoc(Type)]).
-
-
-save_docs(Db, Docs) ->
-    {ok, _} = couch_db:update_docs(Db, Docs, []),
-    couch_db:reopen(Db).
-
-
-make_docs(Count) ->
-    make_docs(Count, []).
-
-make_docs(Count, Acc) when Count =< 0 ->
-    Acc;
-make_docs(Count, Acc) ->
-    make_docs(Count-1, [doc(Count) | Acc]).
-
-
-ddoc(map) ->
-    couch_doc:from_json_obj({[
-        {<<"_id">>, <<"_design/bar">>},
-        {<<"views">>, {[
-            {<<"baz">>, {[
-                {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
-            ]}},
-            {<<"bing">>, {[
-                {<<"map">>, <<"function(doc) {}">>}
-            ]}},
-            {<<"zing">>, {[
-                {<<"map">>, <<
-                    "function(doc) {\n"
-                    "  if(doc.foo !== undefined)\n"
-                    "    emit(doc.foo, 0);\n"
-                    "}"
-                >>}
-            ]}}
-        ]}}
-    ]});
-ddoc(red) ->
-    couch_doc:from_json_obj({[
-        {<<"_id">>, <<"_design/bar">>},
-        {<<"views">>, {[
-            {<<"baz">>, {[
-                {<<"map">>, <<
-                    "function(doc) {\n"
-                    "  emit([doc.val % 2, doc.val], doc.val);\n"
-                    "}\n"
-                >>},
-                {<<"reduce">>, <<"function(keys, vals) {return sum(vals);}">>}
-            ]}}
-        ]}}
-    ]}).
-
-
-doc(Id) ->
-    couch_doc:from_json_obj({[
-        {<<"_id">>, list_to_binary(integer_to_list(Id))},
-        {<<"val">>, Id}
-    ]}).
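A typical call into this helper from a test, with illustrative database names and doc count:

    %% Creates the db with the "map" design doc plus ten {_id, val} docs;
    %% returns {ok, Db} with the handle reopened after the writes.
    {ok, Db} = couch_mrview_test_util:init_db(<<"mrview_test">>, map),
    %% Or with an explicit doc count and the reduce design doc:
    {ok, Db2} = couch_mrview_test_util:init_db(<<"mrview_red">>, red, 100).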

http://git-wip-us.apache.org/repos/asf/couchdb/blob/2acbbd31/src/couch_mrview/src/couch_mrview_updater.erl
----------------------------------------------------------------------
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
deleted file mode 100644
index 99aedd0..0000000
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ /dev/null
@@ -1,322 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_updater).
-
--export([start_update/3, purge/4, process_doc/3, finish_update/1]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
-start_update(Partial, State, NumChanges) ->
-    QueueOpts = [{max_size, 100000}, {max_items, 500}],
-    {ok, DocQueue} = couch_work_queue:new(QueueOpts),
-    {ok, WriteQueue} = couch_work_queue:new(QueueOpts),
-
-    InitState = State#mrst{
-        first_build=State#mrst.update_seq==0,
-        partial_resp_pid=Partial,
-        doc_acc=[],
-        doc_queue=DocQueue,
-        write_queue=WriteQueue
-    },
-
-    Self = self(),
-    MapFun = fun() ->
-        couch_task_status:add_task([
-            {type, indexer},
-            {database, State#mrst.db_name},
-            {design_document, State#mrst.idx_name},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, NumChanges}
-        ]),
-        couch_task_status:set_update_frequency(500),
-        map_docs(Self, InitState)
-    end,
-    WriteFun = fun() -> write_results(Self, InitState) end,
-
-    spawn_link(MapFun),
-    spawn_link(WriteFun),
-
-    {ok, InitState}.
-
-
-purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
-    #mrst{
-        id_btree=IdBtree,
-        views=Views
-    } = State,
-
-    Ids = [Id || {Id, _Revs} <- PurgedIdRevs],
-    {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids),
-
-    MakeDictFun = fun
-        ({ok, {DocId, ViewNumRowKeys}}, DictAcc) ->
-            FoldFun = fun({ViewNum, RowKey}, DictAcc2) ->
-                dict:append(ViewNum, {RowKey, DocId}, DictAcc2)
-            end,
-            lists:foldl(FoldFun, DictAcc, ViewNumRowKeys);
-        ({not_found, _}, DictAcc) ->
-            DictAcc
-    end,
-    KeysToRemove = lists:foldl(MakeDictFun, dict:new(), Lookups),
-
-    RemKeysFun = fun(#mrview{id_num=Num, btree=Btree}=View) ->
-        case dict:find(Num, KeysToRemove) of
-            {ok, RemKeys} ->
-                {ok, Btree2} = couch_btree:add_remove(Btree, [], RemKeys),
-                NewPurgeSeq = case Btree2 /= Btree of
-                    true -> PurgeSeq;
-                    _ -> View#mrview.purge_seq
-                end,
-                View#mrview{btree=Btree2, purge_seq=NewPurgeSeq};
-            error ->
-                View
-        end
-    end,
-
-    Views2 = lists:map(RemKeysFun, Views),
-    {ok, State#mrst{
-        id_btree=IdBtree2,
-        views=Views2,
-        purge_seq=PurgeSeq
-    }}.
-
-
-process_doc(Doc, Seq, #mrst{doc_acc=Acc}=State) when length(Acc) > 100 ->
-    couch_work_queue:queue(State#mrst.doc_queue, lists:reverse(Acc)),
-    process_doc(Doc, Seq, State#mrst{doc_acc=[]});
-process_doc(nil, Seq, #mrst{doc_acc=Acc}=State) ->
-    {ok, State#mrst{doc_acc=[{nil, Seq, nil} | Acc]}};
-process_doc(#doc{id=Id, deleted=true}, Seq, #mrst{doc_acc=Acc}=State) ->
-    {ok, State#mrst{doc_acc=[{Id, Seq, deleted} | Acc]}};
-process_doc(#doc{id=Id}=Doc, Seq, #mrst{doc_acc=Acc}=State) ->
-    {ok, State#mrst{doc_acc=[{Id, Seq, Doc} | Acc]}}.
-
-
-finish_update(#mrst{doc_acc=Acc}=State) ->
-    if Acc /= [] ->
-        couch_work_queue:queue(State#mrst.doc_queue, Acc);
-        true -> ok
-    end,
-    couch_work_queue:close(State#mrst.doc_queue),
-    receive
-        {new_state, NewState} ->
-            {ok, NewState#mrst{
-                first_build=undefined,
-                partial_resp_pid=undefined,
-                doc_acc=undefined,
-                doc_queue=undefined,
-                write_queue=undefined,
-                qserver=nil
-            }}
-    end.
-
-
-map_docs(Parent, State0) ->
-    case couch_work_queue:dequeue(State0#mrst.doc_queue) of
-        closed ->
-            couch_query_servers:stop_doc_map(State0#mrst.qserver),
-            couch_work_queue:close(State0#mrst.write_queue);
-        {ok, Dequeued} ->
-            State1 = case State0#mrst.qserver of
-                nil -> start_query_server(State0);
-                _ -> State0
-            end,
-            {ok, MapResults} = compute_map_results(State1, Dequeued),
-            couch_work_queue:queue(State1#mrst.write_queue, MapResults),
-            map_docs(Parent, State1)
-    end.
-
-
-compute_map_results(#mrst{qserver = Qs}, Dequeued) ->
-    % Run all the non-deleted docs through the view engine and
-    % then pass the results on to the writer process.
-    DocFun = fun
-        ({nil, Seq, _}, {SeqAcc, AccDel, AccNotDel}) ->
-            {erlang:max(Seq, SeqAcc), AccDel, AccNotDel};
-        ({Id, Seq, deleted}, {SeqAcc, AccDel, AccNotDel}) ->
-            {erlang:max(Seq, SeqAcc), [{Id, []} | AccDel], AccNotDel};
-        ({_Id, Seq, Doc}, {SeqAcc, AccDel, AccNotDel}) ->
-            {erlang:max(Seq, SeqAcc), AccDel, [Doc | AccNotDel]}
-    end,
-    FoldFun = fun(Docs, Acc) ->
-        lists:foldl(DocFun, Acc, Docs)
-    end,
-    {MaxSeq, DeletedResults, Docs} =
-        lists:foldl(FoldFun, {0, [], []}, Dequeued),
-    {ok, MapResultList} = couch_query_servers:map_docs_raw(Qs, Docs),
-    NotDeletedResults = lists:zipwith(
-        fun(#doc{id = Id}, MapResults) -> {Id, MapResults} end,
-        Docs,
-        MapResultList),
-    AllMapResults = DeletedResults ++ NotDeletedResults,
-    update_task(length(AllMapResults)),
-    {ok, {MaxSeq, AllMapResults}}.
-
-
-write_results(Parent, State) ->
-    case accumulate_writes(State, State#mrst.write_queue, nil) of
-        stop ->
-            Parent ! {new_state, State};
-        {Go, {Seq, ViewKVs, DocIdKeys}} ->
-            NewState = write_kvs(State, Seq, ViewKVs, DocIdKeys),
-            if Go == stop ->
-                Parent ! {new_state, NewState};
-            true ->
-                send_partial(NewState#mrst.partial_resp_pid, NewState),
-                write_results(Parent, NewState)
-            end
-    end.
-
-
-start_query_server(State) ->
-    #mrst{
-        language=Language,
-        lib=Lib,
-        views=Views
-    } = State,
-    Defs = [View#mrview.def || View <- Views],
-    {ok, QServer} = couch_query_servers:start_doc_map(Language, Defs, Lib),
-    State#mrst{qserver=QServer}.
-
-
-accumulate_writes(State, W, Acc0) ->
-    {Seq, ViewKVs, DocIdKVs} = case Acc0 of
-        nil -> {0, [{V#mrview.id_num, []} || V <- State#mrst.views], []};
-        _ -> Acc0
-    end,
-    case couch_work_queue:dequeue(W) of
-        closed when Seq == 0 ->
-            stop;
-        closed ->
-            {stop, {Seq, ViewKVs, DocIdKVs}};
-        {ok, Info} ->
-            {_, _, NewIds} = Acc = merge_results(Info, Seq, ViewKVs, DocIdKVs),
-            case accumulate_more(length(NewIds)) of
-                true -> accumulate_writes(State, W, Acc);
-                false -> {ok, Acc}
-            end
-    end.
-
-
-accumulate_more(NumDocIds) ->
-    % check if we have enough items now
-    MinItems = config:get("view_updater", "min_writer_items", "100"),
-    MinSize = config:get("view_updater", "min_writer_size", "16777216"),
-    {memory, CurrMem} = process_info(self(), memory),
-    NumDocIds < list_to_integer(MinItems)
-        andalso CurrMem < list_to_integer(MinSize).
-
-
-merge_results([], SeqAcc, ViewKVs, DocIdKeys) ->
-    {SeqAcc, ViewKVs, DocIdKeys};
-merge_results([{Seq, Results} | Rest], SeqAcc, ViewKVs, DocIdKeys) ->
-    Fun = fun(RawResults, {VKV, DIK}) ->
-        merge_results(RawResults, VKV, DIK)
-    end,
-    {ViewKVs1, DocIdKeys1} = lists:foldl(Fun, {ViewKVs, DocIdKeys}, Results),
-    merge_results(Rest, erlang:max(Seq, SeqAcc), ViewKVs1, DocIdKeys1).
-
-
-merge_results({DocId, []}, ViewKVs, DocIdKeys) ->
-    {ViewKVs, [{DocId, []} | DocIdKeys]};
-merge_results({DocId, RawResults}, ViewKVs, DocIdKeys) ->
-    JsonResults = couch_query_servers:raw_to_ejson(RawResults),
-    Results = [[list_to_tuple(Res) || Res <- FunRs] || FunRs <- JsonResults],
-    {ViewKVs1, ViewIdKeys} = insert_results(DocId, Results, ViewKVs, [], []),
-    {ViewKVs1, [ViewIdKeys | DocIdKeys]}.
-
-
-insert_results(DocId, [], [], ViewKVs, ViewIdKeys) ->
-    {lists:reverse(ViewKVs), {DocId, ViewIdKeys}};
-insert_results(DocId, [KVs | RKVs], [{Id, VKVs} | RVKVs], VKVAcc, VIdKeys) ->
-    CombineDupesFun = fun
-        ({Key, Val}, {[{Key, {dups, Vals}} | Rest], IdKeys}) ->
-            {[{Key, {dups, [Val | Vals]}} | Rest], IdKeys};
-        ({Key, Val1}, {[{Key, Val2} | Rest], IdKeys}) ->
-            {[{Key, {dups, [Val1, Val2]}} | Rest], IdKeys};
-        ({Key, _}=KV, {Rest, IdKeys}) ->
-            {[KV | Rest], [{Id, Key} | IdKeys]}
-    end,
-    InitAcc = {[], VIdKeys},
-    {Duped, VIdKeys0} = lists:foldl(CombineDupesFun, InitAcc, lists:sort(KVs)),
-    FinalKVs = [{{Key, DocId}, Val} || {Key, Val} <- Duped] ++ VKVs,
-    insert_results(DocId, RKVs, RVKVs, [{Id, FinalKVs} | VKVAcc], VIdKeys0).
-
-
-write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys) ->
-    #mrst{
-        id_btree=IdBtree,
-        first_build=FirstBuild
-    } = State,
-
-    {ok, ToRemove, IdBtree2} = update_id_btree(IdBtree, DocIdKeys, FirstBuild),
-    ToRemByView = collapse_rem_keys(ToRemove, dict:new()),
-
-    UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, KVs}) ->
-        ToRem = couch_util:dict_find(ViewId, ToRemByView, []),
-        {ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
-        NewUpdateSeq = case VBtree2 =/= View#mrview.btree of
-            true -> UpdateSeq;
-            _ -> View#mrview.update_seq
-        end,
-        View#mrview{btree=VBtree2, update_seq=NewUpdateSeq}
-    end,
-
-    State#mrst{
-        views=lists:zipwith(UpdateView, State#mrst.views, ViewKVs),
-        update_seq=UpdateSeq,
-        id_btree=IdBtree2
-    }.
-
-
-update_id_btree(Btree, DocIdKeys, true) ->
-    ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
-    couch_btree:query_modify(Btree, [], ToAdd, []);
-update_id_btree(Btree, DocIdKeys, _) ->
-    ToFind = [Id || {Id, _} <- DocIdKeys],
-    ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
-    ToRem = [Id || {Id, DIKeys} <- DocIdKeys, DIKeys == []],
-    couch_btree:query_modify(Btree, ToFind, ToAdd, ToRem).
-
-
-collapse_rem_keys([], Acc) ->
-    Acc;
-collapse_rem_keys([{ok, {DocId, ViewIdKeys}} | Rest], Acc) ->
-    NewAcc = lists:foldl(fun({ViewId, Key}, Acc2) ->
-        dict:append(ViewId, {Key, DocId}, Acc2)
-    end, Acc, ViewIdKeys),
-    collapse_rem_keys(Rest, NewAcc);
-collapse_rem_keys([{not_found, _} | Rest], Acc) ->
-    collapse_rem_keys(Rest, Acc).
-
-
-send_partial(Pid, State) when is_pid(Pid) ->
-    gen_server:cast(Pid, {new_state, State});
-send_partial(_, _) ->
-    ok.
-
-
-update_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-        0 ->
-            % updater restart after compaction finishes
-            0;
-        _ ->
-            (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
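The writer loop above batches map results until accumulate_more/1 goes false; a minimal sketch of that predicate with the default thresholds inlined (helper name hypothetical):

    %% Keep accumulating while fewer than min_writer_items doc ids are
    %% buffered and the writer's heap is under min_writer_size bytes.
    should_accumulate(NumDocIds) ->
        {memory, CurrMem} = erlang:process_info(self(), memory),
        NumDocIds < 100 andalso CurrMem < 16777216.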


[38/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/mochiweb


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/acf8eaff
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/acf8eaff
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/acf8eaff

Branch: refs/heads/1843-feature-bigcouch
Commit: acf8eaff91cfe1b1558570df199aa802041bf103
Parents: 4cac46a
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:41:55 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:41:55 2014 -0600

----------------------------------------------------------------------
 src/mochiweb/src/internal.hrl               |    3 -
 src/mochiweb/src/mochifmt.erl               |  425 --------
 src/mochiweb/src/mochifmt_records.erl       |   38 -
 src/mochiweb/src/mochifmt_std.erl           |   30 -
 src/mochiweb/src/mochiglobal.erl            |  107 --
 src/mochiweb/src/mochihex.erl               |   91 --
 src/mochiweb/src/mochijson.erl              |  531 ---------
 src/mochiweb/src/mochijson2.erl             |  849 ---------------
 src/mochiweb/src/mochilists.erl             |  104 --
 src/mochiweb/src/mochilogfile2.erl          |  140 ---
 src/mochiweb/src/mochinum.erl               |  354 ------
 src/mochiweb/src/mochitemp.erl              |  310 ------
 src/mochiweb/src/mochiutf8.erl              |  316 ------
 src/mochiweb/src/mochiweb.app.src           |    8 -
 src/mochiweb/src/mochiweb.erl               |  289 -----
 src/mochiweb/src/mochiweb_acceptor.erl      |   49 -
 src/mochiweb/src/mochiweb_app.erl           |   27 -
 src/mochiweb/src/mochiweb_charref.erl       |  308 ------
 src/mochiweb/src/mochiweb_cookies.erl       |  309 ------
 src/mochiweb/src/mochiweb_cover.erl         |   75 --
 src/mochiweb/src/mochiweb_echo.erl          |   38 -
 src/mochiweb/src/mochiweb_headers.erl       |  299 -----
 src/mochiweb/src/mochiweb_html.erl          | 1264 ----------------------
 src/mochiweb/src/mochiweb_http.erl          |  290 -----
 src/mochiweb/src/mochiweb_io.erl            |   46 -
 src/mochiweb/src/mochiweb_mime.erl          |   94 --
 src/mochiweb/src/mochiweb_multipart.erl     |  824 --------------
 src/mochiweb/src/mochiweb_request.erl       |  788 --------------
 src/mochiweb/src/mochiweb_request_tests.erl |   63 --
 src/mochiweb/src/mochiweb_response.erl      |   64 --
 src/mochiweb/src/mochiweb_skel.erl          |   86 --
 src/mochiweb/src/mochiweb_socket.erl        |   83 --
 src/mochiweb/src/mochiweb_socket_server.erl |  364 -------
 src/mochiweb/src/mochiweb_sup.erl           |   41 -
 src/mochiweb/src/mochiweb_util.erl          |  980 -----------------
 src/mochiweb/src/reloader.erl               |  161 ---
 36 files changed, 9848 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/internal.hrl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/internal.hrl b/src/mochiweb/src/internal.hrl
deleted file mode 100644
index 6db899a..0000000
--- a/src/mochiweb/src/internal.hrl
+++ /dev/null
@@ -1,3 +0,0 @@
-
--define(RECBUF_SIZE, 8192).
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochifmt.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochifmt.erl b/src/mochiweb/src/mochifmt.erl
deleted file mode 100644
index 5bc6b9c..0000000
--- a/src/mochiweb/src/mochifmt.erl
+++ /dev/null
@@ -1,425 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2008 Mochi Media, Inc.
-
-%% @doc String Formatting for Erlang, inspired by Python 2.6
-%%      (<a href="http://www.python.org/dev/peps/pep-3101/">PEP 3101</a>).
-%%
--module(mochifmt).
--author('bob@mochimedia.com').
--export([format/2, format_field/2, convert_field/2, get_value/2, get_field/2]).
--export([tokenize/1, format/3, get_field/3, format_field/3]).
--export([bformat/2, bformat/3]).
--export([f/2, f/3]).
-
--record(conversion, {length, precision, ctype, align, fill_char, sign}).
-
-%% @spec tokenize(S::string()) -> tokens()
-%% @doc Tokenize a format string into mochifmt's internal format.
-tokenize(S) ->
-    {?MODULE, tokenize(S, "", [])}.
-
-%% @spec convert_field(Arg, Conversion::conversion()) -> term()
-%% @doc Process Arg according to the given explicit conversion specifier.
-convert_field(Arg, "") ->
-    Arg;
-convert_field(Arg, "r") ->
-    repr(Arg);
-convert_field(Arg, "s") ->
-    str(Arg).
-
-%% @spec get_value(Key::string(), Args::args()) -> term()
-%% @doc Get the Key from Args. If Args is a tuple then convert Key to
-%%      an integer and get element(1 + Key, Args). If Args is a list and Key
-%%      can be parsed as an integer then use lists:nth(1 + Key, Args),
-%%      otherwise try to look up Key in Args as a proplist, converting
-%%      Key to an atom or binary if necessary.
-get_value(Key, Args) when is_tuple(Args) ->
-    element(1 + list_to_integer(Key), Args);
-get_value(Key, Args) when is_list(Args) ->
-    try lists:nth(1 + list_to_integer(Key), Args)
-    catch error:_ ->
-            {_K, V} = proplist_lookup(Key, Args),
-            V
-    end.
-
-%% @spec get_field(Key::string(), Args) -> term()
-%% @doc Consecutively call get_value/2 on parts of Key delimited by ".",
-%%      replacing Args with the result of the previous get_value. This
-%%      is used to implement formats such as {0.0}.
-get_field(Key, Args) ->
-    get_field(Key, Args, ?MODULE).
-
-%% @spec get_field(Key::string(), Args, Module) -> term()
-%% @doc Consecutively call Module:get_value/2 on parts of Key delimited by ".",
-%%      replacing Args with the result of the previous get_value. This
-%%      is used to implement formats such as {0.0}.
-get_field(Key, Args, Module) ->
-    {Name, Next} = lists:splitwith(fun (C) -> C =/= $. end, Key),
-    Res = try Module:get_value(Name, Args)
-          catch error:undef -> get_value(Name, Args) end,
-    case Next of
-        "" ->
-            Res;
-        "." ++ S1 ->
-            get_field(S1, Res, Module)
-    end.
-
-%% @spec format(Format::string(), Args) -> iolist()
-%% @doc Format Args with Format.
-format(Format, Args) ->
-    format(Format, Args, ?MODULE).
-
-%% @spec format(Format::string(), Args, Module) -> iolist()
-%% @doc Format Args with Format using Module.
-format({?MODULE, Parts}, Args, Module) ->
-    format2(Parts, Args, Module, []);
-format(S, Args, Module) ->
-    format(tokenize(S), Args, Module).
-
-%% @spec format_field(Arg, Format) -> iolist()
-%% @doc Format Arg with Format.
-format_field(Arg, Format) ->
-    format_field(Arg, Format, ?MODULE).
-
-%% @spec format_field(Arg, Format, _Module) -> iolist()
-%% @doc Format Arg with Format.
-format_field(Arg, Format, _Module) ->
-    F = default_ctype(Arg, parse_std_conversion(Format)),
-    fix_padding(fix_sign(convert2(Arg, F), F), F).
-
-%% @spec f(Format::string(), Args) -> string()
-%% @doc Format Args with Format and return a string().
-f(Format, Args) ->
-    f(Format, Args, ?MODULE).
-
-%% @spec f(Format::string(), Args, Module) -> string()
-%% @doc Format Args with Format using Module and return a string().
-f(Format, Args, Module) ->
-    case lists:member(${, Format) of
-        true ->
-            binary_to_list(bformat(Format, Args, Module));
-        false ->
-            Format
-    end.
-
-%% @spec bformat(Format::string(), Args) -> binary()
-%% @doc Format Args with Format and return a binary().
-bformat(Format, Args) ->
-    iolist_to_binary(format(Format, Args)).
-
-%% @spec bformat(Format::string(), Args, Module) -> binary()
-%% @doc Format Args with Format using Module and return a binary().
-bformat(Format, Args, Module) ->
-    iolist_to_binary(format(Format, Args, Module)).
-
-%% Internal API
-
-add_raw("", Acc) ->
-    Acc;
-add_raw(S, Acc) ->
-    [{raw, lists:reverse(S)} | Acc].
-
-tokenize([], S, Acc) ->
-    lists:reverse(add_raw(S, Acc));
-tokenize("{{" ++ Rest, S, Acc) ->
-    tokenize(Rest, "{" ++ S, Acc);
-tokenize("{" ++ Rest, S, Acc) ->
-    {Format, Rest1} = tokenize_format(Rest),
-    tokenize(Rest1, "", [{format, make_format(Format)} | add_raw(S, Acc)]);
-tokenize("}}" ++ Rest, S, Acc) ->
-    tokenize(Rest, "}" ++ S, Acc);
-tokenize([C | Rest], S, Acc) ->
-    tokenize(Rest, [C | S], Acc).
-
-tokenize_format(S) ->
-    tokenize_format(S, 1, []).
-
-tokenize_format("}" ++ Rest, 1, Acc) ->
-    {lists:reverse(Acc), Rest};
-tokenize_format("}" ++ Rest, N, Acc) ->
-    tokenize_format(Rest, N - 1, "}" ++ Acc);
-tokenize_format("{" ++ Rest, N, Acc) ->
-    tokenize_format(Rest, 1 + N, "{" ++ Acc);
-tokenize_format([C | Rest], N, Acc) ->
-    tokenize_format(Rest, N, [C | Acc]).
-
-make_format(S) ->
-    {Name0, Spec} = case lists:splitwith(fun (C) -> C =/= $: end, S) of
-                        {_, ""} ->
-                            {S, ""};
-                        {SN, ":" ++ SS} ->
-                            {SN, SS}
-                    end,
-    {Name, Transform} = case lists:splitwith(fun (C) -> C =/= $! end, Name0) of
-                            {_, ""} ->
-                                {Name0, ""};
-                            {TN, "!" ++ TT} ->
-                                {TN, TT}
-                        end,
-    {Name, Transform, Spec}.
-
-proplist_lookup(S, P) ->
-    A = try list_to_existing_atom(S)
-        catch error:_ -> make_ref() end,
-    B = try list_to_binary(S)
-        catch error:_ -> make_ref() end,
-    proplist_lookup2({S, A, B}, P).
-
-proplist_lookup2({KS, KA, KB}, [{K, V} | _])
-  when KS =:= K orelse KA =:= K orelse KB =:= K ->
-    {K, V};
-proplist_lookup2(Keys, [_ | Rest]) ->
-    proplist_lookup2(Keys, Rest).
-
-format2([], _Args, _Module, Acc) ->
-    lists:reverse(Acc);
-format2([{raw, S} | Rest], Args, Module, Acc) ->
-    format2(Rest, Args, Module, [S | Acc]);
-format2([{format, {Key, Convert, Format0}} | Rest], Args, Module, Acc) ->
-    Format = f(Format0, Args, Module),
-    V = case Module of
-            ?MODULE ->
-                V0 = get_field(Key, Args),
-                V1 = convert_field(V0, Convert),
-                format_field(V1, Format);
-            _ ->
-                V0 = try Module:get_field(Key, Args)
-                     catch error:undef -> get_field(Key, Args, Module) end,
-                V1 = try Module:convert_field(V0, Convert)
-                     catch error:undef -> convert_field(V0, Convert) end,
-                try Module:format_field(V1, Format)
-                catch error:undef -> format_field(V1, Format, Module) end
-        end,
-    format2(Rest, Args, Module, [V | Acc]).
-
-default_ctype(_Arg, C=#conversion{ctype=N}) when N =/= undefined ->
-    C;
-default_ctype(Arg, C) when is_integer(Arg) ->
-    C#conversion{ctype=decimal};
-default_ctype(Arg, C) when is_float(Arg) ->
-    C#conversion{ctype=general};
-default_ctype(_Arg, C) ->
-    C#conversion{ctype=string}.
-
-fix_padding(Arg, #conversion{length=undefined}) ->
-    Arg;
-fix_padding(Arg, F=#conversion{length=Length, fill_char=Fill0, align=Align0,
-                               ctype=Type}) ->
-    Padding = Length - iolist_size(Arg),
-    Fill = case Fill0 of
-               undefined ->
-                   $\s;
-               _ ->
-                   Fill0
-           end,
-    Align = case Align0 of
-                undefined ->
-                    case Type of
-                        string ->
-                            left;
-                        _ ->
-                            right
-                    end;
-                _ ->
-                    Align0
-            end,
-    case Padding > 0 of
-        true ->
-            do_padding(Arg, Padding, Fill, Align, F);
-        false ->
-            Arg
-    end.
-
-do_padding(Arg, Padding, Fill, right, _F) ->
-    [lists:duplicate(Padding, Fill), Arg];
-do_padding(Arg, Padding, Fill, center, _F) ->
-    LPadding = lists:duplicate(Padding div 2, Fill),
-    RPadding = case Padding band 1 of
-                   1 ->
-                       [Fill | LPadding];
-                   _ ->
-                       LPadding
-               end,
-    [LPadding, Arg, RPadding];
-do_padding([$- | Arg], Padding, Fill, sign_right, _F) ->
-    [[$- | lists:duplicate(Padding, Fill)], Arg];
-do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=$-}) ->
-    [lists:duplicate(Padding, Fill), Arg];
-do_padding([S | Arg], Padding, Fill, sign_right, #conversion{sign=S}) ->
-    [[S | lists:duplicate(Padding, Fill)], Arg];
-do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=undefined}) ->
-    [lists:duplicate(Padding, Fill), Arg];
-do_padding(Arg, Padding, Fill, left, _F) ->
-    [Arg | lists:duplicate(Padding, Fill)].
-
-fix_sign(Arg, #conversion{sign=$+}) when Arg >= 0 ->
-    [$+, Arg];
-fix_sign(Arg, #conversion{sign=$\s}) when Arg >= 0 ->
-    [$\s, Arg];
-fix_sign(Arg, _F) ->
-    Arg.
-
-ctype($\%) -> percent;
-ctype($s) -> string;
-ctype($b) -> bin;
-ctype($o) -> oct;
-ctype($X) -> upper_hex;
-ctype($x) -> hex;
-ctype($c) -> char;
-ctype($d) -> decimal;
-ctype($g) -> general;
-ctype($f) -> fixed;
-ctype($e) -> exp.
-
-align($<) -> left;
-align($>) -> right;
-align($^) -> center;
-align($=) -> sign_right.
-
-convert2(Arg, F=#conversion{ctype=percent}) ->
-    [convert2(100.0 * Arg, F#conversion{ctype=fixed}), $\%];
-convert2(Arg, #conversion{ctype=string}) ->
-    str(Arg);
-convert2(Arg, #conversion{ctype=bin}) ->
-    erlang:integer_to_list(Arg, 2);
-convert2(Arg, #conversion{ctype=oct}) ->
-    erlang:integer_to_list(Arg, 8);
-convert2(Arg, #conversion{ctype=upper_hex}) ->
-    erlang:integer_to_list(Arg, 16);
-convert2(Arg, #conversion{ctype=hex}) ->
-    string:to_lower(erlang:integer_to_list(Arg, 16));
-convert2(Arg, #conversion{ctype=char}) when Arg < 16#80 ->
-    [Arg];
-convert2(Arg, #conversion{ctype=char}) ->
-    xmerl_ucs:to_utf8(Arg);
-convert2(Arg, #conversion{ctype=decimal}) ->
-    integer_to_list(Arg);
-convert2(Arg, #conversion{ctype=general, precision=undefined}) ->
-    try mochinum:digits(Arg)
-    catch error:undef -> io_lib:format("~g", [Arg]) end;
-convert2(Arg, #conversion{ctype=fixed, precision=undefined}) ->
-    io_lib:format("~f", [Arg]);
-convert2(Arg, #conversion{ctype=exp, precision=undefined}) ->
-    io_lib:format("~e", [Arg]);
-convert2(Arg, #conversion{ctype=general, precision=P}) ->
-    io_lib:format("~." ++ integer_to_list(P) ++ "g", [Arg]);
-convert2(Arg, #conversion{ctype=fixed, precision=P}) ->
-    io_lib:format("~." ++ integer_to_list(P) ++ "f", [Arg]);
-convert2(Arg, #conversion{ctype=exp, precision=P}) ->
-    io_lib:format("~." ++ integer_to_list(P) ++ "e", [Arg]).
-
-str(A) when is_atom(A) ->
-    atom_to_list(A);
-str(I) when is_integer(I) ->
-    integer_to_list(I);
-str(F) when is_float(F) ->
-    try mochinum:digits(F)
-    catch error:undef -> io_lib:format("~g", [F]) end;
-str(L) when is_list(L) ->
-    L;
-str(B) when is_binary(B) ->
-    B;
-str(P) ->
-    repr(P).
-
-repr(P) when is_float(P) ->
-    try mochinum:digits(P)
-    catch error:undef -> float_to_list(P) end;
-repr(P) ->
-    io_lib:format("~p", [P]).
-
-parse_std_conversion(S) ->
-    parse_std_conversion(S, #conversion{}).
-
-parse_std_conversion("", Acc) ->
-    Acc;
-parse_std_conversion([Fill, Align | Spec], Acc)
-  when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
-    parse_std_conversion(Spec, Acc#conversion{fill_char=Fill,
-                                              align=align(Align)});
-parse_std_conversion([Align | Spec], Acc)
-  when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
-    parse_std_conversion(Spec, Acc#conversion{align=align(Align)});
-parse_std_conversion([Sign | Spec], Acc)
-  when Sign =:= $+ orelse Sign =:= $- orelse Sign =:= $\s ->
-    parse_std_conversion(Spec, Acc#conversion{sign=Sign});
-parse_std_conversion("0" ++ Spec, Acc) ->
-    Align = case Acc#conversion.align of
-                undefined ->
-                    sign_right;
-                A ->
-                    A
-            end,
-    parse_std_conversion(Spec, Acc#conversion{fill_char=$0, align=Align});
-parse_std_conversion(Spec=[D|_], Acc) when D >= $0 andalso D =< $9 ->
-    {W, Spec1} = lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec),
-    parse_std_conversion(Spec1, Acc#conversion{length=list_to_integer(W)});
-parse_std_conversion([$. | Spec], Acc) ->
-    case lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec) of
-        {"", Spec1} ->
-            parse_std_conversion(Spec1, Acc);
-        {P, Spec1} ->
-            parse_std_conversion(Spec1,
-                                 Acc#conversion{precision=list_to_integer(P)})
-    end;
-parse_std_conversion([Type], Acc) ->
-    parse_std_conversion("", Acc#conversion{ctype=ctype(Type)}).
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-tokenize_test() ->
-    {?MODULE, [{raw, "ABC"}]} = tokenize("ABC"),
-    {?MODULE, [{format, {"0", "", ""}}]} = tokenize("{0}"),
-    {?MODULE, [{raw, "ABC"}, {format, {"1", "", ""}}, {raw, "DEF"}]} =
-        tokenize("ABC{1}DEF"),
-    ok.
-
-format_test() ->
-    <<"  -4">> = bformat("{0:4}", [-4]),
-    <<"   4">> = bformat("{0:4}", [4]),
-    <<"   4">> = bformat("{0:{0}}", [4]),
-    <<"4   ">> = bformat("{0:4}", ["4"]),
-    <<"4   ">> = bformat("{0:{0}}", ["4"]),
-    <<"1.2yoDEF">> = bformat("{2}{0}{1}{3}", {yo, "DE", 1.2, <<"F">>}),
-    <<"cafebabe">> = bformat("{0:x}", {16#cafebabe}),
-    <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
-    <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
-    <<"755">> = bformat("{0:o}", {8#755}),
-    <<"a">> = bformat("{0:c}", {97}),
-    %% Horizontal ellipsis
-    <<226, 128, 166>> = bformat("{0:c}", {16#2026}),
-    <<"11">> = bformat("{0:b}", {3}),
-    <<"11">> = bformat("{0:b}", [3]),
-    <<"11">> = bformat("{three:b}", [{three, 3}]),
-    <<"11">> = bformat("{three:b}", [{"three", 3}]),
-    <<"11">> = bformat("{three:b}", [{<<"three">>, 3}]),
-    <<"\"foo\"">> = bformat("{0!r}", {"foo"}),
-    <<"2008-5-4">> = bformat("{0.0}-{0.1}-{0.2}", {{2008,5,4}}),
-    <<"2008-05-04">> = bformat("{0.0:04}-{0.1:02}-{0.2:02}", {{2008,5,4}}),
-    <<"foo6bar-6">> = bformat("foo{1}{0}-{1}", {bar, 6}),
-    <<"-'atom test'-">> = bformat("-{arg!r}-", [{arg, 'atom test'}]),
-    <<"2008-05-04">> = bformat("{0.0:0{1.0}}-{0.1:0{1.1}}-{0.2:0{1.2}}",
-                               {{2008,5,4}, {4, 2, 2}}),
-    ok.
-
-std_test() ->
-    M = mochifmt_std:new(),
-    <<"01">> = bformat("{0}{1}", [0, 1], M),
-    ok.
-
-records_test() ->
-    M = mochifmt_records:new([{conversion, record_info(fields, conversion)}]),
-    R = #conversion{length=long, precision=hard, sign=peace},
-    long = M:get_value("length", R),
-    hard = M:get_value("precision", R),
-    peace = M:get_value("sign", R),
-    <<"long hard">> = bformat("{length} {precision}", R, M),
-    <<"long hard">> = bformat("{0.length} {0.precision}", [R], M),
-    ok.
-
--endif.
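Typical shell usage, mirroring the format_test/0 cases above:

    1> mochifmt:bformat("{0:04}-{1:02}", {2008, 5}).
    <<"2008-05">>
    2> mochifmt:f("{0!r}", {"foo"}).
    "\"foo\""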

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochifmt_records.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochifmt_records.erl b/src/mochiweb/src/mochifmt_records.erl
deleted file mode 100644
index 2326d1d..0000000
--- a/src/mochiweb/src/mochifmt_records.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2008 Mochi Media, Inc.
-
-%% @doc Formatter that understands records.
-%%
-%% Usage:
-%%
-%%    1> M = mochifmt_records:new([{rec, record_info(fields, rec)}]),
-%%    M:format("{0.bar}", [#rec{bar=foo}]).
-%%    foo
-
--module(mochifmt_records, [Recs]).
--author('bob@mochimedia.com').
--export([get_value/2]).
-
-get_value(Key, Rec) when is_tuple(Rec) and is_atom(element(1, Rec)) ->
-    try begin
-            Atom = list_to_existing_atom(Key),
-            {_, Fields} = proplists:lookup(element(1, Rec), Recs),
-            element(get_rec_index(Atom, Fields, 2), Rec)
-        end
-    catch error:_ -> mochifmt:get_value(Key, Rec)
-    end;
-get_value(Key, Args) ->
-    mochifmt:get_value(Key, Args).
-
-get_rec_index(Atom, [Atom | _], Index) ->
-    Index;
-get_rec_index(Atom, [_ | Rest], Index) ->
-    get_rec_index(Atom, Rest, 1 + Index).
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochifmt_std.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochifmt_std.erl b/src/mochiweb/src/mochifmt_std.erl
deleted file mode 100644
index d4d74f6..0000000
--- a/src/mochiweb/src/mochifmt_std.erl
+++ /dev/null
@@ -1,30 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2008 Mochi Media, Inc.
-
-%% @doc Template module for a mochifmt formatter.
-
--module(mochifmt_std, []).
--author('bob@mochimedia.com').
--export([format/2, get_value/2, format_field/2, get_field/2, convert_field/2]).
-
-format(Format, Args) ->
-    mochifmt:format(Format, Args, THIS).
-
-get_field(Key, Args) ->
-    mochifmt:get_field(Key, Args, THIS).
-
-convert_field(Key, Args) ->
-    mochifmt:convert_field(Key, Args).
-
-get_value(Key, Args) ->
-    mochifmt:get_value(Key, Args).
-
-format_field(Arg, Format) ->
-    mochifmt:format_field(Arg, Format, THIS).
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
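mochifmt_std is a parameterized module (an old OTP extension); instances are created with new/0 and passed as the Module argument, as in std_test/0 above:

    M = mochifmt_std:new(),
    <<"01">> = mochifmt:bformat("{0}{1}", [0, 1], M).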

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiglobal.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiglobal.erl b/src/mochiweb/src/mochiglobal.erl
deleted file mode 100644
index c740b87..0000000
--- a/src/mochiweb/src/mochiglobal.erl
+++ /dev/null
@@ -1,107 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2010 Mochi Media, Inc.
-%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6)
-%%      <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
--module(mochiglobal).
--author("Bob Ippolito <bo...@mochimedia.com>").
--export([get/1, get/2, put/2, delete/1]).
-
--spec get(atom()) -> any() | undefined.
-%% @equiv get(K, undefined)
-get(K) ->
-    get(K, undefined).
-
--spec get(atom(), T) -> any() | T.
-%% @doc Get the term for K or return Default.
-get(K, Default) ->
-    get(K, Default, key_to_module(K)).
-
-get(_K, Default, Mod) ->
-    try Mod:term()
-    catch error:undef ->
-            Default
-    end.
-
--spec put(atom(), any()) -> ok.
-%% @doc Store term V at K, replaces an existing term if present.
-put(K, V) ->
-    put(K, V, key_to_module(K)).
-
-put(_K, V, Mod) ->
-    Bin = compile(Mod, V),
-    code:purge(Mod),
-    code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
-    ok.
-
--spec delete(atom()) -> boolean().
-%% @doc Delete term stored at K, no-op if non-existent.
-delete(K) ->
-    delete(K, key_to_module(K)).
-
-delete(_K, Mod) ->
-    code:purge(Mod),
-    code:delete(Mod).
-
--spec key_to_module(atom()) -> atom().
-key_to_module(K) ->
-    list_to_atom("mochiglobal:" ++ atom_to_list(K)).
-
--spec compile(atom(), any()) -> binary().
-compile(Module, T) ->
-    {ok, Module, Bin} = compile:forms(forms(Module, T),
-                                      [verbose, report_errors]),
-    Bin.
-
--spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
-forms(Module, T) ->
-    [erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
-
--spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
-term_to_abstract(Module, Getter, T) ->
-    [%% -module(Module).
-     erl_syntax:attribute(
-       erl_syntax:atom(module),
-       [erl_syntax:atom(Module)]),
-     %% -export([Getter/0]).
-     erl_syntax:attribute(
-       erl_syntax:atom(export),
-       [erl_syntax:list(
-         [erl_syntax:arity_qualifier(
-            erl_syntax:atom(Getter),
-            erl_syntax:integer(0))])]),
-     %% Getter() -> T.
-     erl_syntax:function(
-       erl_syntax:atom(Getter),
-       [erl_syntax:clause([], none, [erl_syntax:abstract(T)])])].
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-get_put_delete_test() ->
-    K = '$$test$$mochiglobal',
-    delete(K),
-    ?assertEqual(
-       bar,
-       get(K, bar)),
-    try
-        ?MODULE:put(K, baz),
-        ?assertEqual(
-           baz,
-           get(K, bar)),
-        ?MODULE:put(K, wibble),
-        ?assertEqual(
-           wibble,
-           ?MODULE:get(K))
-    after
-        delete(K)
-    end,
-    ?assertEqual(
-       bar,
-       get(K, bar)),
-    ?assertEqual(
-       undefined,
-       ?MODULE:get(K)),
-    ok.
--endif.
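Rough shell usage; the key and value are illustrative. Each put/2 compiles and loads a throwaway module, so writes are expensive while reads cost about as much as a local function call:

    1> mochiglobal:put(port, 8080).
    ok
    2> mochiglobal:get(port).
    8080
    3> mochiglobal:delete(port).
    true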

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochihex.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochihex.erl b/src/mochiweb/src/mochihex.erl
deleted file mode 100644
index 44a2aa7..0000000
--- a/src/mochiweb/src/mochihex.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2006 Mochi Media, Inc.
-
-%% @doc Utilities for working with hexadecimal strings.
-
--module(mochihex).
--author('bob@mochimedia.com').
-
--export([to_hex/1, to_bin/1, to_int/1, dehex/1, hexdigit/1]).
-
-%% @type iolist() = [char() | binary() | iolist()]
-%% @type iodata() = iolist() | binary()
-
-%% @spec to_hex(integer | iolist()) -> string()
-%% @doc Convert an iolist to a hexadecimal string.
-to_hex(0) ->
-    "0";
-to_hex(I) when is_integer(I), I > 0 ->
-    to_hex_int(I, []);
-to_hex(B) ->
-    to_hex(iolist_to_binary(B), []).
-
-%% @spec to_bin(string()) -> binary()
-%% @doc Convert a hexadecimal string to a binary.
-to_bin(L) ->
-    to_bin(L, []).
-
-%% @spec to_int(string()) -> integer()
-%% @doc Convert a hexadecimal string to an integer.
-to_int(L) ->
-    erlang:list_to_integer(L, 16).
-
-%% @spec dehex(char()) -> integer()
-%% @doc Convert a hex digit to its integer value.
-dehex(C) when C >= $0, C =< $9 ->
-    C - $0;
-dehex(C) when C >= $a, C =< $f ->
-    C - $a + 10;
-dehex(C) when C >= $A, C =< $F ->
-    C - $A + 10.
-
-%% @spec hexdigit(integer()) -> char()
-%% @doc Convert an integer less than 16 to a hex digit.
-hexdigit(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdigit(C) when C =< 15 ->
-    C + $a - 10.
-
-%% Internal API
-
-to_hex(<<>>, Acc) ->
-    lists:reverse(Acc);
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdigit(C2), hexdigit(C1) | Acc]).
-
-to_hex_int(0, Acc) ->
-    Acc;
-to_hex_int(I, Acc) ->
-    to_hex_int(I bsr 4, [hexdigit(I band 15) | Acc]).
-
-to_bin([], Acc) ->
-    iolist_to_binary(lists:reverse(Acc));
-to_bin([C1, C2 | Rest], Acc) ->
-    to_bin(Rest, [(dehex(C1) bsl 4) bor dehex(C2) | Acc]).
-
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-to_hex_test() ->
-    "ff000ff1" = to_hex([255, 0, 15, 241]),
-    "ff000ff1" = to_hex(16#ff000ff1),
-    "0" = to_hex(16#0),
-    ok.
-
-to_bin_test() ->
-    <<255, 0, 15, 241>> = to_bin("ff000ff1"),
-    <<255, 0, 10, 161>> = to_bin("Ff000aA1"),
-    ok.
-
-to_int_test() ->
-    16#ff000ff1 = to_int("ff000ff1"),
-    16#ff000aa1 = to_int("FF000Aa1"),
-    16#0 = to_int("0"),
-    ok.
-
--endif.
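Quick shell examples matching the tests above:

    1> mochihex:to_hex([255, 0, 15, 241]).
    "ff000ff1"
    2> mochihex:to_bin("Ff000aA1").
    <<255,0,10,161>>
    3> mochihex:to_int("ff").
    255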

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochijson.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochijson.erl b/src/mochiweb/src/mochijson.erl
deleted file mode 100644
index 2e3d145..0000000
--- a/src/mochiweb/src/mochijson.erl
+++ /dev/null
@@ -1,531 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2006 Mochi Media, Inc.
-
-%% @doc Yet another JSON (RFC 4627) library for Erlang.
--module(mochijson).
--author('bob@mochimedia.com').
--export([encoder/1, encode/1]).
--export([decoder/1, decode/1]).
--export([binary_encoder/1, binary_encode/1]).
--export([binary_decoder/1, binary_decode/1]).
-
-% This is a macro to placate syntax highlighters.
--define(Q, $\").
--define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
--define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
--define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
-
-%% @type iolist() = [char() | binary() | iolist()]
-%% @type iodata() = iolist() | binary()
-%% @type json_string() = atom | string() | binary()
-%% @type json_number() = integer() | float()
-%% @type json_array() = {array, [json_term()]}
-%% @type json_object() = {struct, [{json_string(), json_term()}]}
-%% @type json_term() = json_string() | json_number() | json_array() |
-%%                     json_object()
-%% @type encoding() = utf8 | unicode
-%% @type encoder_option() = {input_encoding, encoding()} |
-%%                          {handler, function()}
-%% @type decoder_option() = {input_encoding, encoding()} |
-%%                          {object_hook, function()}
-%% @type bjson_string() = binary()
-%% @type bjson_number() = integer() | float()
-%% @type bjson_array() = [bjson_term()]
-%% @type bjson_object() = {struct, [{bjson_string(), bjson_term()}]}
-%% @type bjson_term() = bjson_string() | bjson_number() | bjson_array() |
-%%                      bjson_object()
-%% @type binary_encoder_option() = {handler, function()}
-%% @type binary_decoder_option() = {object_hook, function()}
-
--record(encoder, {input_encoding=unicode,
-                  handler=null}).
-
--record(decoder, {input_encoding=utf8,
-                  object_hook=null,
-                  line=1,
-                  column=1,
-                  state=null}).
-
-%% @spec encoder([encoder_option()]) -> function()
-%% @doc Create an encoder/1 with the given options.
-encoder(Options) ->
-    State = parse_encoder_options(Options, #encoder{}),
-    fun (O) -> json_encode(O, State) end.
-
-%% @spec encode(json_term()) -> iolist()
-%% @doc Encode the given term as JSON to an iolist.
-encode(Any) ->
-    json_encode(Any, #encoder{}).
-
-%% @spec decoder([decoder_option()]) -> function()
-%% @doc Create a decoder/1 with the given options.
-decoder(Options) ->
-    State = parse_decoder_options(Options, #decoder{}),
-    fun (O) -> json_decode(O, State) end.
-
-%% @spec decode(iolist()) -> json_term()
-%% @doc Decode the given iolist to Erlang terms.
-decode(S) ->
-    json_decode(S, #decoder{}).
-
-%% @spec binary_decoder([binary_decoder_option()]) -> function()
-%% @doc Create a binary_decoder/1 with the given options.
-binary_decoder(Options) ->
-    mochijson2:decoder(Options).
-
-%% @spec binary_encoder([binary_encoder_option()]) -> function()
-%% @doc Create a binary_encoder/1 with the given options.
-binary_encoder(Options) ->
-    mochijson2:encoder(Options).
-
-%% @spec binary_encode(bjson_term()) -> iolist()
-%% @doc Encode the given term as JSON to an iolist, using lists for arrays and
-%%      binaries for strings.
-binary_encode(Any) ->
-    mochijson2:encode(Any).
-
-%% @spec binary_decode(iolist()) -> bjson_term()
-%% @doc Decode the given iolist to Erlang terms, using lists for arrays and
-%%      binaries for strings.
-binary_decode(S) ->
-    mochijson2:decode(S).
-
-%% Internal API
-
-parse_encoder_options([], State) ->
-    State;
-parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
-parse_encoder_options([{handler, Handler} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{handler=Handler}).
-
-parse_decoder_options([], State) ->
-    State;
-parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
-    parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
-    parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
-
-json_encode(true, _State) ->
-    "true";
-json_encode(false, _State) ->
-    "false";
-json_encode(null, _State) ->
-    "null";
-json_encode(I, _State) when is_integer(I) ->
-    integer_to_list(I);
-json_encode(F, _State) when is_float(F) ->
-    mochinum:digits(F);
-json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
-    json_encode_string(L, State);
-json_encode({array, Props}, State) when is_list(Props) ->
-    json_encode_array(Props, State);
-json_encode({struct, Props}, State) when is_list(Props) ->
-    json_encode_proplist(Props, State);
-json_encode(Bad, #encoder{handler=null}) ->
-    exit({json_encode, {bad_term, Bad}});
-json_encode(Bad, State=#encoder{handler=Handler}) ->
-    json_encode(Handler(Bad), State).
-
-json_encode_array([], _State) ->
-    "[]";
-json_encode_array(L, State) ->
-    F = fun (O, Acc) ->
-                [$,, json_encode(O, State) | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "[", L),
-    lists:reverse([$\] | Acc1]).
-
-json_encode_proplist([], _State) ->
-    "{}";
-json_encode_proplist(Props, State) ->
-    F = fun ({K, V}, Acc) ->
-                KS = case K of 
-                         K when is_atom(K) ->
-                             json_encode_string_utf8(atom_to_list(K));
-                         K when is_integer(K) ->
-                             json_encode_string(integer_to_list(K), State);
-                         K when is_list(K); is_binary(K) ->
-                             json_encode_string(K, State)
-                     end,
-                VS = json_encode(V, State),
-                [$,, VS, $:, KS | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "{", Props),
-    lists:reverse([$\} | Acc1]).
-
-json_encode_string(A, _State) when is_atom(A) ->
-    json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)));
-json_encode_string(B, _State) when is_binary(B) ->
-    json_encode_string_unicode(xmerl_ucs:from_utf8(B));
-json_encode_string(S, #encoder{input_encoding=utf8}) ->
-    json_encode_string_utf8(S);
-json_encode_string(S, #encoder{input_encoding=unicode}) ->
-    json_encode_string_unicode(S).
-
-json_encode_string_utf8(S) ->
-    [?Q | json_encode_string_utf8_1(S)].
-
-json_encode_string_utf8_1([C | Cs]) when C >= 0, C =< 16#7f ->
-    NewC = case C of
-               $\\ -> "\\\\";
-               ?Q -> "\\\"";
-               _ when C >= $\s, C < 16#7f -> C;
-               $\t -> "\\t";
-               $\n -> "\\n";
-               $\r -> "\\r";
-               $\f -> "\\f";
-               $\b -> "\\b";
-               _ when C >= 0, C =< 16#7f -> unihex(C);
-               _ -> exit({json_encode, {bad_char, C}})
-           end,
-    [NewC | json_encode_string_utf8_1(Cs)];
-json_encode_string_utf8_1(All=[C | _]) when C >= 16#80, C =< 16#10FFFF ->
-    [?Q | Rest] = json_encode_string_unicode(xmerl_ucs:from_utf8(All)),
-    Rest;
-json_encode_string_utf8_1([]) ->
-    "\"".
-
-json_encode_string_unicode(S) ->
-    [?Q | json_encode_string_unicode_1(S)].
-
-json_encode_string_unicode_1([C | Cs]) ->
-    NewC = case C of
-               $\\ -> "\\\\";
-               ?Q -> "\\\"";
-               _ when C >= $\s, C < 16#7f -> C;
-               $\t -> "\\t";
-               $\n -> "\\n";
-               $\r -> "\\r";
-               $\f -> "\\f";
-               $\b -> "\\b";
-               _ when C >= 0, C =< 16#10FFFF -> unihex(C);
-               _ -> exit({json_encode, {bad_char, C}})
-           end,
-    [NewC | json_encode_string_unicode_1(Cs)];
-json_encode_string_unicode_1([]) ->
-    "\"".
-
-dehex(C) when C >= $0, C =< $9 ->
-    C - $0;
-dehex(C) when C >= $a, C =< $f ->
-    C - $a + 10;
-dehex(C) when C >= $A, C =< $F ->
-    C - $A + 10.
-
-hexdigit(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdigit(C) when C =< 15 ->
-    C + $a - 10.
-
-unihex(C) when C < 16#10000 ->
-    <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
-    Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
-    [$\\, $u | Digits];
-unihex(C) when C =< 16#10FFFF ->
-    N = C - 16#10000,
-    S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
-    S2 = 16#dc00 bor (N band 16#3ff),
-    [unihex(S1), unihex(S2)].
-
-json_decode(B, S) when is_binary(B) ->
-    json_decode(binary_to_list(B), S);
-json_decode(L, S) ->
-    {Res, L1, S1} = decode1(L, S),
-    {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
-    Res.
-
-decode1(L, S=#decoder{state=null}) ->
-    case tokenize(L, S#decoder{state=any}) of
-        {{const, C}, L1, S1} ->
-            {C, L1, S1};
-        {start_array, L1, S1} ->
-            decode_array(L1, S1#decoder{state=any}, []);
-        {start_object, L1, S1} ->
-            decode_object(L1, S1#decoder{state=key}, [])
-    end.
-
-make_object(V, #decoder{object_hook=null}) ->
-    V;
-make_object(V, #decoder{object_hook=Hook}) ->
-    Hook(V).
-
-decode_object(L, S=#decoder{state=key}, Acc) ->
-    case tokenize(L, S) of
-        {end_object, Rest, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, Rest, S1#decoder{state=null}};
-        {{const, K}, Rest, S1} when is_list(K) ->
-            {colon, L2, S2} = tokenize(Rest, S1),
-            {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
-            decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
-    end;
-decode_object(L, S=#decoder{state=comma}, Acc) ->
-    case tokenize(L, S) of
-        {end_object, Rest, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, Rest, S1#decoder{state=null}};
-        {comma, Rest, S1} ->
-            decode_object(Rest, S1#decoder{state=key}, Acc)
-    end.
-
-decode_array(L, S=#decoder{state=any}, Acc) ->
-    case tokenize(L, S) of
-        {end_array, Rest, S1} ->
-            {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
-        {start_array, Rest, S1} ->
-            {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
-            decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
-        {start_object, Rest, S1} ->
-            {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
-            decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
-        {{const, Const}, Rest, S1} ->
-            decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
-    end;
-decode_array(L, S=#decoder{state=comma}, Acc) ->
-    case tokenize(L, S) of
-        {end_array, Rest, S1} ->
-            {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
-        {comma, Rest, S1} ->
-            decode_array(Rest, S1#decoder{state=any}, Acc)
-    end.
-
-tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
-  when is_list(C); is_binary(C); C >= 16#7f ->
-    List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
-    tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
-tokenize_string("\"" ++ Rest, S, Acc) ->
-    {lists:reverse(Acc), Rest, ?INC_COL(S)};
-tokenize_string("\\\"" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
-tokenize_string("\\\\" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
-tokenize_string("\\/" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
-tokenize_string("\\b" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
-tokenize_string("\\f" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\f | Acc]);
-tokenize_string("\\n" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
-tokenize_string("\\r" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
-tokenize_string("\\t" ++ Rest, S, Acc) ->
-    tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
-tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
-    % coalesce UTF-16 surrogate pair?
-    C = dehex(C0) bor
-        (dehex(C1) bsl 4) bor
-        (dehex(C2) bsl 8) bor 
-        (dehex(C3) bsl 12),
-    tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
-tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
-    tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
-    
-tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
-  when is_list(C); is_binary(C); C >= 16#7f ->
-    List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
-    tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
-tokenize_number([$- | Rest], sign, S, []) ->
-    tokenize_number(Rest, int, ?INC_COL(S), [$-]);
-tokenize_number(Rest, sign, S, []) ->
-    tokenize_number(Rest, int, S, []);
-tokenize_number([$0 | Rest], int, S, Acc) ->
-    tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
-tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
-    tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
-tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
-    tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
-tokenize_number(Rest, int1, S, Acc) ->
-    tokenize_number(Rest, frac, S, Acc);
-tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
-    tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
-tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
-    tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
-tokenize_number(Rest, frac, S, Acc) ->
-    {{int, lists:reverse(Acc)}, Rest, S};
-tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
-    tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
-tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
-    tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
-tokenize_number(Rest, frac1, S, Acc) ->
-    {{float, lists:reverse(Acc)}, Rest, S};
-tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
-    tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
-tokenize_number(Rest, esign, S, Acc) ->
-    tokenize_number(Rest, eint, S, Acc);
-tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
-    tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
-tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
-    tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
-tokenize_number(Rest, eint1, S, Acc) ->
-    {{float, lists:reverse(Acc)}, Rest, S}.
-
-tokenize([], S=#decoder{state=trim}) ->
-    {eof, [], S};
-tokenize([L | Rest], S) when is_list(L) ->
-    tokenize(L ++ Rest, S);
-tokenize([B | Rest], S) when is_binary(B) ->
-    tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
-tokenize("\r\n" ++ Rest, S) ->
-    tokenize(Rest, ?INC_LINE(S));
-tokenize("\n" ++ Rest, S) ->
-    tokenize(Rest, ?INC_LINE(S));
-tokenize([C | Rest], S) when C == $\s; C == $\t ->
-    tokenize(Rest, ?INC_COL(S));
-tokenize("{" ++ Rest, S) ->
-    {start_object, Rest, ?INC_COL(S)};
-tokenize("}" ++ Rest, S) ->
-    {end_object, Rest, ?INC_COL(S)};
-tokenize("[" ++ Rest, S) ->
-    {start_array, Rest, ?INC_COL(S)};
-tokenize("]" ++ Rest, S) ->
-    {end_array, Rest, ?INC_COL(S)};
-tokenize("," ++ Rest, S) ->
-    {comma, Rest, ?INC_COL(S)};
-tokenize(":" ++ Rest, S) ->
-    {colon, Rest, ?INC_COL(S)};
-tokenize("null" ++ Rest, S) ->
-    {{const, null}, Rest, ?ADV_COL(S, 4)};
-tokenize("true" ++ Rest, S) ->
-    {{const, true}, Rest, ?ADV_COL(S, 4)};
-tokenize("false" ++ Rest, S) ->
-    {{const, false}, Rest, ?ADV_COL(S, 5)};
-tokenize("\"" ++ Rest, S) ->
-    {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
-    {{const, String}, Rest1, S1};
-tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
-    case tokenize_number(L, sign, S, []) of
-        {{int, Int}, Rest, S1} ->
-            {{const, list_to_integer(Int)}, Rest, S1};
-        {{float, Float}, Rest, S1} ->
-            {{const, list_to_float(Float)}, Rest, S1}
-    end.
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-%% testing constructs borrowed from the Yaws JSON implementation.
-
-%% Create an object from a list of Key/Value pairs.
-
-obj_new() ->
-    {struct, []}.
-
-is_obj({struct, Props}) ->
-    F = fun ({K, _}) when is_list(K) ->
-                true;
-            (_) ->
-                false
-        end,    
-    lists:all(F, Props).
-
-obj_from_list(Props) ->
-    Obj = {struct, Props},
-    case is_obj(Obj) of
-        true -> Obj;
-        false -> exit(json_bad_object)
-    end.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-
-equiv({struct, Props1}, {struct, Props2}) ->
-    equiv_object(Props1, Props2);
-equiv({array, L1}, {array, L2}) ->
-    equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
-equiv(S1, S2) when is_list(S1), is_list(S2)     -> S1 == S2;
-equiv(true, true) -> true;
-equiv(false, false) -> true;
-equiv(null, null) -> true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-
-equiv_object(Props1, Props2) ->
-    L1 = lists:keysort(1, Props1),
-    L2 = lists:keysort(1, Props2),
-    Pairs = lists:zip(L1, L2),
-    true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
-        equiv(K1, K2) and equiv(V1, V2)
-    end, Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-
-equiv_list([], []) ->
-    true;
-equiv_list([V1 | L1], [V2 | L2]) ->
-    equiv(V1, V2) andalso equiv_list(L1, L2).
-
-e2j_vec_test() ->
-    test_one(e2j_test_vec(utf8), 1).
-
-issue33_test() ->
-    %% http://code.google.com/p/mochiweb/issues/detail?id=33
-    Js = {struct, [{"key", [194, 163]}]},
-    Encoder = encoder([{input_encoding, utf8}]),
-    "{\"key\":\"\\u00a3\"}" = lists:flatten(Encoder(Js)).
-
-test_one([], _N) ->
-    %% io:format("~p tests passed~n", [N-1]),
-    ok;
-test_one([{E, J} | Rest], N) ->
-    %% io:format("[~p] ~p ~p~n", [N, E, J]),
-    true = equiv(E, decode(J)),
-    true = equiv(E, decode(encode(E))),
-    test_one(Rest, 1+N).
-
-e2j_test_vec(utf8) ->
-    [
-    {1, "1"},
-    {3.1416, "3.14160"}, % text representation may truncate or add trailing zeroes
-    {-1, "-1"},
-    {-3.1416, "-3.14160"},
-    {12.0e10, "1.20000e+11"},
-    {1.234E+10, "1.23400e+10"},
-    {-1.234E-10, "-1.23400e-10"},
-    {10.0, "1.0e+01"},
-    {123.456, "1.23456E+2"},
-    {10.0, "1e1"},
-    {"foo", "\"foo\""},
-    {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
-    {"", "\"\""},
-    {"\"", "\"\\\"\""},
-    {"\n\n\n", "\"\\n\\n\\n\""},
-    {"\\", "\"\\\\\""},
-    {"\" \b\f\r\n\t\"", "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
-    {obj_new(), "{}"},
-    {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
-    {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
-     "{\"foo\":\"bar\",\"baz\":123}"},
-    {{array, []}, "[]"},
-    {{array, [{array, []}]}, "[[]]"},
-    {{array, [1, "foo"]}, "[1,\"foo\"]"},
-
-    % json array in a json object
-    {obj_from_list([{"foo", {array, [123]}}]),
-     "{\"foo\":[123]}"},
-
-    % json object in a json object
-    {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
-     "{\"foo\":{\"bar\":true}}"},
-
-    % fold evaluation order
-    {obj_from_list([{"foo", {array, []}},
-                     {"bar", obj_from_list([{"baz", true}])},
-                     {"alice", "bob"}]),
-     "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
-    % json object in a json array
-    {{array, [-123, "foo", obj_from_list([{"bar", {array, []}}]), null]},
-     "[-123,\"foo\",{\"bar\":[]},null]"}
-    ].
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochijson2.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochijson2.erl b/src/mochiweb/src/mochijson2.erl
deleted file mode 100644
index bdf6d77..0000000
--- a/src/mochiweb/src/mochijson2.erl
+++ /dev/null
@@ -1,849 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
-%%      with binaries as strings and arrays as lists (without an {array, _}
-%%      wrapper), and it only knows how to decode UTF-8 (and ASCII).
-%%
-%%      JSON terms are decoded as follows (javascript -> erlang):
-%%      <ul>
-%%          <li>{"key": "value"} ->
-%%              {struct, [{&lt;&lt;"key">>, &lt;&lt;"value">>}]}</li>
-%%          <li>["array", 123, 12.34, true, false, null] ->
-%%              [&lt;&lt;"array">>, 123, 12.34, true, false, null]
-%%          </li>
-%%      </ul>
-%%      <ul>
-%%          <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
-%%          <li>Objects decode to {struct, PropList}</li>
-%%          <li>Numbers decode to integer or float</li>
-%%          <li>true, false, null decode to their respective terms.</li>
-%%      </ul>
-%%      The encoder will accept the same format that the decoder will produce,
-%%      but will also allow additional cases for leniency:
-%%      <ul>
-%%          <li>atoms other than true, false, null will be considered UTF-8
-%%              strings (even as a proplist key)
-%%          </li>
-%%          <li>{json, IoList} will insert IoList directly into the output
-%%              with no validation
-%%          </li>
-%%          <li>{array, Array} will be encoded as Array
-%%              (legacy mochijson style)
-%%          </li>
-%%          <li>A non-empty raw proplist will be encoded as an object as long
-%%              as the first pair does not have an atom key of json, struct,
-%%              or array
-%%          </li>
-%%      </ul>
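To make the mapping above concrete, an Erlang shell session might look like
this (illustrative; encode/1 returns an iolist, flattened here to a binary
for readability — note the atom key permitted by the leniency rules):

    1> mochijson2:decode(<<"{\"key\": \"value\"}">>).
    {struct,[{<<"key">>,<<"value">>}]}
    2> mochijson2:decode("[\"array\", 123, 12.34, true, false, null]").
    [<<"array">>,123,12.34,true,false,null]
    3> iolist_to_binary(mochijson2:encode({struct, [{foo, 1}]})).
    <<"{\"foo\":1}">>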
-
--module(mochijson2).
--author('bob@mochimedia.com').
--export([encoder/1, encode/1]).
--export([decoder/1, decode/1]).
-
-% This is a macro to placate syntax highlighters.
--define(Q, $\").
--define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
-                                 column=N+S#decoder.column}).
--define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
-                              column=1+S#decoder.column}).
--define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
-                               column=1,
-                               line=1+S#decoder.line}).
--define(INC_CHAR(S, C),
-        case C of
-            $\n ->
-                S#decoder{column=1,
-                          line=1+S#decoder.line,
-                          offset=1+S#decoder.offset};
-            _ ->
-                S#decoder{column=1+S#decoder.column,
-                          offset=1+S#decoder.offset}
-        end).
--define(IS_WHITESPACE(C),
-        (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
-
-%% @type iolist() = [char() | binary() | iolist()]
-%% @type iodata() = iolist() | binary()
-%% @type json_string() = atom | binary()
-%% @type json_number() = integer() | float()
-%% @type json_array() = [json_term()]
-%% @type json_object() = {struct, [{json_string(), json_term()}]}
-%% @type json_iolist() = {json, iolist()}
-%% @type json_term() = json_string() | json_number() | json_array() |
-%%                     json_object() | json_iolist()
-
--record(encoder, {handler=null,
-                  utf8=false}).
-
--record(decoder, {object_hook=null,
-                  offset=0,
-                  line=1,
-                  column=1,
-                  state=null}).
-
-%% @spec encoder([encoder_option()]) -> function()
-%% @doc Create an encoder/1 with the given options.
-%% @type encoder_option() = handler_option() | utf8_option()
-%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
-encoder(Options) ->
-    State = parse_encoder_options(Options, #encoder{}),
-    fun (O) -> json_encode(O, State) end.
-
-%% @spec encode(json_term()) -> iolist()
-%% @doc Encode the given term as JSON to an iolist.
-encode(Any) ->
-    json_encode(Any, #encoder{}).
-
-%% @spec decoder([decoder_option()]) -> function()
-%% @doc Create a decoder/1 with the given options.
-decoder(Options) ->
-    State = parse_decoder_options(Options, #decoder{}),
-    fun (O) -> json_decode(O, State) end.
-
-%% @spec decode(iolist()) -> json_term()
-%% @doc Decode the given iolist to Erlang terms.
-decode(S) ->
-    json_decode(S, #decoder{}).
-
-%% Internal API
-
-parse_encoder_options([], State) ->
-    State;
-parse_encoder_options([{handler, Handler} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{handler=Handler});
-parse_encoder_options([{utf8, Switch} | Rest], State) ->
-    parse_encoder_options(Rest, State#encoder{utf8=Switch}).
-
-parse_decoder_options([], State) ->
-    State;
-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
-    parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
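As a sketch of the object_hook option handled above (the {obj, _} tag here is
invented purely for illustration): the hook is applied to every decoded
{struct, Props} term, so it can rewrite objects on the fly:

    Hook = fun({struct, Props}) -> {obj, Props} end,
    Decode = mochijson2:decoder([{object_hook, Hook}]),
    {obj, [{<<"a">>, 1}]} = Decode("{\"a\": 1}").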
-
-json_encode(true, _State) ->
-    <<"true">>;
-json_encode(false, _State) ->
-    <<"false">>;
-json_encode(null, _State) ->
-    <<"null">>;
-json_encode(I, _State) when is_integer(I) ->
-    integer_to_list(I);
-json_encode(F, _State) when is_float(F) ->
-    mochinum:digits(F);
-json_encode(S, State) when is_binary(S); is_atom(S) ->
-    json_encode_string(S, State);
-json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
-                                             K =/= array andalso
-                                             K =/= json) ->
-    json_encode_proplist(Props, State);
-json_encode({struct, Props}, State) when is_list(Props) ->
-    json_encode_proplist(Props, State);
-json_encode(Array, State) when is_list(Array) ->
-    json_encode_array(Array, State);
-json_encode({array, Array}, State) when is_list(Array) ->
-    json_encode_array(Array, State);
-json_encode({json, IoList}, _State) ->
-    IoList;
-json_encode(Bad, #encoder{handler=null}) ->
-    exit({json_encode, {bad_term, Bad}});
-json_encode(Bad, State=#encoder{handler=Handler}) ->
-    json_encode(Handler(Bad), State).
-
-json_encode_array([], _State) ->
-    <<"[]">>;
-json_encode_array(L, State) ->
-    F = fun (O, Acc) ->
-                [$,, json_encode(O, State) | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "[", L),
-    lists:reverse([$\] | Acc1]).
-
-json_encode_proplist([], _State) ->
-    <<"{}">>;
-json_encode_proplist(Props, State) ->
-    F = fun ({K, V}, Acc) ->
-                KS = json_encode_string(K, State),
-                VS = json_encode(V, State),
-                [$,, VS, $:, KS | Acc]
-        end,
-    [$, | Acc1] = lists:foldl(F, "{", Props),
-    lists:reverse([$\} | Acc1]).
-
-json_encode_string(A, State) when is_atom(A) ->
-    L = atom_to_list(A),
-    case json_string_is_safe(L) of
-        true ->
-            [?Q, L, ?Q];
-        false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
-    end;
-json_encode_string(B, State) when is_binary(B) ->
-    case json_bin_is_safe(B) of
-        true ->
-            [?Q, B, ?Q];
-        false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
-    end;
-json_encode_string(I, _State) when is_integer(I) ->
-    [?Q, integer_to_list(I), ?Q];
-json_encode_string(L, State) when is_list(L) ->
-    case json_string_is_safe(L) of
-        true ->
-            [?Q, L, ?Q];
-        false ->
-            json_encode_string_unicode(L, State, [?Q])
-    end.
-
-json_string_is_safe([]) ->
-    true;
-json_string_is_safe([C | Rest]) ->
-    case C of
-        ?Q ->
-            false;
-        $\\ ->
-            false;
-        $\b ->
-            false;
-        $\f ->
-            false;
-        $\n ->
-            false;
-        $\r ->
-            false;
-        $\t ->
-            false;
-        C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
-            false;
-        C when C < 16#7f ->
-            json_string_is_safe(Rest);
-        _ ->
-            false
-    end.
-
-json_bin_is_safe(<<>>) ->
-    true;
-json_bin_is_safe(<<C, Rest/binary>>) ->
-    case C of
-        ?Q ->
-            false;
-        $\\ ->
-            false;
-        $\b ->
-            false;
-        $\f ->
-            false;
-        $\n ->
-            false;
-        $\r ->
-            false;
-        $\t ->
-            false;
-        C when C >= 0, C < $\s; C >= 16#7f ->
-            false;
-        C when C < 16#7f ->
-            json_bin_is_safe(Rest)
-    end.
-
-json_encode_string_unicode([], _State, Acc) ->
-    lists:reverse([$\" | Acc]);
-json_encode_string_unicode([C | Cs], State, Acc) ->
-    Acc1 = case C of
-               ?Q ->
-                   [?Q, $\\ | Acc];
-               %% Escaping solidus is only useful when trying to protect
-               %% against "</script>" injection attacks which are only
-               %% possible when JSON is inserted into a HTML document
-               %% in-line. mochijson2 does not protect you from this, so
-               %% if you do insert directly into HTML then you need to
-               %% uncomment the following case or escape the output of encode.
-               %%
-               %% $/ ->
-               %%    [$/, $\\ | Acc];
-               %%
-               $\\ ->
-                   [$\\, $\\ | Acc];
-               $\b ->
-                   [$b, $\\ | Acc];
-               $\f ->
-                   [$f, $\\ | Acc];
-               $\n ->
-                   [$n, $\\ | Acc];
-               $\r ->
-                   [$r, $\\ | Acc];
-               $\t ->
-                   [$t, $\\ | Acc];
-               C when C >= 0, C < $\s ->
-                   [unihex(C) | Acc];
-               C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
-                   [xmerl_ucs:to_utf8(C) | Acc];
-               C when  C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
-                   [unihex(C) | Acc];
-               C when C < 16#7f ->
-                   [C | Acc];
-               _ ->
-                   exit({json_encode, {bad_char, C}})
-           end,
-    json_encode_string_unicode(Cs, State, Acc1).
-
-hexdigit(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdigit(C) when C =< 15 ->
-    C + $a - 10.
-
-unihex(C) when C < 16#10000 ->
-    <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
-    Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
-    [$\\, $u | Digits];
-unihex(C) when C =< 16#10FFFF ->
-    N = C - 16#10000,
-    S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
-    S2 = 16#dc00 bor (N band 16#3ff),
-    [unihex(S1), unihex(S2)].
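As a worked example of the surrogate arithmetic above: for C = 16#1D120 (the
code point exercised by big_unicode_test further down), N = C - 16#10000 =
16#D120, so S1 = 16#D800 bor ((N bsr 10) band 16#3FF) = 16#D834 and
S2 = 16#DC00 bor (N band 16#3FF) = 16#DD20, which prints as the escape pair
\ud834\udd20.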
-
-json_decode(L, S) when is_list(L) ->
-    json_decode(iolist_to_binary(L), S);
-json_decode(B, S) ->
-    {Res, S1} = decode1(B, S),
-    {eof, _} = tokenize(B, S1#decoder{state=trim}),
-    Res.
-
-decode1(B, S=#decoder{state=null}) ->
-    case tokenize(B, S#decoder{state=any}) of
-        {{const, C}, S1} ->
-            {C, S1};
-        {start_array, S1} ->
-            decode_array(B, S1);
-        {start_object, S1} ->
-            decode_object(B, S1)
-    end.
-
-make_object(V, #decoder{object_hook=null}) ->
-    V;
-make_object(V, #decoder{object_hook=Hook}) ->
-    Hook(V).
-
-decode_object(B, S) ->
-    decode_object(B, S#decoder{state=key}, []).
-
-decode_object(B, S=#decoder{state=key}, Acc) ->
-    case tokenize(B, S) of
-        {end_object, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, S1#decoder{state=null}};
-        {{const, K}, S1} ->
-            {colon, S2} = tokenize(B, S1),
-            {V, S3} = decode1(B, S2#decoder{state=null}),
-            decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
-    end;
-decode_object(B, S=#decoder{state=comma}, Acc) ->
-    case tokenize(B, S) of
-        {end_object, S1} ->
-            V = make_object({struct, lists:reverse(Acc)}, S1),
-            {V, S1#decoder{state=null}};
-        {comma, S1} ->
-            decode_object(B, S1#decoder{state=key}, Acc)
-    end.
-
-decode_array(B, S) ->
-    decode_array(B, S#decoder{state=any}, []).
-
-decode_array(B, S=#decoder{state=any}, Acc) ->
-    case tokenize(B, S) of
-        {end_array, S1} ->
-            {lists:reverse(Acc), S1#decoder{state=null}};
-        {start_array, S1} ->
-            {Array, S2} = decode_array(B, S1),
-            decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
-        {start_object, S1} ->
-            {Array, S2} = decode_object(B, S1),
-            decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
-        {{const, Const}, S1} ->
-            decode_array(B, S1#decoder{state=comma}, [Const | Acc])
-    end;
-decode_array(B, S=#decoder{state=comma}, Acc) ->
-    case tokenize(B, S) of
-        {end_array, S1} ->
-            {lists:reverse(Acc), S1#decoder{state=null}};
-        {comma, S1} ->
-            decode_array(B, S1#decoder{state=any}, Acc)
-    end.
-
-tokenize_string(B, S=#decoder{offset=O}) ->
-    case tokenize_string_fast(B, O) of
-        {escape, O1} ->
-            Length = O1 - O,
-            S1 = ?ADV_COL(S, Length),
-            <<_:O/binary, Head:Length/binary, _/binary>> = B,
-            tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
-        O1 ->
-            Length = O1 - O,
-            <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
-            {{const, String}, ?ADV_COL(S, Length + 1)}
-    end.
-
-tokenize_string_fast(B, O) ->
-    case B of
-        <<_:O/binary, ?Q, _/binary>> ->
-            O;
-        <<_:O/binary, $\\, _/binary>> ->
-            {escape, O};
-        <<_:O/binary, C1, _/binary>> when C1 < 128 ->
-            tokenize_string_fast(B, 1 + O);
-        <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            tokenize_string_fast(B, 2 + O);
-        <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            tokenize_string_fast(B, 3 + O);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            tokenize_string_fast(B, 4 + O);
-        _ ->
-            throw(invalid_utf8)
-    end.
-
-tokenize_string(B, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, ?Q, _/binary>> ->
-            {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
-        <<_:O/binary, "\\\"", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
-        <<_:O/binary, "\\\\", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
-        <<_:O/binary, "\\/", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
-        <<_:O/binary, "\\b", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
-        <<_:O/binary, "\\f", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
-        <<_:O/binary, "\\n", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
-        <<_:O/binary, "\\r", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
-        <<_:O/binary, "\\t", _/binary>> ->
-            tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
-        <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
-            C = erlang:list_to_integer([C3, C2, C1, C0], 16),
-            if C > 16#D7FF, C < 16#DC00 ->
-                %% coalesce UTF-16 surrogate pair
-                <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
-                D = erlang:list_to_integer([D3,D2,D1,D0], 16),
-                [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
-                    D:16/big-unsigned-integer>>),
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
-                tokenize_string(B, ?ADV_COL(S, 12), Acc1);
-            true ->
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
-                tokenize_string(B, ?ADV_COL(S, 6), Acc1)
-            end;
-        <<_:O/binary, C1, _/binary>> when C1 < 128 ->
-            tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
-        <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
-        <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
-        _ ->
-            throw(invalid_utf8)
-    end.
-
-tokenize_number(B, S) ->
-    case tokenize_number(B, sign, S, []) of
-        {{int, Int}, S1} ->
-            {{const, list_to_integer(Int)}, S1};
-        {{float, Float}, S1} ->
-            {{const, list_to_float(Float)}, S1}
-    end.
-
-tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
-    case B of
-        <<_:O/binary, $-, _/binary>> ->
-            tokenize_number(B, int, ?INC_COL(S), [$-]);
-        _ ->
-            tokenize_number(B, int, S, [])
-    end;
-tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, $0, _/binary>> ->
-            tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
-        <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
-            tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
-    end;
-tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
-        _ ->
-            tokenize_number(B, frac, S, Acc)
-    end;
-tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
-            tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
-        <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
-            tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
-        _ ->
-            {{int, lists:reverse(Acc)}, S}
-    end;
-tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
-        <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
-            tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
-        _ ->
-            {{float, lists:reverse(Acc)}, S}
-    end;
-tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
-            tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
-        _ ->
-            tokenize_number(B, eint, S, Acc)
-    end;
-tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
-    end;
-tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
-            tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
-        _ ->
-            {{float, lists:reverse(Acc)}, S}
-    end.
-
-tokenize(B, S=#decoder{offset=O}) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
-            tokenize(B, ?INC_CHAR(S, C));
-        <<_:O/binary, "{", _/binary>> ->
-            {start_object, ?INC_COL(S)};
-        <<_:O/binary, "}", _/binary>> ->
-            {end_object, ?INC_COL(S)};
-        <<_:O/binary, "[", _/binary>> ->
-            {start_array, ?INC_COL(S)};
-        <<_:O/binary, "]", _/binary>> ->
-            {end_array, ?INC_COL(S)};
-        <<_:O/binary, ",", _/binary>> ->
-            {comma, ?INC_COL(S)};
-        <<_:O/binary, ":", _/binary>> ->
-            {colon, ?INC_COL(S)};
-        <<_:O/binary, "null", _/binary>> ->
-            {{const, null}, ?ADV_COL(S, 4)};
-        <<_:O/binary, "true", _/binary>> ->
-            {{const, true}, ?ADV_COL(S, 4)};
-        <<_:O/binary, "false", _/binary>> ->
-            {{const, false}, ?ADV_COL(S, 5)};
-        <<_:O/binary, "\"", _/binary>> ->
-            tokenize_string(B, ?INC_COL(S));
-        <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
-                                         orelse C =:= $- ->
-            tokenize_number(B, S);
-        <<_:O/binary>> ->
-            trim = S#decoder.state,
-            {eof, S}
-    end.
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-
-%% testing constructs borrowed from the Yaws JSON implementation.
-
-%% Create an object from a list of Key/Value pairs.
-
-obj_new() ->
-    {struct, []}.
-
-is_obj({struct, Props}) ->
-    F = fun ({K, _}) when is_binary(K) -> true end,
-    lists:all(F, Props).
-
-obj_from_list(Props) ->
-    Obj = {struct, Props},
-    ?assert(is_obj(Obj)),
-    Obj.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-
-equiv({struct, Props1}, {struct, Props2}) ->
-    equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
-    equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
-equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-
-equiv_object(Props1, Props2) ->
-    L1 = lists:keysort(1, Props1),
-    L2 = lists:keysort(1, Props2),
-    Pairs = lists:zip(L1, L2),
-    true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
-                             equiv(K1, K2) and equiv(V1, V2)
-                     end, Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-
-equiv_list([], []) ->
-    true;
-equiv_list([V1 | L1], [V2 | L2]) ->
-    equiv(V1, V2) andalso equiv_list(L1, L2).
-
-decode_test() ->
-    [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
-    <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
-
-e2j_vec_test() ->
-    test_one(e2j_test_vec(utf8), 1).
-
-test_one([], _N) ->
-    %% io:format("~p tests passed~n", [N-1]),
-    ok;
-test_one([{E, J} | Rest], N) ->
-    %% io:format("[~p] ~p ~p~n", [N, E, J]),
-    true = equiv(E, decode(J)),
-    true = equiv(E, decode(encode(E))),
-    test_one(Rest, 1+N).
-
-e2j_test_vec(utf8) ->
-    [
-     {1, "1"},
-     {3.1416, "3.14160"}, %% text representation may truncate or add trailing zeroes
-     {-1, "-1"},
-     {-3.1416, "-3.14160"},
-     {12.0e10, "1.20000e+11"},
-     {1.234E+10, "1.23400e+10"},
-     {-1.234E-10, "-1.23400e-10"},
-     {10.0, "1.0e+01"},
-     {123.456, "1.23456E+2"},
-     {10.0, "1e1"},
-     {<<"foo">>, "\"foo\""},
-     {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
-     {<<"">>, "\"\""},
-     {<<"\n\n\n">>, "\"\\n\\n\\n\""},
-     {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
-     {obj_new(), "{}"},
-     {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
-     {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
-      "{\"foo\":\"bar\",\"baz\":123}"},
-     {[], "[]"},
-     {[[]], "[[]]"},
-     {[1, <<"foo">>], "[1,\"foo\"]"},
-
-     %% json array in a json object
-     {obj_from_list([{<<"foo">>, [123]}]),
-      "{\"foo\":[123]}"},
-
-     %% json object in a json object
-     {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
-      "{\"foo\":{\"bar\":true}}"},
-
-     %% fold evaluation order
-     {obj_from_list([{<<"foo">>, []},
-                     {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
-                     {<<"alice">>, <<"bob">>}]),
-      "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
-     %% json object in a json array
-     {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
-      "[-123,\"foo\",{\"bar\":[]},null]"}
-    ].
-
-%% test utf8 encoding
-encoder_utf8_test() ->
-    %% safe conversion case (default)
-    [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
-        encode(<<1,"\321\202\320\265\321\201\321\202">>),
-
-    %% raw utf8 output (optional)
-    Enc = mochijson2:encoder([{utf8, true}]),
-    [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
-        Enc(<<1,"\321\202\320\265\321\201\321\202">>).
-
-input_validation_test() ->
-    Good = [
-        {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
-        {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
-        {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
-    ],
-    lists:foreach(fun({CodePoint, UTF8}) ->
-        Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
-        Expect = decode(UTF8)
-    end, Good),
-
-    Bad = [
-        %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
-        <<?Q, 16#80, ?Q>>,
-        %% missing continuations, last byte in each should be 80-BF
-        <<?Q, 16#C2, 16#7F, ?Q>>,
-        <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
-        <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
-        %% we don't support code points > 10FFFF per RFC 3629
-        <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
-        %% escape characters trigger a different code path
-        <<?Q, $\\, $\n, 16#80, ?Q>>
-    ],
-    lists:foreach(
-      fun(X) ->
-              ok = try decode(X) catch invalid_utf8 -> ok end,
-              %% could be {ucs,{bad_utf8_character_code}} or
-              %%          {json_encode,{bad_char,_}}
-              {'EXIT', _} = (catch encode(X))
-      end, Bad).
-
-inline_json_test() ->
-    ?assertEqual(<<"\"iodata iodata\"">>,
-                 iolist_to_binary(
-                   encode({json, [<<"\"iodata">>, " iodata\""]}))),
-    ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
-                 decode(
-                   encode({struct,
-                           [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
-    ok.
-
-big_unicode_test() ->
-    UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
-    ?assertEqual(
-       <<"\"\\ud834\\udd20\"">>,
-       iolist_to_binary(encode(UTF8Seq))),
-    ?assertEqual(
-       UTF8Seq,
-       decode(iolist_to_binary(encode(UTF8Seq)))),
-    ok.
-
-custom_decoder_test() ->
-    ?assertEqual(
-       {struct, [{<<"key">>, <<"value">>}]},
-       (decoder([]))("{\"key\": \"value\"}")),
-    F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
-    ?assertEqual(
-       win,
-       (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
-    ok.
-
-atom_test() ->
-    %% JSON native atoms
-    [begin
-         ?assertEqual(A, decode(atom_to_list(A))),
-         ?assertEqual(iolist_to_binary(atom_to_list(A)),
-                      iolist_to_binary(encode(A)))
-     end || A <- [true, false, null]],
-    %% Atom to string
-    ?assertEqual(
-       <<"\"foo\"">>,
-       iolist_to_binary(encode(foo))),
-    ?assertEqual(
-       <<"\"\\ud834\\udd20\"">>,
-       iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
-    ok.
-
-key_encode_test() ->
-    %% Some forms are accepted as keys that would not be strings in other
-    %% cases
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{foo, 1}]}))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode({struct, [{"foo", 1}]}))),
-	?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{foo, 1}]))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{<<"foo">>, 1}]))),
-    ?assertEqual(
-       <<"{\"foo\":1}">>,
-       iolist_to_binary(encode([{"foo", 1}]))),
-    ?assertEqual(
-       <<"{\"\\ud834\\udd20\":1}">>,
-       iolist_to_binary(
-         encode({struct, [{[16#0001d120], 1}]}))),
-    ?assertEqual(
-       <<"{\"1\":1}">>,
-       iolist_to_binary(encode({struct, [{1, 1}]}))),
-    ok.
-
-unsafe_chars_test() ->
-    Chars = "\"\\\b\f\n\r\t",
-    [begin
-         ?assertEqual(false, json_string_is_safe([C])),
-         ?assertEqual(false, json_bin_is_safe(<<C>>)),
-         ?assertEqual(<<C>>, decode(encode(<<C>>)))
-     end || C <- Chars],
-    ?assertEqual(
-       false,
-       json_string_is_safe([16#0001d120])),
-    ?assertEqual(
-       false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
-    ?assertEqual(
-       [16#0001d120],
-       xmerl_ucs:from_utf8(
-         binary_to_list(
-           decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
-    ?assertEqual(
-       false,
-       json_string_is_safe([16#110000])),
-    ?assertEqual(
-       false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
-    %% solidus can be escaped but isn't unsafe by default
-    ?assertEqual(
-       <<"/">>,
-       decode(<<"\"\\/\"">>)),
-    ok.
-
-int_test() ->
-    ?assertEqual(0, decode("0")),
-    ?assertEqual(1, decode("1")),
-    ?assertEqual(11, decode("11")),
-    ok.
-
-large_int_test() ->
-    ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
-        iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
-    ?assertEqual(<<"2147483649214748364921474836492147483649">>,
-        iolist_to_binary(encode(2147483649214748364921474836492147483649))),
-    ok.
-
-float_test() ->
-    ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
-    ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
-    ok.
-
-handler_test() ->
-    ?assertEqual(
-       {'EXIT',{json_encode,{bad_term,{}}}},
-       catch encode({})),
-    F = fun ({}) -> [] end,
-    ?assertEqual(
-       <<"[]">>,
-       iolist_to_binary((encoder([{handler, F}]))({}))),
-    ok.
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochilists.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochilists.erl b/src/mochiweb/src/mochilists.erl
deleted file mode 100644
index 8981e7b..0000000
--- a/src/mochiweb/src/mochilists.erl
+++ /dev/null
@@ -1,104 +0,0 @@
-%% @copyright Copyright (c) 2010 Mochi Media, Inc.
-%% @author David Reid <dr...@mochimedia.com>
-
-%% @doc Utility functions for dealing with proplists.
-
--module(mochilists).
--author("David Reid <dr...@mochimedia.com>").
--export([get_value/2, get_value/3, is_defined/2, set_default/2, set_defaults/2]).
-
-%% @spec set_default({Key::term(), Value::term()}, Proplist::list()) -> list()
-%%
-%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
-set_default({Key, Value}, Proplist) ->
-    case is_defined(Key, Proplist) of
-        true ->
-            Proplist;
-        false ->
-            [{Key, Value} | Proplist]
-    end.
-
-%% @spec set_defaults([{Key::term(), Value::term()}], Proplist::list()) -> list()
-%%
-%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
-set_defaults(DefaultProps, Proplist) ->
-    lists:foldl(fun set_default/2, Proplist, DefaultProps).
-
-
-%% @spec is_defined(Key::term(), Proplist::list()) -> bool()
-%%
-%% @doc Returns true if Proplist contains at least one entry associated
-%%      with Key; otherwise returns false.
-is_defined(Key, Proplist) ->
-    lists:keyfind(Key, 1, Proplist) =/= false.
-
-
-%% @spec get_value(Key::term(), Proplist::list()) -> term() | undefined
-%%
-%% @doc Return the value of <code>Key</code> or undefined
-get_value(Key, Proplist) ->
-    get_value(Key, Proplist, undefined).
-
-%% @spec get_value(Key::term(), Proplist::list(), Default::term()) -> term()
-%%
-%% @doc Return the value of <code>Key</code> or <code>Default</code>
-get_value(_Key, [], Default) ->
-    Default;
-get_value(Key, Proplist, Default) ->
-    case lists:keyfind(Key, 1, Proplist) of
-        false ->
-            Default;
-        {Key, Value} ->
-            Value
-    end.
-
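A quick illustration of the functions above (the results follow directly from
the definitions):

    undefined = mochilists:get_value(foo, [{bar, baz}]),
    baz = mochilists:get_value(bar, [{bar, baz}], default),
    true = mochilists:is_defined(bar, [{bar, baz}]),
    [{foo, 1}, {bar, baz}] = mochilists:set_default({foo, 1}, [{bar, baz}]).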
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-set_defaults_test() ->
-    ?assertEqual(
-       [{k, v}],
-       set_defaults([{k, v}], [])),
-    ?assertEqual(
-       [{k, v}],
-       set_defaults([{k, vee}], [{k, v}])),
-    ?assertEqual(
-       lists:sort([{kay, vee}, {k, v}]),
-       lists:sort(set_defaults([{k, vee}, {kay, vee}], [{k, v}]))),
-    ok.
-
-set_default_test() ->
-    ?assertEqual(
-       [{k, v}],
-       set_default({k, v}, [])),
-    ?assertEqual(
-       [{k, v}],
-       set_default({k, vee}, [{k, v}])),
-    ok.
-
-get_value_test() ->
-    ?assertEqual(
-       undefined,
-       get_value(foo, [])),
-    ?assertEqual(
-       undefined,
-       get_value(foo, [{bar, baz}])),
-    ?assertEqual(
-       bar,
-       get_value(foo, [{foo, bar}])),
-    ?assertEqual(
-       default,
-       get_value(foo, [], default)),
-    ?assertEqual(
-       default,
-       get_value(foo, [{bar, baz}], default)),
-    ?assertEqual(
-       bar,
-       get_value(foo, [{foo, bar}], default)),
-    ok.
-
--endif.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochilogfile2.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochilogfile2.erl b/src/mochiweb/src/mochilogfile2.erl
deleted file mode 100644
index c34ee73..0000000
--- a/src/mochiweb/src/mochilogfile2.erl
+++ /dev/null
@@ -1,140 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2010 Mochi Media, Inc.
-
-%% @doc Write newline delimited log files, ensuring that if a truncated
-%%      entry is found on log open then it is fixed before writing. Uses
-%%      delayed writes and raw files for performance.
--module(mochilogfile2).
--author('bob@mochimedia.com').
-
--export([open/1, write/2, close/1, name/1]).
-
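A minimal usage sketch (the file name is invented for illustration; each
write appends a newline-terminated record, per write/2 below):

    H = mochilogfile2:open("/tmp/events.log"),
    ok = mochilogfile2:write(H, "event one"),
    ok = mochilogfile2:close(H).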
-%% @spec open(Name) -> Handle
-%% @doc Open the log file Name, creating or appending as necessary. All data
-%%      at the end of the file will be truncated until a newline is found, to
-%%      ensure that all records are complete.
-open(Name) ->
-    {ok, FD} = file:open(Name, [raw, read, write, delayed_write, binary]),
-    fix_log(FD),
-    {?MODULE, Name, FD}.
-
-%% @spec name(Handle) -> string()
-%% @doc Return the path of the log file.
-name({?MODULE, Name, _FD}) ->
-    Name.
-
-%% @spec write(Handle, IoData) -> ok
-%% @doc Write IoData to the log file referenced by Handle.
-write({?MODULE, _Name, FD}, IoData) ->
-    ok = file:write(FD, [IoData, $\n]),
-    ok.
-
-%% @spec close(Handle) -> ok
-%% @doc Close the log file referenced by Handle.
-close({?MODULE, _Name, FD}) ->
-    ok = file:sync(FD),
-    ok = file:close(FD),
-    ok.
-
-fix_log(FD) ->
-    {ok, Location} = file:position(FD, eof),
-    Seek = find_last_newline(FD, Location),
-    {ok, Seek} = file:position(FD, Seek),
-    ok = file:truncate(FD),
-    ok.
-
-%% Seek backwards to the last valid log entry
-find_last_newline(_FD, N) when N =< 1 ->
-    0;
-find_last_newline(FD, Location) ->
-    case file:pread(FD, Location - 1, 1) of
-	{ok, <<$\n>>} ->
-            Location;
-	{ok, _} ->
-	    find_last_newline(FD, Location - 1)
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-name_test() ->
-    D = mochitemp:mkdtemp(),
-    FileName = filename:join(D, "open_close_test.log"),
-    H = open(FileName),
-    ?assertEqual(
-       FileName,
-       name(H)),
-    close(H),
-    file:delete(FileName),
-    file:del_dir(D),
-    ok.
-
-open_close_test() ->
-    D = mochitemp:mkdtemp(),
-    FileName = filename:join(D, "open_close_test.log"),
-    OpenClose = fun () ->
-                        H = open(FileName),
-                        ?assertEqual(
-                           true,
-                           filelib:is_file(FileName)),
-                        ok = close(H),
-                        ?assertEqual(
-                           {ok, <<>>},
-                           file:read_file(FileName)),
-                        ok
-                end,
-    OpenClose(),
-    OpenClose(),
-    file:delete(FileName),
-    file:del_dir(D),
-    ok.
-
-write_test() ->
-    D = mochitemp:mkdtemp(),
-    FileName = filename:join(D, "write_test.log"),
-    F = fun () ->
-                H = open(FileName),
-                write(H, "test line"),
-                close(H),
-                ok
-        end,
-    F(),
-    ?assertEqual(
-       {ok, <<"test line\n">>},
-       file:read_file(FileName)),
-    F(),
-    ?assertEqual(
-       {ok, <<"test line\ntest line\n">>},
-       file:read_file(FileName)),
-    file:delete(FileName),
-    file:del_dir(D),
-    ok.
-
-fix_log_test() ->
-    D = mochitemp:mkdtemp(),
-    FileName = filename:join(D, "write_test.log"),
-    file:write_file(FileName, <<"first line good\nsecond line bad">>),
-    F = fun () ->
-                H = open(FileName),
-                write(H, "test line"),
-                close(H),
-                ok
-        end,
-    F(),
-    ?assertEqual(
-       {ok, <<"first line good\ntest line\n">>},
-       file:read_file(FileName)),
-    file:write_file(FileName, <<"first line bad">>),
-    F(),
-    ?assertEqual(
-       {ok, <<"test line\n">>},
-       file:read_file(FileName)),
-    F(),
-    ?assertEqual(
-       {ok, <<"test line\ntest line\n">>},
-       file:read_file(FileName)),
-    ok.
-
--endif.


[44/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Import JavaScript test runner changes

This is the basis for some temporary work to keep the JavaScript
test suite runnable during the merge transition.


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/03ea534c
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/03ea534c
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/03ea534c

Branch: refs/heads/1843-feature-bigcouch
Commit: 03ea534c0366164055560cc93d8fc32ceb27bf1f
Parents: a204d0a
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 21:47:09 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:52 2014 -0600

----------------------------------------------------------------------
 test/javascript/cli_runner.js |  85 +++++++-----------------
 test/javascript/run.tpl       | 133 ++++++++++++++++++++++++++-----------
 test/javascript/test_setup.js |  89 +++++++++++++++++++++++++
 3 files changed, 205 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/03ea534c/test/javascript/cli_runner.js
----------------------------------------------------------------------
diff --git a/test/javascript/cli_runner.js b/test/javascript/cli_runner.js
index 5d82a51..e8ebd2e 100644
--- a/test/javascript/cli_runner.js
+++ b/test/javascript/cli_runner.js
@@ -9,78 +9,39 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+//
 
-var console = {
-  log: function(arg) {
-    var msg = (arg.toString()).replace(/\n/g, "\n    ");
-    print(msg, true);
-  }
-};
+/*
+ * The Futon test suite was designed to run every test populated into
+ * couchTests. Here we should only be loading one test, so we pick that
+ * single test off the list and run it. If more than one item is loaded
+ * into the test object, we return an error.
+ */
+function runTest() {
+  var count = 0;
+  var start = new Date().getTime();
 
-var fmtStack = function(stack) {
-  if(!stack) {
-    console.log("No stack information");
-    return;
-  }
-  console.log("Trace back (most recent call first):\n");
-  var re = new RegExp("(.*?)@([^:]*):(.*)$");
-  var lines = stack.split("\n");
-  for(var i = 0; i < lines.length; i++) {
-    var line = lines[i];
-    if(!line.length) continue;
-    var match = re.exec(line);
-    if(!match) continue
-    var source = match[1].substr(0, 70);
-    var file = match[2];
-    var lnum = match[3];
-    while(lnum.length < 3) lnum = " " + lnum;
-    console.log(" " + lnum + ": " + file);
-    console.log("      " + source);
+  for(var name in couchTests) {
+      count++;
   }
-}
-
 
-function T(arg1, arg2) {
-  if(!arg1) {
-    var result = (arg2 ? arg2 : arg1);
-    throw((result instanceof Error ? result : Error(result)));
+  if (count !== 1) {
+      console.log('Only one test per file is allowed.');
+      quit(1);
   }
-}
 
-function runTestConsole(num, name, func) {
-  CouchDB.reloadConfig();
-  var passed = false;
   try {
-    func();
-    passed = true;
-    print("ok " + num + " " + name);
+    // Add an artificial busy-wait of ~1.2 seconds before running each test
+    while (new Date().getTime() < start + 1200);
+    couchTests[name]();
+    print('OK');
   } catch(e) {
-    print("not ok " + num + " " + name);
-    console.log("Reason: " + e.message);
+    console.log("FAIL\nReason: " + e.message);
     fmtStack(e.stack);
+    quit(1);
   }
-  return passed;
 }
 
-function runAllTestsConsole() {
-  var numTests = 0;
-  var numPassed = 0;
-  for(var t in couchTests) { numTests += 1; }
-  print("1.." + numTests);
-  var testId = 0;
-  for(var t in couchTests) {
-    testId += 1;
-    if(runTestConsole(testId, t, couchTests[t])) {
-      numPassed++;
-    }
-  }
-  if(numPassed != numTests) {
-    console.log("Test failures: " + (numTests - numPassed));
-    quit(1);
-  } else {
-    console.log("All tests passed");
-  }
-};
+waitForSuccess(CouchDB.isRunning, 'isRunning');
 
-waitForSuccess(CouchDB.getVersion);
-runAllTestsConsole();
+runTest();

http://git-wip-us.apache.org/repos/asf/couchdb/blob/03ea534c/test/javascript/run.tpl
----------------------------------------------------------------------
diff --git a/test/javascript/run.tpl b/test/javascript/run.tpl
index 141e9e8..75192da 100644
--- a/test/javascript/run.tpl
+++ b/test/javascript/run.tpl
@@ -17,7 +17,7 @@ BUILD_DIR=%abs_top_builddir%
 SCRIPT_DIR=$SRC_DIR/share/www/script
 JS_TEST_DIR=$SRC_DIR/test/javascript
 
-COUCHJS=%abs_top_builddir%/src/couch/priv/couchjs
+COUCHJS=%abs_top_builddir%/src/couchdb/priv/couchjs
 COUCH_URI_FILE=%localstaterundir%/couch.uri
 
 # make check-js calls us with MAKE=$(MAKE) so BSDish `gmake` invocations
@@ -27,59 +27,112 @@ if [ -z "$MAKE" ]; then
     MAKE=make
 fi
 
-if [ "$#" -eq 0 ];
-then
-    TEST_SRC="$SCRIPT_DIR/test/*.js"
-else
-    TEST_SRC="$1"
-    if [ ! -f $TEST_SRC ]; then
-        TEST_SRC="$SCRIPT_DIR/test/$1"
-        if [ ! -f $TEST_SRC ]; then
-            TEST_SRC="$SCRIPT_DIR/test/$1.js"
-            if [ ! -f $TEST_SRC ]; then
-                echo "file $1 does not exist"
-                exit 1
-            fi
-        fi
-    fi
-fi
+trap 'abort' EXIT INT
+
+start() {
+	./utils/run -b -r 0 -n \
+		-a $BUILD_DIR/etc/couchdb/default_dev.ini \
+		-a $SRC_DIR/test/random_port.ini \
+		-a $BUILD_DIR/etc/couchdb/local_dev.ini 1>/dev/null
+}
+
+stop() {
+    ./utils/run -d 1>/dev/null
+}
+
+restart() {
+    stop
+    start
+}
 
-# stop CouchDB on exit from various signals
 abort() {
     trap - 0
-    ./utils/run -d
+    stop
     exit 2
 }
 
+process_response() {
+    while read data
+    do
+        if [ $data = 'restart' ];
+        then
+            if [ -z $COUCHDB_NO_START ]; then
+                restart
+            fi
+        else
+            echo "$data"
+        fi
+    done
+}
+
+run() {
+    # start the tests
+    /bin/echo -n "$1 ... "
+    $COUCHJS -H -u $COUCH_URI_FILE \
+        $SCRIPT_DIR/json2.js \
+        $SCRIPT_DIR/sha1.js \
+        $SCRIPT_DIR/oauth.js \
+        $SCRIPT_DIR/couch.js \
+        $SCRIPT_DIR/replicator_db_inc.js \
+        $SCRIPT_DIR/couch_test_runner.js \
+        $JS_TEST_DIR/couch_http.js \
+        $JS_TEST_DIR/test_setup.js \
+        $1 \
+        $JS_TEST_DIR/cli_runner.js | process_response
+
+    if [ -z $RESULT ]; then
+        RESULT=$?
+    elif [ "$?" -eq 1 ]; then
+        RESULT=$?
+    fi
+
+}
+
+run_files() {
+    COUNTER=1
+    FILE_COUNT=$(ls -l $1 | wc -l)
+    FILE_COUNT=$(expr $FILE_COUNT + 0)
+    for TEST_SRC in $1
+    do
+        /bin/echo -n "$COUNTER/$FILE_COUNT "
+        COUNTER=$(expr $COUNTER + 1)
+        run $TEST_SRC
+    done
+}
+
 # start CouchDB
 if [ -z $COUCHDB_NO_START ]; then
     $MAKE dev
-    trap 'abort' EXIT INT
-	./utils/run -b -r 1 -n \
-		-a $BUILD_DIR/etc/couchdb/default_dev.ini \
-		-a $SRC_DIR/test/random_port.ini \
-		-a $BUILD_DIR/etc/couchdb/local_dev.ini
-	sleep 1 # give it a sec
+    start
 fi
 
-# start the tests
-$COUCHJS -H -u $COUCH_URI_FILE \
-	$SCRIPT_DIR/json2.js \
-	$SCRIPT_DIR/sha1.js \
-	$SCRIPT_DIR/oauth.js \
-	$SCRIPT_DIR/couch.js \
-	$SCRIPT_DIR/couch_test_runner.js \
-	$SCRIPT_DIR/couch_tests.js \
-	$TEST_SRC \
-	$JS_TEST_DIR/couch_http.js \
-	$JS_TEST_DIR/cli_runner.js
+echo "Running javascript tests ..."
 
-RESULT=$?
+if [ "$#" -eq 0 ];
+then
+    run_files "$SCRIPT_DIR/test/*.js"
+else
+    if [ -d $1 ]; then
+        run_files "$1/*.js"
+    else
+        TEST_SRC="$1"
+        if [ ! -f $TEST_SRC ]; then
+            TEST_SRC="$SCRIPT_DIR/test/$1"
+            if [ ! -f $TEST_SRC ]; then
+                TEST_SRC="$SCRIPT_DIR/test/$1.js"
+                if [ ! -f $TEST_SRC ]; then
+                    echo "file $1 does not exist"
+                    exit 1
+                fi
+            fi
+        fi
+    fi
+    run $TEST_SRC
+fi
 
 if [ -z $COUCHDB_NO_START ]; then
-    # stop CouchDB
-    ./utils/run -d
-    trap - 0
+    stop
 fi
 
+trap - 0
 exit $RESULT

http://git-wip-us.apache.org/repos/asf/couchdb/blob/03ea534c/test/javascript/test_setup.js
----------------------------------------------------------------------
diff --git a/test/javascript/test_setup.js b/test/javascript/test_setup.js
new file mode 100644
index 0000000..9347455
--- /dev/null
+++ b/test/javascript/test_setup.js
@@ -0,0 +1,89 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+/*
+ * Add global couchTests object required for existing tests.
+ */
+var couchTests = {}; 
+
+var console = { 
+  log: function(arg) {
+    var msg = (arg.toString()).replace(/\n/g, "\n    ");
+    print(msg, true);
+  }
+};
+
+var fmtStack = function(stack) {
+  if(!stack) {
+    console.log("No stack information");
+    return;
+  }
+  console.log("Trace back (most recent call first):\n");
+  var re = new RegExp("(.*?)@([^:]*):(.*)$");
+  var lines = stack.split("\n");
+  for(var i = 0; i < lines.length; i++) {
+    var line = lines[i];
+    if(!line.length) continue;
+    var match = re.exec(line);
+    if(!match) continue
+    var source = match[1].substr(0, 70);
+    var file = match[2];
+    var lnum = match[3];
+    while(lnum.length < 3) lnum = " " + lnum;
+    console.log(" " + lnum + ": " + file);
+    console.log("      " + source);
+  }
+} 
+
+function T(arg1, arg2) {
+  if(!arg1) {
+    var result = (arg2 ? arg2 : arg1);
+    throw((result instanceof Error ? result : Error(result)));
+  }
+} 
+
+function waitForSuccess(fun, tag) {
+  var start = new Date().getTime();
+  var complete = false;
+  
+  while (!complete) {
+    var now = new Date().getTime();
+    if (now > start + 5000) {
+      complete = true;
+      print('FAIL');
+      print(tag);
+      quit(1);
+    }
+    try {
+      while (new Date().getTime() < now + 500);
+      complete = fun();
+    } catch (e) {}
+  }
+}
+
+function restartServer() {
+  print('restart');
+  var start = new Date().getTime();
+  while (new Date().getTime() < start + 1000);
+  waitForSuccess(CouchDB.isRunning, 'restart');
+}
+
+/*
+ * If last_req is an object, we got something back. This might be an error, but
+ * CouchDB is up and running!
+ */
+CouchDB.isRunning = function() {
+  CouchDB.last_req = CouchDB.request("GET", "/");
+  return typeof CouchDB.last_req == 'object';
+};


[10/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_emsort.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_emsort.erl b/src/couch/src/couch_emsort.erl
deleted file mode 100644
index 2a25a23..0000000
--- a/src/couch/src/couch_emsort.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_emsort).
-
-% This is an implementation of an external N-way merge sort. Its primary
-% purpose is to be used during database compaction as an optimization for
-% managing the docid btree.
-%
-% Trunk currently writes the docid btree as it compacts the database, but
-% this is quite inefficient, as the btree is written out of order in the
-% general case because writes are ordered by update_seq.
-%
-% The general design of this module is a very standard merge sort with one
-% caveat due to append-only files. This is described in more detail in the
-% sorting phase.
-%
-% The basic algorithm is in two halves. The first half stores KV pairs to disk
-% which is then followed by the actual sorting phase that streams KVs back
-% to the client using a fold-like function. After some basic definitions we'll
-% describe both phases.
-%
-% Key/Value pairs (aka KV pairs, or KVs) are simply lists of two-tuples with
-% a key as the first element and an arbitrary value as the second. The key of
-% this pair is what is used to determine the sort order, based on native
-% Erlang term comparison.
-%
-% Internally, KVs are stored as lists with a max size defined by
-% #ems.chain_chunk. These lists are then chained together on disk using disk
-% offsets as a poor man's linked list. The basic format of a list looks like
-% {KVs, DiskOffset} where DiskOffset is either the atom nil which means "end
-% of the list" or an integer that is a file position offset that is the
-% location of another {KVs, DiskOffset} term. The head of each list is
-% referred to with a single DiskOffset. The set of terms that extend from
-% this initial DiskOffset to the last {KVs, nil} term is referred to in the
-% code as a chain. Two important facts are that one call to couch_emsort:add/2
-% creates a single chain, and that a chain is always sorted on disk (though it
-% may be sorted in descending order, as discussed later).
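As a concrete sketch of that layout (offsets invented for illustration): a
single add/2 call with three KVs might write the tail term {[{k3, v3}], nil}
at offset 512 and then the head term {[{k1, v1}, {k2, v2}], 512} at offset
1024, so the whole chain is referenced by the single head DiskOffset 1024.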
-%
-% The second major internal structure is the backbone. This is a list of
-% chains with a structure quite similar to the chains themselves, but it
-% holds different data types and makes no guarantee on ordering. The
-% backbone is merely the list of all head DiskOffsets. It uses the same
-% {DiskOffsets, DiskOffset} structure that we use for chains, except that
-% DiskOffsets is a list of integers that refer to the heads of chains. The
-% maximum size of DiskOffsets is defined by #ems.bb_chunk. It is important
-% to note that the backbone has no defined ordering. The other thing of
-% note is that the RAM bounds are loosely defined as:
-%
-%     #ems.bb_chunk * #ems.chain_chunk * avg_size(KV).
-%
-% Build Phase
-% -----------
-%
-% As mentioned, each call to couch_emsort:add/2 creates a chain from the
-% list of KVs that are passed in. This list is first sorted and then the
-% chain is created by foldr-ing (note: r) across the list to build the
-% chain on disk. It is important to note that the final chain is then
-% sorted in ascending order on disk.
-%
-%
-% Sort Phase
-% ----------
-%
-% The sort phase is where the merge sort kicks in. This is generally your
-% average merge sort with a caveat for append only storage. First the
-% general outline.
-%
-% The general outline for this sort is that it iteratively merges chains
-% in the backbone until fewer than #ems.bb_chunk chains exist. At this
-% point it switches to the last merge sort phase where it just streams
-% the sorted KVs back to the client using a fold function.
-%
-% The general chain merging is a pretty standard merge sort. You load up
-% the initial KVs from each chain, pick the next one in sort order, and
-% then when you run out of KVs you're left with a single DiskOffset for
-% the head of a single chain that represents the merge. These new
-% DiskOffsets are used to build the new back bone.
-%
-% The one caveat here is that we're using append only storage. This is
-% important because once we make a pass we've effectively reversed the
-% sort order of each chain. I.e., the first merge results in chains that
-% are ordered in descending order. Since one pass reverses the list,
-% the trick is that each phase does two passes. The first pass picks
-% the smallest KV to write next and the second pass picks the largest.
-% In this manner each time we do a back bone merge we end up with chains
-% that are always sorted in an ascending order.
-%
-% The one downfall is that in the interest of simplicity the sorting is
-% restricted to Erlang's native term sorting. A possible extension would
-% be to allow two comparison functions to be used, but this module is
-% currently only used for docid sorting which is hardcoded to be raw
-% Erlang ordering.
-%
-% Diagram
-% -------
-%
-% If it helps, this is a general diagram of the internal structures. A
-% couple points to note since this is ASCII art. The BB pointers across
-% the top are lists of chains going down. Each BBN item is one of the
-% {DiskOffsets, DiskOffset} structures discussed earlier. Going down,
-% the CMN nodes actually represent #ems.bb_chunk chains in parallel
-% going off the back bone. It is important, and not represented in this
-% diagram, that within these groups the chains don't have to be the same
-% length. That's just a limitation of my ASCII artistic abilities.
-%
-% The BBN* node is marked with a * to denote that it is the only state
-% that we store when writing headers to disk, as it has pointers that
-% lead us to all data in the tree.
-%
-%     BB1 <- BB2 <- BB3 <- BBN*
-%      |      |      |      |
-%      v      v      v      v
-%     CA1    CB1    CC1    CD1
-%      |             |      |
-%      v             v      v
-%     CA2           CC2    CD2
-%      |                    |
-%      v                    v
-%     CA3                  CD3
-%
-
--export([open/1, open/2, get_fd/1, get_state/1]).
--export([add/2, merge/1, sort/1, iter/1, next/1]).
-
-
--record(ems, {
-    fd,
-    root,
-    bb_chunk = 10,
-    chain_chunk = 100
-}).
-
-
-open(Fd) ->
-    {ok, #ems{fd=Fd}}.
-
-
-open(Fd, Options) ->
-    {ok, set_options(#ems{fd=Fd}, Options)}.
-
-
-set_options(Ems, []) ->
-    Ems;
-set_options(Ems, [{root, Root} | Rest]) ->
-    set_options(Ems#ems{root=Root}, Rest);
-set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) ->
-    set_options(Ems#ems{chain_chunk=Count}, Rest);
-set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) ->
-    set_options(Ems#ems{bb_chunk=Count}, Rest).
-
-
-get_fd(#ems{fd=Fd}) ->
-    Fd.
-
-
-get_state(#ems{root=Root}) ->
-    Root.
-
-
-add(Ems, []) ->
-    {ok, Ems};
-add(Ems, KVs) ->
-    Pos = write_kvs(Ems, KVs),
-    {ok, add_bb_pos(Ems, Pos)}.
-
-
-sort(#ems{}=Ems) ->
-    {ok, Ems1} = merge(Ems),
-    iter(Ems1).
-
-
-merge(#ems{root=undefined}=Ems) ->
-    {ok, Ems};
-merge(#ems{}=Ems) ->
-    {ok, decimate(Ems)}.
-
-
-iter(#ems{root=undefined}=Ems) ->
-    {ok, {Ems, []}};
-iter(#ems{root={BB, nil}}=Ems) ->
-    Chains = init_chains(Ems, small, BB),
-    {ok, {Ems, Chains}};
-iter(#ems{root={_, _}}) ->
-    {error, not_merged}.
-
-
-next({_Ems, []}) ->
-    finished;
-next({Ems, Chains}) ->
-    {KV, RestChains} = choose_kv(small, Ems, Chains),
-    {ok, KV, {Ems, RestChains}}.
-
-
-add_bb_pos(#ems{root=undefined}=Ems, Pos) ->
-    Ems#ems{root={[Pos], nil}};
-add_bb_pos(#ems{root={BB, Prev}}=Ems, Pos) ->
-    {NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk),
-    Ems#ems{root={NewBB, NewPrev}}.
-
-
-write_kvs(Ems, KVs) ->
-    % Write the list of KVs to disk in sorted order, in chunks of
-    % #ems.chain_chunk (100 by default). Also make sure the order is
-    % such that they can be streamed back in ascending order.
-    {LastKVs, LastPos} =
-    lists:foldr(fun(KV, Acc) ->
-        append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
-    end, {[], nil}, lists:sort(KVs)),
-    {ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}),
-    Final.
-
-
-decimate(#ems{root={_BB, nil}}=Ems) ->
-    % We have fewer than bb_chunk backbone pointers, so we're
-    % good to start streaming KV's back to the client.
-    Ems;
-decimate(#ems{root={BB, NextBB}}=Ems) ->
-    % To make sure we have a bounded amount of data in RAM
-    % at any given point we first need to decimate the data
-    % by performing the first couple iterations of a merge
-    % sort writing the intermediate results back to disk.
-
-    % The first pass gives us a sort with pointers linked from
-    % largest to smallest.
-    {RevBB, RevNextBB} = merge_back_bone(Ems, small, BB, NextBB),
-
-    % We have to run a second pass so that links are pointed
-    % back from smallest to largest.
-    {FwdBB, FwdNextBB} = merge_back_bone(Ems, big, RevBB, RevNextBB),
-
-    % Continue decimating until we have an acceptable bound on
-    % the number of keys to use.
-    decimate(Ems#ems{root={FwdBB, FwdNextBB}}).
-
-
-merge_back_bone(Ems, Choose, BB, NextBB) ->
-    BBPos = merge_chains(Ems, Choose, BB),
-    merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}).
-
-
-merge_rest_back_bone(_Ems, _Choose, nil, Acc) ->
-    Acc;
-merge_rest_back_bone(Ems, Choose, BBPos, Acc) ->
-    {ok, {BB, NextBB}} = couch_file:pread_term(Ems#ems.fd, BBPos),
-    NewPos = merge_chains(Ems, Choose, BB),
-    {NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk),
-    merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}).
-
-
-merge_chains(Ems, Choose, BB) ->
-    Chains = init_chains(Ems, Choose, BB),
-    merge_chains(Ems, Choose, Chains, {[], nil}).
-
-
-merge_chains(Ems, _Choose, [], ChainAcc) ->
-    {ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc),
-    CPos;
-merge_chains(#ems{chain_chunk=CC}=Ems, Choose, Chains, Acc) ->
-    {KV, RestChains} = choose_kv(Choose, Ems, Chains),
-    {NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC),
-    merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}).
-
-
-init_chains(Ems, Choose, BB) ->
-    Chains = lists:map(fun(CPos) ->
-        {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
-        {KVs, NextKVs}
-    end, BB),
-    order_chains(Choose, Chains).
-
-
-order_chains(small, Chains) -> lists:sort(Chains);
-order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)).
-
-
-choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) ->
-    {KV, Rest};
-choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) ->
-    {ok, Chain} = couch_file:pread_term(Ems#ems.fd, Pos),
-    case Choose of
-        small -> {KV, ins_small_chain(RestChains, Chain, [])};
-        big -> {KV, ins_big_chain(RestChains, Chain, [])}
-    end;
-choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) ->
-    case Choose of
-        small -> {KV, ins_small_chain(RestChains, {RestKVs, Prev}, [])};
-        big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])}
-    end.
-
-
-ins_small_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1<K2 ->
-    ins_small_chain(Rest, C2, [C1 | Acc]);
-ins_small_chain(Rest, Chain, Acc) ->
-    lists:reverse(Acc, [Chain | Rest]).
-
-
-ins_big_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1>K2 ->
-    ins_big_chain(Rest, C2, [C1 | Acc]);
-ins_big_chain(Rest, Chain, Acc) ->
-    lists:reverse(Acc, [Chain | Rest]).
-
-
-append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size ->
-    {ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}),
-    {[Pos], PrevList};
-append_item(_Ems, {List, Prev}, Pos, _Size) ->
-    {[Pos | List], Prev}.
-

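A minimal usage sketch of the API removed above, assuming Fd is an
already-open couch_file process (sort_example/1 and drain/2 are illustrative
names, not taken from any caller in the tree):

    %% Feed unsorted batches into an emsort handle, then stream the KVs
    %% back in ascending key order. Each add/2 call writes one chain.
    sort_example(Fd) ->
        {ok, Ems0} = couch_emsort:open(Fd),
        {ok, Ems1} = couch_emsort:add(Ems0, [{3, c}, {1, a}]),
        {ok, Ems2} = couch_emsort:add(Ems1, [{2, b}]),
        {ok, Iter} = couch_emsort:sort(Ems2),
        drain(Iter, []).

    %% Pull from the iterator until it reports 'finished'.
    drain(Iter, Acc) ->
        case couch_emsort:next(Iter) of
            finished -> lists:reverse(Acc);
            {ok, KV, NextIter} -> drain(NextIter, [KV | Acc])
        end.

Here sort_example/1 would return [{1, a}, {2, b}, {3, c}].
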
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_event_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_event_sup.erl b/src/couch/src/couch_event_sup.erl
deleted file mode 100644
index e9e7214..0000000
--- a/src/couch/src/couch_event_sup.erl
+++ /dev/null
@@ -1,73 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% The purpose of this module is to allow event handlers to participate in Erlang
-%% supervisor trees. It provides a monitorable process that crashes if the event
-%% handler fails. The process, when shut down, deregisters the event handler.
-
--module(couch_event_sup).
--behaviour(gen_server).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/3,start_link/4, stop/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
-
-%
-% Instead of calling the
-% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
-%
-% do this:
-% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
-%
-% The benefit is that the event handler is now part of the process tree, and
-% can be started, restarted, and shut down consistently, like the rest of the
-% server components.
-%
-% And now if the "event" crashes, the supervisor is notified and can restart
-% the event handler.
-%
-% Use this form for a named process:
-% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
-%
-
-start_link(EventMgr, EventHandler, Args) ->
-    gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-start_link(ServerName, EventMgr, EventHandler, Args) ->
-    gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-init({EventMgr, EventHandler, Args}) ->
-    case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
-    ok ->
-        {ok, {EventMgr, EventHandler}};
-    {stop, Error} ->
-        {stop, Error}
-    end.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(_Whatever, _From, State) ->
-    {ok, State}.
-
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
-    {stop, Reason, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.

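A sketch of the supervisor integration the comments above describe, using a
hypothetical my_log gen_event handler (the child spec values are illustrative,
not from any supervisor in the tree):

    %% Supervisor callback wrapping the made-up my_log handler in
    %% couch_event_sup so a handler crash restarts it like any worker.
    init([]) ->
        EventSup = {my_log_event_sup,
                    {couch_event_sup, start_link, [error_logger, my_log, []]},
                    permanent, 1000, worker, [couch_event_sup]},
        {ok, {{one_for_one, 10, 3600}, [EventSup]}}.
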
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_external_manager.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_external_manager.erl b/src/couch/src/couch_external_manager.erl
deleted file mode 100644
index a8cad63..0000000
--- a/src/couch/src/couch_external_manager.erl
+++ /dev/null
@@ -1,115 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_external_manager).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/0, execute/2]).
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link({local, couch_external_manager},
-        couch_external_manager, [], []).
-
-execute(UrlName, JsonReq) ->
-    Pid = gen_server:call(couch_external_manager, {get, UrlName}),
-    case Pid of
-    {error, Reason} ->
-        Reason;
-    _ ->
-        couch_external_server:execute(Pid, JsonReq)
-    end.
-
-handle_config_change("external", UrlName, _, _, _) ->
-    {ok, gen_server:call(couch_external_manager, {config, UrlName})};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-% gen_server API
-
-init([]) ->
-    process_flag(trap_exit, true),
-    Handlers = ets:new(couch_external_manager_handlers, [set, private]),
-    ok = config:listen_for_changes(?MODULE, nil),
-    {ok, Handlers}.
-
-terminate(_Reason, Handlers) ->
-    ets:foldl(fun({_UrlName, Pid}, nil) ->
-        couch_external_server:stop(Pid),
-        nil
-    end, nil, Handlers),
-    ok.
-
-handle_call({get, UrlName}, _From, Handlers) ->
-    case ets:lookup(Handlers, UrlName) of
-    [] ->
-        case config:get("external", UrlName, nil) of
-        nil ->
-            Msg = lists:flatten(
-                io_lib:format("No server configured for ~p.", [UrlName])),
-            {reply, {error, {unknown_external_server, ?l2b(Msg)}}, Handlers};
-        Command ->
-            {ok, NewPid} = couch_external_server:start_link(UrlName, Command),
-            true = ets:insert(Handlers, {UrlName, NewPid}),
-            {reply, NewPid, Handlers}
-        end;
-    [{UrlName, Pid}] ->
-        {reply, Pid, Handlers}
-    end;
-handle_call({config, UrlName}, _From, Handlers) ->
-    % A newly added handler and a handler that had its command
-    % changed are treated exactly the same.
-
-    % Shut down the old handler.
-    case ets:lookup(Handlers, UrlName) of
-    [{UrlName, Pid}] ->
-        couch_external_server:stop(Pid);
-    [] ->
-        ok
-    end,
-    % Wait for next request to boot the handler.
-    {reply, ok, Handlers}.
-
-handle_cast(_Whatever, State) ->
-    {noreply, State}.
-
-handle_info({'EXIT', Pid, normal}, Handlers) ->
-    ?LOG_INFO("EXTERNAL: Server ~p terminated normally", [Pid]),
-    % The process terminated normally without us asking - remove Pid from the
-    % handlers table so we don't attempt to reuse it.
-    ets:match_delete(Handlers, {'_', Pid}),
-    {noreply, Handlers};
-
-handle_info({'EXIT', Pid, Reason}, Handlers) ->
-    ?LOG_INFO("EXTERNAL: Server ~p died. (reason: ~p)", [Pid, Reason]),
-    % Remove Pid from the handlers table so we don't try closing
-    % it a second time in terminate/2.
-    ets:match_delete(Handlers, {'_', Pid}),
-    {stop, normal, Handlers};
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-

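A sketch of how this manager was driven; the "mydemo" handler name and script
path are made up for illustration. With an ini entry such as

    [external]
    mydemo = /usr/local/bin/demo-handler

the first call boots an OS process for the command and later calls reuse it:

    %% Route a JSON request object to the managed OS process; returns the
    %% decoded JSON response, or the error reason if no server is
    %% configured under that name.
    handle_external(JsonReq) ->
        couch_external_manager:execute("mydemo", JsonReq).
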
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_external_server.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_external_server.erl b/src/couch/src/couch_external_server.erl
deleted file mode 100644
index 56406bb..0000000
--- a/src/couch/src/couch_external_server.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_external_server).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start_link/2, stop/1, execute/2]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% External API
-
-start_link(Name, Command) ->
-    gen_server:start_link(couch_external_server, [Name, Command], []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-execute(Pid, JsonReq) ->
-    {json, Json} = gen_server:call(Pid, {execute, JsonReq}, infinity),
-    ?JSON_DECODE(Json).
-
-% Gen Server Handlers
-
-init([Name, Command]) ->
-    ?LOG_INFO("EXTERNAL: Starting process for: ~s", [Name]),
-    ?LOG_INFO("COMMAND: ~s", [Command]),
-    process_flag(trap_exit, true),
-    Timeout = list_to_integer(config:get("couchdb", "os_process_timeout",
-        "5000")),
-    {ok, Pid} = couch_os_process:start_link(Command, [{timeout, Timeout}]),
-    ok = config:listen_for_changes(?MODULE, {Name, Command, Pid}, Pid),
-    {ok, {Name, Command, Pid}}.
-
-terminate(_Reason, {_Name, _Command, Pid}) ->
-    couch_os_process:stop(Pid),
-    ok.
-
-handle_call({execute, JsonReq}, _From, {Name, Command, Pid}) ->
-    {reply, couch_os_process:prompt(Pid, JsonReq), {Name, Command, Pid}}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, State),
-    {noreply, State};
-handle_info({'EXIT', _Pid, normal}, State) ->
-    {noreply, State};
-handle_info({'EXIT', Pid, Reason}, {Name, Command, Pid}) ->
-    ?LOG_INFO("EXTERNAL: Process for ~s exiting. (reason: ~w)", [Name, Reason]),
-    {stop, Reason, {Name, Command, Pid}}.
-
-handle_cast(stop, {Name, Command, Pid}) ->
-    ?LOG_INFO("EXTERNAL: Shutting down ~s", [Name]),
-    exit(Pid, normal),
-    {stop, normal, {Name, Command, Pid}};
-handle_cast(_Whatever, State) ->
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("couchdb", "os_process_timeout", NewTimeout, _, Pid) ->
-    couch_os_process:set_timeout(Pid, list_to_integer(NewTimeout)),
-    {ok, Pid};
-handle_config_change(_, _, _, _, Pid) ->
-    {ok, Pid}.
-

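The per-process lifecycle, sketched directly against this module's API (the
name, command, and request shape are all made up for illustration):

    %% Start one OS process, prompt it once with an ejson request object,
    %% then shut it down.
    demo() ->
        JsonReq = {[{<<"path">>, [<<"db">>, <<"_external">>, <<"mydemo">>]}]},
        {ok, Pid} = couch_external_server:start_link("mydemo",
            "/usr/local/bin/demo-handler"),
        Result = couch_external_server:execute(Pid, JsonReq),
        couch_external_server:stop(Pid),
        Result.
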
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_file.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
deleted file mode 100644
index 7528091..0000000
--- a/src/couch/src/couch_file.erl
+++ /dev/null
@@ -1,587 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file).
--behaviour(gen_server).
-
--include_lib("couch/include/couch_db.hrl").
-
-
--define(INITIAL_WAIT, 60000).
--define(MONITOR_CHECK, 10000).
--define(SIZE_BLOCK, 16#1000). % 4 KiB
-
-
--record(file, {
-    fd,
-    is_sys,
-    eof = 0,
-    db_pid
-}).
-
-% public API
--export([open/1, open/2, close/1, bytes/1, sync/1, truncate/2, set_db_pid/2]).
--export([pread_term/2, pread_iolist/2, pread_binary/2]).
--export([append_binary/2, append_binary_md5/2]).
--export([append_raw_chunk/2, assemble_file_chunk/1, assemble_file_chunk/2]).
--export([append_term/2, append_term/3, append_term_md5/2, append_term_md5/3]).
--export([write_header/2, read_header/1]).
--export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-
-%%----------------------------------------------------------------------
-%% Args:   Valid Options are [create] and [create,overwrite].
-%%  Files are opened in read/write mode.
-%% Returns: On success, {ok, Fd}
-%%  or {error, Reason} if the file could not be opened.
-%%----------------------------------------------------------------------
-
-open(Filepath) ->
-    open(Filepath, []).
-
-open(Filepath, Options) ->
-    case gen_server:start_link(couch_file,
-            {Filepath, Options, self(), Ref = make_ref()}, []) of
-    {ok, Fd} ->
-        {ok, Fd};
-    ignore ->
-        % get the error
-        receive
-        {Ref, Pid, {error, Reason} = Error} ->
-            case process_info(self(), trap_exit) of
-            {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
-            {trap_exit, false} -> ok
-            end,
-            case {lists:member(nologifmissing, Options), Reason} of
-            {true, enoent} -> ok;
-            _ ->
-            ?LOG_ERROR("Could not open file ~s: ~s",
-                [Filepath, file:format_error(Reason)])
-            end,
-            Error
-        end;
-    Error ->
-        % We can't say much here, because it could be any kind of error.
-        % Just let it bubble and an encapsulating subcomponent can perhaps
-        % be more informative. It will likely appear in the SASL log, anyway.
-        Error
-    end.
-
-
-set_db_pid(Fd, Pid) ->
-    gen_server:call(Fd, {set_db_pid, Pid}).
-
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang term to the end of the file.
-%% Args:    Erlang term to serialize and append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to
-%%  the beginning of the serialized term. Use pread_term to read the term
-%%  back.
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_term(Fd, Term) ->
-    append_term(Fd, Term, []).
-
-append_term(Fd, Term, Options) ->
-    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
-    append_binary(Fd, couch_compress:compress(Term, Comp)).
-
-append_term_md5(Fd, Term) ->
-    append_term_md5(Fd, Term, []).
-
-append_term_md5(Fd, Term, Options) ->
-    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
-    append_binary_md5(Fd, couch_compress:compress(Term, Comp)).
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang binary to the end of the file.
-%% Args:    Erlang binary to append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to the
-%%  beginning of the binary. Use pread_binary to read it back.
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_binary(Fd, Bin) ->
-    gen_server:call(Fd, {append_bin, assemble_file_chunk(Bin)}, infinity).
-    
-append_binary_md5(Fd, Bin) ->
-    gen_server:call(Fd,
-        {append_bin, assemble_file_chunk(Bin, couch_util:md5(Bin))}, infinity).
-
-append_raw_chunk(Fd, Chunk) ->
-    gen_server:call(Fd, {append_bin, Chunk}, infinity).
-
-
-assemble_file_chunk(Bin) ->
-    [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
-
-assemble_file_chunk(Bin, Md5) ->
-    [<<1:1/integer, (iolist_size(Bin)):31/integer>>, Md5, Bin].
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a term from a file that was written with append_term
-%% Args:    Pos, the offset into the file where the term is serialized.
-%% Returns: {ok, Term}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-
-pread_term(Fd, Pos) ->
-    {ok, Bin} = pread_binary(Fd, Pos),
-    {ok, couch_compress:decompress(Bin)}.
-
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a binary from a file that was written with append_binary
-%% Args:    Pos, the offset into the file where the binary is serialized.
-%% Returns: {ok, Bin}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-pread_binary(Fd, Pos) ->
-    {ok, L} = pread_iolist(Fd, Pos),
-    {ok, iolist_to_binary(L)}.
-
-
-pread_iolist(Fd, Pos) ->
-    case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of
-    {ok, IoList, <<>>} ->
-        {ok, IoList};
-    {ok, IoList, Md5} ->
-        case couch_util:md5(IoList) of
-        Md5 ->
-            {ok, IoList};
-        _ ->
-            twig:log(emerg, "File corruption in ~p at position ~B",
-                     [Fd, Pos]),
-            exit({file_corruption, <<"file corruption">>})
-        end;
-    Error ->
-        Error
-    end.
-
-%%----------------------------------------------------------------------
-%% Purpose: The length of a file, in bytes.
-%% Returns: {ok, Bytes}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-% length in bytes
-bytes(Fd) ->
-    gen_server:call(Fd, bytes, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Truncate a file to the given position, in bytes.
-%% Returns: ok
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-truncate(Fd, Pos) ->
-    gen_server:call(Fd, {truncate, Pos}, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Ensure all bytes written to the file are flushed to disk.
-%% Returns: ok
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-sync(Filepath) when is_list(Filepath) ->
-    {ok, Fd} = file:open(Filepath, [append, raw]),
-    try ok = file:sync(Fd) after ok = file:close(Fd) end;
-sync(Fd) ->
-    gen_server:call(Fd, sync, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Close the file.
-%% Returns: ok
-%%----------------------------------------------------------------------
-close(Fd) ->
-    gen_server:call(Fd, close, infinity).
-
-
-delete(RootDir, Filepath) ->
-    delete(RootDir, Filepath, true).
-
-
-delete(RootDir, Filepath, Async) ->
-    DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
-    case file:rename(Filepath, DelFile) of
-    ok ->
-        if (Async) ->
-            spawn(file, delete, [DelFile]),
-            ok;
-        true ->
-            file:delete(DelFile)
-        end;
-    Error ->
-        Error
-    end.
-
-
-nuke_dir(RootDelDir, Dir) ->
-    FoldFun = fun(File) ->
-        Path = Dir ++ "/" ++ File,
-        case filelib:is_dir(Path) of
-            true ->
-                ok = nuke_dir(RootDelDir, Path),
-                file:del_dir(Path);
-            false ->
-                delete(RootDelDir, Path, false)
-        end
-    end,
-    case file:list_dir(Dir) of
-        {ok, Files} ->
-            lists:foreach(FoldFun, Files),
-            ok = file:del_dir(Dir);
-        {error, enoent} ->
-            ok
-    end.
-
-
-init_delete_dir(RootDir) ->
-    Dir = filename:join(RootDir,".delete"),
-    % note: ensure_dir requires an actual filename component, which is the
-    % reason for "foo".
-    filelib:ensure_dir(filename:join(Dir,"foo")),
-    filelib:fold_files(Dir, ".*", true,
-        fun(Filename, _) ->
-            ok = file:delete(Filename)
-        end, ok).
-
-
-read_header(Fd) ->
-    case gen_server:call(Fd, find_header, infinity) of
-    {ok, Bin} ->
-        {ok, binary_to_term(Bin)};
-    Else ->
-        Else
-    end.
-
-write_header(Fd, Data) ->
-    Bin = term_to_binary(Data),
-    Md5 = couch_util:md5(Bin),
-    % now we assemble the final header binary and write to disk
-    FinalBin = <<Md5/binary, Bin/binary>>,
-    gen_server:call(Fd, {write_header, FinalBin}, infinity).
-
-
-init_status_error(ReturnPid, Ref, Error) ->
-    ReturnPid ! {Ref, self(), Error},
-    ignore.
-
-% server functions
-
-init({Filepath, Options, ReturnPid, Ref}) ->
-    process_flag(trap_exit, true),
-    OpenOptions = file_open_options(Options),
-    case lists:member(create, Options) of
-    true ->
-        filelib:ensure_dir(Filepath),
-        case file:open(Filepath, OpenOptions) of
-        {ok, Fd} ->
-            {ok, Length} = file:position(Fd, eof),
-            case Length > 0 of
-            true ->
-                % this means the file already exists and has data.
-                % FYI: We don't differentiate between empty files and non-existent
-                % files here.
-                case lists:member(overwrite, Options) of
-                true ->
-                    {ok, 0} = file:position(Fd, 0),
-                    ok = file:truncate(Fd),
-                    ok = file:sync(Fd),
-                    maybe_track_open_os_files(Options),
-                    erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
-                    {ok, #file{fd=Fd, is_sys=lists:member(sys_db, Options)}};
-                false ->
-                    ok = file:close(Fd),
-                    init_status_error(ReturnPid, Ref, {error, eexist})
-                end;
-            false ->
-                maybe_track_open_os_files(Options),
-                erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
-                {ok, #file{fd=Fd, is_sys=lists:member(sys_db, Options)}}
-            end;
-        Error ->
-            init_status_error(ReturnPid, Ref, Error)
-        end;
-    false ->
-        % open in read mode first, so we don't create the file if it doesn't exist.
-        case file:open(Filepath, [read, raw]) of
-        {ok, Fd_Read} ->
-            {ok, Fd} = file:open(Filepath, OpenOptions),
-            ok = file:close(Fd_Read),
-            maybe_track_open_os_files(Options),
-            {ok, Eof} = file:position(Fd, eof),
-            erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
-            {ok, #file{fd=Fd, eof=Eof, is_sys=lists:member(sys_db, Options)}};
-        Error ->
-            init_status_error(ReturnPid, Ref, Error)
-        end
-    end.
-
-file_open_options(Options) ->
-    [read, raw, binary] ++ case lists:member(read_only, Options) of
-    true ->
-        [];
-    false ->
-        [append]
-    end.
-
-maybe_track_open_os_files(Options) ->
-    case not lists:member(sys_db, Options) of
-        true ->
-            couch_stats_collector:track_process_count({couchdb, open_os_files});
-        false ->
-            ok
-    end.
-
-terminate(_Reason, #file{fd = nil}) ->
-    ok;
-terminate(_Reason, #file{fd = Fd}) ->
-    ok = file:close(Fd).
-
-handle_call(close, _From, #file{fd=Fd}=File) ->
-    {stop, normal, file:close(Fd), File#file{fd = nil}};
-
-handle_call({pread_iolist, Pos}, _From, File) ->
-    {RawData, NextPos} = try
-        % up to 8 KiB of read-ahead (2 * ?SIZE_BLOCK)
-        read_raw_iolist_int(File, Pos, 2 * ?SIZE_BLOCK - (Pos rem ?SIZE_BLOCK))
-    catch
-    _:_ ->
-        read_raw_iolist_int(File, Pos, 4)
-    end,
-    <<Prefix:1/integer, Len:31/integer, RestRawData/binary>> =
-        iolist_to_binary(RawData),
-    case Prefix of
-    1 ->
-        {Md5, IoList} = extract_md5(
-            maybe_read_more_iolist(RestRawData, 16 + Len, NextPos, File)),
-        {reply, {ok, IoList, Md5}, File};
-    0 ->
-        IoList = maybe_read_more_iolist(RestRawData, Len, NextPos, File),
-        {reply, {ok, IoList, <<>>}, File}
-    end;
-
-handle_call(bytes, _From, #file{fd = Fd} = File) ->
-    {reply, file:position(Fd, eof), File};
-
-handle_call({set_db_pid, Pid}, _From, #file{db_pid=OldPid}=File) ->
-    case is_pid(OldPid) of
-        true -> unlink(OldPid);
-        false -> ok
-    end,
-    link(Pid),
-    {reply, ok, File#file{db_pid=Pid}};
-
-handle_call(sync, _From, #file{fd=Fd}=File) ->
-    {reply, file:sync(Fd), File};
-
-handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
-    {ok, Pos} = file:position(Fd, Pos),
-    case file:truncate(Fd) of
-    ok ->
-        {reply, ok, File#file{eof = Pos}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
-    Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
-    Size = iolist_size(Blocks),
-    case file:write(Fd, Blocks) of
-    ok ->
-        {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
-    BinSize = byte_size(Bin),
-    case Pos rem ?SIZE_BLOCK of
-    0 ->
-        Padding = <<>>;
-    BlockOffset ->
-        Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
-    end,
-    FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
-    case file:write(Fd, FinalBin) of
-    ok ->
-        {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
-    {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-
-handle_cast(close, Fd) ->
-    {stop,normal,Fd}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info(maybe_close, File) ->
-    case is_idle(File) of
-        true ->
-            {stop, normal, File};
-        false ->
-            erlang:send_after(?MONITOR_CHECK, self(), maybe_close),
-            {noreply, File}
-    end;
-
-handle_info({'EXIT', Pid, _}, #file{db_pid=Pid}=File) ->
-    case is_idle(File) of
-        true -> {stop, normal, File};
-        false -> {noreply, File}
-    end;
-
-handle_info({'EXIT', _, normal}, Fd) ->
-    {noreply, Fd};
-handle_info({'EXIT', _, Reason}, Fd) ->
-    {stop, Reason, Fd}.
-
-
-find_header(_Fd, -1) ->
-    no_valid_header;
-find_header(Fd, Block) ->
-    case (catch load_header(Fd, Block)) of
-    {ok, Bin} ->
-        {ok, Bin};
-    _Error ->
-        find_header(Fd, Block -1)
-    end.
-
-load_header(Fd, Block) ->
-    {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
-        file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
-    TotalBytes = calculate_total_read_len(5, HeaderLen),
-    case TotalBytes > byte_size(RestBlock) of
-    false ->
-        <<RawBin:TotalBytes/binary, _/binary>> = RestBlock;
-    true ->
-        {ok, Missing} = file:pread(
-            Fd, (Block * ?SIZE_BLOCK) + 5 + byte_size(RestBlock),
-            TotalBytes - byte_size(RestBlock)),
-        RawBin = <<RestBlock/binary, Missing/binary>>
-    end,
-    <<Md5Sig:16/binary, HeaderBin/binary>> =
-        iolist_to_binary(remove_block_prefixes(5, RawBin)),
-    Md5Sig = couch_util:md5(HeaderBin),
-    {ok, HeaderBin}.
-
-maybe_read_more_iolist(Buffer, DataSize, _, _)
-    when DataSize =< byte_size(Buffer) ->
-    <<Data:DataSize/binary, _/binary>> = Buffer,
-    [Data];
-maybe_read_more_iolist(Buffer, DataSize, NextPos, File) ->
-    {Missing, _} =
-        read_raw_iolist_int(File, NextPos, DataSize - byte_size(Buffer)),
-    [Buffer, Missing].
-
--spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
-    {Data::iolist(), CurPos::non_neg_integer()}.
-read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
-    read_raw_iolist_int(Fd, Pos, Len);
-read_raw_iolist_int(#file{fd = Fd}, Pos, Len) ->
-    BlockOffset = Pos rem ?SIZE_BLOCK,
-    TotalBytes = calculate_total_read_len(BlockOffset, Len),
-    {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
-    {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes}.
-
--spec extract_md5(iolist()) -> {binary(), iolist()}.
-extract_md5(FullIoList) ->
-    {Md5List, IoList} = split_iolist(FullIoList, 16, []),
-    {iolist_to_binary(Md5List), IoList}.
-
-calculate_total_read_len(0, FinalLen) ->
-    calculate_total_read_len(1, FinalLen) + 1;
-calculate_total_read_len(BlockOffset, FinalLen) ->
-    case ?SIZE_BLOCK - BlockOffset of
-    BlockLeft when BlockLeft >= FinalLen ->
-        FinalLen;
-    BlockLeft ->
-        FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
-            if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
-                true -> 1 end
-    end.
-
-remove_block_prefixes(_BlockOffset, <<>>) ->
-    [];
-remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
-    remove_block_prefixes(1, Rest);
-remove_block_prefixes(BlockOffset, Bin) ->
-    BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
-    case size(Bin) of
-    Size when Size > BlockBytesAvailable ->
-        <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
-        [DataBlock | remove_block_prefixes(0, Rest)];
-    _Size ->
-        [Bin]
-    end.
-
-make_blocks(_BlockOffset, []) ->
-    [];
-make_blocks(0, IoList) ->
-    [<<0>> | make_blocks(1, IoList)];
-make_blocks(BlockOffset, IoList) ->
-    case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
-    {Begin, End} ->
-        [Begin | make_blocks(0, End)];
-    _SplitRemaining ->
-        IoList
-    end.
-
-%% @doc Returns a tuple where the first element contains the leading SplitAt
-%% bytes of the original iolist, and the 2nd element is the tail. If SplitAt
-%% is larger than byte_size(IoList), return the difference.
--spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
-    {iolist(), iolist()} | non_neg_integer().
-split_iolist(List, 0, BeginAcc) ->
-    {lists:reverse(BeginAcc), List};
-split_iolist([], SplitAt, _BeginAcc) ->
-    SplitAt;
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
-    split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
-    <<Begin:SplitAt/binary,End/binary>> = Bin,
-    split_iolist([End | Rest], 0, [Begin | BeginAcc]);
-split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
-    case split_iolist(Sublist, SplitAt, BeginAcc) of
-    {Begin, End} ->
-        {Begin, [End | Rest]};
-    SplitRemaining ->
-        split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
-    end;
-split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
-    split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
-
-
-% System dbs aren't monitored by couch_stats_collector
-is_idle(#file{is_sys=true}) ->
-    case process_info(self(), monitored_by) of
-        {monitored_by, []} -> true;
-        _ -> false
-    end;
-is_idle(#file{is_sys=false}) ->
-    case process_info(self(), monitored_by) of
-        {monitored_by, []} -> true;
-        {monitored_by, [_]} -> true;
-        _ -> false
-    end.

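A round-trip sketch of the append/pread API above (the path and term are
illustrative):

    %% Append a term, keep the returned offset, flush, and read it back.
    demo() ->
        {ok, Fd} = couch_file:open("/tmp/example.couch", [create, overwrite]),
        {ok, Pos, _NumBytes} = couch_file:append_term(Fd, {kv, <<"key">>, 42}),
        ok = couch_file:sync(Fd),
        {ok, {kv, <<"key">>, 42}} = couch_file:pread_term(Fd, Pos),
        ok = couch_file:close(Fd).

The offset returned by append_term/2 is what callers persist (e.g. in a header
via write_header/2) so the term can be located again after reopening the file.
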
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_httpd.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
deleted file mode 100644
index db90129..0000000
--- a/src/couch/src/couch_httpd.erl
+++ /dev/null
@@ -1,1082 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/0, start_link/1, stop/0, handle_request/5]).
-
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
--export([path/1,absolute_uri/2,body_length/1]).
--export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
--export([make_fun_spec_strs/1]).
--export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
--export([doc_etag/1, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
--export([start_chunked_response/3,send_chunk/2,log_request/2]).
--export([start_response_length/4, start_response/3, send/2]).
--export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]).
--export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
--export([http_1_0_keep_alive/2]).
-
-start_link() ->
-    start_link(http).
-start_link(http) ->
-    Port = config:get("httpd", "port", "5984"),
-    start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
-    Port = config:get("ssl", "port", "6984"),
-    CertFile = config:get("ssl", "cert_file", nil),
-    KeyFile = config:get("ssl", "key_file", nil),
-    Options = case CertFile /= nil andalso KeyFile /= nil of
-        true ->
-            SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
-
-            %% set password if one is needed for the cert
-            SslOpts1 = case config:get("ssl", "password", nil) of
-                nil -> SslOpts;
-                Password ->
-                    SslOpts ++ [{password, Password}]
-            end,
-            % do we verify certificates ?
-            FinalSslOpts = case config:get("ssl",
-                    "verify_ssl_certificates", "false") of
-                "false" -> SslOpts1;
-                "true" ->
-                    case config:get("ssl",
-                            "cacert_file", nil) of
-                        nil ->
-                            io:format("Verify SSL certificate "
-                                ++"enabled but file containing "
-                                ++"PEM encoded CA certificates is "
-                                ++"missing", []),
-                            throw({error, missing_cacerts});
-                        CaCertFile ->
-                            Depth = list_to_integer(config:get("ssl",
-                                    "ssl_certificate_max_depth",
-                                    "1")),
-                            FinalOpts = [
-                                {cacertfile, CaCertFile},
-                                {depth, Depth},
-                                {verify, verify_peer}],
-                            % allows custom verify fun.
-                            case config:get("ssl",
-                                    "verify_fun", nil) of
-                                nil -> FinalOpts;
-                                SpecStr ->
-                                    FinalOpts
-                                    ++ [{verify_fun, make_arity_3_fun(SpecStr)}]
-                            end
-                    end
-            end,
-
-            [{port, Port},
-                {ssl, true},
-                {ssl_opts, FinalSslOpts}];
-        false ->
-            io:format("SSL enabled but PEM certificates are missing.", []),
-            throw({error, missing_certs})
-    end,
-    start_link(https, Options).
-start_link(Name, Options) ->
-    BindAddress = config:get("httpd", "bind_address", any),
-    validate_bind_address(BindAddress),
-    DefaultSpec = "{couch_httpd_db, handle_request}",
-    DefaultFun = make_arity_1_fun(
-        config:get("httpd", "default_handler", DefaultSpec)
-    ),
-
-    UrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
-        end, config:get("httpd_global_handlers")),
-
-    DbUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
-        end, config:get("httpd_db_handlers")),
-
-    DesignUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
-        end, config:get("httpd_design_handlers")),
-
-    UrlHandlers = dict:from_list(UrlHandlersList),
-    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
-    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
-    {ok, ServerOptions} = couch_util:parse_term(
-        config:get("httpd", "server_options", "[]")),
-    {ok, SocketOptions} = couch_util:parse_term(
-        config:get("httpd", "socket_options", "[]")),
-
-    set_auth_handlers(),
-
-    % ensure uuid is set so that concurrent replications
-    % get the same value.
-    couch_server:get_uuid(),
-
-    Loop = fun(Req)->
-        case SocketOptions of
-        [] ->
-            ok;
-        _ ->
-            ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
-        end,
-        apply(?MODULE, handle_request, [
-            Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
-        ])
-    end,
-
-    % set mochiweb options
-    FinalOptions = lists:append([Options, ServerOptions, [
-            {loop, Loop},
-            {name, Name},
-            {ip, BindAddress}]]),
-
-    % launch mochiweb
-    case mochiweb_http:start(FinalOptions) of
-        {ok, MochiPid} ->
-            {ok, MochiPid};
-        {error, Reason} ->
-            io:format("Failure to start Mochiweb: ~s~n",[Reason]),
-            throw({error, Reason})
-    end.
-
-
-stop() ->
-    mochiweb_http:stop(couch_httpd),
-    mochiweb_http:stop(https).
-
-
-set_auth_handlers() ->
-    AuthenticationSrcs = make_fun_spec_strs(
-        config:get("httpd", "authentication_handlers", "")),
-    AuthHandlers = lists:map(
-        fun(A) -> {make_arity_1_fun(A), ?l2b(A)} end, AuthenticationSrcs),
-    ok = application:set_env(couch, auth_handlers, AuthHandlers).
-
-% SpecStr is a string like "{my_module, my_fun}"
-%  or "{my_module, my_fun, <<"my_arg">>}"
-make_arity_1_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg) -> Mod:Fun(Arg) end
-    end.
-
-make_arity_2_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
-    end.
-
-make_arity_3_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
-    end.
-
-% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
-make_fun_spec_strs(SpecStr) ->
-    re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
-
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
-    DesignUrlHandlers) ->
-
-    MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
-    handle_request_int(MochiReq1, DefaultFun,
-                UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
-
-handle_request_int(MochiReq, DefaultFun,
-            UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
-    Begin = now(),
-    % for the path, use the raw path with the query string and fragment
-    % removed, but URL quoting left intact
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Headers = MochiReq:get(headers),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-requested-path") of
-                undefined -> RawUri;
-                R -> R
-            end;
-        P -> P
-    end,
-
-    HandlerKey =
-    case mochiweb_util:partition(Path, "/") of
-    {"", "", ""} ->
-        <<"/">>; % Special case the root url handler
-    {FirstPart, _, _} ->
-        list_to_binary(FirstPart)
-    end,
-    ?LOG_DEBUG("~p ~s ~p from ~p~nHeaders: ~p", [
-        MochiReq:get(method),
-        RawUri,
-        MochiReq:get(version),
-        MochiReq:get(peer),
-        mochiweb_headers:to_list(MochiReq:get(headers))
-    ]),
-
-    Method1 =
-    case MochiReq:get(method) of
-        % already an atom
-        Meth when is_atom(Meth) -> Meth;
-
-        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.) so convert
-        % when possible (if any module references the atom, then it already exists).
-        Meth -> couch_util:to_existing_atom(Meth)
-    end,
-    increment_method_stats(Method1),
-
-    % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
-    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
-    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
-                                                 "PUT", "DELETE",
-                                                 "TRACE", "CONNECT",
-                                                 "COPY"]) of
-    true ->
-        ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
-        case Method1 of
-        'POST' -> couch_util:to_existing_atom(MethodOverride);
-        _ ->
-            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
-            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
-            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
-            Method1
-        end;
-    _ -> Method1
-    end,
-
-    % alias HEAD to GET as mochiweb takes care of stripping the body
-    Method = case Method2 of
-        'HEAD' -> 'GET';
-        Other -> Other
-    end,
-
-    HttpReq = #httpd{
-        mochi_req = MochiReq,
-        peer = MochiReq:get(peer),
-        method = Method,
-        requested_path_parts =
-            [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
-        path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
-        db_url_handlers = DbUrlHandlers,
-        design_url_handlers = DesignUrlHandlers,
-        default_fun = DefaultFun,
-        url_handlers = UrlHandlers,
-        user_ctx = erlang:erase(pre_rewrite_user_ctx)
-    },
-
-    HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-    {ok, AuthHandlers} = application:get_env(couch, auth_handlers),
-
-    {ok, Resp} =
-    try
-        case couch_httpd_cors:is_preflight_request(HttpReq) of
-        #httpd{} ->
-            case authenticate_request(HttpReq, AuthHandlers) of
-            #httpd{} = Req ->
-                HandlerFun(Req);
-            Response ->
-                Response
-            end;
-        Response ->
-            Response
-        end
-    catch
-        throw:{http_head_abort, Resp0} ->
-            {ok, Resp0};
-        throw:{invalid_json, S} ->
-            ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []),
-            ?LOG_DEBUG("Invalid JSON: ~p",[S]),
-            send_error(HttpReq, {bad_request, invalid_json});
-        throw:unacceptable_encoding ->
-            ?LOG_ERROR("unsupported encoding method for the response", []),
-            send_error(HttpReq, {not_acceptable, "unsupported encoding"});
-        throw:bad_accept_encoding_value ->
-            ?LOG_ERROR("received invalid Accept-Encoding header", []),
-            send_error(HttpReq, bad_request);
-        exit:normal ->
-            exit(normal);
-        exit:snappy_nif_not_loaded ->
-            ErrorReason = "To access the database or view index, Apache CouchDB"
-                " must be built with Erlang OTP R13B04 or higher.",
-            ?LOG_ERROR("~s", [ErrorReason]),
-            send_error(HttpReq, {bad_otp_release, ErrorReason});
-        exit:{body_too_large, _} ->
-            send_error(HttpReq, request_entity_too_large);
-        throw:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]),
-            ?LOG_DEBUG("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error);
-        error:badarg ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Badarg error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, badarg);
-        error:function_clause ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("function_clause error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, function_clause);
-        Tag:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error)
-    end,
-    RequestTime = round(timer:now_diff(now(), Begin)/1000),
-    couch_stats_collector:record({couchdb, request_time}, RequestTime),
-    couch_stats_collector:increment({httpd, requests}),
-    {ok, Resp}.
-
-% Try authentication handlers in order until one sets a user_ctx.
-% The auth funs also have the option of returning a response.
-% Move this to couch_httpd_auth?
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthHandlers) ->
-    Req;
-authenticate_request(#httpd{} = Req, []) ->
-    case config:get("couch_httpd_auth", "require_valid_user", "false") of
-    "true" ->
-        throw({unauthorized, <<"Authentication required.">>});
-    "false" ->
-        Req#httpd{user_ctx=#user_ctx{}}
-    end;
-authenticate_request(#httpd{} = Req, [{AuthFun, AuthSrc} | RestAuthHandlers]) ->
-    R = case AuthFun(Req) of
-        #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 ->
-            Req2#httpd{user_ctx=UserCtx#user_ctx{handler=AuthSrc}};
-        Else -> Else
-    end,
-    authenticate_request(R, RestAuthHandlers);
-authenticate_request(Response, _AuthSrcs) ->
-    Response.
-
-increment_method_stats(Method) ->
-    couch_stats_collector:increment({httpd_request_methods, Method}).
-
-validate_referer(Req) ->
-    Host = host_for_request(Req),
-    Referer = header_value(Req, "Referer", fail),
-    case Referer of
-    fail ->
-        throw({bad_request, <<"Referer header required.">>});
-    Referer ->
-        {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
-        if
-            RefererHost =:= Host -> ok;
-            true -> throw({bad_request, <<"Referer header must match host.">>})
-        end
-    end.
-
-validate_ctype(Req, Ctype) ->
-    case header_value(Req, "Content-Type") of
-    undefined ->
-        throw({bad_ctype, "Content-Type must be "++Ctype});
-    ReqCtype ->
-        case string:tokens(ReqCtype, ";") of
-        [Ctype] -> ok;
-        [Ctype, _Rest] -> ok;
-        _Else ->
-            throw({bad_ctype, "Content-Type must be "++Ctype})
-        end
-    end.
-
-% Utilities
-
-partition(Path) ->
-    mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
-    case MochiReq:get_header_value(Key) of
-    undefined -> Default;
-    Value -> Value
-    end.
-
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_primary_header_value(Key).
-
-accepted_encodings(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:accepted_encodings(["gzip", "identity"]) of
-    bad_accept_encoding_value ->
-        throw(bad_accept_encoding_value);
-    [] ->
-        throw(unacceptable_encoding);
-    EncList ->
-        EncList
-    end.
-
-serve_file(Req, RelativePath, DocumentRoot) ->
-    serve_file(Req, RelativePath, DocumentRoot, []).
-
-serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot,
-           ExtraHeaders) ->
-    log_request(Req, 200),
-    ResponseHeaders = server_header()
-        ++ couch_httpd_auth:cookie_auth_header(Req, [])
-        ++ ExtraHeaders,
-    {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
-            couch_httpd_cors:cors_headers(Req, ResponseHeaders))}.
-
-qs_value(Req, Key) ->
-    qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
-    couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
-    case qs_value(Req, Key, Default) of
-    Default ->
-        Default;
-    Result ->
-        ?JSON_DECODE(Result)
-    end.
-
-qs(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:parse_qs().
-
-path(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(path).
-
-host_for_request(#httpd{mochi_req=MochiReq}) ->
-    XHost = config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined ->
-                    {ok, {Address, Port}} = case MochiReq:get(socket) of
-                        {ssl, SslSocket} -> ssl:sockname(SslSocket);
-                        Socket -> inet:sockname(Socket)
-                    end,
-                    inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
-                Value1 ->
-                    Value1
-            end;
-        Value -> Value
-    end.
-
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
-    Host = host_for_request(Req),
-    XSsl = config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
-    Scheme = case MochiReq:get_header_value(XSsl) of
-                 "on" -> "https";
-                 _ ->
-                     XProto = config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"),
-                     case MochiReq:get_header_value(XProto) of
-                         %% Restrict to "https" and "http" schemes only
-                         "https" -> "https";
-                         _ -> case MochiReq:get(scheme) of
-                                  https -> "https";
-                                  http -> "http"
-                              end
-                     end
-             end,
-    Scheme ++ "://" ++ Host ++ Path.
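-
-% Editor's note (illustrative, not from the original source): behind a
-% TLS-terminating proxy that sets "X-Forwarded-Host: db.example.com" and
-% "X-Forwarded-Proto: https", absolute_uri(Req, "/_session") yields
-% "https://db.example.com/_session" even though CouchDB itself spoke plain HTTP.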
-
-unquote(UrlEncodedString) ->
-    mochiweb_util:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
-    mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req=MochiReq}) ->
-    mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req=MochiReq}, Len) ->
-    MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
-    % ChunkFun is called once per chunk, as ChunkFun({Length, Binary}, State),
-    % and one final time with Length == 0 to signal the last chunk.
-    MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-
-body_length(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(body_length).
-
-body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
-    MaxSize = list_to_integer(
-        config:get("couchdb", "max_document_size", "4294967296")),
-    MochiReq:recv_body(MaxSize);
-body(#httpd{req_body=ReqBody}) ->
-    ReqBody.
-
-json_body(Httpd) ->
-    ?JSON_DECODE(body(Httpd)).
-
-json_body_obj(Httpd) ->
-    case json_body(Httpd) of
-        {Props} -> {Props};
-        _Else ->
-            throw({bad_request, "Request body must be a JSON object"})
-    end.
-
-
-
-doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
-    "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
-
-make_etag(Term) ->
-    <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)),
-    iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
-    etag_match(Req, binary_to_list(CurrentEtag));
-
-etag_match(Req, CurrentEtag) ->
-    EtagsToMatch = string:tokens(
-        header_value(Req, "If-None-Match", ""), ", "),
-    lists:member(CurrentEtag, EtagsToMatch).
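-
-% Editor's note: make_etag/1 is the MD5 of the term rendered in base 36 with
-% surrounding quotes, e.g. <<"\"1B0B879D...\"">> (value here is made up), and
-% etag_match/2 compares that quoted string against each entry of the
-% request's If-None-Match header.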
-
-etag_respond(Req, CurrentEtag, RespFun) ->
-    case etag_match(Req, CurrentEtag) of
-    true ->
-        % The client already has this response in its cache.
-        send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
-    false ->
-        % Run the function.
-        RespFun()
-    end.
-
-etag_maybe(Req, RespFun) ->
-    try
-        RespFun()
-    catch
-        throw:{etag_match, ETag} ->
-            send_response(Req, 304, [{"ETag", ETag}], <<>>)
-    end.
-
-verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
-    verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles=Roles}) ->
-    case lists:member(<<"_admin">>, Roles) of
-    true -> ok;
-    false -> throw({unauthorized, <<"You are not a server admin.">>})
-    end.
-
-log_request(#httpd{mochi_req=MochiReq,peer=Peer}, Code) ->
-    ?LOG_INFO("~s - - ~s ~s ~B", [
-        Peer,
-        MochiReq:get(method),
-        MochiReq:get(raw_path),
-        Code
-    ]).
-
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = Headers ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response_length({Code, Headers2, Length}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    CookieHeader = couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers1 = Headers ++ server_header() ++ CookieHeader,
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response({Code, Headers2}),
-    case MochiReq:get(method) of
-        'HEAD' -> throw({http_head_abort, Resp});
-        _ -> ok
-    end,
-    {ok, Resp}.
-
-send(Resp, Data) ->
-    Resp:send(Data),
-    {ok, Resp}.
-
-no_resp_conn_header([]) ->
-    true;
-no_resp_conn_header([{Hdr, _}|Rest]) ->
-    case string:to_lower(Hdr) of
-        "connection" -> false;
-        _ -> no_resp_conn_header(Rest)
-    end.
-
-http_1_0_keep_alive(Req, Headers) ->
-    KeepOpen = Req:should_close() == false,
-    IsHttp10 = Req:get(version) == {1, 0},
-    NoRespHeader = no_resp_conn_header(Headers),
-    case KeepOpen andalso IsHttp10 andalso NoRespHeader of
-        true -> [{"Connection", "Keep-Alive"} | Headers];
-        false -> Headers
-    end.
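-
-% Editor's note: HTTP/1.0 closes connections by default, so when mochiweb
-% intends to keep the socket open (should_close() == false) the response must
-% advertise "Connection: Keep-Alive" explicitly, unless a Connection header
-% is already present.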
-
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-    Resp = MochiReq:respond({Code, Headers3, chunked}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-send_chunk(Resp, Data) ->
-    case iolist_size(Data) of
-    0 -> ok; % do nothing
-    _ -> Resp:write_chunk(Data)
-    end,
-    {ok, Resp}.
-
-last_chunk(Resp) ->
-    Resp:write_chunk([]),
-    {ok, Resp}.
-
-send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    if Code >= 500 ->
-        ?LOG_ERROR("httpd ~p error response:~n ~s", [Code, Body]);
-    Code >= 400 ->
-        ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
-    true -> ok
-    end,
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-
-    {ok, MochiReq:respond({Code, Headers3, Body})}.
-
-send_method_not_allowed(Req, Methods) ->
-    send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
-
-send_json(Req, Value) ->
-    send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
-    send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    Body = [start_jsonp(), ?JSON_ENCODE(Value), end_jsonp(), $\n],
-    send_response(Req, Code, DefaultHeaders ++ Headers, Body).
-
-start_json_response(Req, Code) ->
-    start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
-    case start_jsonp() of
-        [] -> ok;
-        Start -> send_chunk(Resp, Start)
-    end,
-    {ok, Resp}.
-
-end_json_response(Resp) ->
-    send_chunk(Resp, end_jsonp() ++ [$\n]),
-    last_chunk(Resp).
-
-initialize_jsonp(Req) ->
-    case get(jsonp) of
-        undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
-        _ -> ok
-    end,
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack ->
-            try
-                % make sure JSONP is enabled in the config (off by default)
-                case config:get("httpd", "allow_jsonp", "false") of
-                "true" ->
-                    validate_callback(CallBack);
-                _Else ->
-                    put(jsonp, no_jsonp)
-                end
-            catch
-                Error ->
-                    put(jsonp, no_jsonp),
-                    throw(Error)
-            end
-    end.
-
-start_jsonp() ->
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack -> ["/* CouchDB */", CallBack, "("]
-    end.
-
-end_jsonp() ->
-    case erlang:erase(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        _ -> ");"
-    end.
-
-validate_callback(CallBack) when is_binary(CallBack) ->
-    validate_callback(binary_to_list(CallBack));
-validate_callback([]) ->
-    ok;
-validate_callback([Char | Rest]) ->
-    case Char of
-        _ when Char >= $a andalso Char =< $z -> ok;
-        _ when Char >= $A andalso Char =< $Z -> ok;
-        _ when Char >= $0 andalso Char =< $9 -> ok;
-        _ when Char == $. -> ok;
-        _ when Char == $_ -> ok;
-        _ when Char == $[ -> ok;
-        _ when Char == $] -> ok;
-        _ ->
-            throw({bad_request, invalid_callback})
-    end,
-    validate_callback(Rest).
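-
-% Editor's note (illustrative, not from the original source): only
-% [A-Za-z0-9._[]] is accepted, so "app.cb[0]" is a valid JSONP callback while
-% "alert(1)" or "cb;evil()" throw {bad_request, invalid_callback}.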
-
-
-error_info({Error, Reason}) when is_list(Reason) ->
-    error_info({Error, ?l2b(Reason)});
-error_info(bad_request) ->
-    {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
-    {400, <<"bad_request">>, Reason};
-error_info({query_parse_error, Reason}) ->
-    {400, <<"query_parse_error">>, Reason};
-% Prior art for md5 mismatch resulting in a 400 is from AWS S3
-error_info(md5_mismatch) ->
-    {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
-error_info(not_found) ->
-    {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
-    {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
-    {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
-    {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Msg}) ->
-    {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
-    {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
-    {412, <<"file_exists">>, <<"The database could not be "
-        "created, the file already exists.">>};
-error_info(request_entity_too_large) ->
-    {413, <<"too_large">>, <<"the request entity is too large">>};
-error_info({bad_ctype, Reason}) ->
-    {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
-    {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, illegal_database_name, Name}) ->
-    Message = "Name: '" ++ Name ++ "'. Only lowercase characters (a-z), "
-        ++ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
-        ++ "are allowed. Must begin with a letter.",
-    {400, <<"illegal_database_name">>, couch_util:to_binary(Message)};
-error_info({missing_stub, Reason}) ->
-    {412, <<"missing_stub">>, Reason};
-error_info({Error, Reason}) ->
-    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info(Error) ->
-    {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
-    if Code == 401 ->
-        % this is where the basic auth popup is triggered
-        case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
-        undefined ->
-            case config:get("httpd", "WWW-Authenticate", nil) of
-            nil ->
-                % If the client is a browser and the basic auth popup isn't
-                % turned on, redirect to the session page.
-                case ErrorStr of
-                <<"unauthorized">> ->
-                    case config:get("couch_httpd_auth", "authentication_redirect", nil) of
-                    nil -> {Code, []};
-                    AuthRedirect ->
-                        case config:get("couch_httpd_auth", "require_valid_user", "false") of
-                        "true" ->
-                            % always send the browser popup header when require_valid_user is set
-                            {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
-                        _False ->
-                            case MochiReq:accepts_content_type("application/json") of
-                            true ->
-                                {Code, []};
-                            false ->
-                                case MochiReq:accepts_content_type("text/html") of
-                                true ->
-                                    % Redirect to the path the user requested, not
-                                    % the one that is used internally.
-                                    UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                                    undefined ->
-                                        MochiReq:get(path);
-                                    VHostPath ->
-                                        VHostPath
-                                    end,
-                                    RedirectLocation = lists:flatten([
-                                        AuthRedirect,
-                                        "?return=", couch_util:url_encode(UrlReturnRaw),
-                                        "&reason=", couch_util:url_encode(ReasonStr)
-                                    ]),
-                                    {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
-                                false ->
-                                    {Code, []}
-                                end
-                            end
-                        end
-                    end;
-                _Else ->
-                    {Code, []}
-                end;
-            Type ->
-                {Code, [{"WWW-Authenticate", Type}]}
-            end;
-        Type ->
-           {Code, [{"WWW-Authenticate", Type}]}
-        end;
-    true ->
-        {Code, []}
-    end.
-
-send_error(_Req, {already_sent, Resp, _Error}) ->
-    {ok, Resp};
-
-send_error(Req, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
-    send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
-    send_error(Req, Code, [], ErrorStr, ReasonStr).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
-    send_json(Req, Code, Headers,
-        {[{<<"error">>,  ErrorStr},
-         {<<"reason">>, ReasonStr}]}).
-
-% Give list functions the option to output HTML or other raw error bodies.
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
-    send_chunk(Resp, Reason),
-    last_chunk(Resp);
-
-send_chunked_error(Resp, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    JsonError = {[{<<"code">>, Code},
-        {<<"error">>,  ErrorStr},
-        {<<"reason">>, ReasonStr}]},
-    send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
-    last_chunk(Resp).
-
-send_redirect(Req, Path) ->
-     send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
-
-negotiate_content_type(Req) ->
-    case get(jsonp) of
-        no_jsonp -> negotiate_content_type1(Req);
-        [] -> negotiate_content_type1(Req);
-        _Callback -> "text/javascript"
-    end.
-
-negotiate_content_type1(#httpd{mochi_req=MochiReq}) ->
-    %% Determine the appropriate Content-Type header for a JSON response
-    %% depending on the Accept header in the request. A request that explicitly
-    %% lists the correct JSON MIME type will get that type; otherwise the
-    %% response falls back to the generic "text/plain; charset=utf-8".
-    AcceptedTypes = case MochiReq:get_header_value("Accept") of
-        undefined       -> [];
-        AcceptHeader    -> string:tokens(AcceptHeader, ", ")
-    end,
-    case lists:member("application/json", AcceptedTypes) of
-        true  -> "application/json";
-        false -> "text/plain; charset=utf-8"
-    end.
-
-server_header() ->
-    [{"Server", "CouchDB/" ++ couch_server:get_version() ++
-                " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
-
-
--record(mp, {boundary, buffer, data_fun, callback}).
-
-
-parse_multipart_request(ContentType, DataFun, Callback) ->
-    Boundary0 = iolist_to_binary(get_boundary(ContentType)),
-    Boundary = <<"\r\n--", Boundary0/binary>>,
-    Mp = #mp{boundary= Boundary,
-            buffer= <<>>,
-            data_fun=DataFun,
-            callback=Callback},
-    {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
-        fun nil_callback/1),
-    #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
-            parse_part_header(Mp2),
-    {Buffer, DataFun2, Callback2}.
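-
-% Editor's note: the parser drives Callback as a re-entrant fun. For each MIME
-% part it is invoked with {headers, Headers}, then {body, Data} zero or more
-% times, then body_end; after the final part it receives eof. Every invocation
-% must return the fun to be used for the next event.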
-
-nil_callback(_Data)->
-    fun nil_callback/1.
-
-get_boundary({"multipart/" ++ _, Opts}) ->
-    case couch_util:get_value("boundary", Opts) of
-        S when is_list(S) ->
-            S
-    end;
-get_boundary(ContentType) ->
-    {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
-    get_boundary({"multipart/", Opts}).
-
-
-
-split_header(<<>>) ->
-    [];
-split_header(Line) ->
-    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
-                                           binary_to_list(Line)),
-    [{string:to_lower(string:strip(Name)),
-     mochiweb_util:parse_header(Value)}].
-
-read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
-    case find_in_binary(Pattern, Buffer) of
-    not_found ->
-        Callback2 = Callback(Buffer),
-        {Buffer2, DataFun2} = DataFun(),
-        Buffer3 = iolist_to_binary(Buffer2),
-        read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
-    {partial, 0} ->
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Buffer,NewData])},
-                Pattern, Callback);
-    {partial, Skip} ->
-        <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Rest | NewData])},
-                Pattern, Callback2);
-    {exact, 0} ->
-        PatternLen = size(Pattern),
-        <<_:PatternLen/binary, Rest/binary>> = Buffer,
-        {Mp#mp{buffer= Rest}, Callback};
-    {exact, Skip} ->
-        PatternLen = size(Pattern),
-        <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {Mp#mp{buffer= Rest}, Callback2}
-    end.
-
-
-parse_part_header(#mp{callback=UserCallBack}=Mp) ->
-    {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
-            fun(Next) -> acc_callback(Next, []) end),
-    HeaderData = AccCallback(get_data),
-
-    Headers =
-    lists:foldl(fun(Line, Acc) ->
-            split_header(Line) ++ Acc
-        end, [], re:split(HeaderData,<<"\r\n">>, [])),
-    NextCallback = UserCallBack({headers, Headers}),
-    parse_part_body(Mp2#mp{callback=NextCallback}).
-
-parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
-    {Mp2, WrappedCallback} = read_until(Mp, Prefix,
-            fun(Data) -> body_callback_wrapper(Data, Callback) end),
-    Callback2 = WrappedCallback(get_callback),
-    Callback3 = Callback2(body_end),
-    case check_for_last(Mp2#mp{callback=Callback3}) of
-    {last, #mp{callback=Callback3}=Mp3} ->
-        Mp3#mp{callback=Callback3(eof)};
-    {more, Mp3} ->
-        parse_part_header(Mp3)
-    end.
-
-acc_callback(get_data, Acc)->
-    iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc)->
-    fun(Next) -> acc_callback(Next, [Data | Acc]) end.
-
-body_callback_wrapper(get_callback, Callback) ->
-    Callback;
-body_callback_wrapper(Data, Callback) ->
-    Callback2 = Callback({body, Data}),
-    fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-
-check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
-    case Buffer of
-    <<"--",_/binary>> -> {last, Mp};
-    <<_, _, _/binary>> -> {more, Mp};
-    _ -> % not long enough
-        {Data, DataFun2} = DataFun(),
-        check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
-                data_fun = DataFun2})
-    end.
-
-find_in_binary(_B, <<>>) ->
-    not_found;
-
-find_in_binary(B, Data) ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), 1);
-    {Pos, _Len} ->
-        {exact, Pos}
-    end.
-
-partial_find(<<>>, _Data, _Pos) ->
-    not_found;
-
-partial_find(B, Data, N) when byte_size(Data) > 0 ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), N + 1);
-    {Pos, _Len} ->
-        {partial, N + Pos}
-    end;
-
-partial_find(_B, _Data, _N) ->
-    not_found.
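-
-% Editor's note (illustrative, not from the original source): find_in_binary/2
-% reports where the boundary pattern sits in the buffer, e.g.
-%
-%   find_in_binary(<<"--x">>, <<"data--x">>) -> {exact, 4}
-%   find_in_binary(<<"--x">>, <<"data--">>)  -> {partial, 4}
-%
-% {partial, N} means a prefix of the pattern starts at offset N and the rest
-% may arrive in the next packet, so read_until/3 keeps those bytes buffered.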
-
-
-validate_bind_address(Address) ->
-    case inet_parse:address(Address) of
-        {ok, _} -> ok;
-        _ -> throw({error, invalid_bind_address})
-    end.


[31/49] Remove src/ibrowse

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/3822d8f4/src/ibrowse/src/ibrowse_http_client.erl
----------------------------------------------------------------------
diff --git a/src/ibrowse/src/ibrowse_http_client.erl b/src/ibrowse/src/ibrowse_http_client.erl
deleted file mode 100644
index d021d6f..0000000
--- a/src/ibrowse/src/ibrowse_http_client.erl
+++ /dev/null
@@ -1,1921 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% File    : ibrowse_http_client.erl
-%%% Author  : Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%% Description : HTTP client process; one gen_server per connection
-%%%
-%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <ch...@t-mobile.co.uk>
-%%%-------------------------------------------------------------------
--module(ibrowse_http_client).
--behaviour(gen_server).
-%%--------------------------------------------------------------------
-%% Include files
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% External exports
--export([
-         start_link/1,
-         start/1,
-         stop/1,
-         send_req/7
-        ]).
-
--ifdef(debug).
--compile(export_all).
--endif.
-
-%% gen_server callbacks
--export([
-         init/1,
-         handle_call/3,
-         handle_cast/2,
-         handle_info/2,
-         terminate/2,
-         code_change/3
-        ]).
-
--include_lib("ibrowse/include/ibrowse.hrl").
--include_lib("kernel/include/inet.hrl").
-
--record(state, {host, port, connect_timeout,
-                inactivity_timer_ref,
-                use_proxy = false, proxy_auth_digest,
-                ssl_options = [], is_ssl = false, socket,
-                proxy_tunnel_setup = false,
-                tunnel_setup_queue = [],
-                reqs=queue:new(), cur_req, status=idle, http_status_code,
-                reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
-                recvd_headers=[],
-                status_line, raw_headers,
-                is_closing, content_length,
-                deleted_crlf = false, transfer_encoding,
-                chunk_size, chunk_size_buffer = <<>>,
-                recvd_chunk_size, interim_reply_sent = false,
-                lb_ets_tid, cur_pipeline_size = 0, prev_req_id
-               }).
-
--record(request, {url, method, options, from,
-                  stream_to, caller_controls_socket = false,
-                  caller_socket_options = [],
-                  req_id,
-                  stream_chunk_size,
-                  save_response_to_file = false,
-                  tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
-                  response_format, timer_ref}).
-
--import(ibrowse_lib, [
-                      get_value/2,
-                      get_value/3,
-                      do_trace/2
-                     ]).
-
--define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
--define(dec2hex(X), erlang:integer_to_list(X, 16)).
-%%====================================================================
-%% External functions
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: start/1, start_link/1
-%% Description: Starts the server
-%%--------------------------------------------------------------------
-start(Args) ->
-    gen_server:start(?MODULE, Args, []).
-
-start_link(Args) ->
-    gen_server:start_link(?MODULE, Args, []).
-
-stop(Conn_pid) ->
-    case catch gen_server:call(Conn_pid, stop) of
-        {'EXIT', {timeout, _}} ->
-            exit(Conn_pid, kill),
-            ok;
-        _ ->
-            ok
-    end.
-
-send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
-    gen_server:call(
-      Conn_Pid,
-      {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout).
-
-%%====================================================================
-%% Server functions
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init/1
-%% Description: Initiates the server
-%% Returns: {ok, State}          |
-%%          {ok, State, Timeout} |
-%%          ignore               |
-%%          {stop, Reason}
-%%--------------------------------------------------------------------
-init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
-    State = #state{host = Host,
-                   port = Port,
-                   ssl_options = SSLOptions,
-                   is_ssl = Is_ssl,
-                   lb_ets_tid = Lb_Tid},
-    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
-    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    {ok, set_inac_timer(State)};
-init(Url) when is_list(Url) ->
-    case catch ibrowse_lib:parse_url(Url) of
-        #url{protocol = Protocol} = Url_rec ->
-            init({undefined, Url_rec, {[], Protocol == https}});
-        {'EXIT', _} ->
-            {error, invalid_url}
-    end;
-init({Host, Port}) ->
-    State = #state{host = Host,
-                   port = Port},
-    put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
-    put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
-    {ok, set_inac_timer(State)}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_call/3
-%% Description: Handling call messages
-%% Returns: {reply, Reply, State}          |
-%%          {reply, Reply, State, Timeout} |
-%%          {noreply, State}               |
-%%          {noreply, State, Timeout}      |
-%%          {stop, Reason, Reply, State}   | (terminate/2 is called)
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-%% Received a request when the remote server has already sent us a
-%% Connection: Close header
-handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
-    {reply, {error, connection_closing}, State};
-
-handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
-            From, State) ->
-    send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
-
-handle_call(stop, _From, State) ->
-    do_close(State),
-    do_error_reply(State, closing_on_request),
-    {stop, normal, ok, State};
-
-handle_call(Request, _From, State) ->
-    Reply = {unknown_request, Request},
-    {reply, Reply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast/2
-%% Description: Handling cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info/2
-%% Description: Handling all non call/cast messages
-%% Returns: {noreply, State}          |
-%%          {noreply, State, Timeout} |
-%%          {stop, Reason, State}            (terminate/2 is called)
-%%--------------------------------------------------------------------
-handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
-    do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
-    handle_sock_data(Data, State);
-handle_info({ssl, _Sock, Data}, State) ->
-    handle_sock_data(Data, State);
-
-handle_info({stream_next, Req_id}, #state{socket = Socket,
-                                          cur_req = #request{req_id = Req_id}} = State) ->
-    do_setopts(Socket, [{active, once}], State),
-    {noreply, set_inac_timer(State)};
-
-handle_info({stream_next, _Req_id}, State) ->
-    _Cur_req_id = case State#state.cur_req of
-                     #request{req_id = Cur} ->
-                         Cur;
-                     _ ->
-                         undefined
-                 end,
-    {noreply, State};
-
-handle_info({stream_close, _Req_id}, State) ->
-    shutting_down(State),
-    do_close(State),
-    do_error_reply(State, closing_on_request),
-    {stop, normal, State};
-
-handle_info({tcp_closed, _Sock}, State) ->
-    do_trace("TCP connection closed by peer!~n", []),
-    handle_sock_closed(State),
-    {stop, normal, State};
-handle_info({ssl_closed, _Sock}, State) ->
-    do_trace("SSL connection closed by peer!~n", []),
-    handle_sock_closed(State),
-    {stop, normal, State};
-
-handle_info({tcp_error, _Sock, Reason}, State) ->
-    do_trace("Error on connection to ~1000.p:~1000.p -> ~1000.p~n",
-             [State#state.host, State#state.port, Reason]),
-    handle_sock_closed(State),
-    {stop, normal, State};
-handle_info({ssl_error, _Sock, Reason}, State) ->
-    do_trace("Error on SSL connection to ~1000.p:~1000.p -> ~1000.p~n",
-             [State#state.host, State#state.port, Reason]),
-    handle_sock_closed(State),
-    {stop, normal, State};
-
-handle_info({req_timedout, From}, State) ->
-    case lists:keymember(From, #request.from, queue:to_list(State#state.reqs)) of
-        false ->
-            {noreply, State};
-        true ->
-            shutting_down(State),
-%%            do_error_reply(State, req_timedout),
-            {stop, normal, State}
-    end;
-
-handle_info(timeout, State) ->
-    do_trace("Inactivity timeout triggered. Shutting down connection~n", []),
-    shutting_down(State),
-    do_error_reply(State, req_timedout),
-    {stop, normal, State};
-
-handle_info({trace, Bool}, State) ->
-    put(my_trace_flag, Bool),
-    {noreply, State};
-
-handle_info(Info, State) ->
-    io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
-              [State#state.host, State#state.port, Info]),
-    io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: terminate/2
-%% Description: Shutdown the server
-%% Returns: any (ignored by gen_server)
-%%--------------------------------------------------------------------
-terminate(_Reason, State) ->
-    do_close(State),
-    ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change/3
-%% Purpose: Convert process state when code is changed
-%% Returns: {ok, NewState}
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% Handles data recvd on the socket
-%%--------------------------------------------------------------------
-handle_sock_data(Data, #state{status=idle}=State) ->
-    do_trace("Data recvd on socket in state idle!. ~1000.p~n", [Data]),
-    shutting_down(State),
-    do_error_reply(State, data_in_status_idle),
-    do_close(State),
-    {stop, normal, State};
-
-handle_sock_data(Data, #state{status = get_header}=State) ->
-    case parse_response(Data, State) of
-        {error, _Reason} ->
-            shutting_down(State),
-            {stop, normal, State};
-        #state{socket = Socket, status = Status, cur_req = CurReq} = State_1 ->
-            case {Status, CurReq} of
-                {get_header, #request{caller_controls_socket = true}} ->
-                    do_setopts(Socket, [{active, once}], State_1);
-                _ ->
-                    active_once(State_1)
-            end,
-            {noreply, set_inac_timer(State_1)}
-    end;
-
-handle_sock_data(Data, #state{status           = get_body,
-                              socket           = Socket,
-                              content_length   = CL,
-                              http_status_code = StatCode,
-                              recvd_headers    = Headers,
-                              chunk_size       = CSz} = State) ->
-    case (CL == undefined) and (CSz == undefined) of
-        true ->
-            case accumulate_response(Data, State) of
-                {error, Reason} ->
-                    shutting_down(State),
-                    fail_pipelined_requests(State,
-                                            {error, {Reason, {stat_code, StatCode}, Headers}}),
-                    {stop, normal, State};
-                State_1 ->
-                    active_once(State_1),
-                    State_2 = set_inac_timer(State_1),
-                    {noreply, State_2}
-            end;
-        _ ->
-            case parse_11_response(Data, State) of
-                {error, Reason} ->
-                    shutting_down(State),
-                    fail_pipelined_requests(State,
-                                            {error, {Reason, {stat_code, StatCode}, Headers}}),
-                    {stop, normal, State};
-                #state{cur_req = #request{caller_controls_socket = Ccs},
-                       interim_reply_sent = Irs} = State_1 ->
-                    case Irs of
-                        true ->
-                            active_once(State_1);
-                        false when Ccs == true ->
-                            do_setopts(Socket, [{active, once}], State);
-                        false ->
-                            active_once(State_1)
-                    end,
-                    State_2 = State_1#state{interim_reply_sent = false},
-                    case Ccs of
-                    true ->
-                        cancel_timer(State_2#state.inactivity_timer_ref, {eat_message, timeout}),
-                        {noreply, State_2#state{inactivity_timer_ref = undefined}};
-                    _ ->
-                        {noreply, set_inac_timer(State_2)}
-                    end;
-                State_1 ->
-                    active_once(State_1),
-                    State_2 = set_inac_timer(State_1),
-                    {noreply, State_2}
-            end
-    end.
-
-accumulate_response(Data,
-                    #state{
-                      cur_req = #request{save_response_to_file = Srtf,
-                                         tmp_file_fd = undefined} = CurReq,
-                      http_status_code=[$2 | _]}=State) when Srtf /= false ->
-    TmpFilename = make_tmp_filename(Srtf),
-    Mode = file_mode(Srtf),
-    case file:open(TmpFilename, [Mode, delayed_write, raw]) of
-        {ok, Fd} ->
-            accumulate_response(Data, State#state{
-                                        cur_req = CurReq#request{
-                                                    tmp_file_fd = Fd,
-                                                    tmp_file_name = TmpFilename}});
-        {error, Reason} ->
-            {error, {file_open_error, Reason}}
-    end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
-                                                    tmp_file_fd = Fd},
-                                 transfer_encoding=chunked,
-                                 reply_buffer = Reply_buf,
-                                 http_status_code=[$2 | _]
-                                } = State) when Srtf /= false ->
-    case file:write(Fd, [Reply_buf, Data]) of
-        ok ->
-            State#state{reply_buffer = <<>>};
-        {error, Reason} ->
-            {error, {file_write_error, Reason}}
-    end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
-                                                    tmp_file_fd = Fd},
-                                 reply_buffer = RepBuf,
-                                 http_status_code=[$2 | _]
-                                } = State) when Srtf /= false ->
-    case file:write(Fd, [RepBuf, Data]) of
-        ok ->
-            State#state{reply_buffer = <<>>};
-        {error, Reason} ->
-            {error, {file_write_error, Reason}}
-    end;
-accumulate_response(Data, #state{reply_buffer      = RepBuf,
-                                 rep_buf_size      = RepBufSize,
-                                 streamed_size     = Streamed_size,
-                                 cur_req           = CurReq}=State) ->
-    #request{stream_to                 = StreamTo,
-             req_id                    = ReqId,
-             stream_chunk_size         = Stream_chunk_size,
-             response_format           = Response_format,
-             caller_controls_socket    = Caller_controls_socket} = CurReq,
-    RepBuf_1 = <<RepBuf/binary, Data/binary>>,
-    New_data_size = RepBufSize - Streamed_size,
-    case StreamTo of
-        undefined ->
-            State#state{reply_buffer = RepBuf_1};
-        _ when Caller_controls_socket == true ->
-            do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
-            State#state{reply_buffer = <<>>,
-                        interim_reply_sent = true,
-                        streamed_size = Streamed_size + size(RepBuf_1)};
-        _ when New_data_size >= Stream_chunk_size ->
-            {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
-            do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
-            State_1 = State#state{
-                        reply_buffer = <<>>,
-                        interim_reply_sent = true,
-                        streamed_size = Streamed_size + Stream_chunk_size},
-            case Rem_data of
-                <<>> ->
-                    State_1;
-                _ ->
-                    accumulate_response(Rem_data, State_1)
-            end;
-        _ ->
-            State#state{reply_buffer = RepBuf_1}
-    end.
-
-make_tmp_filename(true) ->
-    DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
-    {A,B,C} = now(),
-    filename:join([DownloadDir,
-                   "ibrowse_tmp_file_"++
-                   integer_to_list(A) ++
-                   integer_to_list(B) ++
-                   integer_to_list(C)]);
-make_tmp_filename(File) when is_list(File) ->
-    File;
-make_tmp_filename({append, File}) when is_list(File) ->
-    File.
-
-file_mode({append, _File}) -> append;
-file_mode(_Srtf) -> write.
-
-
-%%--------------------------------------------------------------------
-%% Handles the case when the server closes the socket
-%%--------------------------------------------------------------------
-handle_sock_closed(#state{status=get_header} = State) ->
-    shutting_down(State),
-    do_error_reply(State, connection_closed);
-
-handle_sock_closed(#state{cur_req=undefined} = State) ->
-    shutting_down(State);
-
-%% We check IsClosing because the server could have sent a
-%% "Connection: close" header and closed the socket to indicate the end of
-%% the response. There may be pipelined requests still awaiting a response.
-handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
-                          is_closing = IsClosing,
-                          cur_req = #request{tmp_file_name=TmpFilename,
-                                             tmp_file_fd=Fd} = CurReq,
-                          status = get_body,
-                          recvd_headers = Headers,
-                          status_line = Status_line,
-                          raw_headers = Raw_headers
-                         }=State) ->
-    #request{from=From, stream_to=StreamTo, req_id=ReqId,
-             response_format = Resp_format,
-             options = Options} = CurReq,
-    case IsClosing of
-        true ->
-            {_, Reqs_1} = queue:out(Reqs),
-            Body = case TmpFilename of
-                       undefined ->
-                           Buf;
-                       _ ->
-                           ok = file:close(Fd),
-                           {file, TmpFilename}
-                   end,
-            Reply = case get_value(give_raw_headers, Options, false) of
-                        true ->
-                            {ok, Status_line, Raw_headers, Body};
-                        false ->
-                            {ok, SC, Headers, Buf}
-                    end,
-            State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-            ok = do_error_reply(State_1#state{reqs = Reqs_1}, connection_closed),
-            State_1;
-        _ ->
-            ok = do_error_reply(State, connection_closed),
-            State
-    end.
-
-do_connect(Host, Port, Options, #state{is_ssl      = true,
-                                       use_proxy   = false,
-                                       ssl_options = SSLOptions},
-           Timeout) ->
-    ssl:connect(Host, Port, get_sock_options(Host, Options, SSLOptions), Timeout);
-do_connect(Host, Port, Options, _State, Timeout) ->
-    gen_tcp:connect(Host, Port, get_sock_options(Host, Options, []), Timeout).
-
-get_sock_options(Host, Options, SSLOptions) ->
-    Caller_socket_options = get_value(socket_options, Options, []),
-    Ipv6Options = case is_ipv6_host(Host) of
-        true ->
-            [inet6];
-        false ->
-            []
-    end,
-    Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options ++ Ipv6Options),
-    case lists:keysearch(nodelay, 1, Other_sock_options) of
-        false ->
-            [{nodelay, true}, binary, {active, false} | Other_sock_options];
-        {value, _} ->
-            [binary, {active, false} | Other_sock_options]
-    end.
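-
-% Editor's note: sockets are always opened in binary mode with {active, false};
-% nodelay defaults to true but can be overridden through the caller's
-% socket_options, and inet6 is added automatically for IPv6 hosts.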
-
-is_ipv6_host(Host) ->
-    case inet_parse:address(Host) of
-        {ok, {_, _, _, _, _, _, _, _}} ->
-            true;
-        {ok, {_, _, _, _}} ->
-            false;
-        _  ->
-            case inet:gethostbyname(Host) of
-                {ok, #hostent{h_addrtype = inet6}} ->
-                    true;
-                _ ->
-                    false
-            end
-    end.
-
-%% We don't want the caller to specify certain options
-filter_sock_options(Opts) ->
-    lists:filter(fun({active, _}) ->
-                         false;
-                    ({packet, _}) ->
-                         false;
-                    (list) ->
-                         false;
-                    (_) ->
-                         true
-                 end, Opts).
-
-do_send(Req, #state{socket = Sock,
-                    is_ssl = true,
-                    use_proxy = true,
-                    proxy_tunnel_setup = Pts}) when Pts /= done ->  gen_tcp:send(Sock, Req);
-do_send(Req, #state{socket = Sock, is_ssl = true})  ->  ssl:send(Sock, Req);
-do_send(Req, #state{socket = Sock, is_ssl = false}) ->  gen_tcp:send(Sock, Req).
-
-%% @spec do_send_body(Sock::socket_descriptor(), Source::source_descriptor(), IsSSL::boolean()) -> ok | error()
-%% source_descriptor() = fun_arity_0           |
-%%                       {fun_arity_0}         |
-%%                       {fun_arity_1, term()}
-%% error() = term()
-do_send_body(Source, State, TE) when is_function(Source) ->
-    do_send_body({Source}, State, TE);
-do_send_body({Source}, State, TE) when is_function(Source) ->
-    do_send_body1(Source, Source(), State, TE);
-do_send_body({Source, Source_state}, State, TE) when is_function(Source) ->
-    do_send_body1(Source, Source(Source_state), State, TE);
-do_send_body(Body, State, _TE) ->
-    do_send(Body, State).
-
-do_send_body1(Source, Resp, State, TE) ->
-    case Resp of
-        {ok, Data} when Data == []; Data == <<>> ->
-            do_send_body({Source}, State, TE);
-        {ok, Data} ->
-            do_send(maybe_chunked_encode(Data, TE), State),
-            do_send_body({Source}, State, TE);
-        {ok, Data, New_source_state} when Data == []; Data == <<>> ->
-            do_send_body({Source, New_source_state}, State, TE);
-        {ok, Data, New_source_state} ->
-            do_send(maybe_chunked_encode(Data, TE), State),
-            do_send_body({Source, New_source_state}, State, TE);
-        eof when TE == true ->
-            do_send(<<"0\r\n\r\n">>, State),
-            ok;
-        eof ->
-            ok;
-        Err ->
-            Err
-    end.
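-
-% Editor's sketch (illustrative, not from the original source): the body may be
-% a {Fun, State} producer returning {ok, Data, NewState} or eof. With chunked
-% transfer-encoding requested (TE == true), each piece is framed by
-% maybe_chunked_encode/2 and a final "0\r\n\r\n" terminates the stream:
-%
-%   Source = fun(0) -> eof;
-%               (N) -> {ok, lists:duplicate(1024, $x), N - 1}
-%            end,
-%   do_send_body({Source, 4}, State, true).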
-
-maybe_chunked_encode(Data, false) ->
-    Data;
-maybe_chunked_encode(Data, true) ->
-    [?dec2hex(iolist_size(Data)), "\r\n", Data, "\r\n"].
-
-do_close(#state{socket = undefined})            ->  ok;
-do_close(#state{socket = Sock,
-                is_ssl = true,
-                use_proxy = true,
-                proxy_tunnel_setup = Pts
-               }) when Pts /= done ->  catch gen_tcp:close(Sock);
-do_close(#state{socket = Sock, is_ssl = true})  ->  catch ssl:close(Sock);
-do_close(#state{socket = Sock, is_ssl = false}) ->  catch gen_tcp:close(Sock).
-
-active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
-    ok;
-active_once(#state{socket = Socket} = State) ->
-    do_setopts(Socket, [{active, once}], State).
-
-do_setopts(_Sock, [],   _)    ->  ok;
-do_setopts(Sock, Opts, #state{is_ssl = true,
-                              use_proxy = true,
-                              proxy_tunnel_setup = Pts}
-                             ) when Pts /= done ->  inet:setopts(Sock, Opts);
-do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
-do_setopts(Sock, Opts, _) ->  inet:setopts(Sock, Opts).
-
-check_ssl_options(Options, State) ->
-    case get_value(is_ssl, Options, false) of
-        false ->
-            State;
-        true ->
-            State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
-    end.
-
-send_req_1(From,
-           #url{host = Host,
-                port = Port} = Url,
-           Headers, Method, Body, Options, Timeout,
-           #state{socket = undefined} = State) ->
-    {Host_1, Port_1, State_1} =
-        case get_value(proxy_host, Options, false) of
-            false ->
-                {Host, Port, State};
-            PHost ->
-                ProxyUser     = get_value(proxy_user, Options, []),
-                ProxyPassword = get_value(proxy_password, Options, []),
-                Digest        = http_auth_digest(ProxyUser, ProxyPassword),
-                {PHost, get_value(proxy_port, Options, 80),
-                 State#state{use_proxy = true,
-                             proxy_auth_digest = Digest}}
-        end,
-    State_2 = check_ssl_options(Options, State_1),
-    do_trace("Connecting...~n", []),
-    Conn_timeout = get_value(connect_timeout, Options, Timeout),
-    case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
-        {ok, Sock} ->
-            do_trace("Connected! Socket: ~1000.p~n", [Sock]),
-            State_3 = State_2#state{socket = Sock,
-                                    connect_timeout = Conn_timeout},
-            send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State_3);
-        Err ->
-            shutting_down(State_2),
-            do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
-            gen_server:reply(From, {error, {conn_failed, Err}}),
-            {stop, normal, State_2}
-    end;
-
-%% Send a CONNECT request.
-%% Wait for 200 OK
-%% Upgrade to SSL connection
-%% Then send request
-
-send_req_1(From,
-           #url{
-                host    = Server_host,
-                port    = Server_port
-                } = Url,
-           Headers, Method, Body, Options, Timeout,
-           #state{
-                  proxy_tunnel_setup = false,
-                  use_proxy = true,
-                  is_ssl    = true} = State) ->
-    Ref = case Timeout of
-              infinity ->
-                  undefined;
-              _ ->
-                  erlang:send_after(Timeout, self(), {req_timedout, From})
-          end,
-    NewReq = #request{
-      method                    = connect,
-      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
-      options                   = Options,
-      timer_ref                 = Ref
-     },
-    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
-    Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
-    Path = [Server_host, $:, integer_to_list(Server_port)],
-    {Req, Body_1} = make_request(connect, Pxy_auth_headers,
-                                 Path, Path,
-                                 [], Options, State_1, undefined),
-    TE = is_chunked_encoding_specified(Options),
-    trace_request(Req),
-    case do_send(Req, State) of
-        ok ->
-            case do_send_body(Body_1, State_1, TE) of
-                ok ->
-                    trace_request_body(Body_1),
-                    active_once(State_1),
-                    State_1_1 = inc_pipeline_counter(State_1),
-                    State_2 = State_1_1#state{status     = get_header,
-                                              cur_req    = NewReq,
-                                              proxy_tunnel_setup = in_progress,
-                                              tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
-                    State_3 = set_inac_timer(State_2),
-                    {noreply, State_3};
-                Err ->
-                    shutting_down(State_1),
-                    do_trace("Send failed... Reason: ~p~n", [Err]),
-                    gen_server:reply(From, {error, {send_failed, Err}}),
-                    {stop, normal, State_1}
-            end;
-        Err ->
-            shutting_down(State_1),
-            do_trace("Send failed... Reason: ~p~n", [Err]),
-            gen_server:reply(From, {error, {send_failed, Err}}),
-            {stop, normal, State_1}
-    end;
-
-send_req_1(From, Url, Headers, Method, Body, Options, Timeout,
-           #state{proxy_tunnel_setup = in_progress,
-                  tunnel_setup_queue = Q} = State) ->
-    do_trace("Queued SSL request awaiting tunnel setup: ~n"
-             "URL     : ~s~n"
-             "Method  : ~p~n"
-             "Headers : ~p~n", [Url, Method, Headers]),
-    {noreply, State#state{tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout} | Q]}};
-
-send_req_1(From,
-           #url{abspath = AbsPath,
-                path    = RelPath} = Url,
-           Headers, Method, Body, Options, Timeout,
-           #state{status    = Status,
-                  socket    = Socket} = State) ->
-    cancel_timer(State#state.inactivity_timer_ref, {eat_message, timeout}),
-    ReqId = make_req_id(),
-    Resp_format = get_value(response_format, Options, list),
-    Caller_socket_options = get_value(socket_options, Options, []),
-    {StreamTo, Caller_controls_socket} =
-        case get_value(stream_to, Options, undefined) of
-            {Caller, once} when is_pid(Caller) or
-                                is_atom(Caller) ->
-                Async_pid_rec = {{req_id_pid, ReqId}, self()},
-                true = ets:insert(ibrowse_stream, Async_pid_rec),
-                {Caller, true};
-            undefined ->
-                {undefined, false};
-            Caller when is_pid(Caller) or
-                        is_atom(Caller) ->
-                {Caller, false};
-            Stream_to_inv ->
-                exit({invalid_option, {stream_to, Stream_to_inv}})
-        end,
-    SaveResponseToFile = get_value(save_response_to_file, Options, false),
-    Ref = case Timeout of
-              infinity ->
-                  undefined;
-              _ ->
-                  erlang:send_after(Timeout, self(), {req_timedout, From})
-          end,
-    NewReq = #request{url                    = Url,
-                      method                 = Method,
-                      stream_to              = StreamTo,
-                      caller_controls_socket = Caller_controls_socket,
-                      caller_socket_options  = Caller_socket_options,
-                      options                = Options,
-                      req_id                 = ReqId,
-                      save_response_to_file  = SaveResponseToFile,
-                      stream_chunk_size      = get_stream_chunk_size(Options),
-                      response_format        = Resp_format,
-                      from                   = From,
-                      preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
-                      timer_ref              = Ref
-                     },
-    State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
-    Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
-    {Req, Body_1} = make_request(Method,
-                                 Headers_1,
-                                 AbsPath, RelPath, Body, Options, State_1,
-                                 ReqId),
-    trace_request(Req),
-    do_setopts(Socket, Caller_socket_options, State_1),
-    TE = is_chunked_encoding_specified(Options),
-    case do_send(Req, State_1) of
-        ok ->
-            case do_send_body(Body_1, State_1, TE) of
-                ok ->
-                    trace_request_body(Body_1),
-                    State_2 = inc_pipeline_counter(State_1),
-                    active_once(State_2),
-                    State_3 = case Status of
-                                  idle ->
-                                      State_2#state{status     = get_header,
-                                                    cur_req    = NewReq};
-                                  _ ->
-                                      State_2
-                              end,
-                    case StreamTo of
-                        undefined ->
-                            ok;
-                        _ ->
-                            gen_server:reply(From, {ibrowse_req_id, ReqId})
-                    end,
-                    State_4 = set_inac_timer(State_3),
-                    {noreply, State_4};
-                Err ->
-                    shutting_down(State_1),
-                    do_trace("Send failed... Reason: ~p~n", [Err]),
-                    gen_server:reply(From, {error, {send_failed, Err}}),
-                    {stop, normal, State_1}
-            end;
-        Err ->
-            shutting_down(State_1),
-            do_trace("Send failed... Reason: ~p~n", [Err]),
-            gen_server:reply(From, {error, {send_failed, Err}}),
-            {stop, normal, State_1}
-    end.
-
-maybe_modify_headers(#url{}, connect, _, Headers, State) ->
-    add_proxy_auth_headers(State, Headers);
-maybe_modify_headers(#url{host = Host, port = Port} = Url,
-                     _Method,
-                     Options, Headers, State) ->
-    case get_value(headers_as_is, Options, false) of
-        false ->
-            Headers_1 = add_auth_headers(Url, Options, Headers, State),
-            HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
-                                  false ->
-                                      case Port of
-                                          80 -> Host;
-                                          443 -> Host;
-                                          _ -> [Host, ":", integer_to_list(Port)]
-                                      end;
-                                  {value, {_, Host_h_val}} ->
-                                      Host_h_val
-                              end,
-            [{"Host", HostHeaderValue} | Headers_1];
-        true ->
-            Headers
-    end.
-
-add_auth_headers(#url{username = User,
-                      password = UPw},
-                 Options,
-                 Headers,
-                 State) ->
-    Headers_1 = case User of
-                    undefined ->
-                        case get_value(basic_auth, Options, undefined) of
-                            undefined ->
-                                Headers;
-                            {U,P} ->
-                                [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
-                        end;
-                    _ ->
-                        [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
-                end,
-    add_proxy_auth_headers(State, Headers_1).
-
-add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
-    Headers;
-add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
-    Headers;
-add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
-    [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
-
-http_auth_digest([], []) ->
-    [];
-http_auth_digest(Username, Password) ->
-    ibrowse_lib:encode_base64(Username ++ [$: | Password]).
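-
-%% Editor's illustration (not part of the original module): for example,
-%% http_auth_digest("user", "pass") returns the base64 encoding of
-%% "user:pass", i.e. "dXNlcjpwYXNz", which is sent on the wire as
-%%     Authorization: Basic dXNlcjpwYXNz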
-
-make_request(Method, Headers, AbsPath, RelPath, Body, Options,
-             #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) ->
-    HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
-    Fun1 = fun({X, Y}) when is_atom(X) ->
-                   {to_lower(atom_to_list(X)), X, Y};
-              ({X, Y}) when is_list(X) ->
-                   {to_lower(X), X, Y}
-           end,
-    Headers_0 = [Fun1(X) || X <- Headers],
-    Headers_1 =
-        case lists:keysearch("content-length", 1, Headers_0) of
-            false when (Body =:= [] orelse Body =:= <<>>) andalso
-                       (Method =:= post orelse Method =:= put) ->
-                [{"content-length", "Content-Length", "0"} | Headers_0];
-            false when is_binary(Body) orelse is_list(Body) ->
-                [{"content-length", "Content-Length", integer_to_list(iolist_size(Body))} | Headers_0];
-            _ ->
-                %% Content-Length is already specified or Body is a
-                %% function or function/state pair
-                Headers_0
-        end,
-    {Headers_2, Body_1} =
-        case is_chunked_encoding_specified(Options) of
-            false ->
-                {[{Y, Z} || {_, Y, Z} <- Headers_1], Body};
-            true ->
-                Chunk_size_1 = case get_value(transfer_encoding, Options) of
-                                  chunked ->
-                                      5120;
-                                  {chunked, Chunk_size} ->
-                                      Chunk_size
-                              end,
-                {[{Y, Z} || {X, Y, Z} <- Headers_1,
-                            X /= "content-length"] ++
-                 [{"Transfer-Encoding", "chunked"}],
-                 chunk_request_body(Body, Chunk_size_1)}
-        end,
-    Headers_3 = case lists:member({include_ibrowse_req_id, true}, Options) of
-                    true ->
-                        [{"x-ibrowse-request-id", io_lib:format("~1000.p",[ReqId])} | Headers_2];
-                    false ->
-                        Headers_2
-                end,
-    Headers_4 = cons_headers(Headers_3),
-    Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
-              true ->
-                  case Is_ssl of
-                      true ->
-                          RelPath;
-                      false ->
-                          AbsPath
-                  end;
-              false ->
-                  RelPath
-          end,
-    {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_4, crnl()], Body_1}.
-
-is_chunked_encoding_specified(Options) ->
-    case get_value(transfer_encoding, Options, false) of
-        false ->
-            false;
-        {chunked, _} ->
-            true;
-        chunked ->
-            true
-    end.
-
-http_vsn_string({0,9}) -> "HTTP/0.9";
-http_vsn_string({1,0}) -> "HTTP/1.0";
-http_vsn_string({1,1}) -> "HTTP/1.1".
-
-cons_headers(Headers) ->
-    cons_headers(Headers, []).
-cons_headers([], Acc) ->
-    encode_headers(Acc);
-cons_headers([{basic_auth, {U,P}} | T], Acc) ->
-    cons_headers(T, [{"Authorization",
-                      ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
-cons_headers([{cookie, Cookie} | T], Acc) ->
-    cons_headers(T, [{"Cookie", Cookie} | Acc]);
-cons_headers([{content_length, L} | T], Acc) ->
-    cons_headers(T, [{"Content-Length", L} | Acc]);
-cons_headers([{content_type, L} | T], Acc) ->
-    cons_headers(T, [{"Content-Type", L} | Acc]);
-cons_headers([H | T], Acc) ->
-    cons_headers(T, [H | Acc]).
-
-encode_headers(L) ->
-    encode_headers(L, []).
-encode_headers([{http_vsn, _Val} | T], Acc) ->
-    encode_headers(T, Acc);
-encode_headers([{Name,Val} | T], Acc) when is_list(Name) ->
-    encode_headers(T, [[Name, ": ", fmt_val(Val), crnl()] | Acc]);
-encode_headers([{Name,Val} | T], Acc) when is_atom(Name) ->
-    encode_headers(T, [[atom_to_list(Name), ": ", fmt_val(Val), crnl()] | Acc]);
-encode_headers([], Acc) ->
-    lists:reverse(Acc).
-
-chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse
-                                          is_function(Body) ->
-    Body;
-chunk_request_body(Body, ChunkSize) ->
-    chunk_request_body(Body, ChunkSize, []).
-
-chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
-    LastChunk = "0\r\n",
-    lists:reverse(["\r\n", LastChunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
-                                              size(Body) >= ChunkSize ->
-    <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
-    Chunk = [?dec2hex(ChunkSize),"\r\n",
-             ChunkBody, "\r\n"],
-    chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
-chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
-    BodySize = size(Body),
-    Chunk = [?dec2hex(BodySize),"\r\n",
-             Body, "\r\n"],
-    LastChunk = "0\r\n",
-    lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
-    {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
-    Chunk = [?dec2hex(ChunkSize),"\r\n",
-             ChunkBody, "\r\n"],
-    chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
-chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
-    BodySize = length(Body),
-    Chunk = [?dec2hex(BodySize),"\r\n",
-             Body, "\r\n"],
-    LastChunk = "0\r\n",
-    lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
-
-
-parse_response(<<>>, #state{cur_req = undefined}=State) ->
-    State#state{status = idle};
-parse_response(Data, #state{cur_req = undefined}) ->
-    do_trace("Data left to process when no pending request. ~1000.p~n", [Data]),
-    {error, data_in_status_idle};
-
-parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
-                            cur_req = CurReq} = State) ->
-    #request{from=From, stream_to=StreamTo, req_id=ReqId,
-             method=Method, response_format = Resp_format,
-             options = Options, timer_ref = T_ref
-            } = CurReq,
-    MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
-    case scan_header(Acc, Data) of
-        {yes, Headers, Data_1}  ->
-            do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
-            do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
-            {HttpVsn, StatCode, Headers_1, Status_line, Raw_headers} = parse_headers(Headers),
-            do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
-            LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
-            ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
-            IsClosing = is_connection_closing(HttpVsn, ConnClose),
-            State_0 = case IsClosing of
-                          true ->
-                              shutting_down(State),
-                              State#state{is_closing = IsClosing};
-                          false ->
-                              State
-                      end,
-            Give_raw_headers = get_value(give_raw_headers, Options, false),
-            State_1 = case Give_raw_headers of
-                          true ->
-                              State_0#state{recvd_headers=Headers_1, status=get_body,
-                                            reply_buffer = <<>>,
-                                            status_line = Status_line,
-                                            raw_headers = Raw_headers,
-                                            http_status_code=StatCode};
-                          false ->
-                              State_0#state{recvd_headers=Headers_1, status=get_body,
-                                            reply_buffer = <<>>,
-                                            http_status_code=StatCode}
-                      end,
-            put(conn_close, ConnClose),
-            TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
-            Head_response_with_body = lists:member({workaround, head_response_with_body}, Options),
-            case get_value("content-length", LCHeaders, undefined) of
-                _ when Method == connect,
-                       hd(StatCode) == $2 ->
-                    {_, Reqs_1} = queue:out(Reqs),
-                    cancel_timer(T_ref),
-                    upgrade_to_ssl(set_cur_request(State_0#state{reqs = Reqs_1,
-                                                                 recvd_headers = [],
-                                                                 status = idle
-                                                                }));
-                _ when Method == connect ->
-                    {_, Reqs_1} = queue:out(Reqs),
-                    do_error_reply(State#state{reqs = Reqs_1},
-                                   {error, proxy_tunnel_failed}),
-                    {error, proxy_tunnel_failed};
-                _ when Method =:= head,
-                       Head_response_with_body =:= false ->
-                    %% This (HEAD response with body) is not supposed
-                    %% to happen, but it does. An Apache server was
-                    %% observed to send an "empty" body, but in a
-                    %% Chunked-Transfer-Encoding way, which meant
-                    %% there was still a body.  Issue #67 on Github
-                    {_, Reqs_1} = queue:out(Reqs),
-                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
-                                         {ok, StatCode, Headers_1, []}),
-                    cancel_timer(T_ref, {eat_message, {req_timedout, From}}),
-                    State_2 = reset_state(State_1_1),
-                    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
-                    parse_response(Data_1, State_3);
-                _ when hd(StatCode) =:= $1 ->
-                    %% No message body is expected. Server may send
-                    %% one or more 1XX responses before a proper
-                    %% response.
-                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                    do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
-                    parse_response(Data_1, State_1#state{recvd_headers = [],
-                                                         status = get_header});
-                _ when StatCode =:= "204";
-                       StatCode =:= "304" ->
-                    %% No message body is expected for these Status Codes.
-                    %% RFC2616 - Sec 4.4
-                    {_, Reqs_1} = queue:out(Reqs),
-                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                    State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
-                                         {ok, StatCode, Headers_1, []}),
-                    cancel_timer(T_ref, {eat_message, {req_timedout, From}}),
-                    State_2 = reset_state(State_1_1),
-                    State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
-                    parse_response(Data_1, State_3);
-                _ when TransferEncoding =:= "chunked" ->
-                    do_trace("Chunked encoding detected...~n",[]),
-                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                    case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
-                                                                 chunk_size=chunk_start,
-                                                                 reply_buffer = <<>>}) of
-                        {error, Reason} ->
-                            fail_pipelined_requests(State_1,
-                                                    {error, {Reason,
-                                                             {stat_code, StatCode}, Headers_1}}),
-                            {error, Reason};
-                        State_2 ->
-                            State_2
-                    end;
-                undefined when HttpVsn =:= "HTTP/1.0";
-                               ConnClose =:= "close" ->
-                    send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                    State_1#state{reply_buffer = Data_1};
-                undefined ->
-                    fail_pipelined_requests(State_1,
-                                            {error, {content_length_undefined,
-                                                     {stat_code, StatCode}, Headers}}),
-                    {error, content_length_undefined};
-                V ->
-                    case catch list_to_integer(V) of
-                        V_1 when is_integer(V_1), V_1 >= 0 ->
-                            send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
-                            do_trace("Recvd Content-Length of ~p~n", [V_1]),
-                            State_2 = State_1#state{rep_buf_size=0,
-                                                    reply_buffer = <<>>,
-                                                    content_length=V_1},
-                            case parse_11_response(Data_1, State_2) of
-                                {error, Reason} ->
-                                    fail_pipelined_requests(State_1,
-                                                            {error, {Reason,
-                                                                     {stat_code, StatCode}, Headers_1}}),
-                                    {error, Reason};
-                                State_3 ->
-                                    State_3
-                            end;
-                        _ ->
-                            fail_pipelined_requests(State_1,
-                                                    {error, {content_length_undefined,
-                                                             {stat_code, StatCode}, Headers}}),
-                            {error, content_length_undefined}
-                    end
-            end;
-        {no, Acc_1} when MaxHeaderSize == infinity ->
-            State#state{reply_buffer = Acc_1};
-        {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
-            State#state{reply_buffer = Acc_1};
-        {no, _Acc_1} ->
-            fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
-            {error, max_headers_size_exceeded}
-    end.
-
-upgrade_to_ssl(#state{socket = Socket,
-                      connect_timeout = Conn_timeout,
-                      ssl_options = Ssl_options,
-                      tunnel_setup_queue = Q} = State) ->
-    case ssl:connect(Socket, Ssl_options, Conn_timeout) of
-        {ok, Ssl_socket} ->
-            do_trace("Upgraded to SSL socket!!~n", []),
-            State_1 = State#state{socket = Ssl_socket,
-                                  proxy_tunnel_setup = done},
-            send_queued_requests(lists:reverse(Q), State_1);
-        Err ->
-            do_trace("Upgrade to SSL socket failed. Reson: ~p~n", [Err]),
-            do_error_reply(State, {error, {send_failed, Err}}),
-            {error, send_failed}
-    end.
-
-send_queued_requests([], State) ->
-    do_trace("Sent all queued requests via SSL connection~n", []),
-    State#state{tunnel_setup_queue = []};
-send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
-                     State) ->
-    case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
-        {noreply, State_1} ->
-            send_queued_requests(Q, State_1);
-        Err ->
-            do_trace("Error sending queued SSL request: ~n"
-                     "URL     : ~s~n"
-                     "Method  : ~p~n"
-                     "Headers : ~p~n", [Url, Method, Headers]),
-            do_error_reply(State, {error, {send_failed, Err}}),
-            {error, send_failed}
-    end.
-
-is_connection_closing("HTTP/0.9", _)       -> true;
-is_connection_closing(_, "close")          -> true;
-is_connection_closing("HTTP/1.0", "false") -> true;
-is_connection_closing(_, _)                -> false.
-
-%% This clause determines the chunk size when given data from the beginning of the chunk
-parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked,
-                         chunk_size        = chunk_start,
-                         chunk_size_buffer = Chunk_sz_buf
-                        } = State) ->
-    case scan_crlf(Chunk_sz_buf, DataRecvd) of
-        {yes, ChunkHeader, Data_1} ->
-            State_1 = maybe_accumulate_ce_data(State, <<ChunkHeader/binary, $\r, $\n>>),
-            ChunkSize = parse_chunk_header(ChunkHeader),
-            %%
-            %% Do we have to preserve the chunk encoding when
-            %% streaming? NO. This should be transparent to the client
-            %% process. Chunked encoding was only introduced to make
-            %% it efficient for the server.
-            %%
-            RemLen = size(Data_1),
-            do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
-                     [ChunkSize, RemLen]),
-            parse_11_response(Data_1, State_1#state{chunk_size_buffer = <<>>,
-                                                    deleted_crlf = true,
-                                                    recvd_chunk_size = 0,
-                                                    chunk_size = ChunkSize});
-        {no, Data_1} ->
-            State#state{chunk_size_buffer = Data_1}
-    end;
-
-%% This clause is to remove the CRLF between two chunks
-%%
-parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked,
-                         chunk_size = tbd,
-                         chunk_size_buffer = Buf
-                        } = State) ->
-    case scan_crlf(Buf, DataRecvd) of
-        {yes, _, NextChunk} ->
-            State_1 = maybe_accumulate_ce_data(State, <<$\r, $\n>>),
-            State_2 = State_1#state{chunk_size = chunk_start,
-                                    chunk_size_buffer = <<>>,
-                                    deleted_crlf = true},
-            parse_11_response(NextChunk, State_2);
-        {no, Data_1} ->
-            State#state{chunk_size_buffer = Data_1}
-    end;
-
-%% This clause deals with the end of a chunked transfer. ibrowse does
-%% not support Trailers in the Chunked Transfer encoding. Any trailer
-%% received is silently discarded.
-parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked, chunk_size = 0,
-                         cur_req           = CurReq,
-                         deleted_crlf      = DelCrlf,
-                         chunk_size_buffer = Trailer,
-                         reqs              = Reqs} = State) ->
-    do_trace("Detected end of chunked transfer...~n", []),
-    DataRecvd_1 = case DelCrlf of
-                      false ->
-                          DataRecvd;
-                      true ->
-                          <<$\r, $\n, DataRecvd/binary>>
-                  end,
-    case scan_header(Trailer, DataRecvd_1) of
-        {yes, TEHeaders, Rem} ->
-            {_, Reqs_1} = queue:out(Reqs),
-            State_1 = maybe_accumulate_ce_data(State, <<TEHeaders/binary, $\r, $\n>>),
-            State_2 = handle_response(CurReq,
-                                      State_1#state{reqs = Reqs_1}),
-            parse_response(Rem, reset_state(State_2));
-        {no, Rem} ->
-            accumulate_response(<<>>, State#state{chunk_size_buffer = Rem, deleted_crlf = false})
-    end;
-
-%% This clause extracts a chunk, given the size.
-parse_11_response(DataRecvd,
-                  #state{transfer_encoding = chunked,
-                         chunk_size = CSz,
-                         recvd_chunk_size = Recvd_csz,
-                         rep_buf_size = RepBufSz} = State) ->
-    NeedBytes = CSz - Recvd_csz,
-    DataLen = size(DataRecvd),
-    do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
-    case DataLen >= NeedBytes of
-        true ->
-            {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
-            do_trace("Recvd another chunk...~p~n", [RemChunk]),
-            do_trace("RemData -> ~p~n", [RemData]),
-            case accumulate_response(RemChunk, State) of
-                {error, Reason} ->
-                    do_trace("Error accumulating response --> ~p~n", [Reason]),
-                    {error, Reason};
-                #state{} = State_1 ->
-                    State_2 = State_1#state{chunk_size=tbd},
-                    parse_11_response(RemData, State_2)
-            end;
-        false ->
-            accumulate_response(DataRecvd,
-                                State#state{rep_buf_size = RepBufSz + DataLen,
-                                            recvd_chunk_size = Recvd_csz + DataLen})
-    end;
-
-%% This clause to extract the body when Content-Length is specified
-parse_11_response(DataRecvd,
-                  #state{content_length=CL, rep_buf_size=RepBufSz,
-                         reqs=Reqs}=State) ->
-    NeedBytes = CL - RepBufSz,
-    DataLen = size(DataRecvd),
-    case DataLen >= NeedBytes of
-        true ->
-            {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
-            {_, Reqs_1} = queue:out(Reqs),
-            State_1 = accumulate_response(RemBody, State),
-            State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
-            State_3 = reset_state(State_2),
-            parse_response(Rem, State_3);
-        false ->
-            accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
-    end.
-
-maybe_accumulate_ce_data(#state{cur_req = #request{preserve_chunked_encoding = false}} = State, _) ->
-    State;
-maybe_accumulate_ce_data(State, Data) ->
-    accumulate_response(Data, State).
-
-handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-                         response_format = Resp_format,
-                         save_response_to_file = SaveResponseToFile,
-                         tmp_file_name = TmpFilename,
-                         tmp_file_fd = Fd,
-                         options       = Options,
-                         timer_ref     = ReqTimer
-                        },
-                #state{http_status_code = SCode,
-                       status_line   = Status_line,
-                       raw_headers   = Raw_headers,
-                       reply_buffer  = RepBuf,
-                       recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
-    Body = RepBuf,
-    case Fd of
-        undefined ->
-            ok;
-        _ ->
-            ok = file:close(Fd)
-    end,
-    ResponseBody = case TmpFilename of
-                       undefined ->
-                           Body;
-                       _ ->
-                           {file, TmpFilename}
-                   end,
-    {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(RespHeaders, Raw_headers, Options),
-    Reply = case get_value(give_raw_headers, Options, false) of
-                true ->
-                    {ok, Status_line, Raw_headers_1, ResponseBody};
-                false ->
-                    {ok, SCode, Resp_headers_1, ResponseBody}
-            end,
-    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-    set_cur_request(State_1);
-handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-                         response_format = Resp_format,
-                         options = Options, timer_ref = ReqTimer},
-                #state{http_status_code = SCode,
-                       status_line      = Status_line,
-                       raw_headers      = Raw_headers,
-                       recvd_headers    = Resp_headers,
-                       reply_buffer     = RepBuf
-                      } = State) ->
-    Body = RepBuf,
-    {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
-    Reply = case get_value(give_raw_headers, Options, false) of
-                true ->
-                    {ok, Status_line, Raw_headers_1, Body};
-                false ->
-                    {ok, SCode, Resp_headers_1, Body}
-            end,
-    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-    cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
-    set_cur_request(State_1).
-
-reset_state(State) ->
-    State#state{status            = get_header,
-                rep_buf_size      = 0,
-                streamed_size     = 0,
-                content_length    = undefined,
-                reply_buffer      = <<>>,
-                chunk_size_buffer = <<>>,
-                recvd_headers     = [],
-                status_line       = undefined,
-                raw_headers       = undefined,
-                deleted_crlf      = false,
-                http_status_code  = undefined,
-                chunk_size        = undefined,
-                transfer_encoding = undefined
-               }.
-
-set_cur_request(#state{reqs = Reqs, socket = Socket} = State) ->
-    case queue:peek(Reqs) of
-        empty ->
-            State#state{cur_req = undefined};
-        {value, #request{caller_controls_socket = Ccs} = NextReq} ->
-            case Ccs of
-                true ->
-                    do_setopts(Socket, [{active, once}], State);
-                _ ->
-                    ok
-            end,
-            State#state{cur_req = NextReq}
-    end.
-
-parse_headers(Headers) ->
-    case scan_crlf(Headers) of
-        {yes, StatusLine, T} ->
-            parse_headers(StatusLine, T);
-        {no, StatusLine} ->
-            parse_headers(StatusLine, <<>>)
-    end.
-
-parse_headers(StatusLine, Headers) ->
-    Headers_1 = parse_headers_1(Headers),
-    case parse_status_line(StatusLine) of
-        {ok, HttpVsn, StatCode, _Msg} ->
-            put(http_prot_vsn, HttpVsn),
-            {HttpVsn, StatCode, Headers_1, StatusLine, Headers};
-        _ -> %% A HTTP 0.9 response?
-            put(http_prot_vsn, "HTTP/0.9"),
-            {"HTTP/0.9", undefined, Headers, StatusLine, Headers}
-    end.
-
-% From RFC 2616
-%
-%    HTTP/1.1 header field values can be folded onto multiple lines if
-%    the continuation line begins with a space or horizontal tab. All
-%    linear white space, including folding, has the same semantics as
-%    SP. A recipient MAY replace any linear white space with a single
-%    SP before interpreting the field value or forwarding the message
-%    downstream.
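-%% As an illustration (editor's sketch; the header name is made up):
-%% the folded header
-%%     X-Sample: first part\r\n
-%%      second part\r\n
-%% is parsed by parse_headers_1/3 below as the single header
-%% {"X-Sample", "first part second part"}.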
-parse_headers_1(B) when is_binary(B) ->
-    parse_headers_1(binary_to_list(B));
-parse_headers_1(String) ->
-    parse_headers_1(String, [], []).
-
-parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
-                                                  H =:= $\t ->
-    parse_headers_1(lists:dropwhile(fun(X) ->
-                                            is_whitespace(X)
-                                    end, T), [32 | L], Acc);
-parse_headers_1([$\n, H |T], L, Acc) when H =:= 32;
-                                          H =:= $\t ->
-    parse_headers_1(lists:dropwhile(fun(X) ->
-                                            is_whitespace(X)
-                                    end, T), [32 | L], Acc);
-parse_headers_1([$\n|T], [$\r | L], Acc) ->
-    case parse_header(lists:reverse(L)) of
-        invalid ->
-            parse_headers_1(T, [], Acc);
-        NewHeader ->
-            parse_headers_1(T, [], [NewHeader | Acc])
-    end;
-parse_headers_1([$\n|T], L, Acc) ->
-    case parse_header(lists:reverse(L)) of
-        invalid ->
-            parse_headers_1(T, [], Acc);
-        NewHeader ->
-            parse_headers_1(T, [], [NewHeader | Acc])
-    end;
-parse_headers_1([H|T],  L, Acc) ->
-    parse_headers_1(T, [H|L], Acc);
-parse_headers_1([], [], Acc) ->
-    lists:reverse(Acc);
-parse_headers_1([], L, Acc) ->
-    Acc_1 = case parse_header(lists:reverse(L)) of
-                invalid ->
-                    Acc;
-                NewHeader ->
-                    [NewHeader | Acc]
-            end,
-    lists:reverse(Acc_1).
-
-parse_status_line(Line) when is_binary(Line) ->
-    parse_status_line(binary_to_list(Line));
-parse_status_line(Line) ->
-    parse_status_line(Line, get_prot_vsn, [], []).
-parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
-    parse_status_line(T, get_status_code, ProtVsn, StatCode);
-parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) ->
-    {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T};
-parse_status_line([], get_status_code, ProtVsn, StatCode) ->
-    {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), []};
-parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) ->
-    parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode);
-parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
-    parse_status_line(T, get_status_code, ProtVsn, [H | StatCode]);
-parse_status_line([], _, _, _) ->
-    http_09.
-
-parse_header(L) ->
-    parse_header(L, []).
-
-parse_header([$: | V], Acc) ->
-    {lists:reverse(Acc), string:strip(V)};
-parse_header([H | T], Acc) ->
-    parse_header(T, [H | Acc]);
-parse_header([], _) ->
-    invalid.
-
-scan_header(Bin) ->
-    case get_crlf_crlf_pos(Bin, 0) of
-        {yes, Pos} ->
-            {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
-            {yes, Headers, Body};
-        {yes_dodgy, Pos} ->
-            {Headers, <<_:2/binary, Body/binary>>} = split_binary(Bin, Pos),
-            {yes, Headers, Body};
-        no ->
-            {no, Bin}
-    end.
-
-scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
-    scan_header(<<Bin1/binary, Bin2/binary>>);
-scan_header(Bin1, <<>>) ->
-    scan_header(Bin1);
-scan_header(Bin1, Bin2) ->
-    Bin1_already_scanned_size = size(Bin1) - 4,
-    <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
-    Bin_to_scan = <<Rest/binary, Bin2/binary>>,
-    case get_crlf_crlf_pos(Bin_to_scan, 0) of
-        {yes, Pos} ->
-            {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
-            {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
-        {yes_dodgy, Pos} ->
-            {Headers_suffix, <<_:2/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
-            {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
-        no ->
-            {no, <<Bin1/binary, Bin2/binary>>}
-    end.
-
-get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
-get_crlf_crlf_pos(<<$\n, $\n, _/binary>>, Pos)           -> {yes_dodgy, Pos};
-get_crlf_crlf_pos(<<_, Rest/binary>>, Pos)               -> get_crlf_crlf_pos(Rest, Pos + 1);
-get_crlf_crlf_pos(<<>>, _)                               -> no.
-
-scan_crlf(Bin) ->
-    case get_crlf_pos(Bin) of
-        {yes, Offset, Pos} ->
-            {Prefix, <<_:Offset/binary, Suffix/binary>>} = split_binary(Bin, Pos),
-            {yes, Prefix, Suffix};
-        no ->
-            {no, Bin}
-    end.
-
-scan_crlf(<<>>, Bin2) ->
-    scan_crlf(Bin2);
-scan_crlf(Bin1, Bin2) when size(Bin1) < 2 ->
-    scan_crlf(<<Bin1/binary, Bin2/binary>>);
-scan_crlf(Bin1, Bin2) ->
-    scan_crlf_1(size(Bin1) - 2, Bin1, Bin2).
-
-scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
-    <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
-    Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
-    case get_crlf_pos(Bin3) of
-        {yes, Offset, Pos} ->
-            {Prefix, <<_:Offset/binary, Suffix/binary>>} = split_binary(Bin3, Pos),
-            {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
-        no ->
-            {no, list_to_binary([Bin1, Bin2])}
-    end.
-
-get_crlf_pos(Bin) ->
-    get_crlf_pos(Bin, 0).
-
-get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, 2, Pos};
-get_crlf_pos(<<$\n, _/binary>>, Pos) ->      {yes, 1, Pos};
-get_crlf_pos(<<_, Rest/binary>>, Pos)     -> get_crlf_pos(Rest, Pos + 1);
-get_crlf_pos(<<>>, _)                     -> no.
-
-fmt_val(L) when is_list(L)    -> L;
-fmt_val(I) when is_integer(I) -> integer_to_list(I);
-fmt_val(A) when is_atom(A)    -> atom_to_list(A);
-fmt_val(Term)                 -> io_lib:format("~p", [Term]).
-
-crnl() -> "\r\n".
-
-method(connect)     -> "CONNECT";
-method(delete)      -> "DELETE";
-method(get)         -> "GET";
-method(head)        -> "HEAD";
-method(options)     -> "OPTIONS";
-method(post)        -> "POST";
-method(put)         -> "PUT";
-method(trace)       -> "TRACE";
-%% webdav
-method(copy)        -> "COPY";
-method(lock)        -> "LOCK";
-method(mkcol)       -> "MKCOL";
-method(move)        -> "MOVE";
-method(propfind)    -> "PROPFIND";
-method(proppatch)   -> "PROPPATCH";
-method(search)      -> "SEARCH";
-method(unlock)      -> "UNLOCK";
-%% subversion %%
-method(report)      -> "REPORT";
-method(mkactivity)  -> "MKACTIVITY";
-method(checkout)    -> "CHECKOUT";
-method(merge)       -> "MERGE";
-%% upnp
-method(msearch)     -> "MSEARCH";
-method(notify)      -> "NOTIFY";
-method(subscribe)   -> "SUBSCRIBE";
-method(unsubscribe) -> "UNSUBSCRIBE";
-%% rfc-5789
-method(patch)       -> "PATCH";
-method(purge)       -> "PURGE".
-
-%% From RFC 2616
-%%
-% The chunked encoding modifies the body of a message in order to
-% transfer it as a series of chunks, each with its own size indicator,
-% followed by an OPTIONAL trailer containing entity-header
-% fields. This allows dynamically produced content to be transferred
-% along with the information necessary for the recipient to verify
-% that it has received the full message.
-%       Chunked-Body =  *chunk
-%                       last-chunk
-%                       trailer
-%                       CRLF
-%       chunk = chunk-size [ chunk-extension ] CRLF
-%               chunk-data CRLF
-%       chunk-size = 1*HEX
-%       last-chunk = 1*("0") [ chunk-extension ] CRLF
-%       chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
-%       chunk-ext-name = token
-%       chunk-ext-val = token | quoted-string
-%       chunk-data = chunk-size(OCTET)
-%       trailer = *(entity-header CRLF)
-% The chunk-size field is a string of hex digits indicating the size
-% of the chunk. The chunked encoding is ended by any chunk whose size
-% is zero, followed by the trailer, which is terminated by an empty
-% line.
-%%
-%% The parsing implemented here discards all chunk extensions. It also
-%% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
-%% sending them.
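-%% As a worked example (editor's sketch): the 5-byte body "hello" sent
-%% with chunked encoding arrives on the wire as
-%%     5\r\nhello\r\n0\r\n\r\n
-%% parse_chunk_header(<<"5">>) below returns 5, and the zero-size last
-%% chunk marks the end of the body.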
-parse_chunk_header(ChunkHeader) ->
-    parse_chunk_header(ChunkHeader, []).
-
-parse_chunk_header(<<$;, _/binary>>, Acc) ->
-    hexlist_to_integer(lists:reverse(Acc));
-parse_chunk_header(<<H, T/binary>>, Acc) ->
-    case is_whitespace(H) of
-        true ->
-            parse_chunk_header(T, Acc);
-        false ->
-            parse_chunk_header(T, [H | Acc])
-    end;
-parse_chunk_header(<<>>, Acc) ->
-    hexlist_to_integer(lists:reverse(Acc)).
-
-is_whitespace($\s)  -> true;
-is_whitespace($\r) -> true;
-is_whitespace($\n) -> true;
-is_whitespace($\t) -> true;
-is_whitespace(_)   -> false.
-
-send_async_headers(_ReqId, undefined, _, _State) ->
-    ok;
-send_async_headers(ReqId, StreamTo, Give_raw_headers,
-                   #state{status_line = Status_line, raw_headers = Raw_headers,
-                          recvd_headers = Headers, http_status_code = StatCode,
-                          cur_req = #request{options = Opts}
-                         }) ->
-    {Headers_1, Raw_headers_1} = maybe_add_custom_headers(Headers, Raw_headers, Opts),
-    case Give_raw_headers of
-        false ->
-            catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers_1};
-        true ->
-            catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers_1}
-    end.
-
-maybe_add_custom_headers(Headers, Raw_headers, Opts) ->
-    Custom_headers = get_value(add_custom_headers, Opts, []),
-    Headers_1 = Headers ++ Custom_headers,
-    Raw_headers_1 = case Custom_headers of
-                        [_ | _] when is_binary(Raw_headers) ->
-                            Custom_headers_bin = list_to_binary(string:join([[X, $:, Y] || {X, Y} <- Custom_headers], "\r\n")),
-                            <<Raw_headers/binary, "\r\n", Custom_headers_bin/binary>>;
-                        _ ->
-                            Raw_headers
-                    end,
-    {Headers_1, Raw_headers_1}.
-
-format_response_data(Resp_format, Body) ->
-    case Resp_format of
-        list when is_list(Body) ->
-            flatten(Body);
-        list when is_binary(Body) ->
-            binary_to_list(Body);
-        binary when is_list(Body) ->
-            list_to_binary(Body);
-        _ ->
-            %% This is to cater for sending messages such as
-            %% {chunk_start, _}, chunk_end etc
-            Body
-    end.
-
-do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
-    Msg_1 = {ok, St_code, Headers, format_response_data(Resp_format, Body)},
-    gen_server:reply(From, Msg_1),
-    dec_pipeline_counter(State);
-do_reply(State, From, undefined, _, _, Msg) ->
-    gen_server:reply(From, Msg),
-    dec_pipeline_counter(State);
-do_reply(#state{prev_req_id = Prev_req_id} = State,
-         _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
-    State_1 = dec_pipeline_counter(State),
-    case Body of
-        [] ->
-            ok;
-        _ ->
-            Body_1 = format_response_data(Resp_format, Body),
-            catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
-    end,
-    catch StreamTo ! {ibrowse_async_response_end, ReqId},
-    %% We don't want to delete the Req-id to Pid mapping straightaway
-    %% as the client may send a stream_next message just while we are
-    %% sending back this ibrowse_async_response_end message. If we
-    %% deleted this mapping straightaway, the caller will see a
-    %% {error, unknown_req_id} when it calls ibrowse:stream_next/1. To
-    %% get around this, we store the req id, and clear it after the
-    %% next request. If there are weird combinations of stream,
-    %% stream_once and sync requests on the same connection, it will
-    %% take a while for the req_id-pid mapping to get cleared, but it
-    %% should do no harm.
-    ets:delete(ibrowse_stream, {req_id_pid, Prev_req_id}),
-    State_1#state{prev_req_id = ReqId};
-do_reply(State, _From, StreamTo, ReqId, Resp_format, Msg) ->
-    State_1 = dec_pipeline_counter(State),
-    Msg_1 = format_response_data(Resp_format, Msg),
-    catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1},
-    State_1.
-
-do_interim_reply(undefined, _, _ReqId, _Msg) ->
-    ok;
-do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
-    Msg_1 = format_response_data(Response_format, Msg),
-    catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
-
-do_error_reply(#state{reqs = Reqs, tunnel_setup_queue = Tun_q} = State, Err) ->
-    ReqList = queue:to_list(Reqs),
-    lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
-                               response_format = Resp_format}) ->
-                          ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
-                          do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
-                  end, ReqList),
-    lists:foreach(
-      fun({From, _Url, _Headers, _Method, _Body, _Options, _Timeout}) ->
-              do_reply(State, From, undefined, undefined, undefined, Err)
-      end, Tun_q).
-
-fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
-    {_, Reqs_1} = queue:out(Reqs),
-    #request{from=From, stream_to=StreamTo, req_id=ReqId,
-             response_format = Resp_format} = CurReq,
-    State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
-    do_error_reply(State_1#state{reqs = Reqs_1}, previous_request_failed).
-
-split_list_at(List, N) ->
-    split_list_at(List, N, []).
-
-split_list_at([], _, Acc) ->
-    {lists:reverse(Acc), []};
-split_list_at(List2, 0, List1) ->
-    {lists:reverse(List1), List2};
-split_list_at([H | List2], N, List1) ->
-    split_list_at(List2, N-1, [H | List1]).
-
-hexlist_to_integer(List) ->
-    hexlist_to_integer(lists:reverse(List), 1, 0).
-
-hexlist_to_integer([H | T], Multiplier, Acc) ->
-    hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
-hexlist_to_integer([], _, Acc) ->
-    Acc.
-
-to_ascii($A) -> 10;
-to_ascii($a) -> 10;
-to_ascii($B) -> 11;
-to_ascii($b) -> 11;
-to_ascii($C) -> 12;
-to_ascii($c) -> 12;
-to_ascii($D) -> 13;
-to_ascii($d) -> 13;
-to_ascii($E) -> 14;
-to_ascii($e) -> 14;
-to_ascii($F) -> 15;
-to_ascii($f) -> 15;
-to_ascii($1) -> 1;
-to_ascii($2) -> 2;
-to_ascii($3) -> 3;
-to_ascii($4) -> 4;
-to_ascii($5) -> 5;
-to_ascii($6) -> 6;
-to_ascii($7) -> 7;
-to_ascii($8) -> 8;
-to_ascii($9) -> 9;
-to_ascii($0) -> 0.
-
-cancel_timer(undefined) -> ok;
-cancel_timer(Ref)       -> _ = erlang:cancel_timer(Ref),
-                           ok.
-
-cancel_timer(Ref, {eat_message, Msg}) ->
-    cancel_timer(Ref),
-    receive
-        Msg ->
-            ok
-    after 0 ->
-            ok
-    end.
-
-make_req_id() ->
-    now().
-
-to_lower(Str) ->
-    to_lower(Str, []).
-to_lower([H|T], Acc) when H >= $A, H =< $Z ->
-    to_lower(T, [H+32|Acc]);
-to_lower([H|T], Acc) ->
-    to_lower(T, [H|Acc]);
-to_lower([], Acc) ->
-    lists:reverse(Acc).
-
-shutting_down(#state{lb_ets_tid = undefined}) ->
-    ok;
-shutting_down(#state{lb_ets_tid = Tid,
-                     cur_pipeline_size = _Sz}) ->
-    catch ets:delete(Tid, self()).
-
-inc_pipeline_counter(#state{is_closing = true} = State) ->
-    State;
-inc_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
-    State;
-inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
-                           lb_ets_tid = Tid} = State) ->
-    update_counter(Tid, self(), {2,1,99999,9999}),
-    State#state{cur_pipeline_size = Pipe_sz + 1}.
-
-update_counter(Tid, Key, Args) ->
-    ets:update_counter(Tid, Key, Args).
-
-dec_pipeline_counter(#state{is_closing = true} = State) ->
-    State;
-dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
-    State;
-dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
-                            lb_ets_tid = Tid} = State) ->
-    try
-        update_counter(Tid, self(), {2,-1,0,0}),
-        update_counter(Tid, self(), {3,-1,0,0})
-    catch
-        _:_ ->
-            ok
-    end,
-    State#state{cur_pipeline_size = Pipe_sz - 1}.
-
-flatten([H | _] = L) when is_integer(H) ->
-    L;
-flatten([H | _] = L) when is_list(H) ->
-    lists:flatten(L);
-flatten([]) ->
-    [].
-
-get_stream_chunk_size(Options) ->
-    case lists:keysearch(stream_chunk_size, 1, Options) of
-        {value, {_, V}} when V > 0 ->
-            V;
-        _ ->
-            ?DEFAULT_STREAM_CHUNK_SIZE
-    end.
-
-set_inac_timer(State) ->
-    cancel_timer(State#state.inactivity_timer_ref),
-    set_inac_timer(State#state{inactivity_timer_ref = undefined},
-                   get_inac_timeout(State)).
-
-set_inac_timer(State, Timeout) when is_integer(Timeout) ->
-    Ref = erlang:send_after(Timeout, self(), timeout),
-    State#state{inactivity_timer_ref = Ref};
-set_inac_timer(State, _) ->
-    State.
-
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
-    get_value(inactivity_timeout, Opts, infinity);
-get_inac_timeout(#state{cur_req = undefined}) ->
-    case ibrowse:get_config_value(inactivity_timeout, undefined) of
-        Val when is_integer(Val) ->
-            Val;
-        _ ->
-            case application:get_env(ibrowse, inactivity_timeout) of
-                {ok, Val} when is_integer(Val), Val > 0 ->
-                    Val;
-                _ ->
-                    10000
-            end
-    end.
-
-trace_request(Req) ->
-    case get(my_trace_flag) of
-        true ->
-            %% Avoid the binary operations if trace is not on...
-            NReq = to_binary(Req),
-            do_trace("Sending request: ~n"
-                     "--- Request Begin ---~n~s~n"
-                     "--- Request End ---~n", [NReq]);
-        _ -> ok
-    end.
-
-trace_request_body(Body) ->
-    case get(my_trace_flag) of
-        true ->
-            %% Avoid the binary operations if trace is not on...
-            NBody = to_binary(Body),
-            case size(NBody) > 1024 of
-                true ->
-                    ok;
-                false ->
-                    do_trace("Sending request body: ~n"
-                             "--- Request Body Begin ---~n~s~n"
-                             "--- Request Body End ---~n", [NBody])
-            end;
-        false ->
-            ok
-    end.
-
-to_binary(X) when is_list(X)   -> list_to_binary(X);
-to_binary(X) when is_binary(X) -> X.


[21/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/couch_replicator


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/550e8202
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/550e8202
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/550e8202

Branch: refs/heads/1843-feature-bigcouch
Commit: 550e8202ae88b667516c51701af25927411e24d3
Parents: 2acbbd3
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:40:14 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:40:14 2014 -0600

----------------------------------------------------------------------
 src/couch_replicator/README.md                  |  200 ----
 .../src/couch_replicator.app.src                |   44 -
 src/couch_replicator/src/couch_replicator.erl   | 1002 ------------------
 src/couch_replicator/src/couch_replicator.hrl   |   30 -
 .../src/couch_replicator_api_wrap.erl           |  802 --------------
 .../src/couch_replicator_api_wrap.hrl           |   36 -
 .../src/couch_replicator_app.erl                |   17 -
 .../src/couch_replicator_httpc.erl              |  297 ------
 .../src/couch_replicator_httpc_pool.erl         |  194 ----
 .../src/couch_replicator_httpd.erl              |   65 --
 .../src/couch_replicator_job_sup.erl            |   29 -
 .../src/couch_replicator_js_functions.hrl       |  151 ---
 .../src/couch_replicator_manager.erl            |  889 ----------------
 .../src/couch_replicator_notifier.erl           |   57 -
 .../src/couch_replicator_sup.erl                |   42 -
 .../src/couch_replicator_utils.erl              |  432 --------
 .../src/couch_replicator_worker.erl             |  514 ---------
 src/couch_replicator/src/json_stream_parse.erl  |  432 --------
 src/couch_replicator/test/01-load.t             |   37 -
 src/couch_replicator/test/02-httpc-pool.t       |  240 -----
 .../test/03-replication-compact.t               |  493 ---------
 .../test/04-replication-large-atts.t            |  256 -----
 .../test/05-replication-many-leaves.t           |  283 -----
 .../test/06-doc-missing-stubs.t                 |  293 -----
 24 files changed, 6835 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/README.md
----------------------------------------------------------------------
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
deleted file mode 100644
index 376b3fc..0000000
--- a/src/couch_replicator/README.md
+++ /dev/null
@@ -1,200 +0,0 @@
-## Replicator
-
-### Overview
-
-Replication is a process that synchronizes two databases. All changes
-in one database (the source), including inserts, updates, and deletes,
-are applied to the second database (the target). The replication can
-run on the node containing the source (a *push* replication), on the
-node containing the target database (a *pull* replication), or on a
-third node distinct from both the source and the target.
-
-#### Background
-
-This current version is based on the latest from CouchDB, as partially
-documented on the CouchDB [wiki][1]. The first solid working version
-was implemented by [Adam Kocoloski][4] and subsequently picked up by
-[Filipe Manana][3], another CouchDB committer, who did two major
-reworkings: the first added the [replicator][2] db, and the second
-added performance improvements and configurability to adjust factors
-that affect performance.
-
-This was then ported and integrated with dbcore. The changes required
-were modest, but there are notable differences in the use of the
-replicator db, as noted below. These notes are intended to be the
-definitive resource for Cloudant users and will be kept in sync with
-code changes.
-
-### Usage
-
-The following examples all assume the use of the `_replicate2`
-endpoint. Eventually this will be replaced by `_replicate`, but this
-allows us to support both the new and old replicator during the
-testing phase, e.g.:
-
-    curl -X POST -H 'content-type:application/json'
-    http://bitdiddle.cloudant.com/_replicate2 -d
-    '{"source":"http://cust1.cloudant.com/foo","target":"http://cust2.cloudant.com/bar"}'
-
-In the remaining examples we'll just note the JSON
-bodies. `_replicate2` is really all there is to calling the
-replicator; all the parameters are in the JSON body. For example, a
-simple pull replication (run from the target machine):
-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo"}
-
-To create the target db as well:
-
-    {"source":"http://mazincas.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo",
-     "create_target": true}
-
-A continuous replication will stay running and as changes occur in the
-source, replicate them to the target. Under the covers it makes use of
-a continuous `_changes` feed:
-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo",
-     "continuous":true}
-
-A replication can be stopped by posting the same body but with a
-cancel property added:
-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo",
-     "continuous":true,
-     "cancel":true}
-
-When a continuous replication is run, a replication id is returned,
-which provides an additional method for cancelling the replication:
-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo",
-     "continuous":true}
-
-    {"ok":true,"_local_id":"0a81b645497e6270611ec3419767a584+continuous+create_target"}
-
-To cancel:
-
-    {"replication_id":
-    "0a81b645497e6270611ec3419767a584+continuous+create_target",
-    "cancel": true}
-
-These are the main use cases. Additionally, filter functions are
-supported: JS functions that control whether a doc is replicated. They
-work similarly to views, e.g.:
-
-    {
-      "_id": "_design/foo",
-      "filters": {
-        "foo": "function(doc, req) {
-                  if (doc.name == req.query.key) {
-                    return true;
-                  } else {
-                    return false;
-                  }
-                }"
-      }
-    }
-
-Its use is specified in the replication as follows:
-
-    {"source":"http://nordier.cloudant.com/foo",
-     "target":"http://beezlechaus.cloudant.com/foo",
-     "filter":"foo/foo",
-     "query_params": {"key":"value"}
-    }
-
-Some applications might need to replicate just a few docs, in which
-case `doc_ids` can be used:
-
-    {"source":"http://nordier.cloudant.com/foo",
-     "target":"http://mazincas.cloudant.com/foo",
-     "doc_ids":["foo","bar","baz"]
-    }
-
-### Replicator DB
-
-A replicator db is a better way to manage replications, by storing
-them in a database. One just puts a JSON doc to the replicator db with
-a body the same as if it were posted to `_replicate2`:
-
-    curl -X PUT
-    http://bitdiddle.cloudant.com/_replicator/repl1 -d
-    '{"source":"http://cust1.cloudant.com/foo",
-      "target":"http://cust2.cloudant.com/bar"}'
-
-The doc might look something like:
-
-<pre>
-{
-    "_id": "repl1",
-    "source":  "http://cust1.cloudant.com/foo",
-    "target":  "http://cust1.cloudant.com/foo", 
-    "create_target":  true,
-    "_replication_id":  "c0ebe9256695ff083347cbf95f93e280",
-    "_replication_state":  "triggered",
-    "_replication_state_time":  "2011-06-07T16:54:35+01:00"
-}
-</pre>
-
-This replication now persists across server restarts, and it can be
-cancelled by simply deleting the doc. The `_replication_state` will
-change to `completed` when a replication finishes, which may not
-happen in the case of a continuous replication. It can also change to
-`error` if the replication fails.
-
-### Advanced Configurations
-
-There are a few additional arguments that can be passed to a
-replication that govern its performance. These are to be used with
-care, as they can have a significant impact on the system in a
-multi-tenant environment. A combined example follows the list.
-
- * worker_processes - The default is 4. This controls how many
-   separate processes will read from the changes manager and write to
-   the target. A higher number can improve throughput.
-
- * worker_batch_size - The default is 500. This controls how many
-   documents each worker processes per batch. A checkpoint is written
-   after each batch, so this also controls how frequently checkpointing
-   occurs.
-
- * http_connections - The maximum number of http connections used per
-   replication. The default is 20.
-
- * connection_timeout - How long a connection can remain idle, in
-   milliseconds. The default is 30000 (30s).
-
- * retries_per_request - The number of times to retry a failed request.
-   The default is 10. There is a wait period between attempts that
-   starts at 0.25s, doubles on each iteration, and is capped at 5
-   minutes.
-
- * socket_options - A list of options applied to the socket
-   connections. See the [erlang documentation][6] for details.
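-
-For illustration only, a replication that combines several of these
-options might look like this (hosts as in the earlier examples; the
-option values here are arbitrary, not recommendations):
-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://nordier.cloudant.com/foo",
-     "continuous":true,
-     "worker_processes":8,
-     "worker_batch_size":1000,
-     "http_connections":40,
-     "connection_timeout":60000,
-     "retries_per_request":5,
-     "socket_options":"[{keepalive, true}, {nodelay, false}]"}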
-
-
-### Differences from CouchDB
-
-CouchDB allows the replicator db to be shared by multiple users,
-controlling access via update handlers. It also allows the replicator
-db to be changed on the fly by setting a value in the config file. We
-support neither of these in dbcore. Each account has its own
-replicator db, named _replicator, and that's it.
-
-Another difference to note is in specifying URLs. Using the name of a
-local db works fine in a single CouchDB instance but makes less sense
-in a clustered environment. Though we try to determine a full URL to
-use, this doesn't work well in practice, so a good rule of thumb is to
-always use full URLs for both the source and the target, for example:
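-
-    {"source":"http://bitdiddle.cloudant.com/foo",
-     "target":"http://bitdiddle.cloudant.com/bar"}
-
-rather than the bare db names `foo` and `bar`.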
-
-### Overview of Code and Design
-
-----
-
-[1]: http://wiki.apache.org/couchdb/Replication
-[2]: https://gist.github.com/832610
-[3]: https://github.com/fdmanana
-[4]: https://github.com/kocolosk
-[5]: http://guide.couchdb.org/draft/replication.html
-[6]: http://www.erlang.org/doc/man/inet.html#setopts-2

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator.app.src
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator.app.src b/src/couch_replicator/src/couch_replicator.app.src
deleted file mode 100644
index 2da0d4a..0000000
--- a/src/couch_replicator/src/couch_replicator.app.src
+++ /dev/null
@@ -1,44 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, couch_replicator, [
-    {description, "CouchDB replicator"},
-    {vsn, git},
-    {modules, [
-        couch_replicator,
-        couch_replicator_api_wrap,
-        couch_replicator_app,
-        couch_replicator_httpc,
-        couch_replicator_httpd,
-        couch_replicator_job_sup,
-        couch_replicator_notifier,
-        couch_replicator_manager,
-        couch_replicator_httpc_pool,
-        couch_replicator_sup,
-        couch_replicator_utils,
-        couch_replicator_worker
-    ]},
-    {registered, [
-        couch_replication,
-        couch_replicator_manager,
-        couch_replicator_job_sup
-    ]},
-    {applications, [
-        kernel,
-        stdlib,
-        twig,
-        fabric,
-        mem3,
-        couch
-    ]}
-]}.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
deleted file mode 100644
index e1ff15c..0000000
--- a/src/couch_replicator/src/couch_replicator.erl
+++ /dev/null
@@ -1,1002 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator).
--behaviour(gen_server).
-
-% public API
--export([replicate/2]).
-
-% meant to be used only by the replicator database listener
--export([async_replicate/1]).
--export([cancel_replication/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
--define(LOWEST_SEQ, 0).
-
--import(couch_util, [
-    get_value/2,
-    get_value/3,
-    to_binary/1
-]).
-
--import(couch_replicator_utils, [
-    start_db_compaction_notifier/2,
-    stop_db_compaction_notifier/1
-]).
-
--record(rep_state, {
-    rep_details,
-    source_name,
-    target_name,
-    source,
-    target,
-    history,
-    checkpoint_history,
-    start_seq,
-    committed_seq,
-    current_through_seq,
-    seqs_in_progress = [],
-    highest_seq_done = {0, ?LOWEST_SEQ},
-    source_log,
-    target_log,
-    rep_starttime,
-    src_starttime,
-    tgt_starttime,
-    timer, % checkpoint timer
-    changes_queue,
-    changes_manager,
-    changes_reader,
-    workers,
-    stats = #rep_stats{},
-    session_id,
-    source_db_compaction_notifier = nil,
-    target_db_compaction_notifier = nil,
-    source_monitor = nil,
-    target_monitor = nil,
-    source_seq = nil,
-    use_checkpoints = true
-}).
-
-
-replicate(PostBody, Ctx) ->
-    {ok, #rep{id = RepId, options = Options, user_ctx = UserCtx} = Rep} =
-        couch_replicator_utils:parse_rep_doc(PostBody, Ctx),
-    case get_value(cancel, Options, false) of
-    true ->
-        case get_value(id, Options, nil) of
-        nil ->
-            cancel_replication(RepId);
-        RepId2 ->
-            cancel_replication(RepId2, UserCtx)
-        end;
-    false ->
-        {ok, Listener} = rep_result_listener(RepId),
-        Result = do_replication_loop(Rep),
-        couch_replicator_notifier:stop(Listener),
-        Result
-    end.
-
-
-do_replication_loop(#rep{id = {BaseId, Ext} = Id, options = Options} = Rep) ->
-    case async_replicate(Rep) of
-    {ok, _Pid} ->
-        case get_value(continuous, Options, false) of
-        true ->
-            {ok, {continuous, ?l2b(BaseId ++ Ext)}};
-        false ->
-            wait_for_result(Id)
-        end;
-    Error ->
-        Error
-    end.
-
-
-async_replicate(#rep{id = {BaseId, Ext}, source = Src, target = Tgt} = Rep) ->
-    RepChildId = BaseId ++ Ext,
-    Source = couch_replicator_api_wrap:db_uri(Src),
-    Target = couch_replicator_api_wrap:db_uri(Tgt),
-    Timeout = get_value(connection_timeout, Rep#rep.options),
-    ChildSpec = {
-        RepChildId,
-        {gen_server, start_link, [?MODULE, Rep, [{timeout, Timeout}]]},
-        temporary,
-        250,
-        worker,
-        [?MODULE]
-    },
-    % All these nested cases to attempt starting/restarting a replication child
-    % are ugly and not 100% race condition free. The following patch submission
-    % is a solution:
-    %
-    % http://erlang.2086793.n4.nabble.com/PATCH-supervisor-atomically-delete-child-spec-when-child-terminates-td3226098.html
-    %
-    case supervisor:start_child(couch_replicator_job_sup, ChildSpec) of
-    {ok, Pid} ->
-        twig:log(notice,"starting new replication `~s` at ~p (`~s` -> `~s`)",
-            [RepChildId, Pid, Source, Target]),
-        {ok, Pid};
-    {error, already_present} ->
-        case supervisor:restart_child(couch_replicator_job_sup, RepChildId) of
-        {ok, Pid} ->
-            twig:log(notice,"restarting replication `~s` at ~p (`~s` -> `~s`)",
-                [RepChildId, Pid, Source, Target]),
-            {ok, Pid};
-        {error, running} ->
-            %% this error occurs if multiple replicators are racing
-            %% each other to start and somebody else won. Just grab
-            %% the Pid by calling start_child again.
-            timer:sleep(50 + random:uniform(100)),
-            async_replicate(Rep);
-        {error, {'EXIT', {badarg,
-            [{erlang, apply, [gen_server, start_link, undefined]} | _]}}} ->
-            % Clause to deal with a change in the supervisor module introduced
-            % in R14B02. For more details consult the thread at:
-            %     http://erlang.org/pipermail/erlang-bugs/2011-March/002273.html
-            _ = supervisor:delete_child(couch_replicator_job_sup, RepChildId),
-            async_replicate(Rep);
-        {error, _} = Error ->
-            Error
-        end;
-    {error, {already_started, Pid}} ->
-        twig:log(notice,"replication `~s` already running at ~p (`~s` -> `~s`)",
-            [RepChildId, Pid, Source, Target]),
-        {ok, Pid};
-    {error, {Error, _}} ->
-        {error, Error}
-    end.
-
-
-rep_result_listener(RepId) ->
-    ReplyTo = self(),
-    {ok, _Listener} = couch_replicator_notifier:start_link(
-        fun({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
-                ReplyTo ! Ev;
-            (_) ->
-                ok
-        end).
-
-
-wait_for_result(RepId) ->
-    receive
-    {finished, RepId, RepResult} ->
-        {ok, RepResult};
-    {error, RepId, Reason} ->
-        {error, Reason}
-    end.
-
-
-cancel_replication({BaseId, Extension}) ->
-    FullRepId = BaseId ++ Extension,
-    twig:log(notice,"Canceling replication `~s`...", [FullRepId]),
-    case supervisor:terminate_child(couch_replicator_job_sup, FullRepId) of
-    ok ->
-        twig:log(notice,"Replication `~s` canceled.", [FullRepId]),
-        case supervisor:delete_child(couch_replicator_job_sup, FullRepId) of
-            ok ->
-                {ok, {cancelled, ?l2b(FullRepId)}};
-            {error, not_found} ->
-                {ok, {cancelled, ?l2b(FullRepId)}};
-            Error ->
-                Error
-        end;
-    Error ->
-        twig:log(error,"Error canceling replication `~s`: ~p", [FullRepId, Error]),
-        Error
-    end.
-
-cancel_replication(RepId, #user_ctx{name = Name, roles = Roles}) ->
-    case lists:member(<<"_admin">>, Roles) of
-    true ->
-        cancel_replication(RepId);
-    false ->
-        {BaseId, Ext} = RepId,
-        case lists:keysearch(
-            BaseId ++ Ext, 1, supervisor:which_children(couch_replicator_job_sup)) of
-        {value, {_, Pid, _, _}} when is_pid(Pid) ->
-            case (catch gen_server:call(Pid, get_details, infinity)) of
-            {ok, #rep{user_ctx = #user_ctx{name = Name}}} ->
-                cancel_replication(RepId);
-            {ok, _} ->
-                throw({unauthorized,
-                    <<"Can't cancel a replication triggered by another user">>});
-            {'EXIT', {noproc, {gen_server, call, _}}} ->
-                {error, not_found};
-            Error ->
-                throw(Error)
-            end;
-        _ ->
-            {error, not_found}
-        end
-    end.
-
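-% init/1 returns a zero timeout so that the expensive setup in do_init/1
-% runs from handle_info(timeout, ...), after gen_server:start_link/4 has
-% already returned to the caller.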
-init(InitArgs) ->
-    {ok, InitArgs, 0}.
-
-do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
-    process_flag(trap_exit, true),
-
-    #rep_state{
-        source = Source,
-        target = Target,
-        source_name = SourceName,
-        target_name = TargetName,
-        start_seq = {_Ts, StartSeq},
-        source_seq = SourceCurSeq,
-        committed_seq = {_, CommittedSeq}
-    } = State = init_state(Rep),
-
-    NumWorkers = get_value(worker_processes, Options),
-    BatchSize = get_value(worker_batch_size, Options),
-    {ok, ChangesQueue} = couch_work_queue:new([
-        {max_items, BatchSize * NumWorkers * 2},
-        {max_size, 100 * 1024 * NumWorkers}
-    ]),
-    % This starts the _changes reader process. It adds the changes from
-    % the source db to the ChangesQueue.
-    ChangesReader = spawn_changes_reader(StartSeq, Source, ChangesQueue, Options),
-    % Changes manager - responsible for dequeuing batches from the changes
-    % queue and delivering them to the worker processes.
-    ChangesManager = spawn_changes_manager(self(), ChangesQueue, BatchSize),
-    % This starts the worker processes. They ask the changes manager for
-    % a batch of _changes rows to process, check which revs are missing on
-    % the target, and copy the missing ones from the source to the target.
-    MaxConns = get_value(http_connections, Options),
-    Workers = lists:map(
-        fun(_) ->
-            {ok, Pid} = couch_replicator_worker:start_link(
-                self(), Source, Target, ChangesManager, MaxConns),
-            Pid
-        end,
-        lists:seq(1, NumWorkers)),
-
-    couch_task_status:add_task([
-        {type, replication},
-        {user, UserCtx#user_ctx.name},
-        {replication_id, ?l2b(BaseId ++ Ext)},
-        {doc_id, Rep#rep.doc_id},
-        {source, ?l2b(SourceName)},
-        {target, ?l2b(TargetName)},
-        {continuous, get_value(continuous, Options, false)},
-        {revisions_checked, 0},
-        {missing_revisions_found, 0},
-        {docs_read, 0},
-        {docs_written, 0},
-        {changes_pending, pending(SourceCurSeq, CommittedSeq)},
-        {doc_write_failures, 0},
-        {source_seq, SourceCurSeq},
-        {checkpointed_source_seq, CommittedSeq},
-        {progress, 0}
-    ]),
-    couch_task_status:set_update_frequency(1000),
-
-    % Until OTP R14B03:
-    %
-    % Restarting a temporary supervised child implies that the original arguments
-    % (#rep{} record) specified in the MFA component of the supervisor
-    % child spec will always be used whenever the child is restarted.
-    % This implies the same replication performance tuning parameters will
-    % always be used. The solution is to delete the child spec (see
-    % cancel_replication/1) and then start the replication again, but this is
-    % unfortunately not immune to race conditions.
-
-    twig:log(notice,"Replication `~p` is using:~n"
-        "~c~p worker processes~n"
-        "~ca worker batch size of ~p~n"
-        "~c~p HTTP connections~n"
-        "~ca connection timeout of ~p milliseconds~n"
-        "~c~p retries per request~n"
-        "~csocket options are: ~s~s",
-        [BaseId ++ Ext, $\t, NumWorkers, $\t, BatchSize, $\t,
-            MaxConns, $\t, get_value(connection_timeout, Options),
-            $\t, get_value(retries, Options),
-            $\t, io_lib:format("~p", [get_value(socket_options, Options)]),
-            case StartSeq of
-            ?LOWEST_SEQ ->
-                "";
-            _ ->
-                io_lib:format("~n~csource start sequence ~p", [$\t, StartSeq])
-            end]),
-
-    twig:log(debug,"Worker pids are: ~p", [Workers]),
-
-    couch_replicator_manager:replication_started(Rep),
-
-    {ok, State#rep_state{
-            changes_queue = ChangesQueue,
-            changes_manager = ChangesManager,
-            changes_reader = ChangesReader,
-            workers = Workers
-        }
-    }.
-
-
-handle_info(shutdown, St) ->
-    {stop, shutdown, St};
-
-handle_info({'DOWN', Ref, _, _, Why}, #rep_state{source_monitor = Ref} = St) ->
-    twig:log(error,"Source database is down. Reason: ~p", [Why]),
-    {stop, source_db_down, St};
-
-handle_info({'DOWN', Ref, _, _, Why}, #rep_state{target_monitor = Ref} = St) ->
-    twig:log(error,"Target database is down. Reason: ~p", [Why]),
-    {stop, target_db_down, St};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_reader=Pid} = State) ->
-    {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_reader=Pid} = State) ->
-    twig:log(error,"ChangesReader process died with reason: ~p", [Reason]),
-    {stop, changes_reader_died, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_manager = Pid} = State) ->
-    {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_manager = Pid} = State) ->
-    twig:log(error,"ChangesManager process died with reason: ~p", [Reason]),
-    {stop, changes_manager_died, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_queue=Pid} = State) ->
-    {noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_queue=Pid} = State) ->
-    twig:log(error,"ChangesQueue process died with reason: ~p", [Reason]),
-    {stop, changes_queue_died, cancel_timer(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{workers = Workers} = State) ->
-    case Workers -- [Pid] of
-    Workers ->
-        twig:log(error,"unknown pid bit the dust ~p ~n",[Pid]),
-        {noreply, State#rep_state{workers = Workers}};
-        %% not clear why a stop was here before
-        %%{stop, {unknown_process_died, Pid, normal}, State};
-    [] ->
-        catch unlink(State#rep_state.changes_manager),
-        catch exit(State#rep_state.changes_manager, kill),
-        do_last_checkpoint(State);
-    Workers2 ->
-        {noreply, State#rep_state{workers = Workers2}}
-    end;
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
-    State2 = cancel_timer(State),
-    case lists:member(Pid, Workers) of
-    false ->
-        {stop, {unknown_process_died, Pid, Reason}, State2};
-    true ->
-        twig:log(error,"Worker ~p died with reason: ~p", [Pid, Reason]),
-        {stop, {worker_died, Pid, Reason}, State2}
-    end;
-
-handle_info(timeout, InitArgs) ->
-    try do_init(InitArgs) of {ok, State} ->
-        {noreply, State}
-    catch Class:Error ->
-        Stack = erlang:get_stacktrace(),
-        {stop, shutdown, {error, Class, Error, Stack, InitArgs}}
-    end.
-
-handle_call(get_details, _From, #rep_state{rep_details = Rep} = State) ->
-    {reply, {ok, Rep}, State};
-
-handle_call({add_stats, Stats}, From, State) ->
-    gen_server:reply(From, ok),
-    NewStats = couch_replicator_utils:sum_stats(State#rep_state.stats, Stats),
-    {noreply, State#rep_state{stats = NewStats}};
-
-handle_call({report_seq_done, Seq, StatsInc}, From,
-    #rep_state{seqs_in_progress = SeqsInProgress, highest_seq_done = HighestDone,
-        current_through_seq = ThroughSeq, stats = Stats} = State) ->
-    gen_server:reply(From, ok),
-    {NewThroughSeq0, NewSeqsInProgress} = case SeqsInProgress of
-    [Seq | Rest] ->
-        {Seq, Rest};
-    [_ | _] ->
-        {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
-    end,
-    NewHighestDone = lists:max([HighestDone, Seq]),
-    NewThroughSeq = case NewSeqsInProgress of
-    [] ->
-        lists:max([NewThroughSeq0, NewHighestDone]);
-    _ ->
-        NewThroughSeq0
-    end,
-    twig:log(debug,"Worker reported seq ~p, through seq was ~p, "
-        "new through seq is ~p, highest seq done was ~p, "
-        "new highest seq done is ~p~n"
-        "Seqs in progress were: ~p~nSeqs in progress are now: ~p",
-        [Seq, ThroughSeq, NewThroughSeq, HighestDone,
-            NewHighestDone, SeqsInProgress, NewSeqsInProgress]),
-    SourceCurSeq = source_cur_seq(State),
-    NewState = State#rep_state{
-        stats = couch_replicator_utils:sum_stats(Stats, StatsInc),
-        current_through_seq = NewThroughSeq,
-        seqs_in_progress = NewSeqsInProgress,
-        highest_seq_done = NewHighestDone,
-        source_seq = SourceCurSeq
-    },
-    update_task(NewState),
-    {noreply, NewState}.
-
-
-handle_cast({db_compacted, DbName},
-    #rep_state{source = #db{name = DbName} = Source} = State) ->
-    {ok, NewSource} = couch_db:reopen(Source),
-    {noreply, State#rep_state{source = NewSource}};
-
-handle_cast({db_compacted, DbName},
-    #rep_state{target = #db{name = DbName} = Target} = State) ->
-    {ok, NewTarget} = couch_db:reopen(Target),
-    {noreply, State#rep_state{target = NewTarget}};
-
-handle_cast(checkpoint, State) ->
-    case do_checkpoint(State) of
-    {ok, NewState} ->
-        {noreply, NewState#rep_state{timer = start_timer(State)}};
-    Error ->
-        {stop, Error, State}
-    end;
-
-handle_cast({report_seq, Seq},
-    #rep_state{seqs_in_progress = SeqsInProgress} = State) ->
-    NewSeqsInProgress = ordsets:add_element(Seq, SeqsInProgress),
-    {noreply, State#rep_state{seqs_in_progress = NewSeqsInProgress}}.
-
-
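-% Upgrade path: a pre-upgrade #rep_state{} tuple has 30 elements (it
-% lacks the use_checkpoints field), so append its default value (true)
-% to match the current record definition.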
-code_change(_OldVsn, OldState, _Extra) when tuple_size(OldState) =:= 30 ->
-    {ok, erlang:append_element(OldState, true)};
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-terminate(normal, #rep_state{rep_details = #rep{id = RepId} = Rep,
-    checkpoint_history = CheckpointHistory} = State) ->
-    terminate_cleanup(State),
-    couch_replicator_notifier:notify({finished, RepId, CheckpointHistory}),
-    couch_replicator_manager:replication_completed(Rep, rep_stats(State));
-
-terminate(shutdown, #rep_state{rep_details = #rep{id = RepId}} = State) ->
-    % cancelled replication through ?MODULE:cancel_replication/1
-    couch_replicator_notifier:notify({error, RepId, <<"cancelled">>}),
-    terminate_cleanup(State);
-
-terminate(Reason, #rep_state{} = State) ->
-    #rep_state{
-        source_name = Source,
-        target_name = Target,
-        rep_details = #rep{id = {BaseId, Ext} = RepId} = Rep
-    } = State,
-    twig:log(error,"Replication `~s` (`~s` -> `~s`) failed: ~s",
-        [BaseId ++ Ext, Source, Target, to_binary(Reason)]),
-    terminate_cleanup(State),
-    couch_replicator_notifier:notify({error, RepId, Reason}),
-    couch_replicator_manager:replication_error(Rep, Reason);
-
-terminate(shutdown, {error, Class, Error, Stack, InitArgs}) ->
-    #rep{id=RepId} = InitArgs,
-    twig:log(error,"~p:~p: Replication failed to start for args ~p: ~p",
-             [Class, Error, InitArgs, Stack]),
-    case Error of
-    {unauthorized, DbUri} ->
-        NotifyError = {unauthorized, <<"unauthorized to access or create database ", DbUri/binary>>};
-    {db_not_found, DbUri} ->
-        NotifyError = {db_not_found, <<"could not open ", DbUri/binary>>};
-    _ ->
-        NotifyError = Error
-    end,
-    couch_replicator_notifier:notify({error, RepId, NotifyError}),
-    couch_replicator_manager:replication_error(InitArgs, NotifyError).
-
-terminate_cleanup(State) ->
-    update_task(State),
-    stop_db_compaction_notifier(State#rep_state.source_db_compaction_notifier),
-    stop_db_compaction_notifier(State#rep_state.target_db_compaction_notifier),
-    couch_replicator_api_wrap:db_close(State#rep_state.source),
-    couch_replicator_api_wrap:db_close(State#rep_state.target).
-
-
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
-    highest_seq_done = {_Ts, ?LOWEST_SEQ}} = State) ->
-    {stop, normal, cancel_timer(State)};
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
-    highest_seq_done = Seq} = State) ->
-    case do_checkpoint(State#rep_state{current_through_seq = Seq}) of
-    {ok, NewState} ->
-        {stop, normal, cancel_timer(NewState)};
-    Error ->
-        {stop, Error, State}
-    end.
-
-
-start_timer(State) ->
-    After = checkpoint_interval(State),
-    case timer:apply_after(After, gen_server, cast, [self(), checkpoint]) of
-    {ok, Ref} ->
-        Ref;
-    Error ->
-        twig:log(error,"Replicator, error scheduling checkpoint:  ~p", [Error]),
-        nil
-    end.
-
-
-cancel_timer(#rep_state{timer = nil} = State) ->
-    State;
-cancel_timer(#rep_state{timer = Timer} = State) ->
-    {ok, cancel} = timer:cancel(Timer),
-    State#rep_state{timer = nil}.
-
-
-init_state(Rep) ->
-    #rep{
-        source = Src, target = Tgt,
-        options = Options, user_ctx = UserCtx
-    } = Rep,
-    {ok, Source} = couch_replicator_api_wrap:db_open(Src, [{user_ctx, UserCtx}]),
-    {ok, Target} = couch_replicator_api_wrap:db_open(Tgt, [{user_ctx, UserCtx}],
-        get_value(create_target, Options, false)),
-
-    {ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
-    {ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target),
-
-    [SourceLog, TargetLog] = find_replication_logs([Source, Target], Rep),
-
-    {StartSeq0, History} = compare_replication_logs(SourceLog, TargetLog),
-    StartSeq1 = get_value(since_seq, Options, StartSeq0),
-    StartSeq = {0, StartSeq1},
-    #doc{body={CheckpointHistory}} = SourceLog,
-    State = #rep_state{
-        rep_details = Rep,
-        source_name = couch_replicator_api_wrap:db_uri(Source),
-        target_name = couch_replicator_api_wrap:db_uri(Target),
-        source = Source,
-        target = Target,
-        history = History,
-        checkpoint_history = {[{<<"no_changes">>, true}| CheckpointHistory]},
-        start_seq = StartSeq,
-        current_through_seq = StartSeq,
-        committed_seq = StartSeq,
-        source_log = SourceLog,
-        target_log = TargetLog,
-        rep_starttime = couch_util:rfc1123_date(),
-        src_starttime = get_value(<<"instance_start_time">>, SourceInfo),
-        tgt_starttime = get_value(<<"instance_start_time">>, TargetInfo),
-        session_id = couch_uuids:random(),
-        source_db_compaction_notifier =
-            start_db_compaction_notifier(Source, self()),
-        target_db_compaction_notifier =
-            start_db_compaction_notifier(Target, self()),
-        source_monitor = db_monitor(Source),
-        target_monitor = db_monitor(Target),
-        source_seq = get_value(<<"update_seq">>, SourceInfo, ?LOWEST_SEQ),
-        use_checkpoints = get_value(use_checkpoints, Options, true)
-    },
-    State#rep_state{timer = start_timer(State)}.
-
-
-find_replication_logs(DbList, #rep{id = {BaseId, _}} = Rep) ->
-    LogId = ?l2b(?LOCAL_DOC_PREFIX ++ BaseId),
-    fold_replication_logs(DbList, ?REP_ID_VERSION, LogId, LogId, Rep, []).
-
-
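-% Look for the replication log under the current replication id first,
-% then fall back through older id versions so checkpoints written by
-% previous replicator releases are found and migrated to the new id.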
-fold_replication_logs([], _Vsn, _LogId, _NewId, _Rep, Acc) ->
-    lists:reverse(Acc);
-
-fold_replication_logs([Db | Rest] = Dbs, Vsn, LogId, NewId, Rep, Acc) ->
-    case couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body]) of
-    {error, <<"not_found">>} when Vsn > 1 ->
-        OldRepId = couch_replicator_utils:replication_id(Rep, Vsn - 1),
-        fold_replication_logs(Dbs, Vsn - 1,
-            ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId), NewId, Rep, Acc);
-    {error, <<"not_found">>} ->
-        fold_replication_logs(
-            Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [#doc{id = NewId} | Acc]);
-    {ok, Doc} when LogId =:= NewId ->
-        fold_replication_logs(
-            Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]);
-    {ok, Doc} ->
-        MigratedLog = #doc{id = NewId, body = Doc#doc.body},
-        fold_replication_logs(
-            Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [MigratedLog | Acc])
-    end.
-
-
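-% For a remote source, HTTP-level retries are disabled (retries = 0) and
-% tracked in the reader's process dictionary instead, so that a failed
-% _changes request can be resumed from the last sequence seen.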
-spawn_changes_reader(StartSeq, #httpdb{} = Db, ChangesQueue, Options) ->
-    spawn_link(fun() ->
-        put(last_seq, StartSeq),
-        put(retries_left, Db#httpdb.retries),
-        read_changes(StartSeq, Db#httpdb{retries = 0}, ChangesQueue, Options)
-    end);
-spawn_changes_reader(StartSeq, Db, ChangesQueue, Options) ->
-    spawn_link(fun() ->
-        read_changes(StartSeq, Db, ChangesQueue, Options)
-    end).
-
-read_changes(StartSeq, Db, ChangesQueue, Options) ->
-    try
-        couch_replicator_api_wrap:changes_since(Db, all_docs, StartSeq,
-            fun(#doc_info{high_seq = Seq, id = Id} = DocInfo) ->
-                case Id of
-                <<>> ->
-                    % Previous CouchDB releases had a bug which allowed
-                    % docs with an empty ID to be inserted into databases.
-                    % Such docs are impossible to GET.
-                    twig:log(error,"Replicator: ignoring document with empty ID in "
-                        "source database `~s` (_changes sequence ~p)",
-                        [couch_replicator_api_wrap:db_uri(Db), Seq]);
-                _ ->
-                    ok = couch_work_queue:queue(ChangesQueue, DocInfo)
-                end,
-                put(last_seq, Seq);
-            ({last_seq, LS}) ->
-                case get_value(continuous, Options) of
-                true ->
-                    % LS should never be undefined, but it doesn't hurt to be
-                    % defensive inside the replicator.
-                    Seq = case LS of undefined -> get(last_seq); _ -> LS end,
-                    read_changes(Seq, Db, ChangesQueue, Options);
-                _ ->
-                    % This clause is unreachable today, but let's plan ahead
-                    % for the future where we checkpoint against last_seq
-                    % instead of the sequence of the last change.  The two can
-                    % differ substantially in the case of a restrictive filter.
-                    ok
-                end
-            end, Options),
-        couch_work_queue:close(ChangesQueue)
-    catch exit:{http_request_failed, _, _, _} = Error ->
-        case get(retries_left) of
-        N when N > 0 ->
-            put(retries_left, N - 1),
-            LastSeq = get(last_seq),
-            Db2 = case LastSeq of
-            StartSeq ->
-                twig:log(notice,"Retrying _changes request to source database ~s"
-                    " with since=~p in ~p seconds",
-                    [couch_replicator_api_wrap:db_uri(Db), LastSeq, Db#httpdb.wait / 1000]),
-                ok = timer:sleep(Db#httpdb.wait),
-                Db#httpdb{wait = 2 * Db#httpdb.wait};
-            _ ->
-                twig:log(notice,"Retrying _changes request to source database ~s"
-                    " with since=~p", [couch_replicator_api_wrap:db_uri(Db), LastSeq]),
-                Db
-            end,
-            read_changes(LastSeq, Db2, ChangesQueue, Options);
-        _ ->
-            exit(Error)
-        end
-    end.
-
-
-spawn_changes_manager(Parent, ChangesQueue, BatchSize) ->
-    spawn_link(fun() ->
-        changes_manager_loop_open(Parent, ChangesQueue, BatchSize, 1)
-    end).
-
-changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts) ->
-    receive
-    {get_changes, From} ->
-        case couch_work_queue:dequeue(ChangesQueue, BatchSize) of
-        closed ->
-            From ! {closed, self()};
-        {ok, Changes} ->
-            #doc_info{high_seq = Seq} = lists:last(Changes),
-            ReportSeq = {Ts, Seq},
-            ok = gen_server:cast(Parent, {report_seq, ReportSeq}),
-            From ! {changes, self(), Changes, ReportSeq}
-        end,
-        changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts + 1)
-    end.
-
-
-checkpoint_interval(_State) ->
-    5000.
-
-do_checkpoint(#rep_state{use_checkpoints=false} = State) ->
-    NewState = State#rep_state{checkpoint_history = {[{<<"use_checkpoints">>, false}]} },
-    {ok, NewState};
-do_checkpoint(#rep_state{current_through_seq=Seq, committed_seq=Seq} = State) ->
-    SourceCurSeq = source_cur_seq(State),
-    NewState = State#rep_state{source_seq = SourceCurSeq},
-    update_task(NewState),
-    {ok, NewState};
-do_checkpoint(State) ->
-    #rep_state{
-        source_name=SourceName,
-        target_name=TargetName,
-        source = Source,
-        target = Target,
-        history = OldHistory,
-        start_seq = {_, StartSeq},
-        current_through_seq = {_Ts, NewSeq} = NewTsSeq,
-        source_log = SourceLog,
-        target_log = TargetLog,
-        rep_starttime = ReplicationStartTime,
-        src_starttime = SrcInstanceStartTime,
-        tgt_starttime = TgtInstanceStartTime,
-        stats = Stats,
-        rep_details = #rep{options = Options},
-        session_id = SessionId
-    } = State,
-    case commit_to_both(Source, Target) of
-    {source_error, Reason} ->
-         {checkpoint_commit_failure,
-             <<"Failure on source commit: ", (to_binary(Reason))/binary>>};
-    {target_error, Reason} ->
-         {checkpoint_commit_failure,
-             <<"Failure on target commit: ", (to_binary(Reason))/binary>>};
-    {SrcInstanceStartTime, TgtInstanceStartTime} ->
-        twig:log(notice,"recording a checkpoint for `~s` -> `~s` at source update_seq ~p",
-            [SourceName, TargetName, NewSeq]),
-        StartTime = ?l2b(ReplicationStartTime),
-        EndTime = ?l2b(couch_util:rfc1123_date()),
-        NewHistoryEntry = {[
-            {<<"session_id">>, SessionId},
-            {<<"start_time">>, StartTime},
-            {<<"end_time">>, EndTime},
-            {<<"start_last_seq">>, StartSeq},
-            {<<"end_last_seq">>, NewSeq},
-            {<<"recorded_seq">>, NewSeq},
-            {<<"missing_checked">>, Stats#rep_stats.missing_checked},
-            {<<"missing_found">>, Stats#rep_stats.missing_found},
-            {<<"docs_read">>, Stats#rep_stats.docs_read},
-            {<<"docs_written">>, Stats#rep_stats.docs_written},
-            {<<"doc_write_failures">>, Stats#rep_stats.doc_write_failures}
-        ]},
-        BaseHistory = [
-            {<<"session_id">>, SessionId},
-            {<<"source_last_seq">>, NewSeq},
-            {<<"replication_id_version">>, ?REP_ID_VERSION}
-        ] ++ case get_value(doc_ids, Options) of
-        undefined ->
-            [];
-        _DocIds ->
-            % backwards compatibility with the result of a replication by
-            % doc IDs in versions 0.11.x and 1.0.x
-            % TODO: deprecate (use same history format, simplify code)
-            [
-                {<<"start_time">>, StartTime},
-                {<<"end_time">>, EndTime},
-                {<<"docs_read">>, Stats#rep_stats.docs_read},
-                {<<"docs_written">>, Stats#rep_stats.docs_written},
-                {<<"doc_write_failures">>, Stats#rep_stats.doc_write_failures}
-            ]
-        end,
-        % limit history to 50 entries
-        NewRepHistory = {
-            BaseHistory ++
-            [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
-        },
-
-        try
-            {SrcRevPos, SrcRevId} = update_checkpoint(
-                Source, SourceLog#doc{body = NewRepHistory}, source),
-            {TgtRevPos, TgtRevId} = update_checkpoint(
-                Target, TargetLog#doc{body = NewRepHistory}, target),
-            SourceCurSeq = source_cur_seq(State),
-            NewState = State#rep_state{
-                source_seq = SourceCurSeq,
-                checkpoint_history = NewRepHistory,
-                committed_seq = NewTsSeq,
-                source_log = SourceLog#doc{revs={SrcRevPos, [SrcRevId]}},
-                target_log = TargetLog#doc{revs={TgtRevPos, [TgtRevId]}}
-            },
-            update_task(NewState),
-            {ok, NewState}
-        catch throw:{checkpoint_commit_failure, _} = Failure ->
-            Failure
-        end;
-    {SrcInstanceStartTime, _NewTgtInstanceStartTime} ->
-        {checkpoint_commit_failure, <<"Target database out of sync. "
-            "Try to increase max_dbs_open at the target's server.">>};
-    {_NewSrcInstanceStartTime, TgtInstanceStartTime} ->
-        {checkpoint_commit_failure, <<"Source database out of sync. "
-            "Try to increase max_dbs_open at the source's server.">>};
-    {_NewSrcInstanceStartTime, _NewTgtInstanceStartTime} ->
-        {checkpoint_commit_failure, <<"Source and target databases out of "
-            "sync. Try to increase max_dbs_open at both servers.">>}
-    end.
-
-
-update_checkpoint(Db, Doc, DbType) ->
-    try
-        update_checkpoint(Db, Doc)
-    catch throw:{checkpoint_commit_failure, Reason} ->
-        throw({checkpoint_commit_failure,
-            <<"Error updating the ", (to_binary(DbType))/binary,
-                " checkpoint document: ", (to_binary(Reason))/binary>>})
-    end.
-
-update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) ->
-    try
-        case couch_replicator_api_wrap:update_doc(Db, Doc, [delay_commit]) of
-        {ok, PosRevId} ->
-            PosRevId;
-        {error, Reason} ->
-            throw({checkpoint_commit_failure, Reason})
-        end
-    catch throw:conflict ->
-        case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of
-        {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
-            % This means we were able to update the checkpoint doc
-            % successfully in a previous attempt, but we got a connection
-            % error (e.g. a timeout) before receiving the success response,
-            % so the request was retried and produced a conflict because
-            % the revision we sent is no longer the current one. We confirm
-            % this by verifying that the doc body we just read is the same
-            % one we just sent.
-            {Pos, RevId};
-        _ ->
-            throw({checkpoint_commit_failure, conflict})
-        end
-    end.
-
-
-commit_to_both(Source, Target) ->
-    % commit the src async
-    ParentPid = self(),
-    SrcCommitPid = spawn_link(
-        fun() ->
-            Result = (catch couch_replicator_api_wrap:ensure_full_commit(Source)),
-            ParentPid ! {self(), Result}
-        end),
-
-    % commit tgt sync
-    TargetResult = (catch couch_replicator_api_wrap:ensure_full_commit(Target)),
-
-    SourceResult = receive
-    {SrcCommitPid, Result} ->
-        unlink(SrcCommitPid),
-        receive {'EXIT', SrcCommitPid, _} -> ok after 0 -> ok end,
-        Result;
-    {'EXIT', SrcCommitPid, Reason} ->
-        {error, Reason}
-    end,
-    case TargetResult of
-    {ok, TargetStartTime} ->
-        case SourceResult of
-        {ok, SourceStartTime} ->
-            {SourceStartTime, TargetStartTime};
-        SourceError ->
-            {source_error, SourceError}
-        end;
-    TargetError ->
-        {target_error, TargetError}
-    end.
-
-
-compare_replication_logs(SrcDoc, TgtDoc) ->
-    #doc{body={RepRecProps}} = SrcDoc,
-    #doc{body={RepRecPropsTgt}} = TgtDoc,
-    case get_value(<<"session_id">>, RepRecProps) ==
-            get_value(<<"session_id">>, RepRecPropsTgt) of
-    true ->
-        % if the records have the same session id,
-        % then we have a valid replication history
-        OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps, ?LOWEST_SEQ),
-        OldHistory = get_value(<<"history">>, RepRecProps, []),
-        {OldSeqNum, OldHistory};
-    false ->
-        SourceHistory = get_value(<<"history">>, RepRecProps, []),
-        TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
-        twig:log(notice,"Replication records differ. "
-                "Scanning histories to find a common ancestor.", []),
-        twig:log(debug,"Record on source:~p~nRecord on target:~p~n",
-                [RepRecProps, RepRecPropsTgt]),
-        compare_rep_history(SourceHistory, TargetHistory)
-    end.
-
-compare_rep_history(S, T) when S =:= [] orelse T =:= [] ->
-    twig:log(notice,"no common ancestry -- performing full replication", []),
-    {?LOWEST_SEQ, []};
-compare_rep_history([{S} | SourceRest], [{T} | TargetRest] = Target) ->
-    SourceId = get_value(<<"session_id">>, S),
-    case has_session_id(SourceId, Target) of
-    true ->
-        RecordSeqNum = get_value(<<"recorded_seq">>, S, ?LOWEST_SEQ),
-        twig:log(notice,"found a common replication record with source_seq ~p",
-            [RecordSeqNum]),
-        {RecordSeqNum, SourceRest};
-    false ->
-        TargetId = get_value(<<"session_id">>, T),
-        case has_session_id(TargetId, SourceRest) of
-        true ->
-            RecordSeqNum = get_value(<<"recorded_seq">>, T, ?LOWEST_SEQ),
-            twig:log(notice,"found a common replication record with source_seq ~p",
-                [RecordSeqNum]),
-            {RecordSeqNum, TargetRest};
-        false ->
-            compare_rep_history(SourceRest, TargetRest)
-        end
-    end.
-
-
-has_session_id(_SessionId, []) ->
-    false;
-has_session_id(SessionId, [{Props} | Rest]) ->
-    case get_value(<<"session_id">>, Props, nil) of
-    SessionId ->
-        true;
-    _Else ->
-        has_session_id(SessionId, Rest)
-    end.
-
-
-db_monitor(#db{} = Db) ->
-    couch_db:monitor(Db);
-db_monitor(_HttpDb) ->
-    nil.
-
-
-source_cur_seq(#rep_state{source = #httpdb{} = Db, source_seq = Seq}) ->
-    case (catch couch_replicator_api_wrap:get_db_info(Db#httpdb{retries = 3})) of
-    {ok, Info} ->
-        get_value(<<"update_seq">>, Info, Seq);
-    _ ->
-        Seq
-    end;
-source_cur_seq(#rep_state{source = Db, source_seq = Seq}) ->
-    {ok, Info} = couch_replicator_api_wrap:get_db_info(Db),
-    get_value(<<"update_seq">>, Info, Seq).
-
-
-update_task(State) ->
-    #rep_state{
-        current_through_seq = {_, CurSeq},
-        source_seq = SourceCurSeq
-    } = State,
-    couch_task_status:update(
-        rep_stats(State) ++ [
-        {source_seq, SourceCurSeq},
-        case {unpack_seq(CurSeq), unpack_seq(SourceCurSeq)} of
-            {_, 0} ->
-                {progress, 0};
-            {CurSeq1, SourceCurSeq1} ->
-                {progress, (CurSeq1 * 100) div SourceCurSeq1}
-        end
-    ]).
-
-
-rep_stats(State) ->
-    #rep_state{
-        committed_seq = {_, CommittedSeq},
-        stats = Stats,
-        source_seq = SourceCurSeq
-    } = State,
-    [
-        {revisions_checked, Stats#rep_stats.missing_checked},
-        {missing_revisions_found, Stats#rep_stats.missing_found},
-        {docs_read, Stats#rep_stats.docs_read},
-        {docs_written, Stats#rep_stats.docs_written},
-        {changes_pending, pending(SourceCurSeq, CommittedSeq)},
-        {doc_write_failures, Stats#rep_stats.doc_write_failures},
-        {checkpointed_source_seq, CommittedSeq}
-    ].
-
-pending(SourceCurSeq, CommittedSeq) ->
-    unpack_seq(SourceCurSeq) - unpack_seq(CommittedSeq).
-
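-% Normalize the different sequence representations (a bare number, a
-% [SeqNum, _] pair, or a binary whose leading digits are the sequence
-% number) to an integer so sequences can be compared arithmetically.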
-unpack_seq(Seq) when is_number(Seq) ->
-    Seq;
-unpack_seq([SeqNum, _]) ->
-    SeqNum;
-unpack_seq(Seq) when is_binary(Seq) ->
-    Pattern = "^\\[?(?<seqnum>[0-9]+)",
-    Options = [{capture, [seqnum], list}],
-    {match, [SeqNum]} = re:run(Seq, Pattern, Options),
-    list_to_integer(SeqNum).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator.hrl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator.hrl b/src/couch_replicator/src/couch_replicator.hrl
deleted file mode 100644
index 018aa4b..0000000
--- a/src/couch_replicator/src/couch_replicator.hrl
+++ /dev/null
@@ -1,30 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REP_ID_VERSION, 3).
-
--record(rep, {
-    id,
-    source,
-    target,
-    options,
-    user_ctx,
-    doc_id
-}).
-
--record(rep_stats, {
-    missing_checked = 0,
-    missing_found = 0,
-    docs_read = 0,
-    docs_written = 0,
-    doc_write_failures = 0
-}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_api_wrap.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
deleted file mode 100644
index d072187..0000000
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ /dev/null
@@ -1,802 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_api_wrap).
-
-% This module wraps the native Erlang API and allows performing the same
-% operations on remote and local databases through one interface.
-%
-% Notes:
-% Many options and APIs aren't supported here yet; they are added as needed.
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
-
--export([
-    db_open/2,
-    db_open/3,
-    db_close/1,
-    get_db_info/1,
-    update_doc/3,
-    update_doc/4,
-    update_docs/3,
-    update_docs/4,
-    ensure_full_commit/1,
-    get_missing_revs/2,
-    open_doc/3,
-    open_doc_revs/6,
-    changes_since/5,
-    db_uri/1
-    ]).
-
--import(couch_replicator_httpc, [
-    send_req/3
-    ]).
-
--import(couch_util, [
-    encode_doc_id/1,
-    get_value/2,
-    get_value/3
-    ]).
-
-
-db_uri(#httpdb{url = Url}) ->
-    couch_util:url_strip_password(Url);
-
-db_uri(#db{name = Name}) ->
-    db_uri(Name);
-
-db_uri(DbName) ->
-    ?b2l(DbName).
-
-
-db_open(Db, Options) ->
-    db_open(Db, Options, false).
-
-db_open(#httpdb{} = Db1, _Options, Create) ->
-    {ok, Db} = couch_replicator_httpc:setup(Db1),
-    case Create of
-    false ->
-        ok;
-    true ->
-        send_req(Db, [{method, put}], fun(_, _, _) -> ok end)
-    end,
-    send_req(Db, [{method, head}],
-        fun(200, _, _) ->
-            {ok, Db};
-        (401, _, _) ->
-            throw({unauthorized, ?l2b(db_uri(Db))});
-        (_, _, _) ->
-            throw({db_not_found, ?l2b(db_uri(Db))})
-        end);
-db_open(DbName, Options, Create) ->
-    try
-        case Create of
-        false ->
-            ok;
-        true ->
-            ok = couch_httpd:verify_is_server_admin(
-                get_value(user_ctx, Options)),
-            couch_db:create(DbName, Options)
-        end,
-        case couch_db:open(DbName, Options) of
-        {error, illegal_database_name, _} ->
-            throw({db_not_found, DbName});
-        {not_found, _Reason} ->
-            throw({db_not_found, DbName});
-        {ok, _Db} = Success ->
-            Success
-        end
-    catch
-    throw:{unauthorized, _} ->
-        throw({unauthorized, DbName})
-    end.
-
-db_close(#httpdb{httpc_pool = Pool}) ->
-    unlink(Pool),
-    ok = couch_replicator_httpc_pool:stop(Pool);
-db_close(DbName) ->
-    catch couch_db:close(DbName).
-
-
-get_db_info(#httpdb{} = Db) ->
-    send_req(Db, [],
-        fun(200, _, {Props}) ->
-            {ok, Props}
-        end);
-get_db_info(#db{name = DbName, user_ctx = UserCtx}) ->
-    {ok, Db} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
-    {ok, Info} = couch_db:get_db_info(Db),
-    couch_db:close(Db),
-    {ok, [{couch_util:to_binary(K), V} || {K, V} <- Info]}.
-
-
-ensure_full_commit(#httpdb{} = Db) ->
-    send_req(
-        Db,
-        [{method, post}, {path, "_ensure_full_commit"},
-            {headers, [{"Content-Type", "application/json"}]}],
-        fun(201, _, {Props}) ->
-            {ok, get_value(<<"instance_start_time">>, Props)};
-        (_, _, {Props}) ->
-            {error, get_value(<<"error">>, Props)}
-        end);
-ensure_full_commit(Db) ->
-    couch_db:ensure_full_commit(Db).
-
-
-get_missing_revs(#httpdb{} = Db, IdRevs) ->
-    JsonBody = {[{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- IdRevs]},
-    send_req(
-        Db,
-        [{method, post}, {path, "_revs_diff"}, {body, ?JSON_ENCODE(JsonBody)},
-            {headers, [{"Content-Type", "application/json"}]}],
-        fun(200, _, {Props}) ->
-            ConvertToNativeFun = fun({Id, {Result}}) ->
-                MissingRevs = couch_doc:parse_revs(
-                    get_value(<<"missing">>, Result)
-                ),
-                PossibleAncestors = couch_doc:parse_revs(
-                    get_value(<<"possible_ancestors">>, Result, [])
-                ),
-                {Id, MissingRevs, PossibleAncestors}
-            end,
-            {ok, lists:map(ConvertToNativeFun, Props)}
-        end);
-get_missing_revs(Db, IdRevs) ->
-    couch_db:get_missing_revs(Db, IdRevs).
-
-
-
-open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
-    Path = encode_doc_id(Id),
-    QArgs = options_to_query_args(
-        HttpDb, Path, [revs, {open_revs, Revs} | Options]),
-    Self = self(),
-    Streamer = spawn_link(fun() ->
-            send_req(
-                HttpDb,
-                [{path, Path}, {qs, QArgs},
-                    {ibrowse_options, [{stream_to, {self(), once}}]},
-                    {headers, [{"Accept", "multipart/mixed"}]}],
-                fun(200, Headers, StreamDataFun) ->
-                    remote_open_doc_revs_streamer_start(Self),
-                    {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
-                        get_value("Content-Type", Headers),
-                        StreamDataFun,
-                        fun mp_parse_mixed/1)
-                end),
-            unlink(Self)
-        end),
-    % If this process dies normally we could leave the Streamer process
-    % hanging around, keeping an HTTP connection open. This monitor is a
-    % bit of a hammer, but it makes sure the Streamer exits and the
-    % connection is released back to the pool.
-    spawn(fun() ->
-        Ref = erlang:monitor(process, Self),
-        receive
-            {'DOWN', Ref, process, Self, normal} ->
-                exit(Streamer, {streamer_parent_died, Self});
-            {'DOWN', Ref, process, Self, _} ->
-                ok
-            end
-    end),
-    receive
-    {started_open_doc_revs, Ref} ->
-        receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc)
-    end;
-open_doc_revs(Db, Id, Revs, Options, Fun, Acc) ->
-    {ok, Results} = couch_db:open_doc_revs(Db, Id, Revs, Options),
-    {ok, lists:foldl(fun(R, A) -> {_, A2} = Fun(R, A), A2 end, Acc, Results)}.
-
-
-open_doc(#httpdb{} = Db, Id, Options) ->
-    send_req(
-        Db,
-        [{path, encode_doc_id(Id)}, {qs, options_to_query_args(Options, [])}],
-        fun(200, _, Body) ->
-            {ok, couch_doc:from_json_obj(Body)};
-        (_, _, {Props}) ->
-            {error, get_value(<<"error">>, Props)}
-        end);
-open_doc(Db, Id, Options) ->
-    case couch_db:open_doc(Db, Id, Options) of
-    {ok, _} = Ok ->
-        Ok;
-    {not_found, _Reason} ->
-        {error, <<"not_found">>}
-    end.
-
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
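-% Remote update: the doc and its attachments are streamed as a multipart
-% body so attachments never have to be held in memory all at once.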
-update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
-    QArgs = case Type of
-    replicated_changes ->
-        [{"new_edits", "false"}];
-    _ ->
-        []
-    end ++ options_to_query_args(Options, []),
-    Boundary = couch_uuids:random(),
-    JsonBytes = ?JSON_ENCODE(
-        couch_doc:to_json_obj(
-          Doc, [revs, attachments, follows, att_encoding_info | Options])),
-    {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(Boundary,
-        JsonBytes, Doc#doc.atts, true),
-    Headers = case lists:member(delay_commit, Options) of
-    true ->
-        [{"X-Couch-Full-Commit", "false"}];
-    false ->
-        []
-    end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
-    Body = {fun stream_doc/1, {JsonBytes, Doc#doc.atts, Boundary, Len}},
-    send_req(
-        HttpDb,
-        [{method, put}, {path, encode_doc_id(DocId)},
-            {qs, QArgs}, {headers, Headers}, {body, Body}],
-        fun(Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
-                {ok, couch_doc:parse_rev(get_value(<<"rev">>, Props))};
-            (409, _, _) ->
-                throw(conflict);
-            (Code, _, {Props}) ->
-                case {Code, get_value(<<"error">>, Props)} of
-                {401, <<"unauthorized">>} ->
-                    throw({unauthorized, get_value(<<"reason">>, Props)});
-                {403, <<"forbidden">>} ->
-                    throw({forbidden, get_value(<<"reason">>, Props)});
-                {412, <<"missing_stub">>} ->
-                    throw({missing_stub, get_value(<<"reason">>, Props)});
-                {_, Error} ->
-                    {error, Error}
-                end
-        end);
-update_doc(Db, Doc, Options, Type) ->
-    couch_db:update_doc(Db, Doc, Options, Type).
-
-
-update_docs(Db, DocList, Options) ->
-    update_docs(Db, DocList, Options, interactive_edit).
-
-update_docs(_Db, [], _Options, _UpdateType) ->
-    {ok, []};
-update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
-    FullCommit = atom_to_list(not lists:member(delay_commit, Options)),
-    Prefix = case UpdateType of
-    replicated_changes ->
-        <<"{\"new_edits\":false,\"docs\":[">>;
-    interactive_edit ->
-        <<"{\"docs\":[">>
-    end,
-    Suffix = <<"]}">>,
-    % Note: nginx and other servers don't like PUT/POST requests without
-    % a Content-Length header, so we can't use chunked transfer encoding
-    % and JSON encode each doc lazily just before sending it through the
-    % socket; instead all docs are encoded up front to compute the length.
-    {Docs, Len} = lists:mapfoldl(
-        fun(#doc{} = Doc, Acc) ->
-            Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
-            {Json, Acc + iolist_size(Json)};
-        (Doc, Acc) ->
-            {Doc, Acc + iolist_size(Doc)}
-        end,
-        byte_size(Prefix) + byte_size(Suffix) + length(DocList) - 1,
-        DocList),
-    BodyFun = fun(eof) ->
-            eof;
-        ([]) ->
-            {ok, Suffix, eof};
-        ([prefix | Rest]) ->
-            {ok, Prefix, Rest};
-        ([Doc]) ->
-            {ok, Doc, []};
-        ([Doc | RestDocs]) ->
-            {ok, [Doc, ","], RestDocs}
-    end,
-    Headers = [
-        {"Content-Length", Len},
-        {"Content-Type", "application/json"},
-        {"X-Couch-Full-Commit", FullCommit}
-    ],
-    send_req(
-        HttpDb,
-        [{method, post}, {path, "_bulk_docs"},
-            {body, {BodyFun, [prefix | Docs]}}, {headers, Headers}],
-        fun(201, _, Results) when is_list(Results) ->
-                {ok, bulk_results_to_errors(DocList, Results, remote)};
-           (417, _, Results) when is_list(Results) ->
-                {ok, bulk_results_to_errors(DocList, Results, remote)}
-        end);
-update_docs(Db, DocList, Options, UpdateType) ->
-    Result = couch_db:update_docs(Db, DocList, Options, UpdateType),
-    {ok, bulk_results_to_errors(DocList, Result, UpdateType)}.
-
-
-changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
-              Style, StartSeq, UserFun, Options) ->
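-    % Ask the server to time the feed out slightly before our own HTTP
-    % inactivity timeout fires, so the feed ends with a clean last_seq row
-    % rather than the client tearing down the connection.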
-    Timeout = erlang:max(1000, InactiveTimeout - 5000),
-    BaseQArgs = case get_value(continuous, Options, false) of
-    false ->
-        [{"feed", "normal"}];
-    true ->
-        [{"feed", "continuous"}]
-    end ++ [
-        {"style", atom_to_list(Style)}, {"since", ?JSON_ENCODE(StartSeq)},
-        {"timeout", integer_to_list(Timeout)}
-           ],
-    DocIds = get_value(doc_ids, Options),
-    {QArgs, Method, Body, Headers} = case DocIds of
-    undefined ->
-        QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
-        {QArgs1, get, [], Headers1};
-    _ when is_list(DocIds) ->
-        Headers2 = [{"Content-Type", "application/json"} | Headers1],
-        JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
-        {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
-    end,
-    send_req(
-        HttpDb,
-        [{method, Method}, {path, "_changes"}, {qs, QArgs},
-            {headers, Headers}, {body, Body},
-            {ibrowse_options, [{stream_to, {self(), once}}]}],
-        fun(200, _, DataStreamFun) ->
-                parse_changes_feed(Options, UserFun, DataStreamFun);
-            (405, _, _) when is_list(DocIds) ->
-                % CouchDB versions < 1.1.0 don't have the built-in _changes
-                % feed filter "_doc_ids", nor do they support POST
-                send_req(HttpDb, [{method, get}, {path, "_changes"},
-                    {qs, BaseQArgs}, {headers, Headers1},
-                    {ibrowse_options, [{stream_to, {self(), once}}]}],
-                    fun(200, _, DataStreamFun2) ->
-                        UserFun2 = fun(#doc_info{id = Id} = DocInfo) ->
-                            case lists:member(Id, DocIds) of
-                            true ->
-                                UserFun(DocInfo);
-                            false ->
-                                ok
-                            end
-                        end,
-                        parse_changes_feed(Options, UserFun2, DataStreamFun2)
-                    end)
-        end);
-changes_since(Db, Style, StartSeq, UserFun, Options) ->
-    Filter = case get_value(doc_ids, Options) of
-    undefined ->
-        ?b2l(get_value(filter, Options, <<>>));
-    _DocIds ->
-        "_doc_ids"
-    end,
-    Args = #changes_args{
-        style = Style,
-        since = StartSeq,
-        filter = Filter,
-        feed = case get_value(continuous, Options, false) of
-            true ->
-                "continuous";
-            false ->
-                "normal"
-        end,
-        timeout = infinity
-    },
-    QueryParams = get_value(query_params, Options, {[]}),
-    Req = changes_json_req(Db, Filter, QueryParams, Options),
-    ChangesFeedFun = couch_changes:handle_changes(Args, {json_req, Req}, Db),
-    ChangesFeedFun(fun({change, Change, _}, _) ->
-            UserFun(json_to_doc_info(Change));
-        (_, _) ->
-            ok
-    end).
-
-
-% internal functions
-
-maybe_add_changes_filter_q_args(BaseQS, Options) ->
-    case get_value(filter, Options) of
-    undefined ->
-        BaseQS;
-    FilterName ->
-        {Params} = get_value(query_params, Options, {[]}),
-        [{"filter", ?b2l(FilterName)} | lists:foldl(
-            fun({K, V}, QSAcc) ->
-                Ks = couch_util:to_list(K),
-                case lists:keymember(Ks, 1, QSAcc) of
-                true ->
-                    QSAcc;
-                false ->
-                    [{Ks, couch_util:to_list(V)} | QSAcc]
-                end
-            end,
-            BaseQS, Params)]
-    end.
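-% Illustrative example (assumed values, not from the original code): with
-% {filter, <<"app/by_user">>} and {query_params, {[{<<"user">>, <<"bob">>}]}}
-% in Options, the result is
-%   [{"filter", "app/by_user"}, {"user", "bob"} | BaseQS].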
-
-parse_changes_feed(Options, UserFun, DataStreamFun) ->
-    case get_value(continuous, Options, false) of
-    true ->
-        continuous_changes(DataStreamFun, UserFun);
-    false ->
-        EventFun = fun(Ev) ->
-            changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
-        end,
-        json_stream_parse:events(DataStreamFun, EventFun)
-    end.
-
-changes_json_req(_Db, "", _QueryParams, _Options) ->
-    {[]};
-changes_json_req(_Db, "_doc_ids", _QueryParams, Options) ->
-    {[{<<"doc_ids">>, get_value(doc_ids, Options)}]};
-changes_json_req(Db, FilterName, {QueryParams}, _Options) ->
-    {ok, Info} = couch_db:get_db_info(Db),
-    % simulate a request to db_name/_changes
-    {[
-        {<<"info">>, {Info}},
-        {<<"id">>, null},
-        {<<"method">>, 'GET'},
-        {<<"path">>, [couch_db:name(Db), <<"_changes">>]},
-        {<<"query">>, {[{<<"filter">>, FilterName} | QueryParams]}},
-        {<<"headers">>, []},
-        {<<"body">>, []},
-        {<<"peer">>, <<"replicator">>},
-        {<<"form">>, []},
-        {<<"cookie">>, []},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)}
-    ]}.
-
-
-options_to_query_args(HttpDb, Path, Options) ->
-    case lists:keytake(atts_since, 1, Options) of
-    false ->
-        options_to_query_args(Options, []);
-    {value, {atts_since, []}, Options2} ->
-        options_to_query_args(Options2, []);
-    {value, {atts_since, PAs}, Options2} ->
-        QueryArgs1 = options_to_query_args(Options2, []),
-        FullUrl = couch_replicator_httpc:full_url(
-            HttpDb, [{path, Path}, {qs, QueryArgs1}]),
-        RevList = atts_since_arg(
-            length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
-            length("&atts_since=") + 6,  % +6 = percent-encoded "[" and "]" (3 bytes each)
-            PAs, []),
-        [{"atts_since", ?JSON_ENCODE(RevList)} | QueryArgs1]
-    end.
-
-
-options_to_query_args([], Acc) ->
-    lists:reverse(Acc);
-options_to_query_args([ejson_body | Rest], Acc) ->
-    options_to_query_args(Rest, Acc);
-options_to_query_args([delay_commit | Rest], Acc) ->
-    options_to_query_args(Rest, Acc);
-options_to_query_args([revs | Rest], Acc) ->
-    options_to_query_args(Rest, [{"revs", "true"} | Acc]);
-options_to_query_args([{open_revs, all} | Rest], Acc) ->
-    options_to_query_args(Rest, [{"open_revs", "all"} | Acc]);
-options_to_query_args([latest | Rest], Acc) ->
-    options_to_query_args(Rest, [{"latest", "true"} | Acc]);
-options_to_query_args([{open_revs, Revs} | Rest], Acc) ->
-    JsonRevs = ?b2l(iolist_to_binary(?JSON_ENCODE(couch_doc:revs_to_strs(Revs)))),
-    options_to_query_args(Rest, [{"open_revs", JsonRevs} | Acc]).
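-% Illustrative example (assumed input, not from the original code):
-%   options_to_query_args([revs, latest, {open_revs, all}], [])
-% returns [{"revs","true"}, {"latest","true"}, {"open_revs","all"}].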
-
-
--define(MAX_URL_LEN, 7000).
-
-atts_since_arg(_UrlLen, [], Acc) ->
-    lists:reverse(Acc);
-atts_since_arg(UrlLen, [PA | Rest], Acc) ->
-    RevStr = couch_doc:rev_to_str(PA),
-    NewUrlLen = case Rest of
-    [] ->
-        % plus 2 percent-encoded double quotes (2 x 3 bytes)
-        UrlLen + size(RevStr) + 6;
-    _ ->
-        % plus 2 percent-encoded double quotes and a comma (2 x 3 + 3 bytes)
-        UrlLen + size(RevStr) + 9
-    end,
-    case NewUrlLen >= ?MAX_URL_LEN of
-    true ->
-        lists:reverse(Acc);
-    false ->
-        atts_since_arg(NewUrlLen, Rest, [RevStr | Acc])
-    end.
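-% Note: revs that would push the URL past ?MAX_URL_LEN are silently dropped.
-% The worst case is that the source resends attachment data the target
-% already has, which is correct, just less efficient.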
-
-
-% TODO: a less verbose, more elegant and automatic restart strategy for
-%       the exported open_doc_revs/6 function. The restart should be
-%       transparent to the caller, as with any other Couch API function
-%       exported by this module.
-receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc) ->
-    try
-        % Left only for debugging purposes via an interactive or remote shell
-        erlang:put(open_doc_revs, {Id, Revs, Ref, Streamer}),
-        receive_docs(Streamer, Fun, Ref, Acc)
-    catch
-    error:{restart_open_doc_revs, NewRef} ->
-        receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
-    end.
-
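-% Protocol between this consumer and the multipart streamer (mp_parse_mixed
-% below): the consumer sends {get_headers, Ref, Pid} or {next_bytes, Ref, Pid};
-% the streamer answers with {headers, Ref, H}, {body_bytes, Ref, Bytes},
-% {body_done, Ref} or {done, Ref}. A {started_open_doc_revs, NewRef} message
-% means the underlying HTTP request was restarted.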
-receive_docs(Streamer, UserFun, Ref, UserAcc) ->
-    Streamer ! {get_headers, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {headers, Ref, Headers} ->
-        case get_value("content-type", Headers) of
-        {"multipart/related", _} = ContentType ->
-            case couch_doc:doc_from_multi_part_stream(
-                ContentType,
-                fun() -> receive_doc_data(Streamer, Ref) end,
-                Ref) of
-            {ok, Doc, WaitFun, Parser} ->
-                case UserFun({ok, Doc}, UserAcc) of
-                {ok, UserAcc2} ->
-                    ok;
-                {skip, UserAcc2} ->
-                    couch_doc:abort_multi_part_stream(Parser)
-                end,
-                WaitFun(),
-                receive_docs(Streamer, UserFun, Ref, UserAcc2)
-            end;
-        {"application/json", []} ->
-            Doc = couch_doc:from_json_obj(
-                    ?JSON_DECODE(receive_all(Streamer, Ref, []))),
-            {_, UserAcc2} = UserFun({ok, Doc}, UserAcc),
-            receive_docs(Streamer, UserFun, Ref, UserAcc2);
-        {"application/json", [{"error","true"}]} ->
-            {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
-            Rev = get_value(<<"missing">>, ErrorProps),
-            Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
-            {_, UserAcc2} = UserFun(Result, UserAcc),
-            receive_docs(Streamer, UserFun, Ref, UserAcc2)
-        end;
-    {done, Ref} ->
-        {ok, UserAcc}
-    end.
-
-
-restart_remote_open_doc_revs(Ref, NewRef) ->
-    receive
-    {body_bytes, Ref, _} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {body_done, Ref} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {done, Ref} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {headers, Ref, _} ->
-        restart_remote_open_doc_revs(Ref, NewRef)
-    after 0 ->
-        erlang:error({restart_open_doc_revs, NewRef})
-    end.
-
-
-remote_open_doc_revs_streamer_start(Parent) ->
-    receive
-    {get_headers, _Ref, Parent} ->
-        remote_open_doc_revs_streamer_start(Parent);
-    {next_bytes, _Ref, Parent} ->
-        remote_open_doc_revs_streamer_start(Parent)
-    after 0 ->
-        Parent ! {started_open_doc_revs, make_ref()}
-    end.
-
-
-receive_all(Streamer, Ref, Acc) ->
-    Streamer ! {next_bytes, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {body_bytes, Ref, Bytes} ->
-        receive_all(Streamer, Ref, [Bytes | Acc]);
-    {body_done, Ref} ->
-        lists:reverse(Acc)
-    end.
-
-
-mp_parse_mixed(eof) ->
-    receive {get_headers, Ref, From} ->
-        From ! {done, Ref}
-    end;
-mp_parse_mixed({headers, H}) ->
-    receive {get_headers, Ref, From} ->
-        From ! {headers, Ref, H}
-    end,
-    fun mp_parse_mixed/1;
-mp_parse_mixed({body, Bytes}) ->
-    receive {next_bytes, Ref, From} ->
-        From ! {body_bytes, Ref, Bytes}
-    end,
-    fun mp_parse_mixed/1;
-mp_parse_mixed(body_end) ->
-    receive {next_bytes, Ref, From} ->
-        From ! {body_done, Ref};
-    {get_headers, Ref, From} ->
-        self() ! {get_headers, Ref, From}
-    end,
-    fun mp_parse_mixed/1.
-
-
-receive_doc_data(Streamer, Ref) ->
-    Streamer ! {next_bytes, Ref, self()},
-    receive
-    {body_bytes, Ref, Bytes} ->
-        {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
-    {body_done, Ref} ->
-        {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
-    end.
-
-doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
-    Self = self(),
-    Parser = spawn_link(fun() ->
-        {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
-            ContentType, DataFun,
-            fun(Next) -> couch_replicator_utils:mp_parse_doc(Next, []) end),
-        unlink(Self)
-        end),
-    Parser ! {get_doc_bytes, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        unlink(Parser),
-        exit(Parser, kill),
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {doc_bytes, Ref, DocBytes} ->
-        Doc = couch_doc:from_json_obj(?JSON_DECODE(DocBytes)),
-        ReadAttachmentDataFun = fun() ->
-            Parser ! {get_bytes, Ref, self()},
-            receive
-            {started_open_doc_revs, NewRef} ->
-                unlink(Parser),
-                exit(Parser, kill),
-                receive {bytes, Ref, _} -> ok after 0 -> ok end,
-                restart_remote_open_doc_revs(Ref, NewRef);
-            {bytes, Ref, Bytes} ->
-                Bytes
-            end
-        end,
-        Atts2 = lists:map(
-            fun(#att{data = follows} = A) ->
-                A#att{data = ReadAttachmentDataFun};
-            (A) ->
-                A
-            end, Doc#doc.atts),
-        {ok, Doc#doc{atts = Atts2}, Parser}
-    end.
-
-
-changes_ev1(object_start, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev2({key, <<"results">>}, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev3(Ev, UserFun, UserAcc) end;
-changes_ev2(_, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev3(array_start, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev_loop(Ev, UserFun, UserAcc) end.
-
-changes_ev_loop(object_start, UserFun, UserAcc) ->
-    fun(Ev) ->
-        json_stream_parse:collect_object(Ev,
-            fun(Obj) ->
-                UserAcc2 = UserFun(json_to_doc_info(Obj), UserAcc),
-                fun(Ev2) -> changes_ev_loop(Ev2, UserFun, UserAcc2) end
-            end)
-    end;
-changes_ev_loop(array_end, _UserFun, _UserAcc) ->
-    fun(_Ev) -> changes_ev_done() end.
-
-changes_ev_done() ->
-    fun(_Ev) -> changes_ev_done() end.
-
-continuous_changes(DataFun, UserFun) ->
-    {DataFun2, _, Rest} = json_stream_parse:events(
-        DataFun,
-        fun(Ev) -> parse_changes_line(Ev, UserFun) end),
-    continuous_changes(fun() -> {Rest, DataFun2} end, UserFun).
-
-parse_changes_line(object_start, UserFun) ->
-    fun(Ev) ->
-        json_stream_parse:collect_object(Ev,
-            fun(Obj) -> UserFun(json_to_doc_info(Obj)) end)
-    end.
-
-json_to_doc_info({Props}) ->
-    case get_value(<<"changes">>, Props) of
-    undefined ->
-        {last_seq, get_value(<<"last_seq">>, Props)};
-    Changes ->
-        RevsInfo = lists:map(
-            fun({Change}) ->
-                Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
-                Del = couch_replicator_utils:is_deleted(Change),
-                #rev_info{rev=Rev, deleted=Del}
-            end, Changes),
-        #doc_info{
-            id = get_value(<<"id">>, Props),
-            high_seq = get_value(<<"seq">>, Props),
-            revs = RevsInfo
-        }
-    end.
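-% Illustrative example (assumed row, not from the original code): the row
-%   {"seq":5,"id":"doc1","changes":[{"rev":"2-abc"}]}
-% becomes #doc_info{id = <<"doc1">>, high_seq = 5,
-%                   revs = [#rev_info{rev = {2, <<"abc">>}, deleted = false}]}.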
-
-
-bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
-    lists:reverse(lists:foldl(
-        fun({_, {ok, _}}, Acc) ->
-            Acc;
-        ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
-            {_, Error, Reason} = couch_httpd:error_info(Error),
-            [ {[{id, Id}, {rev, rev_to_str({Pos, RevId})},
-                {error, Error}, {reason, Reason}]} | Acc ]
-        end,
-        [], lists:zip(Docs, Results)));
-
-bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
-    bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-
-bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
-    lists:map(
-        fun({{Id, Rev}, Err}) ->
-            {_, Error, Reason} = couch_httpd:error_info(Err),
-            {[{id, Id}, {rev, rev_to_str(Rev)}, {error, Error}, {reason, Reason}]}
-        end,
-        Results);
-
-bulk_results_to_errors(_Docs, Results, remote) ->
-    lists:reverse(lists:foldl(
-        fun({Props}, Acc) ->
-            case get_value(<<"error">>, Props, get_value(error, Props)) of
-            undefined ->
-                Acc;
-            Error ->
-                Id = get_value(<<"id">>, Props, get_value(id, Props)),
-                Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
-                Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
-                [ {[{id, Id}, {rev, rev_to_str(Rev)},
-                    {error, Error}, {reason, Reason}]} | Acc ]
-            end
-        end,
-        [], Results)).
-
-
-rev_to_str({_Pos, _Id} = Rev) ->
-    couch_doc:rev_to_str(Rev);
-rev_to_str(Rev) ->
-    Rev.
-
-
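-% stream_doc/1 is a body generator: the first call spawns a process that
-% renders the multipart document body, and subsequent calls pull chunks from
-% it until LenLeft reaches 0. The streamer pid is kept in the process
-% dictionary, keyed by the multipart boundary.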
-stream_doc({JsonBytes, Atts, Boundary, Len}) ->
-    case erlang:erase({doc_streamer, Boundary}) of
-    Pid when is_pid(Pid) ->
-        unlink(Pid),
-        exit(Pid, kill);
-    _ ->
-        ok
-    end,
-    Self = self(),
-    DocStreamer = spawn_link(fun() ->
-        couch_doc:doc_to_multi_part_stream(
-            Boundary, JsonBytes, Atts,
-            fun(Data) ->
-                receive {get_data, Ref, From} ->
-                    From ! {data, Ref, Data}
-                end
-            end, true),
-        unlink(Self)
-    end),
-    erlang:put({doc_streamer, Boundary}, DocStreamer),
-    {ok, <<>>, {Len, Boundary}};
-stream_doc({0, Id}) ->
-    erlang:erase({doc_streamer, Id}),
-    eof;
-stream_doc({LenLeft, Id}) when LenLeft > 0 ->
-    Ref = make_ref(),
-    erlang:get({doc_streamer, Id}) ! {get_data, Ref, self()},
-    receive {data, Ref, Data} ->
-        {ok, Data, {LenLeft - iolist_size(Data), Id}}
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_api_wrap.hrl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.hrl b/src/couch_replicator/src/couch_replicator_api_wrap.hrl
deleted file mode 100644
index 1a6f27a..0000000
--- a/src/couch_replicator/src/couch_replicator_api_wrap.hrl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-
--record(httpdb, {
-    url,
-    oauth = nil,
-    headers = [
-        {"Accept", "application/json"},
-        {"User-Agent", "CouchDB/" ++ couch_server:get_version()}
-    ],
-    timeout,            % milliseconds
-    ibrowse_options = [],
-    retries = 10,
-    wait = 250,         % milliseconds
-    httpc_pool = nil,
-    http_connections
-}).
-
--record(oauth, {
-    consumer_key,
-    token,
-    token_secret,
-    consumer_secret,
-    signature_method
-}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_app.erl b/src/couch_replicator/src/couch_replicator_app.erl
deleted file mode 100644
index e4dc63e..0000000
--- a/src/couch_replicator/src/couch_replicator_app.erl
+++ /dev/null
@@ -1,17 +0,0 @@
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
-    couch_replicator_sup:start_link().
-
-stop([]) ->
-    ok.


[02/49] Remove src/chttpd

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_db.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
deleted file mode 100644
index de99828..0000000
--- a/src/chttpd/src/chttpd_db.erl
+++ /dev/null
@@ -1,1280 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
-    db_req/2, couch_doc_open/4,handle_changes_req/2,
-    update_doc_result_to_json/1, update_doc_result_to_json/2,
-    handle_design_info_req/3, handle_view_cleanup_req/2]).
-
--import(chttpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,end_json_response/1,
-    start_chunked_response/3, absolute_uri/2, send/2,
-    start_response_length/4]).
-
--record(doc_query_args, {
-    options = [],
-    rev = nil,
-    open_revs = [],
-    update_type = interactive_edit,
-    atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
-        db_url_handlers=DbUrlHandlers}=Req)->
-    case {Method, RestParts} of
-    {'PUT', []} ->
-        create_db_req(Req, DbName);
-    {'DELETE', []} ->
-         % If we get ?rev=... the client is using a faulty script in which
-         % the document id is accidentally empty. Let them recover safely.
-         case couch_httpd:qs_value(Req, "rev", false) of
-             false -> delete_db_req(Req, DbName);
-             _Rev -> throw({bad_request,
-                 "You tried to DELETE a database with a ?rev= parameter. "
-                 ++ "Did you mean to DELETE a document instead?"})
-         end;
-    {_, []} ->
-        do_db_req(Req, fun db_req/2);
-    {_, [SecondPart|_]} ->
-        Handler = couch_util:get_value(SecondPart, DbUrlHandlers, fun db_req/2),
-        do_db_req(Req, Handler)
-    end.
-
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
-    #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
-    ChangesArgs = Args0#changes_args{
-        filter = couch_changes:configure_filter(Raw, Style, Req, Db)
-    },
-    case ChangesArgs#changes_args.feed of
-    "normal" ->
-        T0 = os:timestamp(),
-        {ok, Info} = fabric:get_db_info(Db),
-        Etag = chttpd:make_etag(Info),
-        DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
-        couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
-        chttpd:etag_respond(Req, Etag, fun() ->
-            fabric:changes(Db, fun changes_callback/2, {"normal", {"Etag",Etag}, Req},
-                ChangesArgs)
-        end);
-    Feed ->
-        % "longpoll" or "continuous"
-        fabric:changes(Db, fun changes_callback/2, {Feed, Req}, ChangesArgs)
-    end;
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, {"continuous", Req}) ->
-    {ok, Resp} = chttpd:start_delayed_json_response(Req, 200),
-    {ok, {"continuous", Resp}};
-changes_callback({change, Change}, {"continuous", Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]),
-    {ok, {"continuous", Resp1}};
-changes_callback({stop, EndSeq0}, {"continuous", Resp}) ->
-    EndSeq = case is_old_couch(Resp) of true -> 0; false -> EndSeq0 end,
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
-        [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]),
-    chttpd:end_delayed_json_response(Resp1);
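-% Illustrative continuous output (assumed rows): one JSON object per line,
-% e.g.
-%   {"seq":1,"id":"doc1","changes":[{"rev":"1-abc"}]}
-%   {"last_seq":1}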
-
-% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, {"normal", {"Etag", Etag}, Req}) ->
-    FirstChunk = "{\"results\":[\n",
-    {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
-        [{"Etag",Etag}], FirstChunk),
-    {ok, {"", Resp}};
-changes_callback(start, {_, Req}) ->
-    FirstChunk = "{\"results\":[\n",
-    {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
-    {ok, {"", Resp}};
-changes_callback({change, Change}, {Prepend, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]),
-    {ok, {",\r\n", Resp1}};
-changes_callback({stop, EndSeq}, {_, Resp}) ->
-    {ok, Resp1} = case is_old_couch(Resp) of
-    true ->
-        chttpd:send_delayed_chunk(Resp, "\n],\n\"last_seq\":0}\n");
-    false ->
-        chttpd:send_delayed_chunk(Resp,
-            ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"])
-    end,
-    chttpd:end_delayed_json_response(Resp1);
-
-changes_callback(timeout, {Prepend, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
-    {ok, {Prepend, Resp1}};
-changes_callback({error, Reason}, {_, #httpd{}=Req}) ->
-    chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, {"normal", {"Etag", _Etag}, Req}) ->
-    chttpd:send_error(Req, Reason);
-changes_callback({error, Reason}, {_, Resp}) ->
-    chttpd:send_delayed_error(Resp, Reason).
-
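-% Detect old replicators (0.x, plus 1.0.0) from the User-Agent header; the
-% callbacks above send them "last_seq":0, presumably because they expect an
-% integer sequence (assumption, not stated in the original code).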
-is_old_couch(Resp) ->
-    MochiReq = chttpd:get_delayed_req(Resp),
-    case MochiReq:get_header_value("user-agent") of
-    undefined ->
-        false;
-    "CouchDB/1.0.0" ->
-        true;
-    UserAgent ->
-        string:str(UserAgent, "CouchDB/0") > 0
-    end.
-
-handle_compact_req(Req, _) ->
-    Msg = <<"Compaction must be triggered on a per-shard basis in CouchDB">>,
-    couch_httpd:send_error(Req, 403, forbidden, Msg).
-
-handle_view_cleanup_req(Req, Db) ->
-    ok = fabric:cleanup_index_files(Db),
-    send_json(Req, 202, {[{ok, true}]}).
-
-handle_design_req(#httpd{
-        path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest],
-        design_url_handlers = DesignUrlHandlers
-    }=Req, Db) ->
-    DbName = mem3:dbname(Db#db.name),
-    case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
-    {ok, DDoc} ->
-        Handler = couch_util:get_value(Action, DesignUrlHandlers,
-            fun bad_action_req/3),
-        Handler(Req, Db, DDoc);
-    Error ->
-        throw(Error)
-    end;
-
-handle_design_req(Req, Db) ->
-    db_req(Req, Db).
-
-bad_action_req(#httpd{path_parts=[_, _, Name|FileNameParts]}=Req, Db, _DDoc) ->
-    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts).
-
-handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{id=Id} = DDoc) ->
-    {ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
-    send_json(Req, 200, {[
-        {name,  Id},
-        {view_index, {GroupInfoList}}
-    ]});
-
-handle_design_info_req(Req, _Db, _DDoc) ->
-    send_method_not_allowed(Req, "GET").
-
-create_db_req(#httpd{}=Req, DbName) ->
-    couch_httpd:verify_is_server_admin(Req),
-    N = couch_httpd:qs_value(Req, "n", config:get("cluster", "n", "3")),
-    Q = couch_httpd:qs_value(Req, "q", config:get("cluster", "q", "8")),
-    P = couch_httpd:qs_value(Req, "placement", config:get("cluster", "placement")),
-    DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-    case fabric:create_db(DbName, [{n,N}, {q,Q}, {placement,P}]) of
-    ok ->
-        send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
-    {error, file_exists} ->
-        chttpd:send_error(Req, file_exists);
-    Error ->
-        throw(Error)
-    end.
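-% Illustrative request (assumed values): PUT /dbname?n=3&q=8 creates the
-% clustered database with n=3 copies of each of q=8 shard ranges and
-% responds with {"ok":true} plus a Location header.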
-
-delete_db_req(#httpd{}=Req, DbName) ->
-    couch_httpd:verify_is_server_admin(Req),
-    case fabric:delete_db(DbName, []) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
-    fabric:get_security(DbName, [{user_ctx,Ctx}]), % calls check_is_reader
-    Fun(Req, #db{name=DbName, user_ctx=Ctx}).
-
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
-    % measure the time required to generate the etag, see if it's worth it
-    T0 = os:timestamp(),
-    {ok, DbInfo} = fabric:get_db_info(DbName),
-    DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
-    couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
-    send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-
-    W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
-
-    Doc = couch_doc:from_json_obj(chttpd:json_body(Req)),
-    Doc2 = case Doc#doc.id of
-        <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
-        _ ->
-            Doc
-    end,
-    DocId = Doc2#doc.id,
-    case couch_httpd:qs_value(Req, "batch") of
-    "ok" ->
-        % async_batching
-        spawn(fun() ->
-                case catch(fabric:update_doc(Db, Doc2, Options)) of
-                {ok, _} -> ok;
-                {accepted, _} -> ok;
-                Error ->
-                    twig:log(debug, "Batch doc error (~s): ~p",[DocId, Error])
-                end
-            end),
-
-        send_json(Req, 202, [], {[
-            {ok, true},
-            {id, DocId}
-        ]});
-    _Normal ->
-        % normal
-        DocUrl = absolute_uri(Req, [$/, DbName, $/, DocId]),
-        case fabric:update_doc(Db, Doc2, Options) of
-        {ok, NewRev} ->
-            HttpCode = 201;
-        {accepted, NewRev} ->
-            HttpCode = 202
-        end,
-        send_json(Req, HttpCode, [{"Location", DocUrl}], {[
-            {ok, true},
-            {id, DocId},
-            {rev, couch_doc:rev_to_str(NewRev)}
-        ]})
-    end;
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
-    send_json(Req, 201, {[
-        {ok, true},
-        {instance_start_time, <<"0">>}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, Db) ->
-    couch_stats_collector:increment({httpd, bulk_requests}),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {JsonProps} = chttpd:json_body_obj(Req),
-    DocsArray = couch_util:get_value(<<"docs">>, JsonProps),
-    W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    case chttpd:header_value(Req, "X-Couch-Full-Commit") of
-    "true" ->
-        Options = [full_commit, {user_ctx,Ctx}, {w,W}];
-    "false" ->
-        Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
-    _ ->
-        Options = [{user_ctx,Ctx}, {w,W}]
-    end,
-    case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
-    true ->
-        Docs = lists:map(
-            fun({ObjProps} = JsonObj) ->
-                Doc = couch_doc:from_json_obj(JsonObj),
-                validate_attachment_names(Doc),
-                Id = case Doc#doc.id of
-                    <<>> -> couch_uuids:new();
-                    Id0 -> Id0
-                end,
-                case couch_util:get_value(<<"_rev">>, ObjProps) of
-                undefined ->
-                    Revs = {0, []};
-                Rev  ->
-                    {Pos, RevId} = couch_doc:parse_rev(Rev),
-                    Revs = {Pos, [RevId]}
-                end,
-                Doc#doc{id=Id,revs=Revs}
-            end,
-            DocsArray),
-        Options2 =
-        case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
-        true  -> [all_or_nothing|Options];
-        _ -> Options
-        end,
-        case fabric:update_docs(Db, Docs, Options2) of
-        {ok, Results} ->
-            % output the results
-            DocResults = lists:zipwith(fun update_doc_result_to_json/2,
-                Docs, Results),
-            send_json(Req, 201, DocResults);
-        {accepted, Results} ->
-            % output the results
-            DocResults = lists:zipwith(fun update_doc_result_to_json/2,
-                Docs, Results),
-            send_json(Req, 202, DocResults);
-        {aborted, Errors} ->
-            ErrorsJson =
-                lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 417, ErrorsJson)
-        end;
-    false ->
-        Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <- DocsArray],
-        [validate_attachment_names(D) || D <- Docs],
-        case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
-        {ok, Errors} ->
-            ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 201, ErrorsJson);
-        {accepted, Errors} ->
-            ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 202, ErrorsJson)
-        end
-    end;
-
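-% Illustrative request (assumed docs): POST /db/_bulk_docs with body
-%   {"docs":[{"_id":"a"},{"_id":"b","_rev":"1-abc"}]}
-% answers 201 with one {"id":...,"rev":...} object per doc; with
-% "new_edits":false (the replicator case) only failures are reported.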
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {IdsRevs} = chttpd:json_body_obj(Req),
-    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-    case fabric:purge_docs(Db, IdsRevs2) of
-    {ok, PurgeSeq, PurgedIdsRevs} ->
-        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs}
-            <- PurgedIdsRevs],
-        send_json(Req, 200, {[
-            {<<"purge_seq">>, PurgeSeq},
-            {<<"purged">>, {PurgedIdsRevs2}}
-        ]});
-    Error ->
-        throw(Error)
-    end;
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
-    case chttpd:qs_json_value(Req, "keys", nil) of
-    Keys when is_list(Keys) ->
-        all_docs_view(Req, Db, Keys);
-    nil ->
-        all_docs_view(Req, Db, undefined);
-    _ ->
-        throw({bad_request, "`keys` parameter must be an array."})
-    end;
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
-    {Fields} = chttpd:json_body_obj(Req),
-    case couch_util:get_value(<<"keys">>, Fields, nil) of
-    Keys when is_list(Keys) ->
-        all_docs_view(Req, Db, Keys);
-    nil ->
-        all_docs_view(Req, Db, nil);
-    _ ->
-        throw({bad_request, "`keys` body member must be an array."})
-    end;
-
-db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
-    {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
-    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
-    send_json(Req, {[
-        {missing_revs, {Results2}}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
-    {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
-    Results2 =
-    lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
-        {Id,
-            {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
-                if PossibleAncestors == [] ->
-                    [];
-                true ->
-                    [{possible_ancestors,
-                        couch_doc:revs_to_strs(PossibleAncestors)}]
-                end}}
-    end, Results),
-    send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
-        Db) ->
-    SecObj = chttpd:json_body(Req),
-    case fabric:set_security(Db, SecObj, [{user_ctx, Ctx}]) of
-        ok ->
-            send_json(Req, {[{<<"ok">>, true}]});
-        Else ->
-            throw(Else)
-    end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req, Db) ->
-    send_json(Req, fabric:get_security(Db, [{user_ctx,Ctx}]));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>],user_ctx=Ctx}=Req,
-        Db) ->
-    Limit = chttpd:json_body(Req),
-    ok = fabric:set_revs_limit(Db, Limit, [{user_ctx,Ctx}]),
-    send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
-    send_json(Req, fabric:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-% vanilla CouchDB sends a 301 here, but we just handle the request
-db_req(#httpd{path_parts=[DbName,<<"_design/",Name/binary>>|Rest]}=Req, Db) ->
-    db_req(Req#httpd{path_parts=[DbName, <<"_design">>, Name | Rest]}, Db);
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow accessing local documents without %2F-encoding
-% the docid. Rejects requests that lack the second path part or that
-% specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
-    throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
-    db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, DocId, FileNameParts).
-
-all_docs_view(Req, Db, Keys) ->
-    % measure the time required to generate the etag, see if it's worth it
-    T0 = os:timestamp(),
-    {ok, Info} = fabric:get_db_info(Db),
-    Etag = couch_httpd:make_etag(Info),
-    DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
-    couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
-    QueryArgs = chttpd_view:parse_view_params(Req, Keys, map),
-    chttpd:etag_respond(Req, Etag, fun() ->
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
-        fabric:all_docs(Db, fun all_docs_callback/2, {nil, Resp}, QueryArgs)
-    end).
-
-all_docs_callback({total_and_offset, Total, Offset}, {_, Resp}) ->
-    Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
-    {ok, {"", Resp1}};
-all_docs_callback({row, Row}, {Prepend, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
-    {ok, {",\r\n", Resp1}};
-all_docs_callback(complete, {_, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\r\n]}"),
-    chttpd:end_delayed_json_response(Resp1);
-all_docs_callback({error, Reason}, {_, Resp}) ->
-    chttpd:send_delayed_error(Resp, Reason).
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
-    % check for the existence of the doc to handle the 404 case.
-    couch_doc_open(Db, DocId, nil, []),
-    case chttpd:qs_value(Req, "rev") of
-    undefined ->
-        Body = {[{<<"_deleted">>,true}]};
-    Rev ->
-        Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
-    end,
-    update_doc(Req, Db, DocId, couch_doc_from_req(Req, DocId, Body));
-
-db_doc_req(#httpd{method='GET'}=Req, Db, DocId) ->
-    #doc_query_args{
-        rev = Rev,
-        open_revs = Revs,
-        options = Options,
-        atts_since = AttsSince
-    } = parse_doc_query(Req),
-    case Revs of
-    [] ->
-        Options2 =
-        if AttsSince /= nil ->
-            [{atts_since, AttsSince}, attachments | Options];
-        true -> Options
-        end,
-        Doc = couch_doc_open(Db, DocId, Rev, Options2),
-        send_doc(Req, Doc, Options2);
-    _ ->
-        {ok, Results} = fabric:open_revs(Db, DocId, Revs, Options),
-        AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
-            undefined       -> [];
-            AcceptHeader    -> string:tokens(AcceptHeader, ", ")
-        end,
-        case lists:member("multipart/mixed", AcceptedTypes) of
-        false ->
-            {ok, Resp} = start_json_response(Req, 200),
-            send_chunk(Resp, "["),
-            % We loop through the docs. On the first iteration the separator
-            % is the empty string, then a comma on subsequent iterations.
-            lists:foldl(
-                fun(Result, AccSeparator) ->
-                    case Result of
-                    {ok, Doc} ->
-                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
-                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
-                        send_chunk(Resp, AccSeparator ++ Json);
-                    {{not_found, missing}, RevId} ->
-                        RevStr = couch_doc:rev_to_str(RevId),
-                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-                        send_chunk(Resp, AccSeparator ++ Json)
-                    end,
-                    "," % AccSeparator now has a comma
-                end,
-                "", Results),
-            send_chunk(Resp, "]"),
-            end_json_response(Resp);
-        true ->
-            send_docs_multipart(Req, Results, Options)
-        end
-    end;
-
-db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
-    couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
-    couch_httpd:validate_ctype(Req, "multipart/form-data"),
-
-    W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
-
-    Form = couch_httpd:parse_form(Req),
-    case proplists:is_defined("_doc", Form) of
-    true ->
-        Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
-        Doc = couch_doc_from_req(Req, DocId, Json);
-    false ->
-        Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
-        {ok, [{ok, Doc}]} = fabric:open_revs(Db, DocId, [Rev], [])
-    end,
-    UpdatedAtts = [
-        #att{name=validate_attachment_name(Name),
-            type=list_to_binary(ContentType),
-            data=Content} ||
-        {Name, {ContentType, _}, Content} <-
-        proplists:get_all_values("_attachments", Form)
-    ],
-    #doc{atts=OldAtts} = Doc,
-    OldAtts2 = lists:flatmap(
-        fun(#att{name=OldName}=Att) ->
-            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
-            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
-            _ -> [] % the attachment was in the UpdatedAtts, drop it
-            end
-        end, OldAtts),
-    NewDoc = Doc#doc{
-        atts = UpdatedAtts ++ OldAtts2
-    },
-    case fabric:update_doc(Db, NewDoc, Options) of
-    {ok, NewRev} ->
-        HttpCode = 201;
-    {accepted, NewRev} ->
-        HttpCode = 202
-    end,
-    send_json(Req, HttpCode, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
-        {ok, true},
-        {id, DocId},
-        {rev, couch_doc:rev_to_str(NewRev)}
-    ]});
-
-db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
-    #doc_query_args{
-        update_type = UpdateType
-    } = parse_doc_query(Req),
-    couch_doc:validate_docid(DocId),
-
-    W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
-
-    Loc = absolute_uri(Req, [$/, Db#db.name, $/, DocId]),
-    RespHeaders = [{"Location", Loc}],
-    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
-    ("multipart/related;" ++ _) = ContentType ->
-        {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
-                fun() -> receive_request_data(Req) end),
-        Doc = couch_doc_from_req(Req, DocId, Doc0),
-        try
-            Result = update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
-            WaitFun(),
-            Result
-        catch throw:Err ->
-            % Document rejected by a validate_doc_update function.
-            couch_doc:abort_multi_part_stream(Parser),
-            throw(Err)
-        end;
-    _Else ->
-        case couch_httpd:qs_value(Req, "batch") of
-        "ok" ->
-            % batch
-            Doc = couch_doc_from_req(Req, DocId, chttpd:json_body(Req)),
-
-            spawn(fun() ->
-                    case catch(fabric:update_doc(Db, Doc, Options)) of
-                    {ok, _} -> ok;
-                    {accepted, _} -> ok;
-                    Error ->
-                        twig:log(notice, "Batch doc error (~s): ~p",[DocId, Error])
-                    end
-                end),
-            send_json(Req, 202, [], {[
-                {ok, true},
-                {id, DocId}
-            ]});
-        _Normal ->
-            % normal
-            Body = chttpd:json_body(Req),
-            Doc = couch_doc_from_req(Req, DocId, Body),
-            update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
-        end
-    end;
-
-db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
-    SourceRev =
-    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> nil;
-        Rev -> Rev
-    end,
-    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
-    % open old doc
-    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
-    % save new doc
-    case fabric:update_doc(Db,
-        Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
-    {ok, NewTargetRev} ->
-        HttpCode = 201;
-    {accepted, NewTargetRev} ->
-        HttpCode = 202
-    end,
-    % respond
-    {PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
-    send_json(Req, HttpCode,
-        [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
-        {[{ok, true}] ++ PartRes});
-
-db_doc_req(Req, _Db, _DocId) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-send_doc(Req, Doc, Options) ->
-    case Doc#doc.meta of
-    [] ->
-        DiskEtag = couch_httpd:doc_etag(Doc),
-        % output etag only when we have no meta
-        chttpd:etag_respond(Req, DiskEtag, fun() ->
-            send_doc_efficiently(Req, Doc, [{"Etag", DiskEtag}], Options)
-        end);
-    _ ->
-        send_doc_efficiently(Req, Doc, [], Options)
-    end.
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(Req, #doc{atts=Atts}=Doc, Headers, Options) ->
-    case lists:member(attachments, Options) of
-    true ->
-        Refs = monitor_attachments(Atts),
-        try
-        AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
-            undefined       -> [];
-            AcceptHeader    -> string:tokens(AcceptHeader, ", ")
-        end,
-        case lists:member("multipart/related", AcceptedTypes) of
-        false ->
-            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-        true ->
-            Boundary = couch_uuids:random(),
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
-                    [attachments, follows, att_encoding_info | Options])),
-            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
-                    Boundary,JsonBytes, Atts, true),
-            CType = {<<"Content-Type">>, ContentType},
-            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
-            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
-                    fun(Data) -> couch_httpd:send(Resp, Data) end, true)
-        end
-        after
-            demonitor_refs(Refs)
-        end;
-    false ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
-    end.
-
-send_docs_multipart(Req, Results, Options1) ->
-    OuterBoundary = couch_uuids:random(),
-    InnerBoundary = couch_uuids:random(),
-    Options = [attachments, follows, att_encoding_info | Options1],
-    CType = {"Content-Type",
-        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
-    lists:foreach(
-        fun({ok, #doc{atts=Atts}=Doc}) ->
-            Refs = monitor_attachments(Doc#doc.atts),
-            try
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
-            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
-                    InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
-                    ContentType/binary, "\r\n\r\n">>),
-            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
-                    end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
-            after
-                demonitor_refs(Refs)
-            end;
-        ({{not_found, missing}, RevId}) ->
-             RevStr = couch_doc:rev_to_str(RevId),
-             Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-             couch_httpd:send_chunk(Resp,
-                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
-                Json,
-                <<"\r\n--", OuterBoundary/binary>>])
-         end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
-
-receive_request_data(Req) ->
-    receive_request_data(Req, chttpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
-    Len = erlang:min(4096, LenLeft),
-    Data = chttpd:recv(Req, Len),
-    {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
-    throw(<<"expected more data">>).
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
-        {_Code, Err, Msg} = chttpd:error_info(Error),
-        {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
-            {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
-    update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
-    {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, {accepted, NewRev}) ->
-    {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}, {accepted, true}]};
-update_doc_result_to_json(DocId, Error) ->
-    {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
-    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, Json) ->
-    update_doc(Req, Db, DocId, Json, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
-    update_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-
-update_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
-        Headers, UpdateType) ->
-    W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options =
-        case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-        "true" ->
-            [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
-        "false" ->
-            [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
-        _ ->
-            [UpdateType, {user_ctx,Ctx}, {w,W}]
-        end,
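-    % The fabric call runs in a short-lived monitored process, presumably to
-    % keep stray worker replies out of the request process's mailbox
-    % (assumption, not stated in the original code).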
-    {_, Ref} = spawn_monitor(fun() -> exit(fabric:update_doc(Db, Doc, Options)) end),
-    Result = receive {'DOWN', Ref, _, _, Res} -> Res end,
-    case Result of
-    {{nocatch, Exception}, _Reason} ->
-        % A throw in the spawned process surfaces as its exit reason; rethrow it here.
-        throw(Exception);
-    _ ->
-        ok
-    end,
-
-    case Result of
-    {ok, NewRev} ->
-        Accepted = false;
-    {accepted, NewRev} ->
-        Accepted = true
-    end,
-    NewRevStr = couch_doc:rev_to_str(NewRev),
-    ResponseHeaders = [{"Etag", <<"\"", NewRevStr/binary, "\"">>} | Headers],
-    case {Accepted, Deleted} of
-    {true, _} ->
-        HttpCode = 202;
-    {false, true} ->
-        HttpCode = 200;
-    {false, false} ->
-        HttpCode = 201
-    end,
-    send_json(Req, HttpCode, ResponseHeaders, {[
-        {ok, true},
-        {id, DocId},
-        {rev, NewRevStr}
-    ]}).
-
-couch_doc_from_req(Req, DocId, #doc{revs=Revs} = Doc) ->
-    validate_attachment_names(Doc),
-    ExplicitDocRev =
-    case Revs of
-        {Start,[RevId|_]} -> {Start, RevId};
-        _ -> undefined
-    end,
-    case extract_header_rev(Req, ExplicitDocRev) of
-    missing_rev ->
-        Revs2 = {0, []};
-    ExplicitDocRev ->
-        Revs2 = Revs;
-    {Pos, Rev} ->
-        Revs2 = {Pos, [Rev]}
-    end,
-    Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
-    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
-
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-%   couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
-    case Rev of
-    nil -> % open most recent rev
-        case fabric:open_doc(Db, DocId, Options) of
-        {ok, Doc} ->
-            Doc;
-        Error ->
-            throw(Error)
-        end;
-    _ -> % open a specific rev (deletions come back as stubs)
-        case fabric:open_revs(Db, DocId, [Rev], Options) of
-        {ok, [{ok, Doc}]} ->
-            Doc;
-        {ok, [{{not_found, missing}, Rev}]} ->
-            throw(not_found);
-        {ok, [Else]} ->
-            throw(Else)
-        end
-    end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
-    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1,
-        FileNameParts),"/")),
-    #doc_query_args{
-        rev=Rev,
-        options=Options
-    } = parse_doc_query(Req),
-    #doc{
-        atts=Atts
-    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
-    case [A || A <- Atts, A#att.name == FileName] of
-    [] ->
-        throw({not_found, "Document is missing attachment"});
-    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
-        Refs = monitor_attachments(Att),
-        try
-        Etag = chttpd:doc_etag(Doc),
-        ReqAcceptsAttEnc = lists:member(
-           atom_to_list(Enc),
-           couch_httpd:accepted_encodings(Req)
-        ),
-        Headers = [
-            {"ETag", Etag},
-            {"Cache-Control", "must-revalidate"},
-            {"Content-Type", binary_to_list(Type)}
-        ] ++ case ReqAcceptsAttEnc of
-        true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
-            % the Content-Encoding header
-            [{"Content-Encoding", atom_to_list(Enc)}];
-        _ ->
-            []
-        end ++ case Enc of
-            identity ->
-                [{"Accept-Ranges", "bytes"}];
-            _ ->
-                [{"Accept-Ranges", "none"}]
-        end,
-        Len = case {Enc, ReqAcceptsAttEnc} of
-        {identity, _} ->
-            % stored and served in identity form
-            DiskLen;
-        {_, false} when DiskLen =/= AttLen ->
-            % Stored encoded, but client doesn't accept the encoding we used,
-            % so we need to decode on the fly.  DiskLen is the identity length
-            % of the attachment.
-            DiskLen;
-        {_, true} ->
-            % Stored and served encoded.  AttLen is the encoded length.
-            AttLen;
-        _ ->
-            % We received an encoded attachment and stored it as such, so we
-            % don't know the identity length.  The client doesn't accept the
-            % encoding, and since we cannot serve a correct Content-Length
-            % header we'll fall back to a chunked response.
-            undefined
-        end,
-        AttFun = case ReqAcceptsAttEnc of
-        false ->
-            fun couch_doc:att_foldl_decode/3;
-        true ->
-            fun couch_doc:att_foldl/3
-        end,
-        chttpd:etag_respond(
-            Req,
-            Etag,
-            fun() ->
-                case Len of
-                undefined ->
-                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
-                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-                    couch_httpd:last_chunk(Resp);
-                _ ->
-                    Ranges = parse_ranges(MochiReq:get(range), Len),
-                    case {Enc, Ranges} of
-                        {identity, [{From, To}]} ->
-                            Headers1 = [{<<"Content-Range">>, make_content_range(From, To, Len)}]
-                                ++ Headers,
-                            {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
-                            couch_doc:range_att_foldl(Att, From, To + 1,
-                                fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
-                        {identity, Ranges} when is_list(Ranges) ->
-                            send_ranges_multipart(Req, Type, Len, Att, Ranges);
-                        _ ->
-                            Headers1 = Headers ++
-                                if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
-                                    [{"Content-MD5", base64:encode(Att#att.md5)}];
-                                true ->
-                                    []
-                            end,
-                            {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
-                            AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
-                    end
-                end
-            end
-        )
-        after
-            demonitor_refs(Refs)
-        end
-    end;
-
-
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
-        when (Method == 'PUT') or (Method == 'DELETE') ->
-    FileName = validate_attachment_name(
-                    mochiweb_util:join(
-                        lists:map(fun binary_to_list/1,
-                            FileNameParts),"/")),
-
-    NewAtt = case Method of
-        'DELETE' ->
-            [];
-        _ ->
-            [#att{
-                name=FileName,
-                type = case couch_httpd:header_value(Req,"Content-Type") of
-                    undefined ->
-                        % We could throw an error here or guess by the FileName.
-                        % Currently, just giving it a default.
-                        <<"application/octet-stream">>;
-                    CType ->
-                        list_to_binary(CType)
-                    end,
-                data = fabric:att_receiver(Req, chttpd:body_length(Req)),
-                att_len = case couch_httpd:header_value(Req,"Content-Length") of
-                    undefined ->
-                        undefined;
-                    Length ->
-                        list_to_integer(Length)
-                    end,
-                md5 = get_md5_header(Req),
-                encoding = case string:to_lower(string:strip(
-                    couch_httpd:header_value(Req,"Content-Encoding","identity")
-                )) of
-                "identity" ->
-                   identity;
-                "gzip" ->
-                   gzip;
-                _ ->
-                   throw({
-                       bad_ctype,
-                       "Only gzip and identity content-encodings are supported"
-                   })
-                end
-            }]
-    end,
-
-    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> % make the new doc
-            couch_doc:validate_docid(DocId),
-            #doc{id=DocId};
-        Rev ->
-            case fabric:open_revs(Db, DocId, [Rev], []) of
-            {ok, [{ok, Doc0}]}  -> Doc0;
-            {ok, [Error]}       -> throw(Error)
-            end
-    end,
-
-    #doc{atts=Atts} = Doc,
-    DocEdited = Doc#doc{
-        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
-    },
-    case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}]) of
-    {ok, UpdatedRev} ->
-        HttpCode = 201;
-    {accepted, UpdatedRev} ->
-        HttpCode = 202
-    end,
-    erlang:put(mochiweb_request_recv, true),
-    #db{name=DbName} = Db,
-
-    {Status, Headers} = case Method of
-        'DELETE' ->
-            {200, []};
-        _ ->
-            {HttpCode, [{"Location", absolute_uri(Req, [$/, DbName, $/, DocId, $/,
-                FileName])}]}
-        end,
-    send_json(Req,Status, Headers, {[
-        {ok, true},
-        {id, DocId},
-        {rev, couch_doc:rev_to_str(UpdatedRev)}
-    ]});
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
-    Boundary = couch_uuids:random(),
-    CType = {"Content-Type",
-        "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 206, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
-    lists:foreach(fun({From, To}) ->
-        ContentRange = make_content_range(From, To, Len),
-        couch_httpd:send_chunk(Resp,
-            <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
-            "Content-Range: ", ContentRange/binary, "\r\n",
-           "\r\n">>),
-        couch_doc:range_att_foldl(Att, From, To + 1,
-            fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-        couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
-    end, Ranges),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp),
-    {ok, Resp}.
-
-parse_ranges(undefined, _Len) ->
-    undefined;
-parse_ranges(fail, _Len) ->
-    undefined;
-parse_ranges(Ranges, Len) ->
-    parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
-    lists:reverse(Acc);
-parse_ranges([{From, To}|_], _Len, _Acc)
-  when is_integer(From) andalso is_integer(To) andalso To < From ->
-    throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc)
-  when is_integer(To) andalso To >= Len ->
-    parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
-    parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
-    parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
-    parse_ranges(Rest, Len, [{From, To}] ++ Acc).
-
-make_content_range(From, To, Len) ->
-    ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])).
-
-get_md5_header(Req) ->
-    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
-    Length = couch_httpd:body_length(Req),
-    Trailer = couch_httpd:header_value(Req, "Trailer"),
-    case {ContentMD5, Length, Trailer} of
-        _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
-            base64:decode(ContentMD5);
-        {_, chunked, undefined} ->
-            <<>>;
-        {_, chunked, _} ->
-            case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
-                {match, _} ->
-                    md5_in_footer;
-                _ ->
-                    <<>>
-            end;
-        _ ->
-            <<>>
-    end.
-
-parse_doc_query(Req) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-        {"attachments", "true"} ->
-            Options = [attachments | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"meta", "true"} ->
-            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs", "true"} ->
-            Options = [revs | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"local_seq", "true"} ->
-            Options = [local_seq | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs_info", "true"} ->
-            Options = [revs_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"conflicts", "true"} ->
-            Options = [conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"deleted_conflicts", "true"} ->
-            Options = [deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"rev", Rev} ->
-            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
-        {"open_revs", "all"} ->
-            Args#doc_query_args{open_revs=all};
-        {"open_revs", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{open_revs=[couch_doc:parse_rev(Rev) || Rev <- JsonArray]};
-        {"latest", "true"} ->
-            Options = [latest | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"atts_since", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
-        {"new_edits", "false"} ->
-            Args#doc_query_args{update_type=replicated_changes};
-        {"new_edits", "true"} ->
-            Args#doc_query_args{update_type=interactive_edit};
-        {"att_encoding_info", "true"} ->
-            Options = [att_encoding_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"r", R} ->
-            Options = [{r,R} | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"w", W} ->
-            Options = [{w,W} | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #doc_query_args{}, chttpd:qs(Req)).
-
-parse_changes_query(Req) ->
-    lists:foldl(fun({Key, Value}, Args) ->
-        case {Key, Value} of
-        {"feed", _} ->
-            Args#changes_args{feed=Value};
-        {"descending", "true"} ->
-            Args#changes_args{dir=rev};
-        {"since", _} ->
-            Args#changes_args{since=Value};
-        {"limit", _} ->
-            Args#changes_args{limit=list_to_integer(Value)};
-        {"style", _} ->
-            Args#changes_args{style=list_to_existing_atom(Value)};
-        {"heartbeat", "true"} ->
-            Args#changes_args{heartbeat=true};
-        {"heartbeat", _} ->
-            Args#changes_args{heartbeat=list_to_integer(Value)};
-        {"timeout", _} ->
-            Args#changes_args{timeout=list_to_integer(Value)};
-        {"include_docs", "true"} ->
-            Args#changes_args{include_docs=true};
-        {"conflicts", "true"} ->
-            Args#changes_args{conflicts=true};
-        {"filter", _} ->
-            Args#changes_args{filter=Value};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #changes_args{}, couch_httpd:qs(Req)).
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
-    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
-    Etag = case chttpd:header_value(Req, "If-Match") of
-        undefined -> undefined;
-        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
-    end,
-    case {ExplicitRev, Etag} of
-    {undefined, undefined} -> missing_rev;
-    {_, undefined} -> ExplicitRev;
-    {undefined, _} -> Etag;
-    _ when ExplicitRev == Etag -> Etag;
-    _ ->
-        throw({bad_request, "Document rev and etag have different values"})
-    end.
-
-
-parse_copy_destination_header(Req) ->
-    Destination = chttpd:header_value(Req, "Destination"),
-    case re:run(Destination, "\\?", [{capture, none}]) of
-    nomatch ->
-        {list_to_binary(Destination), {0, []}};
-    match ->
-        [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
-        [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
-        {Pos, RevId} = couch_doc:parse_rev(Rev),
-        {list_to_binary(DocId), {Pos, [RevId]}}
-    end.
-
-validate_attachment_names(Doc) ->
-    lists:foreach(fun(#att{name=Name}) ->
-        validate_attachment_name(Name)
-    end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
-    validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",Rest/binary>>) ->
-    throw({bad_request, <<"Attachment name '_", Rest/binary,
-                          "' starts with prohibited character '_'">>});
-validate_attachment_name(Name) ->
-    case couch_util:validate_utf8(Name) of
-        true -> Name;
-        false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
-    end.
-
--spec monitor_attachments(#att{} | [#att{}]) -> [reference()].
-monitor_attachments(#att{}=Att) ->
-    monitor_attachments([Att]);
-monitor_attachments(Atts) when is_list(Atts) ->
-    [monitor(process, Fd) || #att{data={Fd,_}} <- Atts].
-
-demonitor_refs(Refs) when is_list(Refs) ->
-    [demonitor(Ref) || Ref <- Refs].

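A note on the range handling deleted above: before any bytes are served, parse_ranges/3 normalizes each parsed range against the attachment length. A suffix range ({none, To}) becomes the final To bytes, an open-ended range ({From, none}) runs to the end of the attachment, and an end offset past the attachment is clamped to Len - 1. A minimal standalone sketch of that normalization (module and function names here are hypothetical; the real function is private to the handler):

-module(range_demo).
-export([demo/0]).

%% Mirrors the normalization rules of the removed parse_ranges/3.
%% Len is the attachment length in bytes.
normalize([], _Len, Acc) ->
    lists:reverse(Acc);
normalize([{From, To} | _], _Len, _Acc)
  when is_integer(From), is_integer(To), To < From ->
    throw(requested_range_not_satisfiable);
normalize([{From, To} | Rest], Len, Acc) when is_integer(To), To >= Len ->
    %% clamp ranges that run past the end of the attachment
    normalize([{From, Len - 1} | Rest], Len, Acc);
normalize([{none, Suffix} | Rest], Len, Acc) ->
    %% "bytes=-500": the final Suffix bytes
    normalize([{Len - Suffix, Len - 1} | Rest], Len, Acc);
normalize([{From, none} | Rest], Len, Acc) ->
    %% "bytes=500-": from From to the end
    normalize([{From, Len - 1} | Rest], Len, Acc);
normalize([{From, To} | Rest], Len, Acc) ->
    normalize(Rest, Len, [{From, To} | Acc]).

demo() ->
    %% the three interesting cases, for a 10000-byte attachment
    [{9500, 9999}] = normalize([{none, 500}], 10000, []),
    [{500, 9999}]  = normalize([{500, none}], 10000, []),
    [{0, 9999}]    = normalize([{0, 123456}], 10000, []),
    ok.
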
http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_external.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
deleted file mode 100644
index 4d07059..0000000
--- a/src/chttpd/src/chttpd_external.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_external).
-
--export([handle_external_req/2, handle_external_req/3]).
--export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
--export([default_or_content_type/2, parse_external_response/1]).
-
--import(chttpd,[send_error/4]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% handle_external_req/2
-% for the old type of config usage:
-% _external = {chttpd_external, handle_external_req}
-% with urls like
-% /db/_external/action/design/name
-handle_external_req(#httpd{
-                        path_parts=[_DbName, _External, UrlName | _Path]
-                    }=HttpReq, Db) ->
-    process_external_req(HttpReq, Db, UrlName);
-handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
-handle_external_req(Req, _) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-
-% handle_external_req/3
-% for this type of config usage:
-% _action = {chttpd_external, handle_external_req, <<"action">>}
-% with urls like
-% /db/_action/design/name
-handle_external_req(HttpReq, Db, Name) ->
-    process_external_req(HttpReq, Db, Name).
-
-process_external_req(HttpReq, Db, Name) ->
-
-    Response = couch_external_manager:execute(binary_to_list(Name),
-        json_req_obj(HttpReq, Db)),
-
-    case Response of
-    {unknown_external_server, Msg} ->
-        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
-    _ ->
-        send_external_response(HttpReq, Response)
-    end.
-
-json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
-               method=Method,
-               path_parts=Path,
-               req_body=ReqBody
-            }, Db, DocId) ->
-    Body = case ReqBody of
-        undefined -> Req:recv_body();
-        Else -> Else
-    end,
-    ParsedForm = case Req:get_primary_header_value("content-type") of
-        "application/x-www-form-urlencoded" ++ _ when Method =:= 'POST' ->
-            mochiweb_util:parse_qs(Body);
-        _ ->
-            []
-    end,
-    Headers = Req:get(headers),
-    Hlist = mochiweb_headers:to_list(Headers),
-    {ok, Info} = fabric:get_db_info(Db),
-
-    % add headers...
-    {[{<<"info">>, {Info}},
-        {<<"uuid">>, couch_uuids:new()},
-        {<<"id">>, DocId},
-        {<<"method">>, Method},
-        {<<"path">>, Path},
-        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
-        {<<"headers">>, to_json_terms(Hlist)},
-        {<<"body">>, Body},
-        {<<"peer">>, ?l2b(Req:get(peer))},
-        {<<"form">>, to_json_terms(ParsedForm)},
-        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)}]}.
-
-to_json_terms(Data) ->
-    to_json_terms(Data, []).
-to_json_terms([], Acc) ->
-    {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
-    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
-    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
-
-json_query_keys({Json}) ->
-    json_query_keys(Json, []).
-json_query_keys([], Acc) ->
-    {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"descending">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([Term | Rest], Acc) ->
-    json_query_keys(Rest, [Term|Acc]).
-
-send_external_response(Req, Response) ->
-    #extern_resp_args{
-        code = Code,
-        data = Data,
-        ctype = CType,
-        headers = Headers,
-        json = Json
-    } = parse_external_response(Response),
-    Headers1 = default_or_content_type(CType, Headers),
-    case Json of
-    nil ->
-        couch_httpd:send_response(Req, Code, Headers1, Data);
-    Json ->
-        couch_httpd:send_json(Req, Code, Headers1, Json)
-    end.
-
-parse_external_response({Response}) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-            {"", _} ->
-                Args;
-            {<<"code">>, Value} ->
-                Args#extern_resp_args{code=Value};
-            {<<"stop">>, true} ->
-                Args#extern_resp_args{stop=true};
-            {<<"json">>, Value} ->
-                Args#extern_resp_args{
-                    json=Value,
-                    ctype="application/json"};
-            {<<"body">>, Value} ->
-                Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
-            {<<"base64">>, Value} ->
-                Args#extern_resp_args{
-                    data=base64:decode(Value),
-                    ctype="application/binary"
-                };
-            {<<"headers">>, {Headers}} ->
-                NewHeaders = lists:map(fun({Header, HVal}) ->
-                    {binary_to_list(Header), binary_to_list(HVal)}
-                end, Headers),
-                Args#extern_resp_args{headers=NewHeaders};
-            _ -> % unknown key
-                Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
-                throw({external_response_error, Msg})
-            end
-        end, #extern_resp_args{}, Response).
-
-default_or_content_type(DefaultContentType, Headers) ->
-    {ContentType, OtherHeaders} = lists:partition(
-        fun({HeaderName, _}) ->
-            HeaderName == "Content-Type"
-        end, Headers),
-
-    % XXX: What happens if we were passed multiple content types? We add another?
-    case ContentType of
-        [{"Content-Type", SetContentType}] ->
-            TrueContentType = SetContentType;
-        _Else ->
-            TrueContentType = DefaultContentType
-    end,
-
-    HeadersWithContentType = lists:append(OtherHeaders, [{"Content-Type", TrueContentType}]),
-    HeadersWithContentType.

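A note on the external protocol deleted above: parse_external_response/1 accepts a JSON object from the external process whose recognized keys are "code", "stop", "json", "body", "base64", and "headers"; any other key raises external_response_error. A sketch of driving the exported function directly (illustrative module name; assumes chttpd_external as it stood before this removal is still on the code path):

-module(external_demo).
-export([demo/0]).

%% Illustrative only: the EJSON term an external process might hand back.
%% The "json" key also forces an application/json content type.
demo() ->
    Response = {[
        {<<"code">>, 200},
        {<<"headers">>, {[{<<"X-Foo">>, <<"bar">>}]}},
        {<<"json">>, {[{<<"ok">>, true}]}}
    ]},
    chttpd_external:parse_external_response(Response).

The result is an #extern_resp_args{} record, which send_external_response/2 then serializes via couch_httpd:send_json/4.
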
http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_misc.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
deleted file mode 100644
index 8c5f50e..0000000
--- a/src/chttpd/src/chttpd_misc.erl
+++ /dev/null
@@ -1,312 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_misc).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
-    handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
-    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
-    handle_task_status_req/1,handle_sleep_req/1,handle_welcome_req/1,
-    handle_utils_dir_req/1, handle_favicon_req/1, handle_system_req/1,
-    handle_up_req/1]).
-
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
--import(chttpd,
-    [send_json/2,send_json/3,send_method_not_allowed/2,
-    send_chunk/2,start_chunked_response/3]).
-
-% httpd global handlers
-
-handle_welcome_req(Req) ->
-    handle_welcome_req(Req, <<"Welcome">>).
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
-    send_json(Req, {[
-        {couchdb, WelcomeMessage},
-        {version, list_to_binary(couch_server:get_version())},
-        {bigcouch, get_version()}
-    ]});
-handle_welcome_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-get_version() ->
-    Releases = release_handler:which_releases(),
-    Version = case [V || {"bigcouch", V, _, current} <- Releases] of
-    [] ->
-        case [V || {"bigcouch", V, _, permanent} <- Releases] of
-        [] ->
-            "dev";
-        [Permanent] ->
-            Permanent
-        end;
-    [Current] ->
-        Current
-    end,
-    list_to_binary(Version).
-
-handle_favicon_req(Req) ->
-    handle_favicon_req(Req, config:get("chttpd", "docroot")).
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    chttpd:serve_file(Req, "favicon.ico", DocumentRoot);
-handle_favicon_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(Req) ->
-    handle_utils_dir_req(Req, config:get("chttpd", "docroot")).
-
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    "/" ++ UrlPath = chttpd:path(Req),
-    case chttpd:partition(UrlPath) of
-    {_ActionKey, "/", RelativePath} ->
-        % GET /_utils/path or GET /_utils/
-        chttpd:serve_file(Req, RelativePath, DocumentRoot);
-    {_ActionKey, "", _RelativePath} ->
-        % GET /_utils
-        RedirectPath = chttpd:path(Req) ++ "/",
-        chttpd:send_redirect(Req, RedirectPath)
-    end;
-handle_utils_dir_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_sleep_req(#httpd{method='GET'}=Req) ->
-    Time = list_to_integer(chttpd:qs_value(Req, "time")),
-    receive snicklefart -> ok after Time -> ok end,
-    send_json(Req, {[{ok, true}]});
-handle_sleep_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    ShardDbName = config:get("mem3", "shard_db", "dbs"),
-    %% shard_db is not sharded, but mem3:shards treats it as an edge case
-    %% so it can be pushed through fabric
-    {ok, Info} = fabric:get_db_info(ShardDbName),
-    Etag = couch_httpd:make_etag({Info}),
-    chttpd:etag_respond(Req, Etag, fun() ->
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
-        fabric:all_docs(ShardDbName, fun all_dbs_callback/2,
-            {nil, Resp}, #mrargs{})
-    end);
-handle_all_dbs_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-all_dbs_callback({total_and_offset, _Total, _Offset}, {_, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "["),
-    {ok, {"", Resp1}};
-all_dbs_callback({row, {Row}}, {Prepend, Resp}) ->
-    case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
-        {ok, {Prepend, Resp}};
-    DbName ->
-        {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(DbName)]),
-        {ok, {",", Resp1}}
-    end;
-all_dbs_callback(complete, {_, Resp}) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "]"),
-    chttpd:end_delayed_json_response(Resp1);
-all_dbs_callback({error, Reason}, {_, Resp}) ->
-    chttpd:send_delayed_error(Resp, Reason).
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
-    {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
-    Response = lists:flatmap(fun({Node, Tasks}) ->
-        [{[{node,Node} | Task]} || Task <- Tasks]
-    end, Replies),
-    send_json(Req, lists:sort(Response));
-handle_task_status_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_replicate_req(#httpd{method='POST', user_ctx=Ctx} = Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    %% see HACK in chttpd.erl about replication
-    PostBody = get(post_body),
-    try replicate(PostBody, Ctx, mem3_rep_manager) of
-    {ok, {continuous, RepId}} ->
-        send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {cancelled, RepId}} ->
-        send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {JsonResults}} ->
-        send_json(Req, {[{ok, true} | JsonResults]});
-    {ok, stopped} ->
-        send_json(Req, 200, {[{ok, stopped}]});
-    {error, {Type, Details}} ->
-        send_json(Req, 500, {[{error, Type}, {reason, Details}]});
-    {error, not_found} ->
-        send_json(Req, 404, {[{error, not_found}]});
-    {error, Reason} ->
-        try
-            send_json(Req, 500, {[{error, Reason}]})
-        catch
-        exit:{json_encode, _} ->
-            send_json(Req, 500, {[{error, couch_util:to_binary(Reason)}]})
-        end
-    catch
-    throw:{db_not_found, Msg} ->
-        send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]});
-    throw:{unauthorized, Msg} ->
-        send_json(Req, 404, {[{error, unauthorized}, {reason, Msg}]})
-    end;
-handle_replicate_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-replicate({Props} = PostBody, Ctx, Module) ->
-    Node = choose_node([
-        couch_util:get_value(<<"source">>, Props),
-        couch_util:get_value(<<"target">>, Props)
-    ]),
-    case rpc:call(Node, couch_rep, replicate, [PostBody, Ctx, Module]) of
-    {badrpc, Reason} ->
-        erlang:error(Reason);
-    Res ->
-        Res
-    end.
-
-choose_node(Key) when is_binary(Key) ->
-    Checksum = erlang:crc32(Key),
-    Nodes = lists:sort([node()|erlang:nodes()]),
-    lists:nth(1 + Checksum rem length(Nodes), Nodes);
-choose_node(Key) ->
-    choose_node(term_to_binary(Key)).
-
-handle_restart_req(#httpd{method='POST'}=Req) ->
-    couch_server_sup:restart_core_server(),
-    send_json(Req, 200, {[{ok, true}]});
-handle_restart_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_uuids_req(Req) ->
-    couch_httpd_misc_handlers:handle_uuids_req(Req).
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
-        case dict:is_key(Section, Acc) of
-        true ->
-            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
-        false ->
-            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
-        end
-    end, dict:new(), config:all()),
-    KVs = dict:fold(fun(Section, Values, Acc) ->
-        [{list_to_binary(Section), {Values}} | Acc]
-    end, [], Grouped),
-    send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
-    KVs = [{list_to_binary(Key), list_to_binary(Value)}
-            || {Key, Value} <- config:get(Section)],
-    send_json(Req, 200, {KVs});
-% PUT /_config/Section/Key
-% "value"
-handle_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req) ->
-    Value = chttpd:json_body(Req),
-    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
-    OldValue = config:get(Section, Key, ""),
-    ok = config:set(Section, Key, ?b2l(Value), Persist),
-    send_json(Req, 200, list_to_binary(OldValue));
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
-    case config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    Value ->
-        send_json(Req, 200, list_to_binary(Value))
-    end;
-% DELETE /_config/Section/Key
-handle_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req) ->
-    Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
-    case config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    OldValue ->
-        config:delete(Section, Key, Persist),
-        send_json(Req, 200, list_to_binary(OldValue))
-    end;
-handle_config_req(Req) ->
-    send_method_not_allowed(Req, "GET,PUT,DELETE").
-
-% httpd log handlers
-
-handle_log_req(#httpd{method='GET'}=Req) ->
-    Bytes = list_to_integer(chttpd:qs_value(Req, "bytes", "1000")),
-    Offset = list_to_integer(chttpd:qs_value(Req, "offset", "0")),
-    Chunk = couch_log:read(Bytes, Offset),
-    {ok, Resp} = start_chunked_response(Req, 200, [
-        % send a plaintext response
-        {"Content-Type", "text/plain; charset=utf-8"},
-        {"Content-Length", integer_to_list(length(Chunk))}
-    ]),
-    send_chunk(Resp, Chunk),
-    send_chunk(Resp, "");
-handle_log_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-% Note: this resource is exposed on the backdoor interface, but it lives in
-% chttpd because it is not part of couch trunk
-handle_system_req(Req) ->
-    Other = erlang:memory(system) - lists:sum([X || {_,X} <-
-        erlang:memory([atom, code, binary, ets])]),
-    Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
-        processes_used, binary, code, ets])],
-    {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
-    {{input, Input}, {output, Output}} = statistics(io),
-    send_json(Req, {[
-        {uptime, element(1,statistics(wall_clock)) div 1000},
-        {memory, {Memory}},
-        {run_queue, statistics(run_queue)},
-        {ets_table_count, length(ets:all())},
-        {context_switches, element(1, statistics(context_switches))},
-        {reductions, element(1, statistics(reductions))},
-        {garbage_collection_count, NumberOfGCs},
-        {words_reclaimed, WordsReclaimed},
-        {io_input, Input},
-        {io_output, Output},
-        {os_proc_count, couch_proc_manager:get_proc_count()},
-        {process_count, erlang:system_info(process_count)},
-        {process_limit, erlang:system_info(process_limit)},
-        {message_queues, message_queues(registered())},
-        {internal_replication_jobs, mem3_sync:get_backlog()},
-        {distribution, {get_distribution_stats()}}
-    ]}).
-
-get_distribution_stats() ->
-    lists:map(fun({Node, Socket}) ->
-        {ok, Stats} = inet:getstat(Socket),
-        {Node, {Stats}}
-    end, erlang:system_info(dist_ctrl)).
-
-handle_up_req(#httpd{method='GET'} = Req) ->
-    case config:get("couchdb", "maintenance_mode") of
-    "true" ->
-        send_json(Req, 404, {[{status, maintenance_mode}]});
-    _ ->
-        send_json(Req, 200, {[{status, ok}]})
-    end;
-
-handle_up_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-message_queues(Registered) ->
-    Queues = lists:map(fun(Name) ->
-        Type = message_queue_len,
-        {Type, Length} = process_info(whereis(Name), Type),
-        {Name, Length}
-    end, Registered),
-    {Queues}.

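A note on the _system handler deleted above: the "other" memory bucket is computed as total system memory minus the categories that are reported individually. A minimal sketch of just that computation (hypothetical module name; only stock erlang:memory/1 calls):

-module(system_demo).
-export([memory_breakdown/0]).

%% "other" is the part of erlang:memory(system) not covered by the
%% atom/code/binary/ets buckets that are reported separately.
memory_breakdown() ->
    Tracked = erlang:memory([atom, code, binary, ets]),
    Other = erlang:memory(system) - lists:sum([X || {_, X} <- Tracked]),
    [{other, Other} | erlang:memory([atom, atom_used, processes,
                                     processes_used, binary, code, ets])].
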
http://git-wip-us.apache.org/repos/asf/couchdb/blob/e41cfa40/src/chttpd/src/chttpd_rewrite.erl
----------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index a18fa00..0000000
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,456 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-%% @doc The HTTP rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the "rewrites" member of the design doc.
-%% Example of a complete rule:
-%%
-%%  {
-%%      ....
-%%      "rewrites": [
-%%      {
-%%          "from": "",
-%%          "to": "index.html",
-%%          "method": "GET",
-%%          "query": {}
-%%      }
-%%      ]
-%%  }
-%%
-%%  from: the path rule used to bind the current URI to the rule;
-%% it uses pattern matching for that.
-%%
-%%  to: the rule used to rewrite the URL. It can contain variables that depend
-%% on binding variables discovered during pattern matching and on query args
-%% (URL args and the "query" member).
-%%
-%%  method: the request method to bind to the rule; "*" by default.
-%%  query: query args you want to define; they can contain dynamic variables
-%% by binding the key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting
-%% with ":" or "*", e.g.:
-%% /somepath/:var/*
-%%
-%% Such a path is converted into an Erlang list by splitting on "/". Each
-%% ":var" is converted into a binding term and "*" into the match-all binding.
-%% The pattern matching is done by splitting the request URL on "/" into a
-%% list of tokens. A string pattern matches an equal token. The match-all
-%% pattern matches any number of tokens, but may only appear as the last
-%% pathterm in a pathspec. If all tokens are matched and all pathterms are
-%% used, then the pathspec matches. It works like webmachine. Each identified
-%% token is reused in the to rule and in the query.
-%%
-%% The pattern matching is done by first matching the request method to a
-%% rule; by default all methods match a rule (method is "*" by default). Then
-%% it tries to match the path to one rule. If no rule matches, a 404 error
-%% is returned.
-%%
-%% Once a rule is found, the request URL is rewritten using the "to" and
-%% "query" members. The identified tokens are matched to the rule and
-%% replace the vars. If '*' is found in the rule, it will contain the
-%% remaining part, if any.
-%%
-%% Examples:
-%%
-%% Dispatch rule            URL             TO                  Tokens
-%%
-%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
-%% "to": "/some/"}                                              k = v
-%%
-%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*",         /a/b/c          /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo",     /a/b             /some/?k=b&foo=b    foo =:= b
-%% "to": "/some",
-%%  "query": {
-%%      "k": ":foo"
-%%  }}
-%%
-%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
-%% "to": "/some/:foo",
-%%  }}
-
-
-
-handle_rewrite_req(#httpd{
-        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
-        method=Method,
-        mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
-    % we are in a design handler
-    DesignId = <<"_design/", DesignName/binary>>,
-    Prefix = <<"/", DbName/binary, "/", DesignId/binary>>,
-    QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
-    #doc{body={Props}} = DDoc,
-
-    % get rules from ddoc
-    case couch_util:get_value(<<"rewrites">>, Props) of
-        undefined ->
-            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>);
-        Bin when is_binary(Bin) ->
-            couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
-                <<"Rewrite rules are a String. They must be a JSON Array.">>);
-        Rules ->
-            % create dispatch list from rules
-            DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-            Method1 = couch_util:to_binary(Method),
-
-            %% get raw path by matching url to a rule.
-            RawPath = case try_bind_path(DispatchList, Method1,
-                    PathParts, QueryList) of
-                no_dispatch_path ->
-                    throw(not_found);
-                {NewPathParts, Bindings} ->
-                    Parts = [quote_plus(X) || X <- NewPathParts],
-
-                    % build the new path and re-encode the query args,
-                    % converting them to JSON where needed
-                    Bindings1 = maybe_encode_bindings(Bindings),
-                    Path = binary_to_list(
-                        iolist_to_binary([
-                                string:join(Parts, [?SEPARATOR]),
-                                [["?", mochiweb_util:urlencode(Bindings1)]
-                                    || Bindings1 =/= [] ]
-                            ])),
-
-                    % detect whether the path is relative and rewrite it
-                    case mochiweb_util:safe_relative_path(Path) of
-                        undefined ->
-                            ?b2l(Prefix) ++ "/" ++ Path;
-                        P1 ->
-                            ?b2l(Prefix) ++ "/" ++ P1
-                    end
-
-                end,
-
-            % normalize final path (fix levels "." and "..")
-            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
-
-            twig:log(debug, "rewrite to ~p ~n", [RawPath1]),
-
-            % build a new mochiweb request
-            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                             MochiReq:get(method),
-                                             RawPath1,
-                                             MochiReq:get(version),
-                                             MochiReq:get(headers)),
-
-            % cleanup; this forces mochiweb to reparse the raw URI.
-            MochiReq1:cleanup(),
-
-            chttpd:handle_request(MochiReq1)
-        end.
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % parse query args from the rule, filling
-                    % them in with binding vars where needed
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % keep only the bindings whose keys are not
-                    % already present in QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by a binding or an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value.
-
-%% @doc Build the new path from bindings. Bindings are query args
-%% (plus the dynamic query, rewritten if needed) and bindings found in
-%% the bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 ->
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method fits the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. This allows rules that
-%% depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc Bind a path. Using the rule's from pattern, we try to bind
-%% variables in the current URL by pattern matching.
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
-    {ok, Rest, Bindings};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path)  ->
-    "/" ++ string:join(normalize_path1(string:tokens(Path,
-                "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc Transform a JSON rule into Erlang terms for pattern matching.
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% into binding terms.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        twig:log(notice, "insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) ->
-    lists:foldl(fun
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.

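A note on the matcher deleted above: bind_path/3 walks a rule's from pattern and the request path token by token; ":var" segments capture bindings and a trailing "*" soaks up whatever remains. A standalone sketch with a worked example (hypothetical module name; the real function is private to chttpd_rewrite):

-module(rewrite_demo).
-export([demo/0]).

-define(MATCH_ALL, {bind, <<"*">>}).

%% Same clause structure as the removed bind_path/3.
bind_path([], [], Bindings) ->
    {ok, [], Bindings};
bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
    {ok, Rest, Bindings};
bind_path(_, [], _) ->
    fail;
bind_path([{bind, Token} | RestTok], [Match | RestMatch], Bindings) ->
    %% a ":var" segment captures the concrete path token
    bind_path(RestTok, RestMatch, [{{bind, Token}, Match} | Bindings]);
bind_path([Token | RestTok], [Token | RestMatch], Bindings) ->
    %% literal segments must match exactly
    bind_path(RestTok, RestMatch, Bindings);
bind_path(_, _, _) ->
    fail.

demo() ->
    %% "/a/:foo/*" applied to /a/b/c/d: foo binds to b, "*" captures c/d.
    Rule = [<<"a">>, {bind, <<"foo">>}, ?MATCH_ALL],
    {ok, [<<"c">>, <<"d">>], [{{bind, <<"foo">>}, <<"b">>}]} =
        bind_path(Rule, [<<"a">>, <<"b">>, <<"c">>, <<"d">>], []),
    ok.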

[36/49] Remove src/mochiweb

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_html.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_html.erl b/src/mochiweb/src/mochiweb_html.erl
deleted file mode 100644
index 0f281db..0000000
--- a/src/mochiweb/src/mochiweb_html.erl
+++ /dev/null
@@ -1,1264 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Loosely tokenizes and generates parse trees for HTML 4.
--module(mochiweb_html).
--export([tokens/1, parse/1, parse_tokens/1, to_tokens/1, escape/1,
-         escape_attr/1, to_html/1]).
-
-%% This is a macro to placate syntax highlighters.
--define(QUOTE, $\").
--define(SQUOTE, $\').
--define(ADV_COL(S, N),
-        S#decoder{column=N+S#decoder.column,
-                  offset=N+S#decoder.offset}).
--define(INC_COL(S),
-        S#decoder{column=1+S#decoder.column,
-                  offset=1+S#decoder.offset}).
--define(INC_LINE(S),
-        S#decoder{column=1,
-                  line=1+S#decoder.line,
-                  offset=1+S#decoder.offset}).
--define(INC_CHAR(S, C),
-        case C of
-            $\n ->
-                S#decoder{column=1,
-                          line=1+S#decoder.line,
-                          offset=1+S#decoder.offset};
-            _ ->
-                S#decoder{column=1+S#decoder.column,
-                          offset=1+S#decoder.offset}
-        end).
-
--define(IS_WHITESPACE(C),
-        (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
--define(IS_LITERAL_SAFE(C),
-        ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z)
-         orelse (C >= $0 andalso C =< $9))).
--define(PROBABLE_CLOSE(C),
-        (C =:= $> orelse ?IS_WHITESPACE(C))).
-
--record(decoder, {line=1,
-                  column=1,
-                  offset=0}).
-
-%% @type html_node() = {string(), [html_attr()], [html_node() | string()]}
-%% @type html_attr() = {string(), string()}
-%% @type html_token() = html_data() | start_tag() | end_tag() | inline_html() | html_comment() | html_doctype()
-%% @type html_data() = {data, string(), Whitespace::boolean()}
-%% @type start_tag() = {start_tag, Name, [html_attr()], Singleton::boolean()}
-%% @type end_tag() = {end_tag, Name}
-%% @type html_comment() = {comment, Comment}
-%% @type html_doctype() = {doctype, [Doctype]}
-%% @type inline_html() = {'=', iolist()}
-
-%% External API.
-
-%% @spec parse(string() | binary()) -> html_node()
-%% @doc Tokenize and then transform the token stream into an HTML tree.
-parse(Input) ->
-    parse_tokens(tokens(Input)).
-
-%% @spec parse_tokens([html_token()]) -> html_node()
-%% @doc Transform the output of tokens(Doc) into an HTML tree.
-parse_tokens(Tokens) when is_list(Tokens) ->
-    %% Skip over doctype, processing instructions
-    F = fun (X) ->
-                case X of
-                    {start_tag, _, _, false} ->
-                        false;
-                    _ ->
-                        true
-                end
-        end,
-    [{start_tag, Tag, Attrs, false} | Rest] = lists:dropwhile(F, Tokens),
-    {Tree, _} = tree(Rest, [norm({Tag, Attrs})]),
-    Tree.
-
-%% @spec tokens(StringOrBinary) -> [html_token()]
-%% @doc Transform the input UTF-8 HTML into a token stream.
-tokens(Input) ->
-    tokens(iolist_to_binary(Input), #decoder{}, []).
-
-%% @spec to_tokens(html_node()) -> [html_token()]
-%% @doc Convert a html_node() tree to a list of tokens.
-to_tokens({Tag0}) ->
-    to_tokens({Tag0, [], []});
-to_tokens(T={'=', _}) ->
-    [T];
-to_tokens(T={doctype, _}) ->
-    [T];
-to_tokens(T={comment, _}) ->
-    [T];
-to_tokens({Tag0, Acc}) ->
-    %% This is only allowed in sub-tags: {p, [{"class", "foo"}]}
-    to_tokens({Tag0, [], Acc});
-to_tokens({Tag0, Attrs, Acc}) ->
-    Tag = to_tag(Tag0),
-    to_tokens([{Tag, Acc}], [{start_tag, Tag, Attrs, is_singleton(Tag)}]).
-
-%% @spec to_html([html_token()] | html_node()) -> iolist()
-%% @doc Convert a list of html_token() to an HTML document.
-to_html(Node) when is_tuple(Node) ->
-    to_html(to_tokens(Node));
-to_html(Tokens) when is_list(Tokens) ->
-    to_html(Tokens, []).
-
-%% @spec escape(string() | atom() | binary()) -> binary()
-%% @doc Escape a string such that it's safe for HTML (amp; lt; gt;).
-escape(B) when is_binary(B) ->
-    escape(binary_to_list(B), []);
-escape(A) when is_atom(A) ->
-    escape(atom_to_list(A), []);
-escape(S) when is_list(S) ->
-    escape(S, []).
-
-%% @spec escape_attr(string() | binary() | atom() | integer() | float()) -> binary()
-%% @doc Escape a string such that it's safe for HTML attrs
-%%      (amp; lt; gt; quot;).
-escape_attr(B) when is_binary(B) ->
-    escape_attr(binary_to_list(B), []);
-escape_attr(A) when is_atom(A) ->
-    escape_attr(atom_to_list(A), []);
-escape_attr(S) when is_list(S) ->
-    escape_attr(S, []);
-escape_attr(I) when is_integer(I) ->
-    escape_attr(integer_to_list(I), []);
-escape_attr(F) when is_float(F) ->
-    escape_attr(mochinum:digits(F), []).
-
-to_html([], Acc) ->
-    lists:reverse(Acc);
-to_html([{'=', Content} | Rest], Acc) ->
-    to_html(Rest, [Content | Acc]);
-to_html([{pi, Bin} | Rest], Acc) ->
-    Open = [<<"<?">>,
-            Bin,
-            <<"?>">>],
-    to_html(Rest, [Open | Acc]);
-to_html([{pi, Tag, Attrs} | Rest], Acc) ->
-    Open = [<<"<?">>,
-            Tag,
-            attrs_to_html(Attrs, []),
-            <<"?>">>],
-    to_html(Rest, [Open | Acc]);
-to_html([{comment, Comment} | Rest], Acc) ->
-    to_html(Rest, [[<<"<!--">>, Comment, <<"-->">>] | Acc]);
-to_html([{doctype, Parts} | Rest], Acc) ->
-    Inside = doctype_to_html(Parts, Acc),
-    to_html(Rest, [[<<"<!DOCTYPE">>, Inside, <<">">>] | Acc]);
-to_html([{data, Data, _Whitespace} | Rest], Acc) ->
-    to_html(Rest, [escape(Data) | Acc]);
-to_html([{start_tag, Tag, Attrs, Singleton} | Rest], Acc) ->
-    Open = [<<"<">>,
-            Tag,
-            attrs_to_html(Attrs, []),
-            case Singleton of
-                true -> <<" />">>;
-                false -> <<">">>
-            end],
-    to_html(Rest, [Open | Acc]);
-to_html([{end_tag, Tag} | Rest], Acc) ->
-    to_html(Rest, [[<<"</">>, Tag, <<">">>] | Acc]).
-
-doctype_to_html([], Acc) ->
-    lists:reverse(Acc);
-doctype_to_html([Word | Rest], Acc) ->
-    case lists:all(fun (C) -> ?IS_LITERAL_SAFE(C) end,
-                   binary_to_list(iolist_to_binary(Word))) of
-        true ->
-            doctype_to_html(Rest, [[<<" ">>, Word] | Acc]);
-        false ->
-            doctype_to_html(Rest, [[<<" \"">>, escape_attr(Word), ?QUOTE] | Acc])
-    end.
-
-attrs_to_html([], Acc) ->
-    lists:reverse(Acc);
-attrs_to_html([{K, V} | Rest], Acc) ->
-    attrs_to_html(Rest,
-                  [[<<" ">>, escape(K), <<"=\"">>,
-                    escape_attr(V), <<"\"">>] | Acc]).
-
-escape([], Acc) ->
-    list_to_binary(lists:reverse(Acc));
-escape("<" ++ Rest, Acc) ->
-    escape(Rest, lists:reverse("&lt;", Acc));
-escape(">" ++ Rest, Acc) ->
-    escape(Rest, lists:reverse("&gt;", Acc));
-escape("&" ++ Rest, Acc) ->
-    escape(Rest, lists:reverse("&amp;", Acc));
-escape([C | Rest], Acc) ->
-    escape(Rest, [C | Acc]).
-
-escape_attr([], Acc) ->
-    list_to_binary(lists:reverse(Acc));
-escape_attr("<" ++ Rest, Acc) ->
-    escape_attr(Rest, lists:reverse("&lt;", Acc));
-escape_attr(">" ++ Rest, Acc) ->
-    escape_attr(Rest, lists:reverse("&gt;", Acc));
-escape_attr("&" ++ Rest, Acc) ->
-    escape_attr(Rest, lists:reverse("&amp;", Acc));
-escape_attr([?QUOTE | Rest], Acc) ->
-    escape_attr(Rest, lists:reverse("&quot;", Acc));
-escape_attr([C | Rest], Acc) ->
-    escape_attr(Rest, [C | Acc]).
-
-to_tag(A) when is_atom(A) ->
-    norm(atom_to_list(A));
-to_tag(L) ->
-    norm(L).
-
-to_tokens([], Acc) ->
-    lists:reverse(Acc);
-to_tokens([{Tag, []} | Rest], Acc) ->
-    to_tokens(Rest, [{end_tag, to_tag(Tag)} | Acc]);
-to_tokens([{Tag0, [{T0} | R1]} | Rest], Acc) ->
-    %% Allow {br}
-    to_tokens([{Tag0, [{T0, [], []} | R1]} | Rest], Acc);
-to_tokens([{Tag0, [T0={'=', _C0} | R1]} | Rest], Acc) ->
-    %% Allow {'=', iolist()}
-    to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
-to_tokens([{Tag0, [T0={comment, _C0} | R1]} | Rest], Acc) ->
-    %% Allow {comment, iolist()}
-    to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
-to_tokens([{Tag0, [T0={pi, _S0} | R1]} | Rest], Acc) ->
-    %% Allow {pi, binary()}
-    to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
-to_tokens([{Tag0, [T0={pi, _S0, _A0} | R1]} | Rest], Acc) ->
-    %% Allow {pi, binary(), list()}
-    to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
-to_tokens([{Tag0, [{T0, A0=[{_, _} | _]} | R1]} | Rest], Acc) ->
-    %% Allow {p, [{"class", "foo"}]}
-    to_tokens([{Tag0, [{T0, A0, []} | R1]} | Rest], Acc);
-to_tokens([{Tag0, [{T0, C0} | R1]} | Rest], Acc) ->
-    %% Allow {p, "content"} and {p, <<"content">>}
-    to_tokens([{Tag0, [{T0, [], C0} | R1]} | Rest], Acc);
-to_tokens([{Tag0, [{T0, A1, C0} | R1]} | Rest], Acc) when is_binary(C0) ->
-    %% Allow {"p", [{"class", "foo"}], <<"content">>}
-    to_tokens([{Tag0, [{T0, A1, binary_to_list(C0)} | R1]} | Rest], Acc);
-to_tokens([{Tag0, [{T0, A1, C0=[C | _]} | R1]} | Rest], Acc)
-  when is_integer(C) ->
-    %% Allow {"p", [{"class", "foo"}], "content"}
-    to_tokens([{Tag0, [{T0, A1, [C0]} | R1]} | Rest], Acc);
-to_tokens([{Tag0, [{T0, A1, C1} | R1]} | Rest], Acc) ->
-    %% Native {"p", [{"class", "foo"}], ["content"]}
-    Tag = to_tag(Tag0),
-    T1 = to_tag(T0),
-    case is_singleton(norm(T1)) of
-        true ->
-            to_tokens([{Tag, R1} | Rest], [{start_tag, T1, A1, true} | Acc]);
-        false ->
-            to_tokens([{T1, C1}, {Tag, R1} | Rest],
-                      [{start_tag, T1, A1, false} | Acc])
-    end;
-to_tokens([{Tag0, [L | R1]} | Rest], Acc) when is_list(L) ->
-    %% List text
-    Tag = to_tag(Tag0),
-    to_tokens([{Tag, R1} | Rest], [{data, iolist_to_binary(L), false} | Acc]);
-to_tokens([{Tag0, [B | R1]} | Rest], Acc) when is_binary(B) ->
-    %% Binary text
-    Tag = to_tag(Tag0),
-    to_tokens([{Tag, R1} | Rest], [{data, B, false} | Acc]).
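-
-%% A small sketch of the shorthand normalization above (assumes the
-%% to_tokens/1 wrapper defined earlier in this module): a bare {br} inside
-%% content expands to a singleton start_tag with no matching end_tag.
-shorthand_sketch() ->
-    [{start_tag, <<"p">>, [], false},
-     {start_tag, <<"br">>, [], true},
-     {end_tag, <<"p">>}] = to_tokens({p, [{br}]}),
-    ok.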
-
-tokens(B, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary>> ->
-            lists:reverse(Acc);
-        _ ->
-            {Tag, S1} = tokenize(B, S),
-            case parse_flag(Tag) of
-                script ->
-                    {Tag2, S2} = tokenize_script(B, S1),
-                    tokens(B, S2, [Tag2, Tag | Acc]);
-                textarea ->
-                    {Tag2, S2} = tokenize_textarea(B, S1),
-                    tokens(B, S2, [Tag2, Tag | Acc]);
-                none ->
-                    tokens(B, S1, [Tag | Acc])
-            end
-    end.
-
-parse_flag({start_tag, B, _, false}) ->
-    case string:to_lower(binary_to_list(B)) of
-        "script" ->
-            script;
-        "textarea" ->
-            textarea;
-        _ ->
-            none
-    end;
-parse_flag(_) ->
-    none.
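-
-%% Raw-text sketch (assumes the tokens/1 wrapper defined earlier in this
-%% module): after an opening <script> or <textarea>, tokenization switches
-%% to tokenize_script/tokenize_textarea, so markup-like characters inside
-%% come back as a single data token instead of being parsed as tags.
-raw_text_sketch() ->
-    [{start_tag, <<"script">>, [], false},
-     {data, <<"if (a < b) x();">>, false},
-     {end_tag, <<"script">>}] = tokens(<<"<script>if (a < b) x();</script>">>),
-    ok.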
-
-tokenize(B, S=#decoder{offset=O}) ->
-    case B of
-        <<_:O/binary, "<!--", _/binary>> ->
-            tokenize_comment(B, ?ADV_COL(S, 4));
-        <<_:O/binary, "<!DOCTYPE", _/binary>> ->
-            tokenize_doctype(B, ?ADV_COL(S, 10));
-        <<_:O/binary, "<![CDATA[", _/binary>> ->
-            tokenize_cdata(B, ?ADV_COL(S, 9));
-        <<_:O/binary, "<?php", _/binary>> ->
-            {Body, S1} = raw_qgt(B, ?ADV_COL(S, 2)),
-            {{pi, Body}, S1};
-        <<_:O/binary, "<?", _/binary>> ->
-            {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
-            {Attrs, S2} = tokenize_attributes(B, S1),
-            S3 = find_qgt(B, S2),
-            {{pi, Tag, Attrs}, S3};
-        <<_:O/binary, "&", _/binary>> ->
-            tokenize_charref(B, ?INC_COL(S));
-        <<_:O/binary, "</", _/binary>> ->
-            {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
-            {S2, _} = find_gt(B, S1),
-            {{end_tag, Tag}, S2};
-        <<_:O/binary, "<", C, _/binary>> when ?IS_WHITESPACE(C) ->
-            %% This isn't really strict HTML
-            {{data, Data, _Whitespace}, S1} = tokenize_data(B, ?INC_COL(S)),
-            {{data, <<$<, Data/binary>>, false}, S1};
-        <<_:O/binary, "<", _/binary>> ->
-            {Tag, S1} = tokenize_literal(B, ?INC_COL(S)),
-            {Attrs, S2} = tokenize_attributes(B, S1),
-            {S3, HasSlash} = find_gt(B, S2),
-            Singleton = HasSlash orelse is_singleton(Tag),
-            {{start_tag, Tag, Attrs, Singleton}, S3};
-        _ ->
-            tokenize_data(B, S)
-    end.
-
-tree_data([{data, Data, Whitespace} | Rest], AllWhitespace, Acc) ->
-    tree_data(Rest, (Whitespace andalso AllWhitespace), [Data | Acc]);
-tree_data(Rest, AllWhitespace, Acc) ->
-    {iolist_to_binary(lists:reverse(Acc)), AllWhitespace, Rest}.
-
-tree([], Stack) ->
-    {destack(Stack), []};
-tree([{end_tag, Tag} | Rest], Stack) ->
-    case destack(norm(Tag), Stack) of
-        S when is_list(S) ->
-            tree(Rest, S);
-        Result ->
-            {Result, []}
-    end;
-tree([{start_tag, Tag, Attrs, true} | Rest], S) ->
-    tree(Rest, append_stack_child(norm({Tag, Attrs}), S));
-tree([{start_tag, Tag, Attrs, false} | Rest], S) ->
-    tree(Rest, stack(norm({Tag, Attrs}), S));
-tree([T={pi, _Raw} | Rest], S) ->
-    tree(Rest, append_stack_child(T, S));
-tree([T={pi, _Tag, _Attrs} | Rest], S) ->
-    tree(Rest, append_stack_child(T, S));
-tree([T={comment, _Comment} | Rest], S) ->
-    tree(Rest, append_stack_child(T, S));
-tree(L=[{data, _Data, _Whitespace} | _], S) ->
-    case tree_data(L, true, []) of
-        {_, true, Rest} ->
-            tree(Rest, S);
-        {Data, false, Rest} ->
-            tree(Rest, append_stack_child(Data, S))
-    end;
-tree([{doctype, _} | Rest], Stack) ->
-    tree(Rest, Stack).
-
-norm({Tag, Attrs}) ->
-    {norm(Tag), [{norm(K), iolist_to_binary(V)} || {K, V} <- Attrs], []};
-norm(Tag) when is_binary(Tag) ->
-    Tag;
-norm(Tag) ->
-    list_to_binary(string:to_lower(Tag)).
-
-stack(T1={TN, _, _}, Stack=[{TN, _, _} | _Rest])
-  when TN =:= <<"li">> orelse TN =:= <<"option">> ->
-    [T1 | destack(TN, Stack)];
-stack(T1={TN0, _, _}, Stack=[{TN1, _, _} | _Rest])
-  when (TN0 =:= <<"dd">> orelse TN0 =:= <<"dt">>) andalso
-       (TN1 =:= <<"dd">> orelse TN1 =:= <<"dt">>) ->
-    [T1 | destack(TN1, Stack)];
-stack(T1, Stack) ->
-    [T1 | Stack].
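-
-%% Implicit-close sketch (assumes the parse/1 entry point defined earlier in
-%% this module): a new <li> pops a still-open <li> sibling (likewise dd/dt),
-%% so unclosed list items end up as siblings rather than nested.
-implicit_close_sketch() ->
-    {<<"ul">>, [],
-     [{<<"li">>, [], [<<"a">>]},
-      {<<"li">>, [], [<<"b">>]}]} = parse(<<"<ul><li>a<li>b</ul>">>),
-    ok.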
-
-append_stack_child(StartTag, [{Name, Attrs, Acc} | Stack]) ->
-    [{Name, Attrs, [StartTag | Acc]} | Stack].
-
-destack(<<"br">>, Stack) ->
-    %% This is an ugly hack to make dumb_br_test() pass;
-    %% it ensures that br can never have children.
-    Stack;
-destack(TagName, Stack) when is_list(Stack) ->
-    F = fun (X) ->
-                case X of
-                    {TagName, _, _} ->
-                        false;
-                    _ ->
-                        true
-                end
-        end,
-    case lists:splitwith(F, Stack) of
-        {_, []} ->
-            %% If we're parsing something like XML we might find
-            %% a <link>tag</link> that is normally a singleton
-            %% in HTML but isn't here
-            case {is_singleton(TagName), Stack} of
-                {true, [{T0, A0, Acc0} | Post0]} ->
-                    case lists:splitwith(F, Acc0) of
-                        {_, []} ->
-                            %% Actually was a singleton
-                            Stack;
-                        {Pre, [{T1, A1, Acc1} | Post1]} ->
-                            [{T0, A0, [{T1, A1, Acc1 ++ lists:reverse(Pre)} | Post1]}
-                             | Post0]
-                    end;
-                _ ->
-                    %% No match, no state change
-                    Stack
-            end;
-        {_Pre, [_T]} ->
-            %% Unfurl the whole stack, we're done
-            destack(Stack);
-        {Pre, [T, {T0, A0, Acc0} | Post]} ->
-            %% Unfurl up to the tag, then accumulate it
-            [{T0, A0, [destack(Pre ++ [T]) | Acc0]} | Post]
-    end.
-
-destack([{Tag, Attrs, Acc}]) ->
-    {Tag, Attrs, lists:reverse(Acc)};
-destack([{T1, A1, Acc1}, {T0, A0, Acc0} | Rest]) ->
-    destack([{T0, A0, [{T1, A1, lists:reverse(Acc1)} | Acc0]} | Rest]).
-
-is_singleton(<<"br">>) -> true;
-is_singleton(<<"hr">>) -> true;
-is_singleton(<<"img">>) -> true;
-is_singleton(<<"input">>) -> true;
-is_singleton(<<"base">>) -> true;
-is_singleton(<<"meta">>) -> true;
-is_singleton(<<"link">>) -> true;
-is_singleton(<<"area">>) -> true;
-is_singleton(<<"param">>) -> true;
-is_singleton(<<"col">>) -> true;
-is_singleton(_) -> false.
-
-tokenize_data(B, S=#decoder{offset=O}) ->
-    tokenize_data(B, S, O, true).
-
-tokenize_data(B, S=#decoder{offset=O}, Start, Whitespace) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when (C =/= $< andalso C =/= $&) ->
-            tokenize_data(B, ?INC_CHAR(S, C), Start,
-                          (Whitespace andalso ?IS_WHITESPACE(C)));
-        _ ->
-            Len = O - Start,
-            <<_:Start/binary, Data:Len/binary, _/binary>> = B,
-            {{data, Data, Whitespace}, S}
-    end.
-
-tokenize_attributes(B, S) ->
-    tokenize_attributes(B, S, []).
-
-tokenize_attributes(B, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary>> ->
-            {lists:reverse(Acc), S};
-        <<_:O/binary, C, _/binary>> when (C =:= $> orelse C =:= $/) ->
-            {lists:reverse(Acc), S};
-        <<_:O/binary, "?>", _/binary>> ->
-            {lists:reverse(Acc), S};
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
-            tokenize_attributes(B, ?INC_CHAR(S, C), Acc);
-        _ ->
-            {Attr, S1} = tokenize_literal(B, S),
-            {Value, S2} = tokenize_attr_value(Attr, B, S1),
-            tokenize_attributes(B, S2, [{Attr, Value} | Acc])
-    end.
-
-tokenize_attr_value(Attr, B, S) ->
-    S1 = skip_whitespace(B, S),
-    O = S1#decoder.offset,
-    case B of
-        <<_:O/binary, "=", _/binary>> ->
-            S2 = skip_whitespace(B, ?INC_COL(S1)),
-            tokenize_quoted_or_unquoted_attr_value(B, S2);
-        _ ->
-            {Attr, S1}
-    end.
-
-tokenize_quoted_or_unquoted_attr_value(B, S=#decoder{offset=O}) ->
-    case B of
-        <<_:O/binary>> ->
-            { [], S };
-        <<_:O/binary, Q, _/binary>> when Q =:= ?QUOTE orelse
-                                         Q =:= ?SQUOTE ->
-            tokenize_quoted_attr_value(B, ?INC_COL(S), [], Q);
-        <<_:O/binary, _/binary>> ->
-            tokenize_unquoted_attr_value(B, S, [])
-    end.
-
-tokenize_quoted_attr_value(B, S=#decoder{offset=O}, Acc, Q) ->
-    case B of
-        <<_:O/binary>> ->
-            { iolist_to_binary(lists:reverse(Acc)), S };
-        <<_:O/binary, $&, _/binary>> ->
-            {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)),
-            tokenize_quoted_attr_value(B, S1, [Data|Acc], Q);
-        <<_:O/binary, Q, _/binary>> ->
-            { iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S) };
-        <<_:O/binary, $\n, _/binary>> ->
-            { iolist_to_binary(lists:reverse(Acc)), ?INC_LINE(S) };
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_quoted_attr_value(B, ?INC_COL(S), [C|Acc], Q)
-    end.
-
-tokenize_unquoted_attr_value(B, S=#decoder{offset=O}, Acc) ->
-    case B of
-        <<_:O/binary>> ->
-            { iolist_to_binary(lists:reverse(Acc)), S };
-        <<_:O/binary, $&, _/binary>> ->
-            {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)),
-            tokenize_unquoted_attr_value(B, S1, [Data|Acc]);
-        <<_:O/binary, $/, $>, _/binary>> ->
-            { iolist_to_binary(lists:reverse(Acc)), S };
-        <<_:O/binary, C, _/binary>> when ?PROBABLE_CLOSE(C) ->
-            { iolist_to_binary(lists:reverse(Acc)), S };
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_unquoted_attr_value(B, ?INC_COL(S), [C|Acc])
-    end.
-
-skip_whitespace(B, S=#decoder{offset=O}) ->
-    case B of
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
-            skip_whitespace(B, ?INC_CHAR(S, C));
-        _ ->
-            S
-    end.
-
-tokenize_literal(Bin, S=#decoder{offset=O}) ->
-    case Bin of
-        <<_:O/binary, C, _/binary>> when C =:= $>
-                                    orelse C =:= $/
-                                    orelse C =:= $= ->
-            %% Handle case where tokenize_literal would consume
-            %% 0 chars. http://github.com/mochi/mochiweb/pull/13
-            {[C], ?INC_COL(S)};
-        _ ->
-            tokenize_literal(Bin, S, [])
-    end.
-
-tokenize_literal(Bin, S=#decoder{offset=O}, Acc) ->
-    case Bin of
-        <<_:O/binary, $&, _/binary>> ->
-            {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
-            tokenize_literal(Bin, S1, [Data | Acc]);
-        <<_:O/binary, C, _/binary>> when not (?IS_WHITESPACE(C)
-                                              orelse C =:= $>
-                                              orelse C =:= $/
-                                              orelse C =:= $=) ->
-            tokenize_literal(Bin, ?INC_COL(S), [C | Acc]);
-        _ ->
-            {iolist_to_binary(string:to_lower(lists:reverse(Acc))), S}
-    end.
-
-raw_qgt(Bin, S=#decoder{offset=O}) ->
-    raw_qgt(Bin, S, O).
-
-raw_qgt(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        <<_:O/binary, "?>", _/binary>> ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {Raw, ?ADV_COL(S, 2)};
-        <<_:O/binary, C, _/binary>> ->
-            raw_qgt(Bin, ?INC_CHAR(S, C), Start);
-        <<_:O/binary>> ->
-            <<_:Start/binary, Raw/binary>> = Bin,
-            {Raw, S}
-    end.
-
-find_qgt(Bin, S=#decoder{offset=O}) ->
-    case Bin of
-        <<_:O/binary, "?>", _/binary>> ->
-            ?ADV_COL(S, 2);
-        <<_:O/binary, ">", _/binary>> ->
-            ?ADV_COL(S, 1);
-        <<_:O/binary, "/>", _/binary>> ->
-            ?ADV_COL(S, 2);
-        %% tokenize_attributes takes care of this state:
-        %% <<_:O/binary, C, _/binary>> ->
-        %%     find_qgt(Bin, ?INC_CHAR(S, C));
-        <<_:O/binary>> ->
-            S
-    end.
-
-find_gt(Bin, S) ->
-    find_gt(Bin, S, false).
-
-find_gt(Bin, S=#decoder{offset=O}, HasSlash) ->
-    case Bin of
-        <<_:O/binary, $/, _/binary>> ->
-            find_gt(Bin, ?INC_COL(S), true);
-        <<_:O/binary, $>, _/binary>> ->
-            {?INC_COL(S), HasSlash};
-        <<_:O/binary, C, _/binary>> ->
-            find_gt(Bin, ?INC_CHAR(S, C), HasSlash);
-        _ ->
-            {S, HasSlash}
-    end.
-
-tokenize_charref(Bin, S=#decoder{offset=O}) ->
-    tokenize_charref(Bin, S, O).
-
-tokenize_charref(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        <<_:O/binary>> ->
-            <<_:Start/binary, Raw/binary>> = Bin,
-            {{data, Raw, false}, S};
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C)
-                                         orelse C =:= ?SQUOTE
-                                         orelse C =:= ?QUOTE
-                                         orelse C =:= $/
-                                         orelse C =:= $> ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {{data, Raw, false}, S};
-        <<_:O/binary, $;, _/binary>> ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            Data = case mochiweb_charref:charref(Raw) of
-                       undefined ->
-                           Start1 = Start - 1,
-                           Len1 = Len + 2,
-                           <<_:Start1/binary, R:Len1/binary, _/binary>> = Bin,
-                           R;
-                       Unichar ->
-                           mochiutf8:codepoint_to_bytes(Unichar)
-                   end,
-            {{data, Data, false}, ?INC_COL(S)};
-        _ ->
-            tokenize_charref(Bin, ?INC_COL(S), Start)
-    end.
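-
-%% Entity sketch (assumes the tokens/1 wrapper defined earlier in this
-%% module): known character references are decoded via mochiweb_charref and
-%% re-encoded as UTF-8, while unknown ones pass through verbatim, including
-%% the leading "&" and the trailing ";".
-charref_sketch() ->
-    [{data, <<"<">>, false}] = tokens(<<"&lt;">>),
-    [{data, <<"&wtf;">>, false}] = tokens(<<"&wtf;">>),
-    ok.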
-
-tokenize_doctype(Bin, S) ->
-    tokenize_doctype(Bin, S, []).
-
-tokenize_doctype(Bin, S=#decoder{offset=O}, Acc) ->
-    case Bin of
-        <<_:O/binary>> ->
-            {{doctype, lists:reverse(Acc)}, S};
-        <<_:O/binary, $>, _/binary>> ->
-            {{doctype, lists:reverse(Acc)}, ?INC_COL(S)};
-        <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
-            tokenize_doctype(Bin, ?INC_CHAR(S, C), Acc);
-        _ ->
-            {Word, S1} = tokenize_word_or_literal(Bin, S),
-            tokenize_doctype(Bin, S1, [Word | Acc])
-    end.
-
-tokenize_word_or_literal(Bin, S=#decoder{offset=O}) ->
-    case Bin of
-        <<_:O/binary, C, _/binary>> when C =:= ?QUOTE orelse C =:= ?SQUOTE ->
-            tokenize_word(Bin, ?INC_COL(S), C);
-        <<_:O/binary, C, _/binary>> when not ?IS_WHITESPACE(C) ->
-            %% Sanity check for whitespace
-            tokenize_literal(Bin, S)
-    end.
-
-tokenize_word(Bin, S, Quote) ->
-    tokenize_word(Bin, S, Quote, []).
-
-tokenize_word(Bin, S=#decoder{offset=O}, Quote, Acc) ->
-    case Bin of
-        <<_:O/binary>> ->
-            {iolist_to_binary(lists:reverse(Acc)), S};
-        <<_:O/binary, Quote, _/binary>> ->
-            {iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S)};
-        <<_:O/binary, $&, _/binary>> ->
-            {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
-            tokenize_word(Bin, S1, Quote, [Data | Acc]);
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_word(Bin, ?INC_CHAR(S, C), Quote, [C | Acc])
-    end.
-
-tokenize_cdata(Bin, S=#decoder{offset=O}) ->
-    tokenize_cdata(Bin, S, O).
-
-tokenize_cdata(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        <<_:O/binary, "]]>", _/binary>> ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {{data, Raw, false}, ?ADV_COL(S, 3)};
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_cdata(Bin, ?INC_CHAR(S, C), Start);
-        _ ->
-            %% End of input before "]]>"; emit everything from Start as data.
-            <<_:Start/binary, Raw/binary>> = Bin,
-            {{data, Raw, false}, S}
-    end.
-
-tokenize_comment(Bin, S=#decoder{offset=O}) ->
-    tokenize_comment(Bin, S, O).
-
-tokenize_comment(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        <<_:O/binary, "-->", _/binary>> ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {{comment, Raw}, ?ADV_COL(S, 3)};
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_comment(Bin, ?INC_CHAR(S, C), Start);
-        <<_:Start/binary, Raw/binary>> ->
-            {{comment, Raw}, S}
-    end.
-
-tokenize_script(Bin, S=#decoder{offset=O}) ->
-    tokenize_script(Bin, S, O).
-
-tokenize_script(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        %% Just a look-ahead, we want the end_tag separately
-        <<_:O/binary, $<, $/, SS, CC, RR, II, PP, TT, ZZ, _/binary>>
-        when (SS =:= $s orelse SS =:= $S) andalso
-             (CC =:= $c orelse CC =:= $C) andalso
-             (RR =:= $r orelse RR =:= $R) andalso
-             (II =:= $i orelse II =:= $I) andalso
-             (PP =:= $p orelse PP =:= $P) andalso
-             (TT=:= $t orelse TT =:= $T) andalso
-             ?PROBABLE_CLOSE(ZZ) ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {{data, Raw, false}, S};
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_script(Bin, ?INC_CHAR(S, C), Start);
-        <<_:Start/binary, Raw/binary>> ->
-            {{data, Raw, false}, S}
-    end.
-
-tokenize_textarea(Bin, S=#decoder{offset=O}) ->
-    tokenize_textarea(Bin, S, O).
-
-tokenize_textarea(Bin, S=#decoder{offset=O}, Start) ->
-    case Bin of
-        %% Just a look-ahead, we want the end_tag separately
-        <<_:O/binary, $<, $/, TT, EE, XX, TT2, AA, RR, EE2, AA2, ZZ, _/binary>>
-        when (TT =:= $t orelse TT =:= $T) andalso
-             (EE =:= $e orelse EE =:= $E) andalso
-             (XX =:= $x orelse XX =:= $X) andalso
-             (TT2 =:= $t orelse TT2 =:= $T) andalso
-             (AA =:= $a orelse AA =:= $A) andalso
-             (RR =:= $r orelse RR =:= $R) andalso
-             (EE2 =:= $e orelse EE2 =:= $E) andalso
-             (AA2 =:= $a orelse AA2 =:= $A) andalso
-             ?PROBABLE_CLOSE(ZZ) ->
-            Len = O - Start,
-            <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
-            {{data, Raw, false}, S};
-        <<_:O/binary, C, _/binary>> ->
-            tokenize_textarea(Bin, ?INC_CHAR(S, C), Start);
-        <<_:Start/binary, Raw/binary>> ->
-            {{data, Raw, false}, S}
-    end.
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-to_html_test() ->
-    ?assertEqual(
-       <<"<html><head><title>hey!</title></head><body><p class=\"foo\">what's up<br /></p><div>sucka</div>RAW!<!-- comment! --></body></html>">>,
-       iolist_to_binary(
-         to_html({html, [],
-                  [{<<"head">>, [],
-                    [{title, <<"hey!">>}]},
-                   {body, [],
-                    [{p, [{class, foo}], [<<"what's">>, <<" up">>, {br}]},
-                     {'div', <<"sucka">>},
-                     {'=', <<"RAW!">>},
-                     {comment, <<" comment! ">>}]}]}))),
-    ?assertEqual(
-       <<"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">">>,
-       iolist_to_binary(
-         to_html({doctype,
-                  [<<"html">>, <<"PUBLIC">>,
-                   <<"-//W3C//DTD XHTML 1.0 Transitional//EN">>,
-                   <<"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">>]}))),
-    ?assertEqual(
-       <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>,
-       iolist_to_binary(
-         to_html({<<"html">>,[],
-                  [{pi, <<"xml:namespace">>,
-                    [{<<"prefix">>,<<"o">>},
-                     {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]}))),
-    ok.
-
-escape_test() ->
-    ?assertEqual(
-       <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape(<<"&quot;\"word ><<up!&quot;">>)),
-    ?assertEqual(
-       <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape("&quot;\"word ><<up!&quot;")),
-    ?assertEqual(
-       <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape('&quot;\"word ><<up!&quot;')),
-    ok.
-
-escape_attr_test() ->
-    ?assertEqual(
-       <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape_attr(<<"&quot;\"word ><<up!&quot;">>)),
-    ?assertEqual(
-       <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape_attr("&quot;\"word ><<up!&quot;")),
-    ?assertEqual(
-       <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
-       escape_attr('&quot;\"word ><<up!&quot;')),
-    ?assertEqual(
-       <<"12345">>,
-       escape_attr(12345)),
-    ?assertEqual(
-       <<"1.5">>,
-       escape_attr(1.5)),
-    ok.
-
-tokens_test() ->
-    ?assertEqual(
-       [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
-                                {<<"wibble">>, <<"wibble">>},
-                                {<<"alice">>, <<"bob">>}], true}],
-       tokens(<<"<foo bar=baz wibble='wibble' alice=\"bob\"/>">>)),
-    ?assertEqual(
-       [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
-                                {<<"wibble">>, <<"wibble">>},
-                                {<<"alice">>, <<"bob">>}], true}],
-       tokens(<<"<foo bar=baz wibble='wibble' alice=bob/>">>)),
-    ?assertEqual(
-       [{comment, <<"[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]">>}],
-       tokens(<<"<!--[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]-->">>)),
-    ?assertEqual(
-       [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
-        {data, <<" A= B <= C ">>, false},
-        {end_tag, <<"script">>}],
-       tokens(<<"<script type=\"text/javascript\"> A= B <= C </script>">>)),
-    ?assertEqual(
-       [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
-        {data, <<" A= B <= C ">>, false},
-        {end_tag, <<"script">>}],
-       tokens(<<"<script type =\"text/javascript\"> A= B <= C </script>">>)),
-    ?assertEqual(
-       [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
-        {data, <<" A= B <= C ">>, false},
-        {end_tag, <<"script">>}],
-       tokens(<<"<script type = \"text/javascript\"> A= B <= C </script>">>)),
-    ?assertEqual(
-       [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
-        {data, <<" A= B <= C ">>, false},
-        {end_tag, <<"script">>}],
-       tokens(<<"<script type= \"text/javascript\"> A= B <= C </script>">>)),
-    ?assertEqual(
-       [{start_tag, <<"textarea">>, [], false},
-        {data, <<"<html></body>">>, false},
-        {end_tag, <<"textarea">>}],
-       tokens(<<"<textarea><html></body></textarea>">>)),
-    ?assertEqual(
-       [{start_tag, <<"textarea">>, [], false},
-        {data, <<"<html></body></textareaz>">>, false}],
-       tokens(<<"<textarea ><html></body></textareaz>">>)),
-    ?assertEqual(
-       [{pi, <<"xml:namespace">>,
-         [{<<"prefix">>,<<"o">>},
-          {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
-       tokens(<<"<?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?>">>)),
-    ?assertEqual(
-       [{pi, <<"xml:namespace">>,
-         [{<<"prefix">>,<<"o">>},
-          {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
-       tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office \n?>">>)),
-    ?assertEqual(
-       [{pi, <<"xml:namespace">>,
-         [{<<"prefix">>,<<"o">>},
-          {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
-       tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office">>)),
-    ?assertEqual(
-       [{data, <<"<">>, false}],
-       tokens(<<"&lt;">>)),
-    ?assertEqual(
-       [{data, <<"not html ">>, false},
-        {data, <<"< at all">>, false}],
-       tokens(<<"not html < at all">>)),
-    ok.
-
-parse_test() ->
-    D0 = <<"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">
-<html>
- <head>
-   <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">
-   <title>Foo</title>
-   <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/rel/dojo/resources/dojo.css\" media=\"screen\">
-   <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/foo.css\" media=\"screen\">
-   <!--[if lt IE 7]>
-   <style type=\"text/css\">
-     .no_ie { display: none; }
-   </style>
-   <![endif]-->
-   <link rel=\"icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
-   <link rel=\"shortcut icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
- </head>
- <body id=\"home\" class=\"tundra\"><![CDATA[&lt;<this<!-- is -->CDATA>&gt;]]></body>
-</html>">>,
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"head">>, [],
-          [{<<"meta">>,
-            [{<<"http-equiv">>,<<"Content-Type">>},
-             {<<"content">>,<<"text/html; charset=UTF-8">>}],
-            []},
-           {<<"title">>,[],[<<"Foo">>]},
-           {<<"link">>,
-            [{<<"rel">>,<<"stylesheet">>},
-             {<<"type">>,<<"text/css">>},
-             {<<"href">>,<<"/static/rel/dojo/resources/dojo.css">>},
-             {<<"media">>,<<"screen">>}],
-            []},
-           {<<"link">>,
-            [{<<"rel">>,<<"stylesheet">>},
-             {<<"type">>,<<"text/css">>},
-             {<<"href">>,<<"/static/foo.css">>},
-             {<<"media">>,<<"screen">>}],
-            []},
-           {comment,<<"[if lt IE 7]>\n   <style type=\"text/css\">\n     .no_ie { display: none; }\n   </style>\n   <![endif]">>},
-           {<<"link">>,
-            [{<<"rel">>,<<"icon">>},
-             {<<"href">>,<<"/static/images/favicon.ico">>},
-             {<<"type">>,<<"image/x-icon">>}],
-            []},
-           {<<"link">>,
-            [{<<"rel">>,<<"shortcut icon">>},
-             {<<"href">>,<<"/static/images/favicon.ico">>},
-             {<<"type">>,<<"image/x-icon">>}],
-            []}]},
-         {<<"body">>,
-          [{<<"id">>,<<"home">>},
-           {<<"class">>,<<"tundra">>}],
-          [<<"&lt;<this<!-- is -->CDATA>&gt;">>]}]},
-       parse(D0)),
-    ?assertEqual(
-       {<<"html">>,[],
-        [{pi, <<"xml:namespace">>,
-          [{<<"prefix">>,<<"o">>},
-           {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]},
-       parse(
-         <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>)),
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"dd">>, [], [<<"foo">>]},
-         {<<"dt">>, [], [<<"bar">>]}]},
-       parse(<<"<html><dd>foo<dt>bar</html>">>)),
-    %% Singleton sadness
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"link">>, [], []},
-         <<"foo">>,
-         {<<"br">>, [], []},
-         <<"bar">>]},
-       parse(<<"<html><link>foo<br>bar</html>">>)),
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"link">>, [], [<<"foo">>,
-                           {<<"br">>, [], []},
-                           <<"bar">>]}]},
-       parse(<<"<html><link>foo<br>bar</link></html>">>)),
-    %% Case insensitive tags
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"head">>, [], [<<"foo">>,
-                           {<<"br">>, [], []},
-                           <<"BAR">>]},
-         {<<"body">>, [{<<"class">>, <<"">>}, {<<"bgcolor">>, <<"#Aa01fF">>}], []}
-        ]},
-       parse(<<"<html><Head>foo<bR>BAR</head><body Class=\"\" bgcolor=\"#Aa01fF\"></BODY></html>">>)),
-    ok.
-
-exhaustive_is_singleton_test() ->
-    T = mochiweb_cover:clause_lookup_table(?MODULE, is_singleton),
-    [?assertEqual(V, is_singleton(K)) || {K, V} <- T].
-
-tokenize_attributes_test() ->
-    ?assertEqual(
-       {<<"foo">>,
-        [{<<"bar">>, <<"b\"az">>},
-         {<<"wibble">>, <<"wibble">>},
-         {<<"taco", 16#c2, 16#a9>>, <<"bell">>},
-         {<<"quux">>, <<"quux">>}],
-        []},
-       parse(<<"<foo bar=\"b&quot;az\" wibble taco&copy;=bell quux">>)),
-    ok.
-
-tokens2_test() ->
-    D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org</link><description>Bob's Rants</description></channel>">>,
-    ?assertEqual(
-       [{start_tag,<<"channel">>,[],false},
-        {start_tag,<<"title">>,[],false},
-        {data,<<"from __future__ import *">>,false},
-        {end_tag,<<"title">>},
-        {start_tag,<<"link">>,[],true},
-        {data,<<"http://bob.pythonmac.org">>,false},
-        {end_tag,<<"link">>},
-        {start_tag,<<"description">>,[],false},
-        {data,<<"Bob's Rants">>,false},
-        {end_tag,<<"description">>},
-        {end_tag,<<"channel">>}],
-       tokens(D0)),
-    ok.
-
-to_tokens_test() ->
-    ?assertEqual(
-       [{start_tag, <<"p">>, [{class, 1}], false},
-        {end_tag, <<"p">>}],
-       to_tokens({p, [{class, 1}], []})),
-    ?assertEqual(
-       [{start_tag, <<"p">>, [], false},
-        {end_tag, <<"p">>}],
-       to_tokens({p})),
-    ?assertEqual(
-       [{'=', <<"data">>}],
-       to_tokens({'=', <<"data">>})),
-    ?assertEqual(
-       [{comment, <<"comment">>}],
-       to_tokens({comment, <<"comment">>})),
-    %% The attribute-list shorthand is only allowed in sub-tags:
-    %% {p, [{"class", "foo"}]} is treated as {p, [{"class", "foo"}], []}.
-    %% At the top level the second element is always taken as content:
-    %% {p, [{"class", "foo"}]} is treated as {p, [], [{"class", "foo"}]}.
-    ?assertEqual(
-       [{start_tag, <<"html">>, [], false},
-        {start_tag, <<"p">>, [{class, 1}], false},
-        {end_tag, <<"p">>},
-        {end_tag, <<"html">>}],
-       to_tokens({html, [{p, [{class, 1}]}]})),
-    ok.
-
-parse2_test() ->
-    D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org<br>foo</link><description>Bob's Rants</description></channel>">>,
-    ?assertEqual(
-       {<<"channel">>,[],
-        [{<<"title">>,[],[<<"from __future__ import *">>]},
-         {<<"link">>,[],[
-                         <<"http://bob.pythonmac.org">>,
-                         {<<"br">>,[],[]},
-                         <<"foo">>]},
-         {<<"description">>,[],[<<"Bob's Rants">>]}]},
-       parse(D0)),
-    ok.
-
-parse_tokens_test() ->
-    D0 = [{doctype,[<<"HTML">>,<<"PUBLIC">>,<<"-//W3C//DTD HTML 4.01 Transitional//EN">>]},
-          {data,<<"\n">>,true},
-          {start_tag,<<"html">>,[],false}],
-    ?assertEqual(
-       {<<"html">>, [], []},
-       parse_tokens(D0)),
-    D1 = D0 ++ [{end_tag, <<"html">>}],
-    ?assertEqual(
-       {<<"html">>, [], []},
-       parse_tokens(D1)),
-    D2 = D0 ++ [{start_tag, <<"body">>, [], false}],
-    ?assertEqual(
-       {<<"html">>, [], [{<<"body">>, [], []}]},
-       parse_tokens(D2)),
-    D3 = D0 ++ [{start_tag, <<"head">>, [], false},
-                {end_tag, <<"head">>},
-                {start_tag, <<"body">>, [], false}],
-    ?assertEqual(
-       {<<"html">>, [], [{<<"head">>, [], []}, {<<"body">>, [], []}]},
-       parse_tokens(D3)),
-    D4 = D3 ++ [{data,<<"\n">>,true},
-                {start_tag,<<"div">>,[{<<"class">>,<<"a">>}],false},
-                {start_tag,<<"a">>,[{<<"name">>,<<"#anchor">>}],false},
-                {end_tag,<<"a">>},
-                {end_tag,<<"div">>},
-                {start_tag,<<"div">>,[{<<"class">>,<<"b">>}],false},
-                {start_tag,<<"div">>,[{<<"class">>,<<"c">>}],false},
-                {end_tag,<<"div">>},
-                {end_tag,<<"div">>}],
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"head">>, [], []},
-         {<<"body">>, [],
-          [{<<"div">>, [{<<"class">>, <<"a">>}], [{<<"a">>, [{<<"name">>, <<"#anchor">>}], []}]},
-           {<<"div">>, [{<<"class">>, <<"b">>}], [{<<"div">>, [{<<"class">>, <<"c">>}], []}]}
-          ]}]},
-       parse_tokens(D4)),
-    D5 = [{start_tag,<<"html">>,[],false},
-          {data,<<"\n">>,true},
-          {data,<<"boo">>,false},
-          {data,<<"hoo">>,false},
-          {data,<<"\n">>,true},
-          {end_tag,<<"html">>}],
-    ?assertEqual(
-       {<<"html">>, [], [<<"\nboohoo\n">>]},
-       parse_tokens(D5)),
-    D6 = [{start_tag,<<"html">>,[],false},
-          {data,<<"\n">>,true},
-          {data,<<"\n">>,true},
-          {end_tag,<<"html">>}],
-    ?assertEqual(
-       {<<"html">>, [], []},
-       parse_tokens(D6)),
-    D7 = [{start_tag,<<"html">>,[],false},
-          {start_tag,<<"ul">>,[],false},
-          {start_tag,<<"li">>,[],false},
-          {data,<<"word">>,false},
-          {start_tag,<<"li">>,[],false},
-          {data,<<"up">>,false},
-          {end_tag,<<"li">>},
-          {start_tag,<<"li">>,[],false},
-          {data,<<"fdsa">>,false},
-          {start_tag,<<"br">>,[],true},
-          {data,<<"asdf">>,false},
-          {end_tag,<<"ul">>},
-          {end_tag,<<"html">>}],
-    ?assertEqual(
-       {<<"html">>, [],
-        [{<<"ul">>, [],
-          [{<<"li">>, [], [<<"word">>]},
-           {<<"li">>, [], [<<"up">>]},
-           {<<"li">>, [], [<<"fdsa">>,{<<"br">>, [], []}, <<"asdf">>]}]}]},
-       parse_tokens(D7)),
-    ok.
-
-destack_test() ->
-    {<<"a">>, [], []} =
-        destack([{<<"a">>, [], []}]),
-    {<<"a">>, [], [{<<"b">>, [], []}]} =
-        destack([{<<"b">>, [], []}, {<<"a">>, [], []}]),
-    {<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]} =
-     destack([{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
-    [{<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]}] =
-     destack(<<"b">>,
-             [{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
-    [{<<"b">>, [], [{<<"c">>, [], []}]}, {<<"a">>, [], []}] =
-     destack(<<"c">>,
-             [{<<"c">>, [], []}, {<<"b">>, [], []},{<<"a">>, [], []}]),
-    ok.
-
-doctype_test() ->
-    ?assertEqual(
-       {<<"html">>,[],[{<<"head">>,[],[]}]},
-       mochiweb_html:parse("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
-                           "<html><head></head></body></html>")),
-    %% http://code.google.com/p/mochiweb/issues/detail?id=52
-    ?assertEqual(
-       {<<"html">>,[],[{<<"head">>,[],[]}]},
-       mochiweb_html:parse("<html>"
-                           "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
-                           "<head></head></body></html>")),
-    %% http://github.com/mochi/mochiweb/pull/13
-    ?assertEqual(
-       {<<"html">>,[],[{<<"head">>,[],[]}]},
-       mochiweb_html:parse("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\"/>"
-                           "<html>"
-                           "<head></head></body></html>")),
-    ok.
-
-dumb_br_test() ->
-    %% http://code.google.com/p/mochiweb/issues/detail?id=71
-    ?assertEqual(
-       {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
-       mochiweb_html:parse("<div><br/><br/>z</br/></br/></div>")),
-    ?assertEqual(
-       {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
-       mochiweb_html:parse("<div><br><br>z</br/></br/></div>")),
-    ?assertEqual(
-       {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>, {<<"br">>, [], []}, {<<"br">>, [], []}]},
-       mochiweb_html:parse("<div><br><br>z<br/><br/></div>")),
-    ?assertEqual(
-       {<<"div">>,[],[{<<"br">>, [], []}, {<<"br">>, [], []}, <<"z">>]},
-       mochiweb_html:parse("<div><br><br>z</br></br></div>")).
-
-
-php_test() ->
-    %% http://code.google.com/p/mochiweb/issues/detail?id=71
-    ?assertEqual(
-       [{pi, <<"php\n">>}],
-       mochiweb_html:tokens(
-         "<?php\n?>")),
-    ?assertEqual(
-       {<<"div">>, [], [{pi, <<"php\n">>}]},
-       mochiweb_html:parse(
-         "<div><?php\n?></div>")),
-    ok.
-
-parse_unquoted_attr_test() ->
-    D0 = <<"<html><img src=/images/icon.png/></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
-        ]},
-        mochiweb_html:parse(D0)),
-
-    D1 = <<"<html><img src=/images/icon.png></img></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
-        ]},
-        mochiweb_html:parse(D1)),
-
-    D2 = <<"<html><img src=/images/icon&gt;.png width=100></img></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon>.png">> }, { <<"width">>, <<"100">> } ], [] }
-        ]},
-        mochiweb_html:parse(D2)),
-    ok.
-
-parse_quoted_attr_test() ->
-    D0 = <<"<html><img src='/images/icon.png'></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon.png">> } ], [] }
-        ]},
-        mochiweb_html:parse(D0)),
-
-    D1 = <<"<html><img src=\"/images/icon.png'></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon.png'></html>">> } ], [] }
-        ]},
-        mochiweb_html:parse(D1)),
-
-    D2 = <<"<html><img src=\"/images/icon&gt;.png\"></html>">>,
-    ?assertEqual(
-        {<<"html">>,[],[
-            { <<"img">>, [ { <<"src">>, <<"/images/icon>.png">> } ], [] }
-        ]},
-        mochiweb_html:parse(D2)),
-    ok.
-
-parse_missing_attr_name_test() ->
-    D0 = <<"<html =black></html>">>,
-    ?assertEqual(
-        {<<"html">>, [ { <<"=">>, <<"=">> }, { <<"black">>, <<"black">> } ], [] },
-        mochiweb_html:parse(D0)),
-    ok.
-
-parse_broken_pi_test() ->
-    D0 = <<"<html><?xml:namespace prefix = o ns = \"urn:schemas-microsoft-com:office:office\" /></html>">>,
-    ?assertEqual(
-        {<<"html">>, [], [
-            { pi, <<"xml:namespace">>, [ { <<"prefix">>, <<"o">> },
-                                         { <<"ns">>, <<"urn:schemas-microsoft-com:office:office">> } ] }
-        ] },
-        mochiweb_html:parse(D0)),
-    ok.
-
-parse_funny_singletons_test() ->
-    D0 = <<"<html><input><input>x</input></input></html>">>,
-    ?assertEqual(
-        {<<"html">>, [], [
-            { <<"input">>, [], [] },
-            { <<"input">>, [], [ <<"x">> ] }
-        ] },
-        mochiweb_html:parse(D0)),
-    ok.
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_http.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_http.erl b/src/mochiweb/src/mochiweb_http.erl
deleted file mode 100644
index 23a4752..0000000
--- a/src/mochiweb/src/mochiweb_http.erl
+++ /dev/null
@@ -1,290 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc HTTP server.
-
--module(mochiweb_http).
--author('bob@mochimedia.com').
--export([start/0, start/1, stop/0, stop/1]).
--export([loop/2, default_body/1]).
--export([after_response/2, reentry/1]).
--export([parse_range_request/1, range_skip_length/2]).
-
--define(REQUEST_RECV_TIMEOUT, 300000).   % timeout waiting for request line
--define(HEADERS_RECV_TIMEOUT, 30000). % timeout waiting for headers
-
--define(MAX_HEADERS, 1000).
--define(DEFAULTS, [{name, ?MODULE},
-                   {port, 8888}]).
-
-parse_options(Options) ->
-    {loop, HttpLoop} = proplists:lookup(loop, Options),
-    Loop = fun (S) ->
-                   ?MODULE:loop(S, HttpLoop)
-           end,
-    Options1 = [{loop, Loop} | proplists:delete(loop, Options)],
-    mochilists:set_defaults(?DEFAULTS, Options1).
-
-stop() ->
-    mochiweb_socket_server:stop(?MODULE).
-
-stop(Name) ->
-    mochiweb_socket_server:stop(Name).
-
-start() ->
-    start([{ip, "127.0.0.1"},
-           {loop, {?MODULE, default_body}}]).
-
-%% @spec start(Options) -> ServerRet
-%%     Options = [option()]
-%%     Option = {name, atom()} | {ip, string() | tuple()} | {backlog, integer()}
-%%              | {nodelay, boolean()} | {acceptor_pool_size, integer()}
-%%              | {ssl, boolean()} | {profile_fun, undefined | (Props) -> ok}
-%% @doc Start a mochiweb server.
-%%      profile_fun is used to profile accept timing.
-%%      After each accept, if defined, profile_fun is called with a proplist of a subset of the mochiweb_socket_server state and timing information.
-%%      The proplist is as follows: [{name, Name}, {port, Port}, {active_sockets, ActiveSockets}, {timing, Timing}].
-%% @end
-start(Options) ->
-    mochiweb_socket_server:start(parse_options(Options)).
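-
-%% A minimal usage sketch (hypothetical name and port): start a server with
-%% an inline handler; stop(hello_http) shuts it down again by name.
-start_hello_sketch() ->
-    start([{name, hello_http},
-           {port, 8080},
-           {loop, fun (Req) ->
-                          Req:ok({"text/plain", [], "hello\n"})
-                  end}]).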
-
-frm(Body) ->
-    ["<html><head></head><body>"
-     "<form method=\"POST\">"
-     "<input type=\"hidden\" value=\"message\" name=\"hidden\"/>"
-     "<input type=\"submit\" value=\"regular POST\">"
-     "</form>"
-     "<br />"
-     "<form method=\"POST\" enctype=\"multipart/form-data\""
-     " action=\"/multipart\">"
-     "<input type=\"hidden\" value=\"multipart message\" name=\"hidden\"/>"
-     "<input type=\"file\" name=\"file\"/>"
-     "<input type=\"submit\" value=\"multipart POST\" />"
-     "</form>"
-     "<pre>", Body, "</pre>"
-     "</body></html>"].
-
-default_body(Req, M, "/chunked") when M =:= 'GET'; M =:= 'HEAD' ->
-    Res = Req:ok({"text/plain", [], chunked}),
-    Res:write_chunk("First chunk\r\n"),
-    timer:sleep(5000),
-    Res:write_chunk("Last chunk\r\n"),
-    Res:write_chunk("");
-default_body(Req, M, _Path) when M =:= 'GET'; M =:= 'HEAD' ->
-    Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
-                                   {parse_cookie, Req:parse_cookie()},
-                                   Req:dump()]]),
-    Req:ok({"text/html",
-            [mochiweb_cookies:cookie("mochiweb_http", "test_cookie")],
-            frm(Body)});
-default_body(Req, 'POST', "/multipart") ->
-    Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
-                                   {parse_cookie, Req:parse_cookie()},
-                                   {body, Req:recv_body()},
-                                   Req:dump()]]),
-    Req:ok({"text/html", [], frm(Body)});
-default_body(Req, 'POST', _Path) ->
-    Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
-                                   {parse_cookie, Req:parse_cookie()},
-                                   {parse_post, Req:parse_post()},
-                                   Req:dump()]]),
-    Req:ok({"text/html", [], frm(Body)});
-default_body(Req, _Method, _Path) ->
-    Req:respond({501, [], []}).
-
-default_body(Req) ->
-    default_body(Req, Req:get(method), Req:get(path)).
-
-loop(Socket, Body) ->
-    mochiweb_socket:setopts(Socket, [{packet, http}]),
-    request(Socket, Body).
-
-request(Socket, Body) ->
-    mochiweb_socket:setopts(Socket, [{active, once}]),
-    receive
-        {Protocol, _, {http_request, Method, Path, Version}} when Protocol == http orelse Protocol == ssl ->
-            mochiweb_socket:setopts(Socket, [{packet, httph}]),
-            headers(Socket, {Method, Path, Version}, [], Body, 0);
-        {Protocol, _, {http_error, "\r\n"}} when Protocol == http orelse Protocol == ssl ->
-            request(Socket, Body);
-        {Protocol, _, {http_error, "\n"}} when Protocol == http orelse Protocol == ssl ->
-            request(Socket, Body);
-        {tcp_closed, _} ->
-            mochiweb_socket:close(Socket),
-            exit(normal);
-        _Other ->
-            handle_invalid_request(Socket)
-    after ?REQUEST_RECV_TIMEOUT ->
-        mochiweb_socket:close(Socket),
-        exit(normal)
-    end.
-
-reentry(Body) ->
-    fun (Req) ->
-            ?MODULE:after_response(Body, Req)
-    end.
-
-headers(Socket, Request, Headers, _Body, ?MAX_HEADERS) ->
-    %% Too many headers sent, bad request.
-    mochiweb_socket:setopts(Socket, [{packet, raw}]),
-    handle_invalid_request(Socket, Request, Headers);
-headers(Socket, Request, Headers, Body, HeaderCount) ->
-    mochiweb_socket:setopts(Socket, [{active, once}]),
-    receive
-        {Protocol, _, http_eoh} when Protocol == http orelse Protocol == ssl ->
-            Req = new_request(Socket, Request, Headers),
-            call_body(Body, Req),
-            ?MODULE:after_response(Body, Req);
-        {Protocol, _, {http_header, _, Name, _, Value}} when Protocol == http orelse Protocol == ssl ->
-            headers(Socket, Request, [{Name, Value} | Headers], Body,
-                    1 + HeaderCount);
-        {tcp_closed, _} ->
-            mochiweb_socket:close(Socket),
-            exit(normal);
-        _Other ->
-            handle_invalid_request(Socket, Request, Headers)
-    after ?HEADERS_RECV_TIMEOUT ->
-        mochiweb_socket:close(Socket),
-        exit(normal)
-    end.
-
-call_body({M, F}, Req) ->
-    M:F(Req);
-call_body(Body, Req) ->
-    Body(Req).
-
-handle_invalid_request(Socket) ->
-    handle_invalid_request(Socket, {'GET', {abs_path, "/"}, {0,9}}, []).
-
-handle_invalid_request(Socket, Request, RevHeaders) ->
-    Req = new_request(Socket, Request, RevHeaders),
-    Req:respond({400, [], []}),
-    mochiweb_socket:close(Socket),
-    exit(normal).
-
-new_request(Socket, Request, RevHeaders) ->
-    mochiweb_socket:setopts(Socket, [{packet, raw}]),
-    mochiweb:new_request({Socket, Request, lists:reverse(RevHeaders)}).
-
-after_response(Body, Req) ->
-    Socket = Req:get(socket),
-    case Req:should_close() of
-        true ->
-            mochiweb_socket:close(Socket),
-            exit(normal);
-        false ->
-            Req:cleanup(),
-            ?MODULE:loop(Socket, Body)
-    end.
-
-parse_range_request("bytes=0-") ->
-    undefined;
-parse_range_request(RawRange) when is_list(RawRange) ->
-    try
-        "bytes=" ++ RangeString = RawRange,
-        Ranges = string:tokens(RangeString, ","),
-        lists:map(fun ("-" ++ V)  ->
-                          {none, list_to_integer(V)};
-                      (R) ->
-                          case string:tokens(R, "-") of
-                              [S1, S2] ->
-                                  {list_to_integer(S1), list_to_integer(S2)};
-                              [S] ->
-                                  {list_to_integer(S), none}
-                          end
-                  end,
-                  Ranges)
-    catch
-        _:_ ->
-            fail
-    end.
-
-range_skip_length(Spec, Size) ->
-    case Spec of
-        {none, R} when R =< Size, R >= 0 ->
-            {Size - R, R};
-        {none, _OutOfRange} ->
-            {0, Size};
-        {R, none} when R >= 0, R < Size ->
-            {R, Size - R};
-        {_OutOfRange, none} ->
-            invalid_range;
-        {Start, End} when 0 =< Start, Start =< End, End < Size ->
-            {Start, End - Start + 1};
-        {_OutOfRange, _End} ->
-            invalid_range
-    end.
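-
-%% Range sketch: parse_range_request/1 maps a Range header value to a list
-%% of {Start, End} | {Start, none} | {none, Suffix} specs, and
-%% range_skip_length/2 turns one spec into {Skip, Length} for a body of a
-%% known size.
-range_sketch() ->
-    [{0, 499}, {none, 500}] = parse_range_request("bytes=0-499,-500"),
-    {0, 500} = range_skip_length({0, 499}, 1000),
-    {500, 500} = range_skip_length({none, 500}, 1000),
-    ok.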
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-range_test() ->
-    %% valid, single ranges
-    ?assertEqual([{20, 30}], parse_range_request("bytes=20-30")),
-    ?assertEqual([{20, none}], parse_range_request("bytes=20-")),
-    ?assertEqual([{none, 20}], parse_range_request("bytes=-20")),
-
-    %% trivial single range
-    ?assertEqual(undefined, parse_range_request("bytes=0-")),
-
-    %% invalid, single ranges
-    ?assertEqual(fail, parse_range_request("")),
-    ?assertEqual(fail, parse_range_request("garbage")),
-    ?assertEqual(fail, parse_range_request("bytes=-20-30")),
-
-    %% valid, multiple range
-    ?assertEqual(
-       [{20, 30}, {50, 100}, {110, 200}],
-       parse_range_request("bytes=20-30,50-100,110-200")),
-    ?assertEqual(
-       [{20, none}, {50, 100}, {none, 200}],
-       parse_range_request("bytes=20-,50-100,-200")),
-
-    %% no ranges
-    ?assertEqual([], parse_range_request("bytes=")),
-    ok.
-
-range_skip_length_test() ->
-    Body = <<"012345678901234567890123456789012345678901234567890123456789">>,
-    BodySize = byte_size(Body), %% 60
-    BodySize = 60,
-
-    %% these values assume BodySize =:= 60
-    ?assertEqual({1,9}, range_skip_length({1,9}, BodySize)), %% 1-9
-    ?assertEqual({10,10}, range_skip_length({10,19}, BodySize)), %% 10-19
-    ?assertEqual({40, 20}, range_skip_length({none, 20}, BodySize)), %% -20
-    ?assertEqual({30, 30}, range_skip_length({30, none}, BodySize)), %% 30-
-
-    %% valid edge cases for range_skip_length
-    ?assertEqual({BodySize, 0}, range_skip_length({none, 0}, BodySize)),
-    ?assertEqual({0, BodySize}, range_skip_length({none, BodySize}, BodySize)),
-    ?assertEqual({0, BodySize}, range_skip_length({0, none}, BodySize)),
-    BodySizeLess1 = BodySize - 1,
-    ?assertEqual({BodySizeLess1, 1},
-                 range_skip_length({BodySize - 1, none}, BodySize)),
-
-    %% out of range, return whole thing
-    ?assertEqual({0, BodySize},
-                 range_skip_length({none, BodySize + 1}, BodySize)),
-    ?assertEqual({0, BodySize},
-                 range_skip_length({none, -1}, BodySize)),
-
-    %% invalid ranges
-    ?assertEqual(invalid_range,
-                 range_skip_length({-1, 30}, BodySize)),
-    ?assertEqual(invalid_range,
-                 range_skip_length({0, BodySize + 1}, BodySize)),
-    ?assertEqual(invalid_range,
-                 range_skip_length({-1, BodySize + 1}, BodySize)),
-    ?assertEqual(invalid_range,
-                 range_skip_length({BodySize, 40}, BodySize)),
-    ?assertEqual(invalid_range,
-                 range_skip_length({-1, none}, BodySize)),
-    ?assertEqual(invalid_range,
-                 range_skip_length({BodySize, none}, BodySize)),
-    ok.
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_io.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_io.erl b/src/mochiweb/src/mochiweb_io.erl
deleted file mode 100644
index 6ce57ec..0000000
--- a/src/mochiweb/src/mochiweb_io.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Utilities for dealing with IO devices (open files).
-
--module(mochiweb_io).
--author('bob@mochimedia.com').
-
--export([iodevice_stream/3, iodevice_stream/2]).
--export([iodevice_foldl/4, iodevice_foldl/3]).
--export([iodevice_size/1]).
--define(READ_SIZE, 8192).
-
-iodevice_foldl(F, Acc, IoDevice) ->
-    iodevice_foldl(F, Acc, IoDevice, ?READ_SIZE).
-
-iodevice_foldl(F, Acc, IoDevice, BufferSize) ->
-    case file:read(IoDevice, BufferSize) of
-        eof ->
-            Acc;
-        {ok, Data} ->
-            iodevice_foldl(F, F(Data, Acc), IoDevice, BufferSize)
-    end.
-
-iodevice_stream(Callback, IoDevice) ->
-    iodevice_stream(Callback, IoDevice, ?READ_SIZE).
-
-iodevice_stream(Callback, IoDevice, BufferSize) ->
-    F = fun (Data, ok) -> Callback(Data) end,
-    ok = iodevice_foldl(F, ok, IoDevice, BufferSize).
-
-iodevice_size(IoDevice) ->
-    {ok, Size} = file:position(IoDevice, eof),
-    {ok, 0} = file:position(IoDevice, bof),
-    Size.
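-
-%% Usage sketch (hypothetical helper): stream a file through a callback in
-%% ?READ_SIZE chunks. Note that the callback must return ok for each chunk,
-%% since iodevice_stream folds with ok as the only accepted accumulator.
-stream_file_sketch(Path, SendChunk) ->
-    {ok, IoDevice} = file:open(Path, [read, raw, binary]),
-    try
-        iodevice_stream(SendChunk, IoDevice)
-    after
-        ok = file:close(IoDevice)
-    end.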
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_mime.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_mime.erl b/src/mochiweb/src/mochiweb_mime.erl
deleted file mode 100644
index 5344aee..0000000
--- a/src/mochiweb/src/mochiweb_mime.erl
+++ /dev/null
@@ -1,94 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Gives a good MIME type guess based on file extension.
-
--module(mochiweb_mime).
--author('bob@mochimedia.com').
--export([from_extension/1]).
-
-%% @spec from_extension(S::string()) -> string() | undefined
-%% @doc Given a filename extension (e.g. ".html") return a guess for the MIME
-%%      type such as "text/html". Will return the atom undefined if no good
-%%      guess is available.
-from_extension(".html") ->
-    "text/html";
-from_extension(".xhtml") ->
-    "application/xhtml+xml";
-from_extension(".xml") ->
-    "application/xml";
-from_extension(".css") ->
-    "text/css";
-from_extension(".js") ->
-    "application/x-javascript";
-from_extension(".jpg") ->
-    "image/jpeg";
-from_extension(".gif") ->
-    "image/gif";
-from_extension(".png") ->
-    "image/png";
-from_extension(".swf") ->
-    "application/x-shockwave-flash";
-from_extension(".zip") ->
-    "application/zip";
-from_extension(".bz2") ->
-    "application/x-bzip2";
-from_extension(".gz") ->
-    "application/x-gzip";
-from_extension(".tar") ->
-    "application/x-tar";
-from_extension(".tgz") ->
-    "application/x-gzip";
-from_extension(".txt") ->
-    "text/plain";
-from_extension(".doc") ->
-    "application/msword";
-from_extension(".pdf") ->
-    "application/pdf";
-from_extension(".xls") ->
-    "application/vnd.ms-excel";
-from_extension(".rtf") ->
-    "application/rtf";
-from_extension(".mov") ->
-    "video/quicktime";
-from_extension(".mp3") ->
-    "audio/mpeg";
-from_extension(".z") ->
-    "application/x-compress";
-from_extension(".wav") ->
-    "audio/x-wav";
-from_extension(".ico") ->
-    "image/x-icon";
-from_extension(".bmp") ->
-    "image/bmp";
-from_extension(".m4a") ->
-    "audio/mpeg";
-from_extension(".m3u") ->
-    "audio/x-mpegurl";
-from_extension(".exe") ->
-    "application/octet-stream";
-from_extension(".csv") ->
-    "text/csv";
-from_extension(_) ->
-    undefined.
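-
-%% Usage sketch (hypothetical helper): pair from_extension/1 with
-%% filename:extension/1 and fall back to a generic type for unknown
-%% extensions.
-guess_mime_sketch(Path) ->
-    case from_extension(filename:extension(Path)) of
-        undefined -> "application/octet-stream";
-        Mime -> Mime
-    end.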
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-exhaustive_from_extension_test() ->
-    T = mochiweb_cover:clause_lookup_table(?MODULE, from_extension),
-    [?assertEqual(V, from_extension(K)) || {K, V} <- T].
-
-from_extension_test() ->
-    ?assertEqual("text/html",
-                 from_extension(".html")),
-    ?assertEqual(undefined,
-                 from_extension("")),
-    ?assertEqual(undefined,
-                 from_extension(".wtf")),
-    ok.
-
--endif.


[24/49] Remove src/ejson

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_lex.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_lex.c b/src/ejson/c_src/yajl/yajl_lex.c
deleted file mode 100644
index 11e5f7b..0000000
--- a/src/ejson/c_src/yajl/yajl_lex.c
+++ /dev/null
@@ -1,737 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_lex.h"
-#include "yajl_buf.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <assert.h>
-#include <string.h>
-
-#ifdef YAJL_LEXER_DEBUG
-static const char *
-tokToStr(yajl_tok tok) 
-{
-    switch (tok) {
-        case yajl_tok_bool: return "bool";
-        case yajl_tok_colon: return "colon";
-        case yajl_tok_comma: return "comma";
-        case yajl_tok_eof: return "eof";
-        case yajl_tok_error: return "error";
-        case yajl_tok_left_brace: return "brace";
-        case yajl_tok_left_bracket: return "bracket";
-        case yajl_tok_null: return "null";
-        case yajl_tok_integer: return "integer";
-        case yajl_tok_double: return "double";
-        case yajl_tok_right_brace: return "brace";
-        case yajl_tok_right_bracket: return "bracket";
-        case yajl_tok_string: return "string";
-        case yajl_tok_string_with_escapes: return "string_with_escapes";
-    }
-    return "unknown";
-}
-#endif
-
-/* Impact of the stream parsing feature on the lexer:
- *
- * YAJL supports stream parsing.  That is, the ability to parse the first
- * bits of a chunk of JSON before the last bits are available (still on
- * the network or disk).  This makes the lexer more complex.  The
- * responsibility of the lexer is to handle transparently the case where
- * a chunk boundary falls in the middle of a token.  This is
- * accomplished via a buffer and a character-reading abstraction.
- *
- * Overview of implementation
- *
- * When we lex to end of input string before end of token is hit, we
- * copy all of the input text composing the token into our lexBuf.
- * 
- * Every time we read a character, we do so through the readChar function.
- * readChar's responsibility is to handle pulling all chars from the buffer
- * before pulling chars from input text
- */
-
-struct yajl_lexer_t {
-    /* the overall line and char offset into the data */
-    unsigned int lineOff;
-    unsigned int charOff;
-
-    /* error */
-    yajl_lex_error error;
-
-    /* an input buffer to handle the case where a token is spread over
-     * multiple chunks */ 
-    yajl_buf buf;
-
-    /* in the case where we have data in the lexBuf, bufOff holds
-     * the current offset into the lexBuf. */
-    unsigned int bufOff;
-
-    /* are we using the lex buf? */
-    unsigned int bufInUse;
-
-    /* shall we allow comments? */
-    unsigned int allowComments;
-
-    /* shall we validate utf8 inside strings? */
-    unsigned int validateUTF8;
-
-    yajl_alloc_funcs * alloc;
-};
-
-#define readChar(lxr, txt, off)                      \
-    (((lxr)->bufInUse && yajl_buf_len((lxr)->buf) && lxr->bufOff < yajl_buf_len((lxr)->buf)) ? \
-     (*((const unsigned char *) yajl_buf_data((lxr)->buf) + ((lxr)->bufOff)++)) : \
-     ((txt)[(*(off))++]))
-
-#define unreadChar(lxr, off) ((*(off) > 0) ? (*(off))-- : ((lxr)->bufOff--))
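
A minimal standalone sketch (not yajl's code; every name below is
illustrative) of the buffer-first read order that readChar/unreadChar
implement: drain any token bytes carried over from the previous chunk
before touching the fresh input.

    #include <stddef.h>

    typedef struct {
        const unsigned char *carry; /* token bytes saved from the last chunk */
        size_t carryLen;
        size_t carryOff;
    } carry_reader;

    static int next_byte(carry_reader *r, const unsigned char *chunk,
                         size_t chunkLen, size_t *off)
    {
        if (r->carryOff < r->carryLen)      /* buffered bytes first */
            return r->carry[r->carryOff++];
        if (*off < chunkLen)                /* then the fresh chunk */
            return chunk[(*off)++];
        return -1;                          /* out of data: wait for more */
    }
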
-
-yajl_lexer
-yajl_lex_alloc(yajl_alloc_funcs * alloc,
-               unsigned int allowComments, unsigned int validateUTF8)
-{
-    yajl_lexer lxr = (yajl_lexer) YA_MALLOC(alloc, sizeof(struct yajl_lexer_t));
-    memset((void *) lxr, 0, sizeof(struct yajl_lexer_t));
-    lxr->buf = yajl_buf_alloc(alloc);
-    lxr->allowComments = allowComments;
-    lxr->validateUTF8 = validateUTF8;
-    lxr->alloc = alloc;
-    return lxr;
-}
-
-void
-yajl_lex_free(yajl_lexer lxr)
-{
-    yajl_buf_free(lxr->buf);
-    YA_FREE(lxr->alloc, lxr);
-    return;
-}
-
-/* a lookup table which lets us quickly determine three things:
- * VEC - valid escaped control char
- * IJC - invalid json char
- * VHC - valid hex char
- * note.  the solidus '/' may be escaped or not.
- */
-#define VEC 1
-#define IJC 2
-#define VHC 4
-static const char charLookupTable[256] =
-{
-/*00*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
-/*08*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
-/*10*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
-/*18*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
-
-/*20*/ 0      , 0      , VEC|IJC, 0      , 0      , 0      , 0      , 0      ,
-/*28*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , VEC    ,
-/*30*/ VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    ,
-/*38*/ VHC    , VHC    , 0      , 0      , 0      , 0      , 0      , 0      ,
-
-/*40*/ 0      , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , 0      ,
-/*48*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
-/*50*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
-/*58*/ 0      , 0      , 0      , 0      , VEC|IJC, 0      , 0      , 0      ,
-
-/*60*/ 0      , VHC    , VEC|VHC, VHC    , VHC    , VHC    , VEC|VHC, 0      ,
-/*68*/ 0      , 0      , 0      , 0      , 0      , 0      , VEC    , 0      ,
-/*70*/ 0      , 0      , VEC    , 0      , VEC    , 0      , 0      , 0      ,
-/*78*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
-
-/* include these so we don't have to always check the range of the char */
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      , 
-       0      , 0      , 0      , 0      , 0      , 0      , 0      , 0
-};
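
A few worked lookups against the table above, written as asserts; this
fragment assumes it sits in the same translation unit as the (file-static)
table and the VEC/IJC/VHC macros, plus <assert.h>:

    assert(charLookupTable['"']  == (VEC|IJC)); /* escapable, not raw-legal */
    assert(charLookupTable['\\'] == (VEC|IJC));
    assert(charLookupTable['A']  == VHC);       /* hex digit for \uXXXX     */
    assert(charLookupTable['n']  == VEC);       /* \n is a valid escape     */
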
-
-/** process a variable length utf8 encoded codepoint.
- *
- *  returns:
- *    yajl_tok_string - if valid utf8 char was parsed and offset was
- *                      advanced
- *    yajl_tok_eof - if end of input was hit before validation could
- *                   complete
- *    yajl_tok_error - if invalid utf8 was encountered
- * 
- *  NOTE: on error the offset will point to the first char of the
- *  invalid utf8 */
-#define UTF8_CHECK_EOF if (*offset >= jsonTextLen) { return yajl_tok_eof; }
-
-static yajl_tok
-yajl_lex_utf8_char(yajl_lexer lexer, const unsigned char * jsonText,
-                   unsigned int jsonTextLen, unsigned int * offset,
-                   unsigned char curChar)
-{
-    if (curChar <= 0x7f) {
-        /* single byte */
-        return yajl_tok_string;
-    } else if ((curChar >> 5) == 0x6) {
-        /* two byte */ 
-        UTF8_CHECK_EOF;
-        curChar = readChar(lexer, jsonText, offset);
-        if ((curChar >> 6) == 0x2) return yajl_tok_string;
-    } else if ((curChar >> 4) == 0x0e) {
-        /* three byte */
-        UTF8_CHECK_EOF;
-        curChar = readChar(lexer, jsonText, offset);
-        if ((curChar >> 6) == 0x2) {
-            UTF8_CHECK_EOF;
-            curChar = readChar(lexer, jsonText, offset);
-            if ((curChar >> 6) == 0x2) return yajl_tok_string;
-        }
-    } else if ((curChar >> 3) == 0x1e) {
-        /* four byte */
-        UTF8_CHECK_EOF;
-        curChar = readChar(lexer, jsonText, offset);
-        if ((curChar >> 6) == 0x2) {
-            UTF8_CHECK_EOF;
-            curChar = readChar(lexer, jsonText, offset);
-            if ((curChar >> 6) == 0x2) {
-                UTF8_CHECK_EOF;
-                curChar = readChar(lexer, jsonText, offset);
-                if ((curChar >> 6) == 0x2) return yajl_tok_string;
-            }
-        }
-    } 
-
-    return yajl_tok_error;
-}
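
A worked instance of the lead-byte tests above, for U+00E9 ("é"), which
UTF-8 encodes as 0xC3 0xA9:

    #include <assert.h>

    assert((0xC3 >> 5) == 0x6);  /* 110xxxxx: lead byte of a 2-byte char */
    assert((0xA9 >> 6) == 0x2);  /* 10xxxxxx: valid continuation byte    */
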
-
-/* lex a string.  input is the lexer, pointer to beginning of
- * json text, and start of string (offset).
- * a token is returned which has the following meanings:
- * yajl_tok_string: lex of string was successful.  offset points to
- *                  terminating '"'.
- * yajl_tok_eof: end of text was encountered before we could complete
- *               the lex.
- * yajl_tok_error: an unallowable char was embedded in the string; offset
- *               points to the offending char
- */
-#define STR_CHECK_EOF \
-if (*offset >= jsonTextLen) { \
-   tok = yajl_tok_eof; \
-   goto finish_string_lex; \
-}
-
-static yajl_tok
-yajl_lex_string(yajl_lexer lexer, const unsigned char * jsonText,
-                unsigned int jsonTextLen, unsigned int * offset)
-{
-    yajl_tok tok = yajl_tok_error;
-    int hasEscapes = 0;
-
-    for (;;) {
-		unsigned char curChar;
-
-		STR_CHECK_EOF;
-
-        curChar = readChar(lexer, jsonText, offset);
-
-        /* quote terminates */
-        if (curChar == '"') {
-            tok = yajl_tok_string;
-            break;
-        }
-        /* backslash escapes a set of control chars, */
-        else if (curChar == '\\') {
-            hasEscapes = 1;
-            STR_CHECK_EOF;
-
-            /* special case \u */
-            curChar = readChar(lexer, jsonText, offset);
-            if (curChar == 'u') {
-                unsigned int i = 0;
-
-                for (i=0;i<4;i++) {
-                    STR_CHECK_EOF;                
-                    curChar = readChar(lexer, jsonText, offset);                
-                    if (!(charLookupTable[curChar] & VHC)) {
-                        /* back up to offending char */
-                        unreadChar(lexer, offset);
-                        lexer->error = yajl_lex_string_invalid_hex_char;
-                        goto finish_string_lex;
-                    }
-                }
-            } else if (!(charLookupTable[curChar] & VEC)) {
-                /* back up to offending char */
-                unreadChar(lexer, offset);
-                lexer->error = yajl_lex_string_invalid_escaped_char;
-                goto finish_string_lex;                
-            } 
-        }
-        /* when not validating UTF8 it's a simple table lookup to determine
-         * if the present character is invalid */
-        else if(charLookupTable[curChar] & IJC) {
-            /* back up to offending char */
-            unreadChar(lexer, offset);
-            lexer->error = yajl_lex_string_invalid_json_char;
-            goto finish_string_lex;                
-        }
-        /* when in validate UTF8 mode we need to do some extra work */
-        else if (lexer->validateUTF8) {
-            yajl_tok t = yajl_lex_utf8_char(lexer, jsonText, jsonTextLen,
-                                            offset, curChar);
-            
-            if (t == yajl_tok_eof) {
-                tok = yajl_tok_eof;
-                goto finish_string_lex;
-            } else if (t == yajl_tok_error) {
-                lexer->error = yajl_lex_string_invalid_utf8;
-                goto finish_string_lex;
-            } 
-        }
-        /* accept it, and move on */ 
-    }
-  finish_string_lex:
-    /* tell our buddy, the parser, whether it needs to process this string
-     * again */
-    if (hasEscapes && tok == yajl_tok_string) {
-        tok = yajl_tok_string_with_escapes;
-    } 
-
-    return tok;
-}
-
-#define RETURN_IF_EOF if (*offset >= jsonTextLen) return yajl_tok_eof;
-
-static yajl_tok
-yajl_lex_number(yajl_lexer lexer, const unsigned char * jsonText,
-                unsigned int jsonTextLen, unsigned int * offset)
-{
-    /** XXX: numbers are the only entities in json that we must lex
-     *       _beyond_ in order to know that they are complete.  There
-     *       is an ambiguous case for integers at EOF. */
-
-    unsigned char c;
-
-    yajl_tok tok = yajl_tok_integer;
-
-    RETURN_IF_EOF;    
-    c = readChar(lexer, jsonText, offset);
-
-    /* optional leading minus */
-    if (c == '-') {
-        RETURN_IF_EOF;    
-        c = readChar(lexer, jsonText, offset); 
-    }
-
-    /* a single zero, or a series of integers */
-    if (c == '0') {
-        RETURN_IF_EOF;    
-        c = readChar(lexer, jsonText, offset); 
-    } else if (c >= '1' && c <= '9') {
-        do {
-            RETURN_IF_EOF;    
-            c = readChar(lexer, jsonText, offset); 
-        } while (c >= '0' && c <= '9');
-    } else {
-        unreadChar(lexer, offset);
-        lexer->error = yajl_lex_missing_integer_after_minus;
-        return yajl_tok_error;
-    }
-
-    /* optional fraction (indicates this is floating point) */
-    if (c == '.') {
-        int numRd = 0;
-        
-        RETURN_IF_EOF;
-        c = readChar(lexer, jsonText, offset); 
-
-        while (c >= '0' && c <= '9') {
-            numRd++;
-            RETURN_IF_EOF;
-            c = readChar(lexer, jsonText, offset); 
-        } 
-
-        if (!numRd) {
-            unreadChar(lexer, offset);
-            lexer->error = yajl_lex_missing_integer_after_decimal;
-            return yajl_tok_error;
-        }
-        tok = yajl_tok_double;
-    }
-
-    /* optional exponent (indicates this is floating point) */
-    if (c == 'e' || c == 'E') {
-        RETURN_IF_EOF;
-        c = readChar(lexer, jsonText, offset); 
-
-        /* optional sign */
-        if (c == '+' || c == '-') {
-            RETURN_IF_EOF;
-            c = readChar(lexer, jsonText, offset); 
-        }
-
-        if (c >= '0' && c <= '9') {
-            do {
-                RETURN_IF_EOF;
-                c = readChar(lexer, jsonText, offset); 
-            } while (c >= '0' && c <= '9');
-        } else {
-            unreadChar(lexer, offset);
-            lexer->error = yajl_lex_missing_integer_after_exponent;
-            return yajl_tok_error;
-        }
-        tok = yajl_tok_double;
-    }
-    
-    /* we always go "one too far" */
-    unreadChar(lexer, offset);
-    
-    return tok;
-}
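
To make the EOF ambiguity in the XXX note concrete, these are the tokens
the number lexer yields when the whole buffer is the given input (values
illustrative):

    "12"    -> yajl_tok_eof      /* another digit could still arrive */
    "12 "   -> yajl_tok_integer  /* the space proves the token ended */
    "12.5 " -> yajl_tok_double

This is also why the public parser exposes yajl_parse_complete(): only the
caller knows when the stream has truly ended.
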
-
-static yajl_tok
-yajl_lex_comment(yajl_lexer lexer, const unsigned char * jsonText,
-                 unsigned int jsonTextLen, unsigned int * offset)
-{
-    unsigned char c;
-
-    yajl_tok tok = yajl_tok_comment;
-
-    RETURN_IF_EOF;    
-    c = readChar(lexer, jsonText, offset);
-
-    /* either slash or star expected */
-    if (c == '/') {
-        /* now we throw away until end of line */
-        do {
-            RETURN_IF_EOF;    
-            c = readChar(lexer, jsonText, offset); 
-        } while (c != '\n');
-    } else if (c == '*') {
-        /* now we throw away until end of comment */        
-        for (;;) {
-            RETURN_IF_EOF;    
-            c = readChar(lexer, jsonText, offset); 
-            if (c == '*') {
-                RETURN_IF_EOF;    
-                c = readChar(lexer, jsonText, offset);                 
-                if (c == '/') {
-                    break;
-                } else {
-                    unreadChar(lexer, offset);
-                }
-            }
-        }
-    } else {
-        lexer->error = yajl_lex_invalid_char;
-        tok = yajl_tok_error;
-    }
-    
-    return tok;
-}
-
-yajl_tok
-yajl_lex_lex(yajl_lexer lexer, const unsigned char * jsonText,
-             unsigned int jsonTextLen, unsigned int * offset,
-             const unsigned char ** outBuf, unsigned int * outLen)
-{
-    yajl_tok tok = yajl_tok_error;
-    unsigned char c;
-    unsigned int startOffset = *offset;
-
-    *outBuf = NULL;
-    *outLen = 0;
-
-    for (;;) {
-        assert(*offset <= jsonTextLen);
-
-        if (*offset >= jsonTextLen) {
-            tok = yajl_tok_eof;
-            goto lexed;
-        }
-
-        c = readChar(lexer, jsonText, offset);
-
-        switch (c) {
-            case '{':
-                tok = yajl_tok_left_bracket;
-                goto lexed;
-            case '}':
-                tok = yajl_tok_right_bracket;
-                goto lexed;
-            case '[':
-                tok = yajl_tok_left_brace;
-                goto lexed;
-            case ']':
-                tok = yajl_tok_right_brace;
-                goto lexed;
-            case ',':
-                tok = yajl_tok_comma;
-                goto lexed;
-            case ':':
-                tok = yajl_tok_colon;
-                goto lexed;
-            case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
-                startOffset++;
-                break;
-            case 't': {
-                const char * want = "rue";
-                do {
-                    if (*offset >= jsonTextLen) {
-                        tok = yajl_tok_eof;
-                        goto lexed;
-                    }
-                    c = readChar(lexer, jsonText, offset);
-                    if (c != *want) {
-                        unreadChar(lexer, offset);
-                        lexer->error = yajl_lex_invalid_string;
-                        tok = yajl_tok_error;
-                        goto lexed;
-                    }
-                } while (*(++want));
-                tok = yajl_tok_bool;
-                goto lexed;
-            }
-            case 'f': {
-                const char * want = "alse";
-                do {
-                    if (*offset >= jsonTextLen) {
-                        tok = yajl_tok_eof;
-                        goto lexed;
-                    }
-                    c = readChar(lexer, jsonText, offset);
-                    if (c != *want) {
-                        unreadChar(lexer, offset);
-                        lexer->error = yajl_lex_invalid_string;
-                        tok = yajl_tok_error;
-                        goto lexed;
-                    }
-                } while (*(++want));
-                tok = yajl_tok_bool;
-                goto lexed;
-            }
-            case 'n': {
-                const char * want = "ull";
-                do {
-                    if (*offset >= jsonTextLen) {
-                        tok = yajl_tok_eof;
-                        goto lexed;
-                    }
-                    c = readChar(lexer, jsonText, offset);
-                    if (c != *want) {
-                        unreadChar(lexer, offset);
-                        lexer->error = yajl_lex_invalid_string;
-                        tok = yajl_tok_error;
-                        goto lexed;
-                    }
-                } while (*(++want));
-                tok = yajl_tok_null;
-                goto lexed;
-            }
-            case '"': {
-                tok = yajl_lex_string(lexer, (const unsigned char *) jsonText,
-                                      jsonTextLen, offset);
-                goto lexed;
-            }
-            case '-':
-            case '0': case '1': case '2': case '3': case '4': 
-            case '5': case '6': case '7': case '8': case '9': {
-                /* integer parsing wants to start from the beginning */
-                unreadChar(lexer, offset);
-                tok = yajl_lex_number(lexer, (const unsigned char *) jsonText,
-                                      jsonTextLen, offset);
-                goto lexed;
-            }
-            case '/':
-                /* hey, look, a probable comment!  If comments are disabled
-                 * it's an error. */
-                if (!lexer->allowComments) {
-                    unreadChar(lexer, offset);
-                    lexer->error = yajl_lex_unallowed_comment;
-                    tok = yajl_tok_error;
-                    goto lexed;
-                }
-                /* if comments are enabled, then we should try to lex
-                 * the thing.  possible outcomes are
-                 * - successful lex (tok_comment, which means continue),
-                 * - malformed comment opening (slash not followed by
-                 *   '*' or '/') (tok_error)
-                 * - eof hit. (tok_eof) */
-                tok = yajl_lex_comment(lexer, (const unsigned char *) jsonText,
-                                       jsonTextLen, offset);
-                if (tok == yajl_tok_comment) {
-                    /* "error" is silly, but that's the initial
-                     * state of tok.  guilty until proven innocent. */  
-                    tok = yajl_tok_error;
-                    yajl_buf_clear(lexer->buf);
-                    lexer->bufInUse = 0;
-                    startOffset = *offset; 
-                    break;
-                }
-                /* hit error or eof, bail */
-                goto lexed;
-            default:
-                lexer->error = yajl_lex_invalid_char;
-                tok = yajl_tok_error;
-                goto lexed;
-        }
-    }
-
-
-  lexed:
-    /* need to append to buffer if the buffer is in use or
-     * if it's an EOF token */
-    if (tok == yajl_tok_eof || lexer->bufInUse) {
-        if (!lexer->bufInUse) yajl_buf_clear(lexer->buf);
-        lexer->bufInUse = 1;
-        yajl_buf_append(lexer->buf, jsonText + startOffset, *offset - startOffset);
-        lexer->bufOff = 0;
-        
-        if (tok != yajl_tok_eof) {
-            *outBuf = yajl_buf_data(lexer->buf);
-            *outLen = yajl_buf_len(lexer->buf);
-            lexer->bufInUse = 0;
-        }
-    } else if (tok != yajl_tok_error) {
-        *outBuf = jsonText + startOffset;
-        *outLen = *offset - startOffset;
-    }
-
-    /* special case for strings. skip the quotes. */
-    if (tok == yajl_tok_string || tok == yajl_tok_string_with_escapes)
-    {
-        assert(*outLen >= 2);
-        (*outBuf)++;
-        *outLen -= 2; 
-    }
-
-
-#ifdef YAJL_LEXER_DEBUG
-    if (tok == yajl_tok_error) {
-        printf("lexical error: %s\n",
-               yajl_lex_error_to_string(yajl_lex_get_error(lexer)));
-    } else if (tok == yajl_tok_eof) {
-        printf("EOF hit\n");
-    } else {
-        printf("lexed %s: '", tokToStr(tok));
-        fwrite(*outBuf, 1, *outLen, stdout);
-        printf("'\n");
-    }
-#endif
-
-    return tok;
-}
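
A contract illustration for the two outBuf paths above, with a string token
split across chunks; lxr is assumed to be a freshly allocated lexer and the
inputs are illustrative:

    const unsigned char *out;
    unsigned int outLen, off = 0;

    /* chunk 1: token is cut short; the lexer buffers "\"ab" internally */
    yajl_lex_lex(lxr, (const unsigned char *) "\"ab", 3, &off, &out, &outLen);
        /* -> yajl_tok_eof */

    /* chunk 2: token completes; out points into the lexer's own buffer */
    off = 0;
    yajl_lex_lex(lxr, (const unsigned char *) "c\"", 2, &off, &out, &outLen);
        /* -> yajl_tok_string, out = "abc", outLen = 3 (quotes stripped) */

Tokens contained in a single chunk come back as pointers into jsonText
itself, with no copying.
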
-
-const char *
-yajl_lex_error_to_string(yajl_lex_error error)
-{
-    switch (error) {
-        case yajl_lex_e_ok:
-            return "ok, no error";
-        case yajl_lex_string_invalid_utf8:
-            return "invalid bytes in UTF8 string.";
-        case yajl_lex_string_invalid_escaped_char:
-            return "inside a string, '\\' occurs before a character "
-                   "which it may not.";
-        case yajl_lex_string_invalid_json_char:            
-            return "invalid character inside string.";
-        case yajl_lex_string_invalid_hex_char:
-            return "invalid (non-hex) character occurs after '\\u' inside "
-                   "string.";
-        case yajl_lex_invalid_char:
-            return "invalid char in json text.";
-        case yajl_lex_invalid_string:
-            return "invalid string in json text.";
-        case yajl_lex_missing_integer_after_exponent:
-            return "malformed number, a digit is required after the exponent.";
-        case yajl_lex_missing_integer_after_decimal:
-            return "malformed number, a digit is required after the "
-                   "decimal point.";
-        case yajl_lex_missing_integer_after_minus:
-            return "malformed number, a digit is required after the "
-                   "minus sign.";
-        case yajl_lex_unallowed_comment:
-            return "probable comment found in input text, comments are "
-                   "not enabled.";
-    }
-    return "unknown error code";
-}
-
-
-/** allows access to more specific information about the lexical
- *  error when yajl_lex_lex returns yajl_tok_error. */
-yajl_lex_error
-yajl_lex_get_error(yajl_lexer lexer)
-{
-    if (lexer == NULL) return (yajl_lex_error) -1;
-    return lexer->error;
-}
-
-unsigned int yajl_lex_current_line(yajl_lexer lexer)
-{
-    return lexer->lineOff;
-}
-
-unsigned int yajl_lex_current_char(yajl_lexer lexer)
-{
-    return lexer->charOff;
-}
-
-yajl_tok yajl_lex_peek(yajl_lexer lexer, const unsigned char * jsonText,
-                       unsigned int jsonTextLen, unsigned int offset)
-{
-    const unsigned char * outBuf;
-    unsigned int outLen;
-    unsigned int bufLen = yajl_buf_len(lexer->buf);
-    unsigned int bufOff = lexer->bufOff;
-    unsigned int bufInUse = lexer->bufInUse;
-    yajl_tok tok;
-    
-    tok = yajl_lex_lex(lexer, jsonText, jsonTextLen, &offset,
-                       &outBuf, &outLen);
-
-    lexer->bufOff = bufOff;
-    lexer->bufInUse = bufInUse;
-    yajl_buf_truncate(lexer->buf, bufLen);
-    
-    return tok;
-}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_lex.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_lex.h b/src/ejson/c_src/yajl/yajl_lex.h
deleted file mode 100644
index 559e54d..0000000
--- a/src/ejson/c_src/yajl/yajl_lex.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#ifndef __YAJL_LEX_H__
-#define __YAJL_LEX_H__
-
-#include "yajl_common.h"
-
-typedef enum {
-    yajl_tok_bool,         
-    yajl_tok_colon,
-    yajl_tok_comma,     
-    yajl_tok_eof,
-    yajl_tok_error,
-    yajl_tok_left_brace,     
-    yajl_tok_left_bracket,
-    yajl_tok_null,         
-    yajl_tok_right_brace,     
-    yajl_tok_right_bracket,
-
-    /* we differentiate between integers and doubles to allow the
-     * parser to interpret the number without re-scanning */
-    yajl_tok_integer, 
-    yajl_tok_double, 
-
-    /* we differentiate between strings which require further processing,
-     * and strings that do not */
-    yajl_tok_string,
-    yajl_tok_string_with_escapes,
-
-    /* comment tokens are not currently returned to the parser, ever */
-    yajl_tok_comment
-} yajl_tok;
-
-typedef struct yajl_lexer_t * yajl_lexer;
-
-yajl_lexer yajl_lex_alloc(yajl_alloc_funcs * alloc,
-                          unsigned int allowComments,
-                          unsigned int validateUTF8);
-
-void yajl_lex_free(yajl_lexer lexer);
-
-/**
- * run/continue a lex. "offset" is an input/output parameter.
- * It should be initialized to zero for a
- * new chunk of target text, and on subsequent calls with the same
- * target text it should be passed the value left by the previous invocation.
- *
- * the client may be interested in the value of offset when an error is
- * returned from the lexer.  This allows the client to render useful
- * error messages.
- *
- * When you pass the next chunk of data, the offset should be reinitialized
- * to zero.
- * 
- * Finally, the output buffer is usually just a pointer into the jsonText,
- * however in cases where the entity being lexed spans multiple chunks,
- * the lexer will buffer the entity and the data returned will be
- * a pointer into that buffer.
- *
- * This behavior is abstracted from client code, except that the client
- * must choose a reasonable chunk size to get adequate performance.
- */
-yajl_tok yajl_lex_lex(yajl_lexer lexer, const unsigned char * jsonText,
-                      unsigned int jsonTextLen, unsigned int * offset,
-                      const unsigned char ** outBuf, unsigned int * outLen);
-
-/** have a peek at the next token, but don't move the lexer forward */
-yajl_tok yajl_lex_peek(yajl_lexer lexer, const unsigned char * jsonText,
-                       unsigned int jsonTextLen, unsigned int offset);
-
-
-typedef enum {
-    yajl_lex_e_ok = 0,
-    yajl_lex_string_invalid_utf8,
-    yajl_lex_string_invalid_escaped_char,
-    yajl_lex_string_invalid_json_char,
-    yajl_lex_string_invalid_hex_char,
-    yajl_lex_invalid_char,
-    yajl_lex_invalid_string,
-    yajl_lex_missing_integer_after_decimal,
-    yajl_lex_missing_integer_after_exponent,
-    yajl_lex_missing_integer_after_minus,
-    yajl_lex_unallowed_comment
-} yajl_lex_error;
-
-const char * yajl_lex_error_to_string(yajl_lex_error error);
-
-/** allows access to more specific information about the lexical
- *  error when yajl_lex_lex returns yajl_tok_error. */
-yajl_lex_error yajl_lex_get_error(yajl_lexer lexer);
-
-/** get the current offset into the most recently lexed json string. */
-unsigned int yajl_lex_current_offset(yajl_lexer lexer);
-
-/** get the number of lines lexed by this lexer instance */
-unsigned int yajl_lex_current_line(yajl_lexer lexer);
-
-/** get the number of chars lexed by this lexer instance since the last
- *  \n or \r */
-unsigned int yajl_lex_current_char(yajl_lexer lexer);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_parse.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_parse.h b/src/ejson/c_src/yajl/yajl_parse.h
deleted file mode 100644
index a3dcffc..0000000
--- a/src/ejson/c_src/yajl/yajl_parse.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-/**
- * \file yajl_parse.h
- * Interface to YAJL's JSON parsing facilities.
- */
-
-#include "yajl_common.h"
-
-#ifndef __YAJL_PARSE_H__
-#define __YAJL_PARSE_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif    
-    /** error codes returned from this interface */
-    typedef enum {
-        /** no error was encountered */
-        yajl_status_ok,
-        /** a client callback returned zero, stopping the parse */
-        yajl_status_client_canceled,
-        /** The parse cannot yet complete because more json input text
-         *  is required, call yajl_parse with the next buffer of input text.
-         *  (pertinent only when stream parsing) */
-        yajl_status_insufficient_data,
-        /** An error occurred during the parse.  Call yajl_get_error for
-         *  more information about the encountered error */
-        yajl_status_error
-    } yajl_status;
-
-    /** obtain a human-readable English string for an error */
-    YAJL_API const char * yajl_status_to_string(yajl_status code);
-
-    /** an opaque handle to a parser */
-    typedef struct yajl_handle_t * yajl_handle;
-
-    /** yajl is an event driven parser.  this means as json elements are
-     *  parsed, you are called back to do something with the data.  The
-     *  functions in this table indicate the various events for which
-     *  you will be called back.  Each callback accepts a "context"
-     *  pointer, this is a void * that is passed into the yajl_parse
-     *  function which the client code may use to pass around context.
-     *
-     *  All callbacks return an integer.  If non-zero, the parse will
-     *  continue.  If zero, the parse will be canceled and
-     *  yajl_status_client_canceled will be returned from the parse.
-     *
-     *  Note about handling of numbers:
-     *    yajl will only convert numbers that can be represented in a double
-     *    or a long int.  All other numbers will be passed to the client
-     *    in string form using the yajl_number callback.  Furthermore, if
-     *    yajl_number is not NULL, it will always be used to return numbers,
-     *    that is yajl_integer and yajl_double will be ignored.  If
-     *    yajl_number is NULL but one of yajl_integer or yajl_double are
-     *    defined, parsing of a number larger than is representable
-     *    in a double or long int will result in a parse error.
-     */
-    typedef struct {
-        int (* yajl_null)(void * ctx);
-        int (* yajl_boolean)(void * ctx, int boolVal);
-        int (* yajl_integer)(void * ctx, long integerVal);
-        int (* yajl_double)(void * ctx, double doubleVal);
-        /** A callback which passes the string representation of the number
-         *  back to the client.  Will be used for all numbers when present */
-        int (* yajl_number)(void * ctx, const char * numberVal,
-                            unsigned int numberLen);
-
-        /** strings are returned as pointers into the JSON text when
-         * possible; as a result, they are _not_ null padded */
-        int (* yajl_string)(void * ctx, const unsigned char * stringVal,
-                            unsigned int stringLen);
-
-        int (* yajl_start_map)(void * ctx);
-        int (* yajl_map_key)(void * ctx, const unsigned char * key,
-                             unsigned int stringLen);
-        int (* yajl_end_map)(void * ctx);        
-
-        int (* yajl_start_array)(void * ctx);
-        int (* yajl_end_array)(void * ctx);        
-    } yajl_callbacks;
-    
-    /** configuration structure for the generator */
-    typedef struct {
-        /** if nonzero, javascript style comments will be allowed in
-         *  the json input, both slash star and slash slash */
-        unsigned int allowComments;
-        /** if nonzero, invalid UTF8 strings will cause a parse
-         *  error */
-        unsigned int checkUTF8;
-    } yajl_parser_config;
-
-    /** allocate a parser handle
-     *  \param callbacks  a yajl callbacks structure specifying the
-     *                    functions to call when different JSON entities
-     *                    are encountered in the input text.  May be NULL,
-     *                    which is only useful for validation.
-     *  \param config     configuration parameters for the parse.
-     *  \param ctx        a context pointer that will be passed to callbacks.
-     */
-    YAJL_API yajl_handle yajl_alloc(const yajl_callbacks * callbacks,
-                                    const yajl_parser_config * config,
-                                    const yajl_alloc_funcs * allocFuncs,
-                                    void * ctx);
-
-    /** free a parser handle */    
-    YAJL_API void yajl_free(yajl_handle handle);
-
-    /** Parse some json!
-     *  \param hand - a handle to the json parser allocated with yajl_alloc
-     *  \param jsonText - a pointer to the UTF8 json text to be parsed
-     *  \param jsonTextLength - the length, in bytes, of input text
-     */
-    YAJL_API yajl_status yajl_parse(yajl_handle hand,
-                                    const unsigned char * jsonText,
-                                    unsigned int jsonTextLength);
-
-    /** Parse any remaining buffered json.
-     *  Since yajl is a stream-based parser, without an explicit end of
-     *  input, yajl sometimes can't decide if content at the end of the
-     *  stream is valid or not.  For example, if "1" has been fed in,
-     *  yajl can't know whether another digit is next or some character
-     *  that would terminate the integer token.
-     *
-     *  \param hand - a handle to the json parser allocated with yajl_alloc
-     */
-    YAJL_API yajl_status yajl_parse_complete(yajl_handle hand);
-    
-    /** get an error string describing the state of the
-     *  parse.
-     *
-     *  If verbose is non-zero, the message will include the JSON
- *  text where the error occurred, along with an arrow pointing to
-     *  the specific char.
-     *
- *  \returns a dynamically allocated string, which should
- *  be freed with yajl_free_error
-     */
-    YAJL_API unsigned char * yajl_get_error(yajl_handle hand, int verbose,
-                                            const unsigned char * jsonText,
-                                            unsigned int jsonTextLength);
-
-    /**
-     * get the amount of data consumed from the last chunk passed to YAJL.
-     *
-     * In the case of a successful parse this can help you understand if
-     * the entire buffer was consumed (which will allow you to handle
-     * "junk at end of input". 
-     * 
-     * In the event an error is encountered during parsing, this function
-     * affords the client a way to get the offset into the most recent
- * chunk where the error occurred.  0 will be returned if no error
-     * was encountered.
-     */
-    YAJL_API unsigned int yajl_get_bytes_consumed(yajl_handle hand);
-
-    /** free an error returned from yajl_get_error */
-    YAJL_API void yajl_free_error(yajl_handle hand, unsigned char * str);
-
-#ifdef __cplusplus
-}
-#endif    
-
-#endif
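
The header above is the entire public parsing surface, so a usage sketch
fits here. This is a minimal sketch, assuming stock yajl 1.x behaviour in
which NULL config and alloc-function arguments select defaults; the
callback and input are illustrative.

    #include <stdio.h>
    #include <string.h>
    #include "yajl_parse.h"

    static int on_integer(void *ctx, long v)
    {
        (void) ctx;
        printf("integer: %ld\n", v);
        return 1;                      /* non-zero: keep parsing */
    }

    int main(void)
    {
        const char *json = "[1, 2, 3]";
        yajl_callbacks cb;
        yajl_handle h;
        yajl_status st;

        memset(&cb, 0, sizeof cb);     /* unset events are simply skipped */
        cb.yajl_integer = on_integer;

        h = yajl_alloc(&cb, NULL, NULL, NULL);
        st = yajl_parse(h, (const unsigned char *) json,
                        (unsigned int) strlen(json));
        if (st == yajl_status_ok || st == yajl_status_insufficient_data)
            st = yajl_parse_complete(h);  /* flush a buffered trailing token */
        if (st != yajl_status_ok) {
            unsigned char *err = yajl_get_error(h, 1,
                    (const unsigned char *) json,
                    (unsigned int) strlen(json));
            fprintf(stderr, "%s", err);
            yajl_free_error(h, err);
        }
        yajl_free(h);
        return 0;
    }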

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_parser.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_parser.c b/src/ejson/c_src/yajl/yajl_parser.c
deleted file mode 100644
index 990c860..0000000
--- a/src/ejson/c_src/yajl/yajl_parser.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_lex.h"
-#include "yajl_parser.h"
-#include "yajl_encode.h"
-#include "yajl_bytestack.h"
-
-#include <stdlib.h>
-#include <limits.h>
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-#include <ctype.h>
-#include <assert.h>
-#include <math.h>
-
-const char *
-yajl_parser_error_to_string(yajl_parser_error error)
-{
-    switch (error) {
-        case yajl_parser_e_ok:
-            return "ok, no error";
-        case yajl_parser_client_cancelled:
-            return "client cancelled parse via callback return value";
-        case yajl_parser_integer_overflow:
-            return "integer overflow";
-        case yajl_parser_numeric_overflow:
-            return "numeric (floating point) overflow";
-        case yajl_parser_invalid_token:
-            return "unallowed token at this point in JSON text";
-        case yajl_parser_internal_invalid_token:
-            return "invalid token, internal error";
-        case yajl_parser_key_must_be_string:
-            return "invalid object key (must be a string)";
-        case yajl_parser_pair_missing_colon:
-            return "object key and value must be separated by a colon (':')";
-        case yajl_parser_bad_token_after_map_value:
-            return "after key and value, inside map, I expect ',' or '}'";
-        case yajl_parser_bad_token_after_array_value:
-            return "after array element, I expect ',' or ']'";
-    }
-    return "unknown error code";
-}
-
-
-unsigned char *
-yajl_render_error_string(yajl_handle hand, const unsigned char * jsonText,
-                         unsigned int jsonTextLen, int verbose)
-{
-    unsigned int offset = hand->bytesConsumed;
-    unsigned char * str;
-    const char * errorType = NULL;
-    const char * errorText = NULL;
-    char text[72];
-    const char * arrow = "                     (right here) ------^\n";    
-
-    if (yajl_bs_current(hand->stateStack) == yajl_state_parse_error) {
-        errorType = "parse";
-        errorText = yajl_parser_error_to_string(hand->parserError);
-    } else if (yajl_bs_current(hand->stateStack) == yajl_state_lexical_error) {
-        errorType = "lexical";
-        errorText = yajl_lex_error_to_string(yajl_lex_get_error(hand->lexer));
-    } else {
-        errorType = "unknown";
-    }
-
-    {
-        unsigned int memneeded = 0;
-        memneeded += strlen(errorType);
-        memneeded += strlen(" error");
-        if (errorText != NULL) {
-            memneeded += strlen(": ");            
-            memneeded += strlen(errorText);            
-        }
-        str = (unsigned char *) YA_MALLOC(&(hand->alloc), memneeded + 2);
-        str[0] = 0;
-        strcat((char *) str, errorType);
-        strcat((char *) str, " error");    
-        if (errorText != NULL) {
-            strcat((char *) str, ": ");            
-            strcat((char *) str, errorText);            
-        }
-        strcat((char *) str, "\n");    
-    }
-
-    /* now we append as many spaces as needed to make sure the error
-     * falls at char 41, if verbose was specified */
-    if (verbose) {
-        unsigned int start, end, i;
-        unsigned int spacesNeeded;
-
-        spacesNeeded = (offset < 30 ? 40 - offset : 10);
-        start = (offset >= 30 ? offset - 30 : 0);
-        end = (offset + 30 > jsonTextLen ? jsonTextLen : offset + 30);
-    
-        for (i=0;i<spacesNeeded;i++) text[i] = ' ';
-
-        for (;start < end;start++, i++) {
-            if (jsonText[start] != '\n' && jsonText[start] != '\r')
-            {
-                text[i] = jsonText[start];
-            }
-            else
-            {
-                text[i] = ' ';
-            }
-        }
-        assert(i <= 71);
-        text[i++] = '\n';
-        text[i] = 0;
-        {
-            char * newStr = (char *)
-                YA_MALLOC(&(hand->alloc), (strlen((char *) str) +
-                                           strlen((char *) text) +
-                                           strlen(arrow) + 1));
-            newStr[0] = 0;
-            strcat((char *) newStr, (char *) str);
-            strcat((char *) newStr, text);
-            strcat((char *) newStr, arrow);    
-            YA_FREE(&(hand->alloc), str);
-            str = (unsigned char *) newStr;
-        }
-    }
-    return str;
-}
-
-/* check for client cancelation */
-#define _CC_CHK(x)                                                \
-    if (!(x)) {                                                   \
-        yajl_bs_set(hand->stateStack, yajl_state_parse_error);    \
-        hand->parserError = yajl_parser_client_cancelled;          \
-        return yajl_status_client_canceled;                       \
-    }
-
-
-yajl_status
-yajl_do_parse(yajl_handle hand, const unsigned char * jsonText,
-              unsigned int jsonTextLen)
-{
-    yajl_tok tok;
-    const unsigned char * buf;
-    unsigned int bufLen;
-    unsigned int * offset = &(hand->bytesConsumed);
-
-    *offset = 0;
-    
-
-  around_again:
-    switch (yajl_bs_current(hand->stateStack)) {
-        case yajl_state_parse_complete:
-            return yajl_status_ok;
-        case yajl_state_lexical_error:
-        case yajl_state_parse_error:            
-            return yajl_status_error;
-        case yajl_state_start:
-        case yajl_state_map_need_val:
-        case yajl_state_array_need_val:
-        case yajl_state_array_start: {
-            /* for arrays and maps, we advance the state for this
-             * depth, then push the state of the next depth.
-             * If an error occurs during the parsing of the nesting
-             * entity, the state at this level will not matter.
-             * a state that needs pushing will be anything other
-             * than state_start */
-            yajl_state stateToPush = yajl_state_start;
-
-            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
-                               offset, &buf, &bufLen);
-
-            switch (tok) {
-                case yajl_tok_eof:
-                    return yajl_status_insufficient_data;
-                case yajl_tok_error:
-                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
-                    goto around_again;
-                case yajl_tok_string:
-                    if (hand->callbacks && hand->callbacks->yajl_string) {
-                        _CC_CHK(hand->callbacks->yajl_string(hand->ctx,
-                                                             buf, bufLen));
-                    }
-                    break;
-                case yajl_tok_string_with_escapes:
-                    if (hand->callbacks && hand->callbacks->yajl_string) {
-                        yajl_buf_clear(hand->decodeBuf);
-                        yajl_string_decode(hand->decodeBuf, buf, bufLen);
-                        _CC_CHK(hand->callbacks->yajl_string(
-                                    hand->ctx, yajl_buf_data(hand->decodeBuf),
-                                    yajl_buf_len(hand->decodeBuf)));
-                    }
-                    break;
-                case yajl_tok_bool: 
-                    if (hand->callbacks && hand->callbacks->yajl_boolean) {
-                        _CC_CHK(hand->callbacks->yajl_boolean(hand->ctx,
-                                                              *buf == 't'));
-                    }
-                    break;
-                case yajl_tok_null: 
-                    if (hand->callbacks && hand->callbacks->yajl_null) {
-                        _CC_CHK(hand->callbacks->yajl_null(hand->ctx));
-                    }
-                    break;
-                case yajl_tok_left_bracket:
-                    if (hand->callbacks && hand->callbacks->yajl_start_map) {
-                        _CC_CHK(hand->callbacks->yajl_start_map(hand->ctx));
-                    }
-                    stateToPush = yajl_state_map_start;
-                    break;
-                case yajl_tok_left_brace:
-                    if (hand->callbacks && hand->callbacks->yajl_start_array) {
-                        _CC_CHK(hand->callbacks->yajl_start_array(hand->ctx));
-                    }
-                    stateToPush = yajl_state_array_start;
-                    break;
-                case yajl_tok_integer:
-                    /*
-                     * note.  strtol does not respect the length of
-                     * the lexical token.  in a corner case where the
-                     * lexed number is an integer with a trailing zero,
-                     * immediately followed by the end of buffer,
-                     * strtol could run off into oblivion and cause a
-                     * crash.  for this reason we copy the integer
-                     * (and doubles), into our parse buffer (the same
-                     * one used for unescaping strings), before
-                     * calling strtol.  yajl_buf ensures null padding,
-                     * so we're safe.
-                     */
-                    if (hand->callbacks) {
-                        if (hand->callbacks->yajl_number) {
-                            _CC_CHK(hand->callbacks->yajl_number(
-                                        hand->ctx,(const char *) buf, bufLen));
-                        } else if (hand->callbacks->yajl_integer) {
-                            long int i = 0;
-                            yajl_buf_clear(hand->decodeBuf);
-                            yajl_buf_append(hand->decodeBuf, buf, bufLen);
-                            buf = yajl_buf_data(hand->decodeBuf);
-                            i = strtol((const char *) buf, NULL, 10);
-                            if ((i == LONG_MIN || i == LONG_MAX) &&
-                                errno == ERANGE)
-                            {
-                                yajl_bs_set(hand->stateStack,
-                                            yajl_state_parse_error);
-                                hand->parserError = yajl_parser_integer_overflow;
-                                /* try to restore error offset */
-                                if (*offset >= bufLen) *offset -= bufLen;
-                                else *offset = 0;
-                                goto around_again;
-                            }
-                            _CC_CHK(hand->callbacks->yajl_integer(hand->ctx,
-                                                                  i));
-                        }
-                    }
-                    break;
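
The ERANGE guard above leans on strtol's documented overflow contract; a
minimal fragment of that contract (note errno is only meaningful here if it
was zero going into the call):

    #include <assert.h>
    #include <errno.h>
    #include <limits.h>
    #include <stdlib.h>

    errno = 0;
    long v = strtol("99999999999999999999", NULL, 10);
    assert(v == LONG_MAX && errno == ERANGE);
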
-                case yajl_tok_double:
-                    if (hand->callbacks) {
-                        if (hand->callbacks->yajl_number) {
-                            _CC_CHK(hand->callbacks->yajl_number(
-                                        hand->ctx, (const char *) buf, bufLen));
-                        } else if (hand->callbacks->yajl_double) {
-                            double d = 0.0;
-                            yajl_buf_clear(hand->decodeBuf);
-                            yajl_buf_append(hand->decodeBuf, buf, bufLen);
-                            buf = yajl_buf_data(hand->decodeBuf);
-                            d = strtod((char *) buf, NULL);
-                            if ((d == HUGE_VAL || d == -HUGE_VAL) &&
-                                errno == ERANGE)
-                            {
-                                yajl_bs_set(hand->stateStack,
-                                            yajl_state_parse_error);
-                                hand->parserError = yajl_parser_numeric_overflow;
-                                /* try to restore error offset */
-                                if (*offset >= bufLen) *offset -= bufLen;
-                                else *offset = 0;
-                                goto around_again;
-                            }
-                            _CC_CHK(hand->callbacks->yajl_double(hand->ctx,
-                                                                 d));
-                        }
-                    }
-                    break;
-                case yajl_tok_right_brace: {
-                    if (yajl_bs_current(hand->stateStack) ==
-                        yajl_state_array_start)
-                    {
-                        if (hand->callbacks &&
-                            hand->callbacks->yajl_end_array)
-                        {
-                            _CC_CHK(hand->callbacks->yajl_end_array(hand->ctx));
-                        }
-                        yajl_bs_pop(hand->stateStack);
-                        goto around_again;                        
-                    }
-                    /* intentional fall-through */
-                }
-                case yajl_tok_colon: 
-                case yajl_tok_comma: 
-                case yajl_tok_right_bracket:                
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_invalid_token;
-                    goto around_again;
-                default:
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_invalid_token;
-                    goto around_again;
-            }
-            /* got a value.  transition depends on the state we're in. */
-            {
-                yajl_state s = yajl_bs_current(hand->stateStack);
-                if (s == yajl_state_start) {
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_complete);
-                } else if (s == yajl_state_map_need_val) {
-                    yajl_bs_set(hand->stateStack, yajl_state_map_got_val);
-                } else { 
-                    yajl_bs_set(hand->stateStack, yajl_state_array_got_val);
-                }
-            }
-            if (stateToPush != yajl_state_start) {
-                yajl_bs_push(hand->stateStack, stateToPush);
-            }
-
-            goto around_again;
-        }
-        case yajl_state_map_start: 
-        case yajl_state_map_need_key: {
-            /* only difference between these two states is that in
-             * start '}' is valid, whereas in need_key, we've parsed
-             * a comma, and a string key _must_ follow */
-            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
-                               offset, &buf, &bufLen);
-            switch (tok) {
-                case yajl_tok_eof:
-                    return yajl_status_insufficient_data;
-                case yajl_tok_error:
-                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
-                    goto around_again;
-                case yajl_tok_string_with_escapes:
-                    if (hand->callbacks && hand->callbacks->yajl_map_key) {
-                        yajl_buf_clear(hand->decodeBuf);
-                        yajl_string_decode(hand->decodeBuf, buf, bufLen);
-                        buf = yajl_buf_data(hand->decodeBuf);
-                        bufLen = yajl_buf_len(hand->decodeBuf);
-                    }
-                    /* intentional fall-through */
-                case yajl_tok_string:
-                    if (hand->callbacks && hand->callbacks->yajl_map_key) {
-                        _CC_CHK(hand->callbacks->yajl_map_key(hand->ctx, buf,
-                                                              bufLen));
-                    }
-                    yajl_bs_set(hand->stateStack, yajl_state_map_sep);
-                    goto around_again;
-                case yajl_tok_right_bracket:
-                    if (yajl_bs_current(hand->stateStack) ==
-                        yajl_state_map_start)
-                    {
-                        if (hand->callbacks && hand->callbacks->yajl_end_map) {
-                            _CC_CHK(hand->callbacks->yajl_end_map(hand->ctx));
-                        }
-                        yajl_bs_pop(hand->stateStack);
-                        goto around_again;                        
-                    }
-                default:
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_key_must_be_string;
-                    goto around_again;
-            }
-        }
-        case yajl_state_map_sep: {
-            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
-                               offset, &buf, &bufLen);
-            switch (tok) {
-                case yajl_tok_colon:
-                    yajl_bs_set(hand->stateStack, yajl_state_map_need_val);
-                    goto around_again;                    
-                case yajl_tok_eof:
-                    return yajl_status_insufficient_data;
-                case yajl_tok_error:
-                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
-                    goto around_again;
-                default:
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_pair_missing_colon;
-                    goto around_again;
-            }
-        }
-        case yajl_state_map_got_val: {
-            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
-                               offset, &buf, &bufLen);
-            switch (tok) {
-                case yajl_tok_right_bracket:
-                    if (hand->callbacks && hand->callbacks->yajl_end_map) {
-                        _CC_CHK(hand->callbacks->yajl_end_map(hand->ctx));
-                    }
-                    yajl_bs_pop(hand->stateStack);
-                    goto around_again;                        
-                case yajl_tok_comma:
-                    yajl_bs_set(hand->stateStack, yajl_state_map_need_key);
-                    goto around_again;                    
-                case yajl_tok_eof:
-                    return yajl_status_insufficient_data;
-                case yajl_tok_error:
-                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
-                    goto around_again;
-                default:
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_bad_token_after_map_value; 
-                    /* try to restore error offset */
-                    if (*offset >= bufLen) *offset -= bufLen;
-                    else *offset = 0;
-                    goto around_again;
-            }
-        }
-        case yajl_state_array_got_val: {
-            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
-                               offset, &buf, &bufLen);
-            switch (tok) {
-                case yajl_tok_right_brace:
-                    if (hand->callbacks && hand->callbacks->yajl_end_array) {
-                        _CC_CHK(hand->callbacks->yajl_end_array(hand->ctx));
-                    }
-                    yajl_bs_pop(hand->stateStack);
-                    goto around_again;                        
-                case yajl_tok_comma:
-                    yajl_bs_set(hand->stateStack, yajl_state_array_need_val);
-                    goto around_again;                    
-                case yajl_tok_eof:
-                    return yajl_status_insufficient_data;
-                case yajl_tok_error:
-                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
-                    goto around_again;
-                default:
-                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
-                    hand->parserError = yajl_parser_bad_token_after_array_value;
-                    goto around_again;
-            }
-        }
-    }
-    
-    abort();
-    return yajl_status_error;
-}
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_parser.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_parser.h b/src/ejson/c_src/yajl/yajl_parser.h
deleted file mode 100644
index f359b45..0000000
--- a/src/ejson/c_src/yajl/yajl_parser.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#ifndef __YAJL_PARSER_H__
-#define __YAJL_PARSER_H__
-
-#include "yajl_parse.h"
-#include "yajl_bytestack.h"
-#include "yajl_buf.h"
-#include "yajl_lex.h"
-
-typedef enum {
-    yajl_state_start = 0,
-    yajl_state_parse_complete,
-    yajl_state_parse_error,
-    yajl_state_lexical_error,
-    yajl_state_map_start,
-    yajl_state_map_sep,    
-    yajl_state_map_need_val,
-    yajl_state_map_got_val,
-    yajl_state_map_need_key,
-    yajl_state_array_start,
-    yajl_state_array_got_val,
-    yajl_state_array_need_val
-} yajl_state;
-
-typedef enum {
-    yajl_parser_e_ok = 0,
-    yajl_parser_client_cancelled,
-    yajl_parser_integer_overflow,
-    yajl_parser_numeric_overflow,
-    yajl_parser_invalid_token,
-    yajl_parser_internal_invalid_token,
-    yajl_parser_key_must_be_string,
-    yajl_parser_pair_missing_colon,
-    yajl_parser_bad_token_after_map_value,
-    yajl_parser_bad_token_after_array_value
-} yajl_parser_error;
-
-struct yajl_handle_t {
-    const yajl_callbacks * callbacks;
-    void * ctx;
-    yajl_lexer lexer;
-    yajl_parser_error parserError;
-    /* the number of bytes consumed from the last client buffer;
-     * in the case of an error this can be used as the error
-     * offset */
-    unsigned int bytesConsumed;
-    /* temporary storage for decoded strings */
-    yajl_buf decodeBuf;
-    /* a stack of states.  access with yajl_state_XXX routines */
-    yajl_bytestack stateStack;
-    /* memory allocation routines */
-    yajl_alloc_funcs alloc;
-};
-
-yajl_status
-yajl_do_parse(yajl_handle handle, const unsigned char * jsonText,
-              unsigned int jsonTextLen);
-
-unsigned char *
-yajl_render_error_string(yajl_handle hand, const unsigned char * jsonText,
-                         unsigned int jsonTextLen, int verbose);
-
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/src/ejson.app.src
----------------------------------------------------------------------
diff --git a/src/ejson/src/ejson.app.src b/src/ejson/src/ejson.app.src
deleted file mode 100644
index 7180b81..0000000
--- a/src/ejson/src/ejson.app.src
+++ /dev/null
@@ -1,9 +0,0 @@
-{application, ejson, [
-    {description, "EJSON - decode and encode JSON into/from Erlang terms"},
-    {vsn, git},
-    {modules, [ejson]},
-    {registered, []},
-    {applications, [kernel, stdlib]},
-    {env, []}
-]}.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/src/ejson.erl
----------------------------------------------------------------------
diff --git a/src/ejson/src/ejson.erl b/src/ejson/src/ejson.erl
deleted file mode 100644
index 72bb6c1..0000000
--- a/src/ejson/src/ejson.erl
+++ /dev/null
@@ -1,168 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ejson).
--export([encode/1, decode/1]).
--on_load(init/0).
-
-init() ->
-    SoName = case code:priv_dir(ejson) of
-    {error, bad_name} ->
-        case filelib:is_dir(filename:join(["..", priv])) of
-        true ->
-            filename:join(["..", priv, ejson]);
-        false ->
-            filename:join([priv, ejson])
-        end;
-    Dir ->
-        filename:join(Dir, ejson)
-    end,
-    (catch erlang:load_nif(SoName, 0)),
-    case erlang:system_info(otp_release) of
-    "R13B03" -> true;
-    _ -> ok
-    end.
-
-
-decode(undefined) ->
-    throw({invalid_json, undefined});
-decode(IoList) ->
-    try
-        nif_decode(IoList)
-    catch exit:ejson_nif_not_loaded ->
-        erl_decode(IoList)
-    end.
-
-encode(EJson) ->
-    try
-        nif_encode(EJson)
-    catch exit:ejson_nif_not_loaded ->
-        erl_encode(EJson)
-    end.
-
-
-nif_decode(IoList) ->
-    case reverse_tokens(IoList) of
-    {ok, ReverseTokens} ->
-        [[EJson]] = make_ejson(ReverseTokens, [[]]),
-        EJson;
-    Error ->
-        throw({invalid_json, {Error, IoList}})
-    end.
-
-
-erl_decode(IoList) ->
-    try
-        (mochijson2:decoder([{object_hook, fun({struct, L}) -> {L} end}]))(IoList)
-    catch _Type:Error ->
-        throw({invalid_json, {Error, IoList}})
-    end.
-
-
-nif_encode(EJson) ->
-    RevList = encode_rev(EJson),
-    final_encode(lists:reverse(lists:flatten([RevList]))).
-
-
-erl_encode(EJson) ->
-    Opts = [{handler, fun mochi_encode_handler/1}],
-    iolist_to_binary((mochijson2:encoder(Opts))(EJson)).
-
-mochi_encode_handler({L}) when is_list(L) ->
-    {struct, L};
-mochi_encode_handler(Bad) ->
-    exit({json_encode, {bad_term, Bad}}).
-
-
-% Encode the JSON into a reverse list that's almost an iolist;
-% everything in the list is final output except tuples of the form
-% {0, String} and {1, Float}, which are converted to strings
-% inside the NIF.
-encode_rev(true) ->
-    <<"true">>;
-encode_rev(false) ->
-    <<"false">>;
-encode_rev(null) ->
-    <<"null">>;
-encode_rev(I) when is_integer(I) ->
-    list_to_binary(integer_to_list(I));
-encode_rev(S) when is_binary(S) ->
-    {0, S};
-encode_rev(S) when is_atom(S) ->
-    {0, list_to_binary(atom_to_list(S))};
-encode_rev(F) when is_float(F) ->
-    {1, F};
-encode_rev({Props}) when is_list(Props) ->
-    encode_proplist_rev(Props, [<<"{">>]);
-encode_rev(Array) when is_list(Array) ->
-    encode_array_rev(Array, [<<"[">>]);
-encode_rev(Bad) ->
-    throw({json_encode, {bad_term, Bad}}).
-
-
-encode_array_rev([], Acc) ->
-    [<<"]">> | Acc];
-encode_array_rev([Val | Rest], [<<"[">>]) ->
-    encode_array_rev(Rest, [encode_rev(Val), <<"[">>]);
-encode_array_rev([Val | Rest], Acc) ->
-    encode_array_rev(Rest, [encode_rev(Val), <<",">> | Acc]).
-
-
-encode_proplist_rev([], Acc) ->
-    [<<"}">> | Acc];
-encode_proplist_rev([{Key,Val} | Rest], [<<"{">>]) ->
-    encode_proplist_rev(
-        Rest, [encode_rev(Val), <<":">>, {0, as_binary(Key)}, <<"{">>]);
-encode_proplist_rev([{Key,Val} | Rest], Acc) ->
-    encode_proplist_rev(
-        Rest, [encode_rev(Val), <<":">>, {0, as_binary(Key)}, <<",">> | Acc]).
-
-as_binary(B) when is_binary(B) ->
-    B;
-as_binary(A) when is_atom(A) ->
-    list_to_binary(atom_to_list(A));
-as_binary(L) when is_list(L) ->
-    list_to_binary(L).
-
-
-make_ejson([], Stack) ->
-    Stack;
-make_ejson([0 | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
-    % 0 ArrayStart
-    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
-make_ejson([1 | RevEvs], Stack) ->
-    % 1 ArrayEnd
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([2 | RevEvs], [ObjValues, PrevValues | RestStack]) ->
-    % 2 ObjectStart
-    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
-make_ejson([3 | RevEvs], Stack) ->
-    % 3 ObjectEnd
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{0, Value} | RevEvs], [Vals | RestStack] = _Stack) ->
-    % {0, IntegerString}
-    make_ejson(RevEvs, [[list_to_integer(binary_to_list(Value)) | Vals] | RestStack]);
-make_ejson([{1, Value} | RevEvs], [Vals | RestStack] = _Stack) ->
-    % {1, FloatString}
-    make_ejson(RevEvs, [[list_to_float(binary_to_list(Value)) | Vals] | RestStack]);
-make_ejson([{3, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
-    % {3 , ObjectKey}
-    make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
-make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
-    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
-
-
-reverse_tokens(_) ->
-    exit(ejson_nif_not_loaded).
-
-final_encode(_) ->
-    exit(ejson_nif_not_loaded).
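
As a worked sketch of the tagged reverse-list representation described in the encode_rev/1 comment above (editorial; the bound values are illustrative, not part of the module):

    %% encode_rev({[{<<"a">>, 1.5}]}) builds its output backwards:
    RevList = [<<"}">>, {1, 1.5}, <<":">>, {0, <<"a">>}, <<"{">>],
    %% nif_encode/1 flattens and reverses it before the NIF runs:
    InOrder = lists:reverse(lists:flatten([RevList])),
    %% InOrder = [<<"{">>, {0, <<"a">>}, <<":">>, {1, 1.5}, <<"}">>]
    %% final_encode/1 (the NIF) then renders {0, Bin} as a quoted,
    %% escaped JSON string and {1, Float} as a float literal,
    %% producing <<"{\"a\":1.5}">>.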


[33/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/mem3


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/4cac46af
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/4cac46af
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/4cac46af

Branch: refs/heads/1843-feature-bigcouch
Commit: 4cac46af3dc40900b7ceb19c78db075bf30a3325
Parents: 3822d8f
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:41:40 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:41:40 2014 -0600

----------------------------------------------------------------------
 src/mem3/README.md                  |  33 ---
 src/mem3/include/mem3.hrl           |  42 ----
 src/mem3/src/mem3.app.src           |  50 -----
 src/mem3/src/mem3.erl               | 240 ---------------------
 src/mem3/src/mem3_app.erl           |  21 --
 src/mem3/src/mem3_httpd.erl         |  51 -----
 src/mem3/src/mem3_nodes.erl         | 149 -------------
 src/mem3/src/mem3_rep.erl           | 223 --------------------
 src/mem3/src/mem3_shards.erl        | 329 -----------------------------
 src/mem3/src/mem3_sup.erl           |  34 ---
 src/mem3/src/mem3_sync.erl          | 344 -------------------------------
 src/mem3/src/mem3_sync_event.erl    |  85 --------
 src/mem3/src/mem3_sync_nodes.erl    | 114 ----------
 src/mem3/src/mem3_sync_security.erl | 105 ----------
 src/mem3/src/mem3_util.erl          | 196 ------------------
 src/mem3/test/01-config-default.ini |   2 -
 src/mem3/test/mem3_util_test.erl    | 152 --------------
 17 files changed, 2170 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/README.md
----------------------------------------------------------------------
diff --git a/src/mem3/README.md b/src/mem3/README.md
deleted file mode 100644
index ba6e826..0000000
--- a/src/mem3/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-## mem3
-
-Mem3 is the node membership application for clustered [CouchDB][1].  It is used in [BigCouch][2] and tracks two very important things for the cluster:
-
- 1. member nodes
- 2. node/shards mappings for each database
-
-Both the nodes and shards are tracked in node-local couch databases.  Shards are heavily used, so an ETS cache is also maintained for low-latency lookups.  The nodes and shards are synchronized via continuous CouchDB replication, which serves as 'gossip' in Dynamo parlance.  The shards ETS cache is kept in sync based on membership and database event listeners.
-
-A very important point to make here is that BigCouch does not necessarily divide each database into equal shards across the nodes of a cluster.  For instance, in a 20-node cluster you may need to create a small database with very few documents.  For efficiency reasons, you might create that database with Q=4 and keep the default of N=3.  That yields only 12 shards (4 ranges x 3 copies), so 8 nodes will hold none of this database's data.  Given this flexibility, we even out shard usage across the cluster by altering the 'start' node for each database's shards.  (A worked sketch of the Q/N arithmetic follows this diff.)
-
-Splitting and merging shards is an immature feature of the system and will require attention in the near term.  We believe we can implement both functions and perform them while the database remains online.
-
-### Getting Started
-
-Mem3 requires R13B03 or higher and can be built with [rebar][6], which comes bundled in the repository.  Rebar needs to be able to find the `couch_db.hrl` header file; one way to accomplish this is to set ERL_LIBS to point to the apps
-subdirectory of a bigcouch checkout, e.g.
-
-    ERL_LIBS="/usr/local/src/bigcouch/apps" ./rebar compile
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
-[6]: http://github.com/basho/rebar
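
A worked sketch of the Q/N arithmetic from the README above (editorial; the numbers mirror the 20-node example):

    %% Q=4 ranges partition the 32-bit hash space; N=3 copies of each:
    Q = 4, N = 3,
    Step = (1 bsl 32) div Q,
    Ranges = [{I * Step, (I + 1) * Step - 1} || I <- lists:seq(0, Q - 1)],
    %% Ranges = [{0,1073741823},          {1073741824,2147483647},
    %%           {2147483648,3221225471}, {3221225472,4294967295}]
    TotalShards = Q * N.
    %% 12 shard files in all, so 8 nodes of a 20-node cluster hold
    %% none of this database's data.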

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/include/mem3.hrl
----------------------------------------------------------------------
diff --git a/src/mem3/include/mem3.hrl b/src/mem3/include/mem3.hrl
deleted file mode 100644
index cb39e78..0000000
--- a/src/mem3/include/mem3.hrl
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% type specification hacked to suppress dialyzer warning re: match spec
--record(shard, {
-    name :: binary() | '_',
-    node :: node() | '_',
-    dbname :: binary(),
-    range :: [non_neg_integer() | '$1' | '$2'],
-    ref :: reference() | 'undefined' | '_'
-}).
-
-%% types
--type join_type() :: init | join | replace | leave.
--type join_order() :: non_neg_integer().
--type options() :: list().
--type mem_node() :: {join_order(), node(), options()}.
--type mem_node_list() :: [mem_node()].
--type arg_options() :: {test, boolean()}.
--type args() :: [] | [arg_options()].
--type test() :: undefined | node().
--type epoch() :: float().
--type clock() :: {node(), epoch()}.
--type vector_clock() :: [clock()].
--type ping_node() :: node() | nil.
--type gossip_fun() :: call | cast.
-
--type part() :: #shard{}.
--type fullmap() :: [part()].
--type ref_part_map() :: {reference(), part()}.
--type tref() :: reference().
--type np() :: {node(), part()}.
--type beg_acc() :: [integer()].

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3.app.src
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3.app.src b/src/mem3/src/mem3.app.src
deleted file mode 100644
index 616e4bd..0000000
--- a/src/mem3/src/mem3.app.src
+++ /dev/null
@@ -1,50 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, mem3, [
-    {description, "CouchDB Cluster Membership"},
-    {vsn, git},
-    {modules, [
-        mem3,
-        mem3_app,
-        mem3_httpd,
-        mem3_nodes,
-        mem3_rep,
-        mem3_shards,
-        mem3_sup,
-        mem3_sync,
-        mem3_sync_event,
-        mem3_sync_nodes,
-        mem3_sync_security,
-        mem3_util
-    ]},
-    {mod, {mem3_app, []}},
-    {registered, [
-        mem3_events,
-        mem3_nodes,
-        mem3_shards,
-        mem3_sync,
-        mem3_sync_nodes,
-        mem3_sup
-    ]},
-    {applications, [
-        kernel,
-        stdlib,
-        config,
-        sasl,
-        crypto,
-        mochiweb,
-        couch,
-        rexi,
-        twig
-    ]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
deleted file mode 100644
index c9b4793..0000000
--- a/src/mem3/src/mem3.erl
+++ /dev/null
@@ -1,240 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3).
-
--export([start/0, stop/0, restart/0, nodes/0, node_info/2, shards/1, shards/2,
-    choose_shards/2, n/1, dbname/1, ushards/1]).
--export([get_shard/3, local_shards/1, fold_shards/2]).
--export([sync_security/0, sync_security/1]).
--export([compare_nodelists/0, compare_shards/1]).
--export([quorum/1, group_by_proximity/1]).
--export([live_shards/2]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start() ->
-    application:start(mem3).
-
-stop() ->
-    application:stop(mem3).
-
-restart() ->
-    stop(),
-    start().
-
-%% @doc Detailed report of cluster-wide membership state.  Queries the state
-%%      on all member nodes and builds a dictionary with unique states as the
-%%      key and the nodes holding that state as the value.  Also reports member
-%%      nodes which fail to respond and nodes which are connected but are not
-%%      cluster members.  Useful for debugging.
--spec compare_nodelists() -> [{{cluster_nodes, [node()]} | bad_nodes
-    | non_member_nodes, [node()]}].
-compare_nodelists() ->
-    Nodes = mem3:nodes(),
-    AllNodes = erlang:nodes([this, visible]),
-    {Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
-    Dict = lists:foldl(fun({Node, Nodelist}, D) ->
-        orddict:append({cluster_nodes, Nodelist}, Node, D)
-    end, orddict:new(), Replies),
-    [{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
-
--spec compare_shards(DbName::iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
-compare_shards(DbName) when is_list(DbName) ->
-    compare_shards(list_to_binary(DbName));
-compare_shards(DbName) ->
-    Nodes = mem3:nodes(),
-    {Replies, BadNodes} = rpc:multicall(mem3, shards, [DbName]),
-    GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
-    Dict = lists:foldl(fun({Shards, Node}, D) ->
-        orddict:append(Shards, Node, D)
-    end, orddict:new(), lists:zip(Replies, GoodNodes)),
-    [{bad_nodes, BadNodes} | Dict].
-
--spec n(DbName::iodata()) -> integer().
-n(DbName) ->
-    length(mem3:shards(DbName, <<"foo">>)).
-
--spec nodes() -> [node()].
-nodes() ->
-    mem3_nodes:get_nodelist().
-
-node_info(Node, Key) ->
-    mem3_nodes:get_node_info(Node, Key).
-
--spec shards(DbName::iodata()) -> [#shard{}].
-shards(DbName) when is_list(DbName) ->
-    shards(list_to_binary(DbName));
-shards(DbName) ->
-    ShardDbName =
-        list_to_binary(config:get("mem3", "shard_db", "dbs")),
-    case DbName of
-    ShardDbName ->
-        %% shard_db is treated as a single sharded db to support calls to db_info
-        %% and view_all_docs
-        [#shard{
-            node = node(),
-            name = ShardDbName,
-            dbname = ShardDbName,
-            range = [0, 2 bsl 31]}];
-    _ ->
-        mem3_shards:for_db(DbName)
-    end.
-
--spec shards(DbName::iodata(), DocId::binary()) -> [#shard{}].
-shards(DbName, DocId) when is_list(DbName) ->
-    shards(list_to_binary(DbName), DocId);
-shards(DbName, DocId) when is_list(DocId) ->
-    shards(DbName, list_to_binary(DocId));
-shards(DbName, DocId) ->
-    mem3_shards:for_docid(DbName, DocId).
-
--spec ushards(DbName::iodata()) -> [#shard{}].
-ushards(DbName) ->
-    Nodes = [node()|erlang:nodes()],
-    ZoneMap = zone_map(Nodes),
-    ushards(DbName, live_shards(DbName, Nodes), ZoneMap).
-
-ushards(DbName, Shards0, ZoneMap) ->
-    {L,S,D} = group_by_proximity(Shards0, ZoneMap),
-    % Prefer shards in the local zone over shards in a different zone,
-    % but sort each zone separately to ensure a consistent choice between
-    % nodes in the same zone.
-    Shards = choose_ushards(DbName, L ++ S) ++ choose_ushards(DbName, D),
-    lists:ukeysort(#shard.range, Shards).
-
-get_shard(DbName, Node, Range) ->
-    mem3_shards:get(DbName, Node, Range).
-
-local_shards(DbName) ->
-    mem3_shards:local(DbName).
-
-fold_shards(Fun, Acc) ->
-    mem3_shards:fold(Fun, Acc).
-
-sync_security() ->
-    mem3_sync_security:go().
-
-sync_security(Db) ->
-    mem3_sync_security:go(dbname(Db)).
-
--spec choose_shards(DbName::iodata(), Options::list()) -> [#shard{}].
-choose_shards(DbName, Options) when is_list(DbName) ->
-    choose_shards(list_to_binary(DbName), Options);
-choose_shards(DbName, Options) ->
-    try shards(DbName)
-    catch error:E when E==database_does_not_exist; E==badarg ->
-        Nodes = mem3:nodes(),
-        case get_placement(Options) of
-            undefined ->
-                choose_shards(DbName, Nodes, Options);
-            Placement ->
-                lists:flatmap(fun({Zone, N}) ->
-                    NodesInZone = nodes_in_zone(Nodes, Zone),
-                    Options1 = lists:keymerge(1, [{n,N}], Options),
-                    choose_shards(DbName, NodesInZone, Options1)
-                end, Placement)
-        end
-    end.
-
-choose_shards(DbName, Nodes, Options) ->
-    NodeCount = length(Nodes),
-    Suffix = couch_util:get_value(shard_suffix, Options, ""),
-    N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
-    if N =:= 0 -> erlang:error(no_nodes_in_zone);
-       true -> ok
-    end,
-    Q = mem3_util:to_integer(couch_util:get_value(q, Options,
-        config:get("cluster", "q", "8"))),
-    %% rotate to a random entry in the nodelist for even distribution
-    {A, B} = lists:split(crypto:rand_uniform(1,length(Nodes)+1), Nodes),
-    RotatedNodes = B ++ A,
-    mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
-
-get_placement(Options) ->
-    case couch_util:get_value(placement, Options) of
-        undefined ->
-            case config:get("cluster", "placement") of
-                undefined ->
-                    undefined;
-                PlacementStr ->
-                    decode_placement_string(PlacementStr)
-            end;
-        PlacementStr ->
-            decode_placement_string(PlacementStr)
-    end.
-
-decode_placement_string(PlacementStr) ->
-    [begin
-         [Zone, N] = string:tokens(Rule, ":"),
-         {list_to_binary(Zone), list_to_integer(N)}
-     end || Rule <- string:tokens(PlacementStr, ",")].
-
--spec dbname(#shard{} | iodata()) -> binary().
-dbname(#shard{dbname = DbName}) ->
-    DbName;
-dbname(<<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>>) ->
-    list_to_binary(filename:rootname(binary_to_list(DbName)));
-dbname(DbName) when is_list(DbName) ->
-    dbname(list_to_binary(DbName));
-dbname(DbName) when is_binary(DbName) ->
-    DbName;
-dbname(_) ->
-    erlang:error(badarg).
-
-nodes_in_zone(Nodes, Zone) ->
-    [Node || Node <- Nodes, Zone == mem3:node_info(Node, <<"zone">>)].
-
-live_shards(DbName, Nodes) ->
-    [S || #shard{node=Node} = S <- shards(DbName), lists:member(Node, Nodes)].
-
-zone_map(Nodes) ->
-    [{Node, node_info(Node, <<"zone">>)} || Node <- Nodes].
-
-group_by_proximity(Shards) ->
-    Nodes = [N || #shard{node=N} <- lists:ukeysort(#shard.node, Shards)],
-    group_by_proximity(Shards, zone_map(Nodes)).
-
-group_by_proximity(Shards, ZoneMap) ->
-    {Local, Remote} = lists:partition(fun(S) -> S#shard.node =:= node() end,
-        Shards),
-    LocalZone = proplists:get_value(node(), ZoneMap),
-    Fun = fun(S) -> proplists:get_value(S#shard.node, ZoneMap) =:= LocalZone end,
-    {SameZone, DifferentZone} = lists:partition(Fun, Remote),
-    {Local, SameZone, DifferentZone}.
-
-choose_ushards(DbName, Shards) ->
-    Groups = group_by_range(rotate_list(DbName, lists:sort(Shards))),
-    Fun = fun(Group, {N, Acc}) ->
-        {N+1, [lists:nth(1 + N rem length(Group), Group) | Acc]} end,
-    {_, Result} = lists:foldl(Fun, {0, []}, Groups),
-    Result.
-
-rotate_list(_DbName, []) ->
-    [];
-rotate_list(DbName, List) ->
-    {H, T} = lists:split(erlang:crc32(DbName) rem length(List), List),
-    T ++ H.
-
-group_by_range(Shards) ->
-    Groups0 = lists:foldl(fun(#shard{range=Range}=Shard, Dict) ->
-        orddict:append(Range, Shard, Dict) end, orddict:new(), Shards),
-    {_, Groups} = lists:unzip(Groups0),
-    Groups.
-
-% quorum functions
-
-quorum(#db{name=DbName}) ->
-    quorum(DbName);
-quorum(DbName) ->
-    n(DbName) div 2 + 1.
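
Two small worked examples for the module above (editorial; the zone names are made up, and the sketch only restates what decode_placement_string/1 and quorum/1 compute):

    %% "cluster" / "placement" is a comma-separated list of Zone:N rules:
    %%   "us-east-1a:2,us-east-1b:1"
    %%     -> [{<<"us-east-1a">>, 2}, {<<"us-east-1b">>, 1}]
    %% quorum/1 is a simple majority over the N copies of each shard:
    Quorum = fun(N) -> N div 2 + 1 end,
    2 = Quorum(3),
    3 = Quorum(4).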

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_app.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_app.erl b/src/mem3/src/mem3_app.erl
deleted file mode 100644
index 3ddfbe6..0000000
--- a/src/mem3/src/mem3_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
-    mem3_sup:start_link().
-
-stop([]) ->
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_httpd.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl
deleted file mode 100644
index 94196fa..0000000
--- a/src/mem3/src/mem3_httpd.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd).
-
--export([handle_membership_req/1]).
-
-%% includes
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-handle_membership_req(#httpd{method='GET',
-        path_parts=[<<"_membership">>]} = Req) ->
-    ClusterNodes = try mem3:nodes()
-    catch _:_ -> [] end,
-    couch_httpd:send_json(Req, {[
-        {all_nodes, lists:sort([node()|nodes()])},
-        {cluster_nodes, lists:sort(ClusterNodes)}
-    ]});
-handle_membership_req(#httpd{method='GET',
-        path_parts=[<<"_membership">>, <<"parts">>, DbName]} = Req) ->
-    ClusterNodes = try mem3:nodes()
-    catch _:_ -> [] end,
-    Shards = mem3:shards(DbName),
-    JsonShards = json_shards(Shards, dict:new()),
-    couch_httpd:send_json(Req, {[
-        {all_nodes, lists:sort([node()|nodes()])},
-        {cluster_nodes, lists:sort(ClusterNodes)},
-        {partitions, JsonShards}
-    ]}).
-
-%%
-%% internal
-%%
-
-json_shards([], AccIn) ->
-    List = dict:to_list(AccIn),
-    {lists:sort(List)};
-json_shards([#shard{node=Node, range=[B,_E]} | Rest], AccIn) ->
-    HexBeg = couch_util:to_hex(<<B:32/integer>>),
-    json_shards(Rest, dict:append(HexBeg, Node, AccIn)).
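
For orientation, the first clause above answers GET /_membership with a two-field object; the EJSON term handed to couch_httpd:send_json/2 looks roughly like this (node names invented):

    %% all_nodes is every connected Erlang node; cluster_nodes is the
    %% membership list tracked by mem3_nodes, so the two can differ.
    {[
        {all_nodes,     ['node1@127.0.0.1', 'node2@127.0.0.1']},
        {cluster_nodes, ['node1@127.0.0.1', 'node2@127.0.0.1']}
    ]}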

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_nodes.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
deleted file mode 100644
index 782a8b5..0000000
--- a/src/mem3/src/mem3_nodes.erl
+++ /dev/null
@@ -1,149 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_nodes).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-    code_change/3]).
-
--export([start_link/0, get_nodelist/0, get_node_info/2]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {changes_pid, update_seq}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_nodelist() ->
-    try
-        lists:sort([N || {N,_} <- ets:tab2list(?MODULE)])
-    catch error:badarg ->
-        gen_server:call(?MODULE, get_nodelist)
-    end.
-
-get_node_info(Node, Key) ->
-    try
-        couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
-    catch error:badarg ->
-        gen_server:call(?MODULE, {get_node_info, Node, Key})
-    end.
-
-init([]) ->
-    ets:new(?MODULE, [named_table, {read_concurrency, true}]),
-    UpdateSeq = initialize_nodelist(),
-    {Pid, _} = spawn_monitor(fun() -> listen_for_changes(UpdateSeq) end),
-    {ok, #state{changes_pid = Pid, update_seq = UpdateSeq}}.
-
-handle_call(get_nodelist, _From, State) ->
-    {reply, lists:sort([N || {N,_} <- ets:tab2list(?MODULE)]), State};
-handle_call({get_node_info, Node, Key}, _From, State) ->
-    Resp = try
-        couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
-    catch error:badarg ->
-        error
-    end,
-    {reply, Resp, State};
-handle_call({add_node, Node, NodeInfo}, _From, State) ->
-    gen_event:notify(mem3_events, {add_node, Node}),
-    ets:insert(?MODULE, {Node, NodeInfo}),
-    {reply, ok, State};
-handle_call({remove_node, Node}, _From, State) ->
-    gen_event:notify(mem3_events, {remove_node, Node}),
-    ets:delete(?MODULE, Node),
-    {reply, ok, State};
-handle_call(_Call, _From, State) ->
-    {noreply, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
-    twig:log(notice, "~p changes listener died ~p", [?MODULE, Reason]),
-    StartSeq = State#state.update_seq,
-    Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> StartSeq end,
-    erlang:send_after(5000, self(), start_listener),
-    {noreply, State#state{update_seq = Seq}};
-handle_info(start_listener, #state{update_seq = Seq} = State) ->
-    {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
-    {noreply, State#state{changes_pid=NewPid}};
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, {state, ChangesPid, UpdateSeq, _}, _Extra) ->
-    ets:new(?MODULE, [named_table, {read_concurrency, true}]),
-    initialize_nodelist(),
-    {ok, #state{changes_pid = ChangesPid, update_seq = UpdateSeq}};
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%% internal functions
-
-initialize_nodelist() ->
-    DbName = config:get("mem3", "node_db", "nodes"),
-    {ok, Db} = mem3_util:ensure_exists(DbName),
-    {ok, _, Db} = couch_btree:fold(Db#db.id_tree, fun first_fold/3, Db, []),
-    % add self if not already present
-    case ets:lookup(?MODULE, node()) of
-    [_] ->
-        ok;
-    [] ->
-        ets:insert(?MODULE, {node(), []}),
-        Doc = #doc{id = couch_util:to_binary(node())},
-        {ok, _} = couch_db:update_doc(Db, Doc, [])
-    end,
-    couch_db:close(Db),
-    Db#db.update_seq.
-
-first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, _, Acc) ->
-    {ok, Acc};
-first_fold(#full_doc_info{deleted=true}, _, Acc) ->
-    {ok, Acc};
-first_fold(#full_doc_info{id=Id}=DocInfo, _, Db) ->
-    {ok, #doc{body={Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
-    ets:insert(?MODULE, {mem3_util:to_atom(Id), Props}),
-    {ok, Db}.
-
-listen_for_changes(Since) ->
-    DbName = config:get("mem3", "node_db", "nodes"),
-    {ok, Db} = mem3_util:ensure_exists(DbName),
-    Args = #changes_args{
-        feed = "continuous",
-        since = Since,
-        heartbeat = true,
-        include_docs = true
-    },
-    ChangesFun = couch_changes:handle_changes(Args, nil, Db),
-    ChangesFun(fun changes_callback/2).
-
-changes_callback(start, _) ->
-    {ok, nil};
-changes_callback({stop, EndSeq}, _) ->
-    exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
-    Node = couch_util:get_value(<<"id">>, Change),
-    case Node of <<"_design/", _/binary>> -> ok; _ ->
-        case mem3_util:is_deleted(Change) of
-        false ->
-            {Props} = couch_util:get_value(doc, Change),
-            gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
-        true ->
-            gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
-        end
-    end,
-    {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
-    {ok, nil}.
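
The read path above (get_nodelist/0, get_node_info/2) tolerates the named ETS table not existing yet by catching badarg and falling back to a synchronous call to the owning gen_server. A minimal standalone sketch of that pattern (module and message names are illustrative, not part of mem3):

    -module(ets_fallback).
    -export([lookup/3]).

    %% Read column 2 of a named ETS table; if the table is not yet
    %% created (ets raises badarg), ask the owning server instead.
    lookup(Tab, Key, Server) ->
        try
            ets:lookup_element(Tab, Key, 2)
        catch error:badarg ->
            gen_server:call(Server, {lookup, Key})
        end.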

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_rep.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
deleted file mode 100644
index 373bc3f..0000000
--- a/src/mem3/src/mem3_rep.erl
+++ /dev/null
@@ -1,223 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_rep).
-
--export([go/2, go/3, changes_enumerator/3, make_local_id/2]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(CTX, #user_ctx{roles = [<<"_admin">>]}).
-
--record(acc, {
-    batch_size,
-    batch_count,
-    revcount = 0,
-    infos = [],
-    seq,
-    localid,
-    source,
-    target,
-    filter
-}).
-
-go(Source, Target) ->
-    go(Source, Target, []).
-
-go(DbName, Node, Opts) when is_binary(DbName), is_atom(Node) ->
-    go(#shard{name=DbName, node=node()}, #shard{name=DbName, node=Node}, Opts);
-
-go(#shard{} = Source, #shard{} = Target, Opts) ->
-    mem3_sync_security:maybe_sync(Source, Target),
-    BatchSize = case proplists:get_value(batch_size, Opts) of
-        BS when is_integer(BS), BS > 0 -> BS;
-        _ -> 100
-    end,
-    BatchCount = case proplists:get_value(batch_count, Opts) of
-        all -> all;
-        BC when is_integer(BC), BC > 0 -> BC;
-        _ -> 1
-    end,
-    Filter = proplists:get_value(filter, Opts),
-    LocalId = make_local_id(Source, Target, Filter),
-    Acc = #acc{
-        batch_size = BatchSize,
-        batch_count = BatchCount,
-        localid = LocalId,
-        source = Source,
-        target = Target,
-        filter = Filter
-    },
-    go(Acc).
-
-go(#acc{source=Source, batch_count=BC}=Acc) ->
-    case couch_db:open(Source#shard.name, [{user_ctx,?CTX}]) of
-    {ok, Db} ->
-        Resp = try
-            repl(Db, Acc)
-        catch error:{not_found, no_db_file} ->
-            {error, missing_target}
-        after
-            couch_db:close(Db)
-        end,
-        case Resp of
-            {ok, P} when P > 0, BC == all ->
-                go(Acc);
-            {ok, P} when P > 0, BC > 1 ->
-                go(Acc#acc{batch_count=BC-1});
-            Else ->
-                Else
-        end;
-    {not_found, no_db_file} ->
-        {error, missing_source}
-    end.
-
-repl(#db{name=DbName, seq_tree=Bt}=Db, #acc{localid=LocalId}=Acc0) ->
-    erlang:put(io_priority, {internal_repl, DbName}),
-    Seq = calculate_start_seq(Db, Acc0#acc.target, LocalId),
-    Acc1 = Acc0#acc{source=Db, seq=Seq},
-    Fun = fun ?MODULE:changes_enumerator/3,
-    {ok, _, Acc2} = couch_btree:fold(Bt, Fun, Acc1, [{start_key, Seq + 1}]),
-    {ok, #acc{seq = LastSeq}} = replicate_batch(Acc2),
-    {ok, couch_db:count_changes_since(Db, LastSeq)}.
-
-make_local_id(#shard{}=Source, #shard{}=Target) ->
-    make_local_id(Source, Target, undefined).
-
-make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}, Filter) ->
-    S = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(SourceNode))),
-    T = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(TargetNode))),
-    F = case is_function(Filter) of
-        true ->
-            {new_uniq, Hash} = erlang:fun_info(Filter, new_uniq),
-            B = couch_util:encodeBase64Url(Hash),
-            <<"-", B/binary>>;
-        false ->
-            <<>>
-    end,
-    <<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>.
-
-changes_enumerator(FDI, _, #acc{revcount=C, infos=Infos}=Acc0) ->
-    #doc_info{
-        high_seq=Seq,
-        revs=Revs
-    } = couch_doc:to_doc_info(FDI),
-    {Count, NewInfos} = case filter_doc(Acc0#acc.filter, FDI) of
-        keep -> {C + length(Revs), [FDI | Infos]};
-        discard -> {C, Infos}
-    end,
-    Acc1 = Acc0#acc{
-        seq=Seq,
-        revcount=Count,
-        infos=NewInfos
-    },
-    Go = if Count < Acc1#acc.batch_size -> ok; true -> stop end,
-    {Go, Acc1}.
-
-filter_doc(Filter, FullDocInfo) when is_function(Filter) ->
-    try Filter(FullDocInfo) of
-        discard -> discard;
-        _ -> keep
-    catch _:_ ->
-        keep
-    end;
-filter_doc(_, _) ->
-    keep.
-
-replicate_batch(#acc{target = #shard{node=Node, name=Name}} = Acc) ->
-    case find_missing_revs(Acc) of
-    [] ->
-        ok;
-    Missing ->
-        ok = save_on_target(Node, Name, open_docs(Acc, Missing))
-    end,
-    update_locals(Acc),
-    {ok, Acc#acc{revcount=0, infos=[]}}.
-
-find_missing_revs(Acc) ->
-    #acc{target = #shard{node=Node, name=Name}, infos = Infos} = Acc,
-    IdsRevs = lists:map(fun(FDI) ->
-        #doc_info{id=Id, revs=RevInfos} = couch_doc:to_doc_info(FDI),
-        {Id, [R || #rev_info{rev=R} <- RevInfos]}
-    end, Infos),
-    Options = [{io_priority, {internal_repl, Name}}, {user_ctx, ?CTX}],
-    rexi_call(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs, Options]}).
-
-open_docs(#acc{source=Source, infos=Infos}, Missing) ->
-    lists:flatmap(fun({Id, Revs, _}) ->
-        FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
-        open_doc_revs(Source, FDI, Revs)
-    end, Missing).
-
-save_on_target(Node, Name, Docs) ->
-    Options = [replicated_changes, full_commit, {user_ctx, ?CTX},
-        {io_priority, {internal_repl, Name}}],
-    rexi_call(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
-    ok.
-
-update_locals(Acc) ->
-    #acc{seq=Seq, source=Db, target=Target, localid=Id} = Acc,
-    #shard{name=Name, node=Node} = Target,
-    Doc = #doc{id = Id, body = {[
-        {<<"seq">>, Seq},
-        {<<"node">>, list_to_binary(atom_to_list(Node))},
-        {<<"timestamp">>, list_to_binary(iso8601_timestamp())}
-    ]}},
-    {ok, _} = couch_db:update_doc(Db, Doc, []),
-    Options = [{user_ctx, ?CTX}, {io_priority, {internal_repl, Name}}],
-    rexi_call(Node, {fabric_rpc, update_docs, [Name, [Doc], Options]}).
-
-rexi_call(Node, MFA) ->
-    Mon = rexi_monitor:start([{rexi_server, Node}]),
-    Ref = rexi:cast(Node, self(), MFA, [sync]),
-    try
-        receive {Ref, {ok, Reply}} ->
-            Reply;
-        {Ref, Error} ->
-            erlang:error(Error);
-        {rexi_DOWN, Mon, _, Reason} ->
-            erlang:error({rexi_DOWN, {Node, Reason}})
-        after 600000 ->
-            erlang:error(timeout)
-        end
-    after
-        rexi_monitor:stop(Mon)
-    end.
-
-calculate_start_seq(Db, #shard{node=Node, name=Name}, LocalId) ->
-    case couch_db:open_doc(Db, LocalId, [ejson_body]) of
-    {ok, #doc{body = {SProps}}} ->
-        Opts = [{user_ctx, ?CTX}, {io_priority, {internal_repl, Name}}],
-        try rexi_call(Node, {fabric_rpc, open_doc, [Name, LocalId, Opts]}) of
-        #doc{body = {TProps}} ->
-            SourceSeq = couch_util:get_value(<<"seq">>, SProps, 0),
-            TargetSeq = couch_util:get_value(<<"seq">>, TProps, 0),
-            erlang:min(SourceSeq, TargetSeq)
-        catch error:{not_found, _} ->
-            0
-        end;
-    {not_found, _} ->
-        0
-    end.
-
-open_doc_revs(Db, #full_doc_info{id=Id, rev_tree=RevTree}, Revs) ->
-    {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
-    lists:map(fun({#leaf{deleted=IsDel, ptr=SummaryPtr}, FoundRevPath}) ->
-                  couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
-    end, FoundRevs).
-
-iso8601_timestamp() ->
-    {_,_,Micro} = Now = os:timestamp(),
-    {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
-    Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
-    io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
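
For reference, the replication checkpoint that update_locals/1 above writes to both source and target looks roughly like this (the hash fragments and field values are illustrative):

    %% _local checkpoint doc, as built in update_locals/1:
    #doc{
        id = <<"_local/shard-sync-", SourceHash/binary, "-", TargetHash/binary>>,
        body = {[
            {<<"seq">>, 4242},
            {<<"node">>, <<"node2@127.0.0.1">>},
            {<<"timestamp">>, <<"2014-02-04T17:41:40.123456Z">>}
        ]}
    }
    %% calculate_start_seq/3 reads this doc on both sides and resumes
    %% from the smaller of the two recorded sequences.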

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_shards.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
deleted file mode 100644
index 9949869..0000000
--- a/src/mem3/src/mem3_shards.erl
+++ /dev/null
@@ -1,329 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_shards).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
--export([handle_config_change/5]).
-
--export([start_link/0]).
--export([for_db/1, for_docid/2, get/3, local/1, fold/2]).
--export([set_max_size/1]).
-
--record(st, {
-    max_size = 25000,
-    cur_size = 0,
-    changes_pid
-}).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DBS, mem3_dbs).
--define(SHARDS, mem3_shards).
--define(ATIMES, mem3_atimes).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-for_db(DbName) ->
-    try ets:lookup(?SHARDS, DbName) of
-        [] ->
-            load_shards_from_disk(DbName);
-        Else ->
-            gen_server:cast(?MODULE, {cache_hit, DbName}),
-            Else
-    catch error:badarg ->
-        load_shards_from_disk(DbName)
-    end.
-
-for_docid(DbName, DocId) ->
-    HashKey = mem3_util:hash(DocId),
-    Head = #shard{
-        name = '_',
-        node = '_',
-        dbname = DbName,
-        range = ['$1','$2'],
-        ref = '_'
-    },
-    Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
-    try ets:select(?SHARDS, [{Head, Conditions, ['$_']}]) of
-        [] ->
-            load_shards_from_disk(DbName, DocId);
-        Shards ->
-            gen_server:cast(?MODULE, {cache_hit, DbName}),
-            Shards
-    catch error:badarg ->
-        load_shards_from_disk(DbName, DocId)
-    end.
-
-get(DbName, Node, Range) ->
-    Res = lists:foldl(fun(#shard{node=N, range=R}=S, Acc) ->
-        case {N, R} of
-            {Node, Range} -> [S | Acc];
-            _ -> Acc
-        end
-    end, [], for_db(DbName)),
-    case Res of
-        [] -> {error, not_found};
-        [Shard] -> {ok, Shard};
-        [_|_] -> {error, duplicates}
-    end.
-
-local(DbName) ->
-    Pred = fun(#shard{node=Node}) when Node == node() -> true; (_) -> false end,
-    lists:filter(Pred, for_db(DbName)).
-
-fold(Fun, Acc) ->
-    DbName = config:get("mem3", "shards_db", "dbs"),
-    {ok, Db} = mem3_util:ensure_exists(DbName),
-    FAcc = {Db, Fun, Acc},
-    try
-        {ok, _, LastAcc} = couch_db:enum_docs(Db, fun fold_fun/3, FAcc, []),
-        {_Db, _UFun, UAcc} = LastAcc,
-        UAcc
-    after
-        couch_db:close(Db)
-    end.
-
-set_max_size(Size) when is_integer(Size), Size > 0 ->
-    gen_server:call(?MODULE, {set_max_size, Size}).
-
-handle_config_change("mem3", "shard_cache_size", SizeList, _, _) ->
-    Size = list_to_integer(SizeList),
-    {ok, gen_server:call(?MODULE, {set_max_size, Size}, infinity)};
-handle_config_change("mem3", "shard_db", _DbName, _, _) ->
-    {ok, gen_server:call(?MODULE, shard_db_changed, infinity)};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-init([]) ->
-    ets:new(?SHARDS, [bag, protected, named_table, {keypos,#shard.dbname}]),
-    ets:new(?DBS, [set, protected, named_table]),
-    ets:new(?ATIMES, [ordered_set, protected, named_table]),
-    ok = config:listen_for_changes(?MODULE, nil),
-    SizeList = config:get("mem3", "shard_cache_size", "25000"),
-    {Pid, _} = spawn_monitor(fun() -> listen_for_changes(get_update_seq()) end),
-    {ok, #st{
-        max_size = list_to_integer(SizeList),
-        cur_size = 0,
-        changes_pid = Pid
-    }}.
-
-handle_call({set_max_size, Size}, _From, St) ->
-    {reply, ok, cache_free(St#st{max_size=Size})};
-handle_call(shard_db_changed, _From, St) ->
-    exit(St#st.changes_pid, shard_db_changed),
-    {reply, ok, St};
-handle_call(_Call, _From, St) ->
-    {noreply, St}.
-
-handle_cast({cache_hit, DbName}, St) ->
-    cache_hit(DbName),
-    {noreply, St};
-handle_cast({cache_insert, DbName, Shards}, St) ->
-    {noreply, cache_free(cache_insert(St, DbName, Shards))};
-handle_cast({cache_remove, DbName}, St) ->
-    {noreply, cache_remove(St, DbName)};
-handle_cast(_Msg, St) ->
-    {noreply, St}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #st{changes_pid=Pid}=St) ->
-    {NewSt, Seq} = case Reason of
-        {seq, EndSeq} ->
-            {St, EndSeq};
-        shard_db_changed ->
-            {cache_clear(St), get_update_seq()};
-        _ ->
-            twig:log(notice, "~p changes listener died ~p", [?MODULE, Reason]),
-            {St, get_update_seq()}
-    end,
-    erlang:send_after(5000, self(), {start_listener, Seq}),
-    {noreply, NewSt#st{changes_pid=undefined}};
-handle_info({start_listener, Seq}, St) ->
-    {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
-    {noreply, St#st{changes_pid=NewPid}};
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(_Msg, St) ->
-    {noreply, St}.
-
-terminate(_Reason, #st{changes_pid=Pid}) ->
-    exit(Pid, kill),
-    ok.
-
-code_change(_OldVsn, St, _Extra) ->
-    {ok, St}.
-
-%% internal functions
-
-fold_fun(#full_doc_info{}=FDI, _, Acc) ->
-    DI = couch_doc:to_doc_info(FDI),
-    fold_fun(DI, nil, Acc);
-fold_fun(#doc_info{}=DI, _, {Db, UFun, UAcc}) ->
-    case couch_db:open_doc(Db, DI, [ejson_body, conflicts]) of
-        {ok, Doc} ->
-            {Props} = Doc#doc.body,
-            Shards = mem3_util:build_shards(Doc#doc.id, Props),
-            NewUAcc = lists:foldl(UFun, UAcc, Shards),
-            {ok, {Db, UFun, NewUAcc}};
-        _ ->
-            {ok, {Db, UFun, UAcc}}
-    end.
-
-get_update_seq() ->
-    DbName = config:get("mem3", "shards_db", "dbs"),
-    {ok, Db} = mem3_util:ensure_exists(DbName),
-    couch_db:close(Db),
-    Db#db.update_seq.
-
-listen_for_changes(Since) ->
-    DbName = config:get("mem3", "shards_db", "dbs"),
-    {ok, Db} = mem3_util:ensure_exists(DbName),
-    Args = #changes_args{
-        feed = "continuous",
-        since = Since,
-        heartbeat = true,
-        include_docs = true
-    },
-    ChangesFun = couch_changes:handle_changes(Args, Since, Db),
-    ChangesFun(fun changes_callback/2).
-
-changes_callback(start, Acc) ->
-    {ok, Acc};
-changes_callback({stop, EndSeq}, _) ->
-    exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
-    DbName = couch_util:get_value(<<"id">>, Change),
-    case DbName of <<"_design/", _/binary>> -> ok; _Else ->
-        case mem3_util:is_deleted(Change) of
-        true ->
-            gen_server:cast(?MODULE, {cache_remove, DbName});
-        false ->
-            case couch_util:get_value(doc, Change) of
-            {error, Reason} ->
-                twig:log(error, "missing partition table for ~s: ~p",
-                    [DbName, Reason]);
-            {Doc} ->
-                Shards = mem3_util:build_shards(DbName, Doc),
-                gen_server:cast(?MODULE, {cache_insert, DbName, Shards}),
-                [create_if_missing(Name) || #shard{name=Name, node=Node}
-                    <- Shards, Node =:= node()]
-            end
-        end
-    end,
-    {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
-    ok.
-
-load_shards_from_disk(DbName) when is_binary(DbName) ->
-    X = ?l2b(config:get("mem3", "shards_db", "dbs")),
-    {ok, Db} = mem3_util:ensure_exists(X),
-    try
-        load_shards_from_db(Db, DbName)
-    after
-        couch_db:close(Db)
-    end.
-
-load_shards_from_db(#db{} = ShardDb, DbName) ->
-    case couch_db:open_doc(ShardDb, DbName, [ejson_body]) of
-    {ok, #doc{body = {Props}}} ->
-        Shards = mem3_util:build_shards(DbName, Props),
-        gen_server:cast(?MODULE, {cache_insert, DbName, Shards}),
-        Shards;
-    {not_found, _} ->
-        erlang:error(database_does_not_exist, ?b2l(DbName))
-    end.
-
-load_shards_from_disk(DbName, DocId) ->
-    Shards = load_shards_from_disk(DbName),
-    HashKey = mem3_util:hash(DocId),
-    [S || #shard{range = [B,E]} = S <- Shards, B =< HashKey, HashKey =< E].
-
-create_if_missing(Name) ->
-    DbDir = config:get("couchdb", "database_dir"),
-    Filename = filename:join(DbDir, ?b2l(Name) ++ ".couch"),
-    case filelib:is_regular(Filename) of
-    true ->
-        ok;
-    false ->
-        Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
-        case couch_server:create(Name, Options) of
-        {ok, Db} ->
-            couch_db:close(Db);
-        Error ->
-            twig:log(error, "~p tried to create ~s, got ~p",
-                [?MODULE, Name, Error])
-        end
-    end.
-
-cache_insert(#st{cur_size=Cur}=St, DbName, Shards) ->
-    NewATime = now(),
-    true = ets:delete(?SHARDS, DbName),
-    true = ets:insert(?SHARDS, Shards),
-    case ets:lookup(?DBS, DbName) of
-        [{DbName, ATime}] ->
-            true = ets:delete(?ATIMES, ATime),
-            true = ets:insert(?ATIMES, {NewATime, DbName}),
-            true = ets:insert(?DBS, {DbName, NewATime}),
-            St;
-        [] ->
-            true = ets:insert(?ATIMES, {NewATime, DbName}),
-            true = ets:insert(?DBS, {DbName, NewATime}),
-            St#st{cur_size=Cur + 1}
-    end.
-
-cache_remove(#st{cur_size=Cur}=St, DbName) ->
-    true = ets:delete(?SHARDS, DbName),
-    case ets:lookup(?DBS, DbName) of
-        [{DbName, ATime}] ->
-            true = ets:delete(?DBS, DbName),
-            true = ets:delete(?ATIMES, ATime),
-            St#st{cur_size=Cur-1};
-        [] ->
-            St
-    end.
-
-cache_hit(DbName) ->
-    case ets:lookup(?DBS, DbName) of
-        [{DbName, ATime}] ->
-            NewATime = now(),
-            true = ets:delete(?ATIMES, ATime),
-            true = ets:insert(?ATIMES, {NewATime, DbName}),
-            true = ets:insert(?DBS, {DbName, NewATime});
-        [] ->
-            ok
-    end.
-
-cache_free(#st{max_size=Max, cur_size=Cur}=St) when Max =< Cur ->
-    ATime = ets:first(?ATIMES),
-    [{ATime, DbName}] = ets:lookup(?ATIMES, ATime),
-    true = ets:delete(?ATIMES, ATime),
-    true = ets:delete(?DBS, DbName),
-    true = ets:delete(?SHARDS, DbName),
-    cache_free(St#st{cur_size=Cur-1});
-cache_free(St) ->
-    St.
-
-cache_clear(St) ->
-    true = ets:delete_all_objects(?DBS),
-    true = ets:delete_all_objects(?SHARDS),
-    true = ets:delete_all_objects(?ATIMES),
-    St#st{cur_size=0}.
-

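The cache_insert/cache_hit/cache_free trio above implements a small LRU across three ETS tables: ?DBS maps DbName -> ATime, ?ATIMES maps ATime -> DbName, and ?SHARDS holds the cached shard records. Because now() returns unique, strictly increasing values, keeping ?ATIMES as an ordered_set lets ets:first/1 hand back the least recently used database in one call. A minimal, self-contained sketch of that mechanism (module and table names here are illustrative; mem3_shards' actual table setup lives earlier in this diff):

%% Key -> ATime in a set table; ATime -> Key in an ordered_set table,
%% so ets:first/1 on the latter yields the least recently used key.
-module(lru_sketch).
-export([demo/0]).

demo() ->
    Keys = ets:new(keys, [set]),
    ATimes = ets:new(atimes, [ordered_set]),
    lists:foreach(fun(K) -> touch(Keys, ATimes, K) end, [a, b, c]),
    touch(Keys, ATimes, a),         % refresh a; b is now the oldest
    evict_oldest(Keys, ATimes),     % drops b
    lists:sort([K || {K, _} <- ets:tab2list(Keys)]).  % [a, c]

touch(Keys, ATimes, Key) ->
    New = erlang:now(),             % unique and increasing, as above
    case ets:lookup(Keys, Key) of
        [{Key, Old}] -> ets:delete(ATimes, Old);
        [] -> ok
    end,
    true = ets:insert(ATimes, {New, Key}),
    true = ets:insert(Keys, {Key, New}).

evict_oldest(Keys, ATimes) ->
    Oldest = ets:first(ATimes),
    [{Oldest, Key}] = ets:lookup(ATimes, Oldest),
    true = ets:delete(ATimes, Oldest),
    true = ets:delete(Keys, Key).
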
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_sup.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_sup.erl b/src/mem3/src/mem3_sup.erl
deleted file mode 100644
index 6ff688b..0000000
--- a/src/mem3/src/mem3_sup.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
-    Children = [
-        child(mem3_events),
-        child(mem3_nodes),
-        child(mem3_sync_nodes), % Order important?
-        child(mem3_sync),
-        child(mem3_shards)
-    ],
-    {ok, {{one_for_one,10,1}, Children}}.
-
-child(mem3_events) ->
-    MFA = {gen_event, start_link, [{local, mem3_events}]},
-    {mem3_events, MFA, permanent, 1000, worker, dynamic};
-child(Child) ->
-    {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.

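A note on the specs above: {one_for_one, 10, 1} restarts each child independently and gives up entirely if more than 10 restarts occur within 1 second, and the 1000 in every spec is the shutdown grace period in milliseconds. Written out long-hand, the generic child/1 clause produces a standard six-tuple; e.g. for mem3_nodes:

%% Sketch of what child(mem3_nodes) expands to:
%% {Id, StartMFA, Restart, ShutdownMs, Type, Modules}
child_spec_example() ->
    {mem3_nodes,                     % unique child id
     {mem3_nodes, start_link, []},   % how the supervisor starts it
     permanent,                      % always restart on exit
     1000,                           % 1s to shut down cleanly
     worker,                         % a worker, not a nested supervisor
     [mem3_nodes]}.                  % callback modules, for upgrades
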
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_sync.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_sync.erl b/src/mem3/src/mem3_sync.erl
deleted file mode 100644
index e47f6fa..0000000
--- a/src/mem3/src/mem3_sync.erl
+++ /dev/null
@@ -1,344 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-    code_change/3]).
-
--export([start_link/0, get_active/0, get_queue/0, push/1, push/2,
-    remove_node/1, initial_sync/1, get_backlog/0]).
-
--import(queue, [in/2, out/1, to_list/1, join/2, from_list/1, is_empty/1]).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {
-    active = [],
-    count = 0,
-    limit,
-    dict = dict:new(),
-    waiting = queue:new(),
-    update_notifier
-}).
-
--record(job, {name, node, count=nil, pid=nil}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_active() ->
-    gen_server:call(?MODULE, get_active).
-
-get_queue() ->
-    gen_server:call(?MODULE, get_queue).
-
-get_backlog() ->
-    gen_server:call(?MODULE, get_backlog).
-
-push(#shard{name = Name}, Target) ->
-    push(Name, Target);
-push(Name, #shard{node=Node}) ->
-    push(Name, Node);
-push(Name, Node) ->
-    push(#job{name = Name, node = Node}).
-
-push(#job{node = Node} = Job) when Node =/= node() ->
-    gen_server:cast(?MODULE, {push, Job});
-push(_) ->
-    ok.
-
-remove_node(Node) ->
-    gen_server:cast(?MODULE, {remove_node, Node}).
-
-init([]) ->
-    process_flag(trap_exit, true),
-    Concurrency = config:get("mem3", "sync_concurrency", "10"),
-    gen_event:add_handler(mem3_events, mem3_sync_event, []),
-    {ok, Pid} = start_update_notifier(),
-    initial_sync(),
-    {ok, #state{limit = list_to_integer(Concurrency), update_notifier=Pid}}.
-
-handle_call({push, Job}, From, State) ->
-    handle_cast({push, Job#job{pid = From}}, State);
-
-handle_call(get_active, _From, State) ->
-    {reply, State#state.active, State};
-
-handle_call(get_queue, _From, State) ->
-    {reply, to_list(State#state.waiting), State};
-
-handle_call(get_backlog, _From, #state{active=A, waiting=WQ} = State) ->
-    CA = lists:sum([C || #job{count=C} <- A, is_integer(C)]),
-    CW = lists:sum([C || #job{count=C} <- to_list(WQ), is_integer(C)]),
-    {reply, CA+CW, State}.
-
-handle_cast({push, DbName, Node}, State) ->
-    handle_cast({push, #job{name = DbName, node = Node}}, State);
-
-handle_cast({push, Job}, #state{count=Count, limit=Limit} = State)
-        when Count >= Limit ->
-    {noreply, add_to_queue(State, Job)};
-
-handle_cast({push, Job}, State) ->
-    #state{active = L, count = C} = State,
-    #job{name = DbName, node = Node} = Job,
-    case is_running(DbName, Node, L) of
-    true ->
-        {noreply, add_to_queue(State, Job)};
-    false ->
-        Pid = start_push_replication(Job),
-        {noreply, State#state{active=[Job#job{pid=Pid}|L], count=C+1}}
-    end;
-
-handle_cast({remove_node, Node}, #state{waiting = W0} = State) ->
-    {Alive, Dead} = lists:partition(fun(#job{node=N}) -> N =/= Node end, to_list(W0)),
-    Dict = remove_entries(State#state.dict, Dead),
-    [exit(Pid, die_now) || #job{node=N, pid=Pid} <- State#state.active,
-        N =:= Node],
-    {noreply, State#state{dict = Dict, waiting = from_list(Alive)}};
-
-handle_cast({remove_shard, Shard}, #state{waiting = W0} = State) ->
-    {Alive, Dead} = lists:partition(fun(#job{name=S}) ->
-                                        S =/= Shard end, to_list(W0)),
-    Dict = remove_entries(State#state.dict, Dead),
-    [exit(Pid, die_now) || #job{name=S, pid=Pid} <- State#state.active,
-        S =:= Shard],
-    {noreply, State#state{dict = Dict, waiting = from_list(Alive)}}.
-
-handle_info({'EXIT', Pid, _}, #state{update_notifier=Pid} = State) ->
-    {ok, NewPid} = start_update_notifier(),
-    {noreply, State#state{update_notifier=NewPid}};
-
-handle_info({'EXIT', Active, normal}, State) ->
-    handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, die_now}, State) ->
-    % we forced this one ourselves, do not retry
-    handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, {{not_found, no_db_file}, _Stack}}, State) ->
-    % target doesn't exist, do not retry
-    handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, Reason}, State) ->
-    NewState = case lists:keyfind(Active, #job.pid, State#state.active) of
-        #job{name=OldDbName, node=OldNode} = Job ->
-        twig:log(warn, "~s ~s ~s ~w", [?MODULE, OldDbName, OldNode, Reason]),
-        case Reason of {pending_changes, Count} ->
-            maybe_resubmit(State, Job#job{pid = nil, count = Count});
-        _ ->
-            try mem3:shards(mem3:dbname(Job#job.name)) of _ ->
-                timer:apply_after(5000, ?MODULE, push, [Job#job{pid=nil}])
-            catch error:database_does_not_exist ->
-                % no need to retry
-                ok
-            end,
-            State
-        end;
-    false -> State end,
-    handle_replication_exit(NewState, Active);
-
-handle_info(Msg, State) ->
-    twig:log(notice, "unexpected msg at replication manager ~p", [Msg]),
-    {noreply, State}.
-
-terminate(_Reason, State) ->
-    [exit(Pid, shutdown) || #job{pid=Pid} <- State#state.active],
-    ok.
-
-code_change(_, #state{waiting = WaitingList} = State, _) when is_list(WaitingList) ->
-    {ok, State#state{waiting = from_list(WaitingList)}};
-
-code_change(_, State, _) ->
-    {ok, State}.
-
-maybe_resubmit(State, #job{name=DbName, node=Node} = Job) ->
-    case lists:member(DbName, local_dbs()) of
-    true ->
-        case find_next_node() of
-        Node ->
-            add_to_queue(State, Job);
-        _ ->
-            State % don't resubmit b/c we have a new replication target
-        end;
-    false ->
-        add_to_queue(State, Job)
-    end.
-
-handle_replication_exit(State, Pid) ->
-    #state{active=Active, limit=Limit, dict=D, waiting=Waiting} = State,
-    Active1 = lists:keydelete(Pid, #job.pid, Active),
-    case is_empty(Waiting) of
-    true ->
-        {noreply, State#state{active=Active1, count=length(Active1)}};
-    _ ->
-        Count = length(Active1),
-        NewState = if Count < Limit ->
-            case next_replication(Active1, Waiting, queue:new()) of
-            nil -> % all waiting replications are also active
-                State#state{active = Active1, count = Count};
-            {#job{name=DbName, node=Node} = Job, StillWaiting} ->
-                NewPid = start_push_replication(Job),
-                State#state{
-                  active = [Job#job{pid = NewPid} | Active1],
-                  count = Count+1,
-                  dict = dict:erase({DbName,Node}, D),
-                  waiting = StillWaiting
-                 }
-            end;
-        true ->
-            State#state{active = Active1, count=Count}
-        end,
-        {noreply, NewState}
-    end.
-
-start_push_replication(#job{name=Name, node=Node, pid=From}) ->
-    if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
-    spawn_link(fun() ->
-        case mem3_rep:go(Name, maybe_redirect(Node)) of
-            {ok, Pending} when Pending > 0 ->
-                exit({pending_changes, Pending});
-            _ ->
-                ok
-        end
-    end).
-
-add_to_queue(State, #job{name=DbName, node=Node, pid=From} = Job) ->
-    #state{dict=D, waiting=WQ} = State,
-    case dict:is_key({DbName, Node}, D) of
-    true ->
-        if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
-        State;
-    false ->
-        twig:log(debug, "adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
-        State#state{
-            dict = dict:store({DbName,Node}, ok, D),
-            waiting = in(Job, WQ)
-        }
-    end.
-
-sync_nodes_and_dbs() ->
-    Node = find_next_node(),
-    [push(Db, Node) || Db <- local_dbs()].
-
-initial_sync() ->
-    [net_kernel:connect_node(Node) || Node <- mem3:nodes()],
-    mem3_sync_nodes:add(nodes()).
-
-initial_sync(Live) ->
-    sync_nodes_and_dbs(),
-    Acc = {node(), Live, []},
-    {_, _, Shards} = mem3_shards:fold(fun initial_sync_fold/2, Acc),
-    submit_replication_tasks(node(), Live, Shards).
-
-initial_sync_fold(#shard{dbname = Db} = Shard, {LocalNode, Live, AccShards}) ->
-    case AccShards of
-    [#shard{dbname = AccDb} | _] when Db =/= AccDb ->
-        submit_replication_tasks(LocalNode, Live, AccShards),
-        {LocalNode, Live, [Shard]};
-    _ ->
-        {LocalNode, Live, [Shard|AccShards]}
-    end.
-
-submit_replication_tasks(LocalNode, Live, Shards) ->
-    SplitFun = fun(#shard{node = Node}) -> Node =:= LocalNode end,
-    {Local, Remote} = lists:partition(SplitFun, Shards),
-    lists:foreach(fun(#shard{name = ShardName}) ->
-        [sync_push(ShardName, N) || #shard{node=N, name=Name} <- Remote,
-            Name =:= ShardName, lists:member(N, Live)]
-    end, Local).
-
-sync_push(ShardName, N) ->
-    gen_server:call(mem3_sync, {push, #job{name=ShardName, node=N}}, infinity).
-
-start_update_notifier() ->
-    Db1 = nodes_db(),
-    Db2 = shards_db(),
-    Db3 = users_db(),
-    couch_db_update_notifier:start_link(fun
-    ({updated, Db}) when Db == Db1 ->
-        Nodes = mem3:nodes(),
-        Live = nodes(),
-        [?MODULE:push(Db1, N) || N <- Nodes, lists:member(N, Live)];
-    ({updated, Db}) when Db == Db2; Db == Db3 ->
-        ?MODULE:push(Db, find_next_node());
-    ({updated, <<"shards/", _/binary>> = ShardName}) ->
-        % TODO deal with split/merged partitions by comparing keyranges
-        try mem3:shards(mem3:dbname(ShardName)) of
-        Shards ->
-            Targets = [S || #shard{node=N, name=Name} = S <- Shards,
-                N =/= node(), Name =:= ShardName],
-            Live = nodes(),
-            [?MODULE:push(ShardName,N) || #shard{node=N} <- Targets,
-                lists:member(N, Live)]
-        catch error:database_does_not_exist ->
-            ok
-        end;
-    ({deleted, <<"shards/", _:18/binary, _/binary>> = ShardName}) ->
-        gen_server:cast(?MODULE, {remove_shard, ShardName});
-    (_) -> ok end).
-
-find_next_node() ->
-    LiveNodes = [node()|nodes()],
-    AllNodes0 = lists:sort(mem3:nodes()),
-    AllNodes1 = [X || X <- AllNodes0, lists:member(X, LiveNodes)],
-    AllNodes = AllNodes1 ++ [hd(AllNodes1)],
-    [_Self, Next| _] = lists:dropwhile(fun(N) -> N =/= node() end, AllNodes),
-    Next.
-
-%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
-%% which does not correspond to an already running replication
--spec next_replication([#job{}], [#job{}], [#job{}]) -> {#job{}, [#job{}]} | nil.
-next_replication(Active, Waiting, WaitingAndRunning) ->
-    case is_empty(Waiting) of
-    true ->
-        nil;
-    false ->
-        {{value, #job{name=S, node=N} = Job}, RemQ} = out(Waiting),
-        case is_running(S,N,Active) of
-        true ->
-            next_replication(Active, RemQ, in(Job, WaitingAndRunning));
-        false ->
-            {Job, join(RemQ, WaitingAndRunning)}
-        end
-    end.
-
-is_running(DbName, Node, ActiveList) ->
-    [] =/= [true || #job{name=S, node=N} <- ActiveList, S=:=DbName, N=:=Node].
-
-remove_entries(Dict, Entries) ->
-    lists:foldl(fun(#job{name=S, node=N}, D) ->
-        dict:erase({S, N}, D)
-    end, Dict, Entries).
-
-local_dbs() ->
-    [nodes_db(), shards_db(), users_db()].
-
-nodes_db() ->
-    ?l2b(config:get("mem3", "node_db", "nodes")).
-
-shards_db() ->
-    ?l2b(config:get("mem3", "shard_db", "dbs")).
-
-users_db() ->
-    ?l2b(config:get("couch_httpd_auth", "authentication_db", "_users")).
-
-maybe_redirect(Node) ->
-    case config:get("mem3.redirects", atom_to_list(Node)) of
-        undefined ->
-            Node;
-        Redirect ->
-            twig:log(debug, "Redirecting push from ~p to ~p", [Node, Redirect]),
-            list_to_existing_atom(Redirect)
-    end.

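find_next_node/0 above treats the sorted list of live, configured nodes as a ring and takes the successor of the local node; appending the head to the sorted list makes the wrap-around case fall out of the same pattern match. The same walk with the node passed in explicitly, so it can be tried from a shell (node names are made up; like the original, it assumes Self is a member of Live):

next_node(Self, Live) ->
    All0 = lists:sort(Live),
    All = All0 ++ [hd(All0)],       % close the ring
    [_Self, Next | _] = lists:dropwhile(fun(N) -> N =/= Self end, All),
    Next.

%% next_node(n2, [n1, n2, n3]) =:= n3
%% next_node(n3, [n1, n2, n3]) =:= n1   (wraps around)
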
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_sync_event.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_sync_event.erl b/src/mem3/src/mem3_sync_event.erl
deleted file mode 100644
index 7a20b0b..0000000
--- a/src/mem3/src/mem3_sync_event.erl
+++ /dev/null
@@ -1,85 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event).
--behaviour(gen_event).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
-    code_change/3]).
-
-init(_) ->
-    net_kernel:monitor_nodes(true),
-    {ok, nil}.
-
-handle_event({add_node, Node}, State) when Node =/= node() ->
-    net_kernel:connect_node(Node),
-    mem3_sync_nodes:add([Node]),
-    {ok, State};
-
-handle_event({remove_node, Node}, State)  ->
-    mem3_sync:remove_node(Node),
-    {ok, State};
-
-handle_event(_Event, State) ->
-    {ok, State}.
-
-handle_call(_Request, State) ->
-    {ok, ok, State}.
-
-handle_info({nodeup, Node}, State) ->
-    Nodes0 = lists:usort(drain_nodeups([Node])),
-    Nodes = lists:filter(fun(N) -> lists:member(N, mem3:nodes()) end, Nodes0),
-    wait_for_rexi(Nodes, 5),
-    {ok, State};
-
-handle_info({nodedown, Node}, State) ->
-    mem3_sync:remove_node(Node),
-    {ok, State};
-
-handle_info(_Info, State) ->
-    {ok, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-drain_nodeups(Acc) ->
-    receive
-        {nodeup, Node} ->
-            drain_nodeups([Node | Acc])
-    after 0 ->
-        Acc
-    end.
-
-wait_for_rexi([], _Retries) ->
-    ok;
-wait_for_rexi(Waiting, Retries) ->
-    % Hack around rpc:multicall/4 so that we can
-    % be sure which nodes gave which response
-    Msg = {call, erlang, whereis, [rexi_server], group_leader()},
-    {Resp, _Bad} = gen_server:multi_call(Waiting, rex, Msg, 1000),
-    Up = [N || {N, P} <- Resp, is_pid(P)],
-    NotUp = Waiting -- Up,
-    case length(Up) > 0 of
-        true ->
-            mem3_sync_nodes:add(Up);
-        false -> ok
-    end,
-    case length(NotUp) > 0 andalso Retries > 0 of
-        true ->
-            timer:sleep(1000),
-            wait_for_rexi(NotUp, Retries-1);
-        false ->
-            ok
-    end.

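drain_nodeups/1 above coalesces a burst of nodeup messages into one batch using a zero-timeout receive: 'after 0' fires as soon as no matching message is left in the mailbox, so the loop never blocks. The pattern in isolation (the tag is illustrative):

%% Collect every pending {Tag, Value} message without blocking.
drain(Tag, Acc) ->
    receive
        {Tag, Value} -> drain(Tag, [Value | Acc])
    after 0 ->
        lists:reverse(Acc)
    end.
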
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_sync_nodes.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_sync_nodes.erl b/src/mem3/src/mem3_sync_nodes.erl
deleted file mode 100644
index e07fd44..0000000
--- a/src/mem3/src/mem3_sync_nodes.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_nodes).
--behaviour(gen_server).
-
-
--export([start_link/0]).
--export([add/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--export([monitor_sync/1]).
-
-
--record(st, {
-    tid
-}).
-
-
--record(job, {
-    nodes,
-    pid,
-    retry
-}).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-add(Nodes) ->
-    gen_server:cast(?MODULE, {add, Nodes}).
-
-
-init([]) ->
-    {ok, #st{
-        tid = ets:new(?MODULE, [set, protected, {keypos, #job.nodes}])
-    }}.
-
-
-terminate(_Reason, St) ->
-    [exit(Pid, kill) || #job{pid=Pid} <- ets:tab2list(St#st.tid)],
-    ok.
-
-
-handle_call(Msg, _From, St) ->
-    {stop, {invalid_call, Msg}, invalid_call, St}.
-
-
-handle_cast({add, Nodes}, #st{tid=Tid}=St) ->
-    case ets:lookup(Tid, Nodes) of
-        [] ->
-            Pid = start_sync(Nodes),
-            ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false});
-        [#job{retry=false}=Job] ->
-            ets:insert(Tid, Job#job{retry=true});
-        _ ->
-            ok
-    end,
-    {noreply, St};
-
-handle_cast(Msg, St) ->
-    {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info({'DOWN', _, _, _, {sync_done, Nodes}}, #st{tid=Tid}=St) ->
-    case ets:lookup(Tid, Nodes) of
-        [#job{retry=true}=Job] ->
-            Pid = start_sync(Nodes),
-            ets:insert(Tid, Job#job{pid=Pid, retry=false});
-        _ ->
-            ets:delete(Tid, Nodes)
-    end,
-    {noreply, St};
-
-handle_info({'DOWN', _, _, _, {sync_error, Nodes}}, #st{tid=Tid}=St) ->
-    Pid = start_sync(Nodes),
-    ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false}),
-    {noreply, St};
-
-handle_info(Msg, St) ->
-    {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
-    {ok, St}.
-
-
-start_sync(Nodes) ->
-    {Pid, _} = spawn_monitor(?MODULE, monitor_sync, [Nodes]),
-    Pid.
-
-
-monitor_sync(Nodes) ->
-    process_flag(trap_exit, true),
-    Pid = spawn_link(mem3_sync, initial_sync, [Nodes]),
-    receive
-        {'EXIT', Pid, normal} ->
-            exit({sync_done, Nodes});
-        _ ->
-            exit({sync_error, Nodes})
-    end.
-

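Note the trick above: mem3_sync_nodes keeps no per-job result state; monitor_sync/1 encodes the outcome in its own exit reason ({sync_done, Nodes} or {sync_error, Nodes}), which comes back in the 'DOWN' messages matched by handle_info. Reduced to a generic helper (names are illustrative):

%% Run Fun in a monitored process and read the outcome out of the
%% exit reason carried by the 'DOWN' message.
run_monitored(Fun) ->
    {Pid, Ref} = spawn_monitor(fun() -> exit({done, Fun()}) end),
    receive
        {'DOWN', Ref, process, Pid, {done, Result}} -> {ok, Result};
        {'DOWN', Ref, process, Pid, Reason} -> {error, Reason}
    end.
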
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_sync_security.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_sync_security.erl b/src/mem3/src/mem3_sync_security.erl
deleted file mode 100644
index da112aa..0000000
--- a/src/mem3/src/mem3_sync_security.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_security).
-
--export([maybe_sync/2, maybe_sync_int/2]).
--export([go/0, go/1]).
-
--include_lib("mem3/include/mem3.hrl").
-
-
-maybe_sync(#shard{}=Src, #shard{}=Dst) ->
-    case is_local(Src#shard.name) of
-        false ->
-            erlang:spawn(?MODULE, maybe_sync_int, [Src, Dst]);
-        true ->
-            ok
-    end.
-
-maybe_sync_int(#shard{name=Name}=Src, Dst) ->
-    DbName = mem3:dbname(Name),
-    case fabric:get_all_security(DbName, [{shards, [Src, Dst]}]) of
-        {ok, WorkerObjs} ->
-            Objs = [Obj || {_Worker, Obj} <- WorkerObjs],
-            case length(lists:usort(Objs)) of
-                1 -> ok;
-                2 -> go(DbName)
-            end;
-        Else ->
-            Args = [DbName, Else],
-            twig:log(err, "Error checking security objects for ~s :: ~p", Args)
-    end.
-
-go() ->
-    {ok, Dbs} = fabric:all_dbs(),
-    lists:foreach(fun handle_db/1, Dbs).
-
-go(DbName) when is_binary(DbName) ->
-    handle_db(DbName).
-
-handle_db(DbName) ->
-    ShardCount = length(mem3:shards(DbName)),
-    case get_all_security(DbName) of
-    {ok, SecObjs} ->
-        case is_ok(SecObjs, ShardCount) of
-        ok ->
-            ok;
-        {fixable, SecObj} ->
-            twig:log(info, "Sync security object for ~p: ~p", [DbName, SecObj]),
-            case fabric:set_security(DbName, SecObj) of
-                ok -> ok;
-                Error ->
-                    twig:log(err, "Error setting security object in ~p: ~p",
-                        [DbName, Error])
-            end;
-        broken ->
-            twig:log(err, "Bad security object in ~p: ~p", [DbName, SecObjs])
-        end;
-    Error ->
-        twig:log(err, "Error getting security objects for ~p: ~p", [
-                DbName, Error])
-    end.
-
-get_all_security(DbName) ->
-    case fabric:get_all_security(DbName) of
-    {ok, SecObjs} ->
-        SecObjsDict = lists:foldl(fun({_, SO}, Acc) ->
-            dict:update_counter(SO, 1, Acc)
-        end, dict:new(), SecObjs),
-        {ok, dict:to_list(SecObjsDict)};
-    Error ->
-        Error
-    end.
-
-is_ok([_], _) ->
-    % One security object is the happy case
-    ok;
-is_ok([_, _] = SecObjs0, ShardCount) ->
-    % Figure out if we have a simple majority of security objects
-    % and if so, use that as the correct value. Otherwise we abort
-    % and rely on human intervention.
-    {Count, SecObj} =  lists:max([{C, O} || {O, C} <- SecObjs0]),
-    case Count >= ((ShardCount div 2) + 1) of
-        true -> {fixable, SecObj};
-        false -> broken
-    end;
-is_ok(_, _) ->
-    % Anything else requires human intervention
-    broken.
-
-
-is_local(<<"shards/", _/binary>>) ->
-    false;
-is_local(_) ->
-    true.
-

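The quorum rule in is_ok/2 above is strict majority: a candidate security object is only auto-applied when it is present on more than half of the shard copies; anything short of that is left for an operator. The predicate spelled out:

%% True only if Count copies out of ShardCount form a strict majority.
has_majority(Count, ShardCount) ->
    Count >= (ShardCount div 2) + 1.

%% has_majority(2, 3) =:= true
%% has_majority(2, 4) =:= false   (a 2/2 split is not fixable)
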
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/src/mem3_util.erl
----------------------------------------------------------------------
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
deleted file mode 100644
index 4460df6..0000000
--- a/src/mem3/src/mem3_util.erl
+++ /dev/null
@@ -1,196 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util).
-
--export([hash/1, name_shard/2, create_partition_map/5, build_shards/2,
-    n_val/2, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
-    shard_info/1, ensure_exists/1, open_db_doc/1]).
--export([owner/2, is_deleted/1]).
-
--export([create_partition_map/4, name_shard/1]).
--deprecated({create_partition_map, 4, eventually}).
--deprecated({name_shard, 1, eventually}).
-
--define(RINGTOP, 2 bsl 31).  % CRC32 space
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-hash(Item) when is_binary(Item) ->
-    erlang:crc32(Item);
-hash(Item) ->
-    erlang:crc32(term_to_binary(Item)).
-
-name_shard(Shard) ->
-    name_shard(Shard, "").
-
-name_shard(#shard{dbname = DbName, range=[B,E]} = Shard, Suffix) ->
-    Name = ["shards/", couch_util:to_hex(<<B:32/integer>>), "-",
-        couch_util:to_hex(<<E:32/integer>>), "/", DbName, Suffix],
-    Shard#shard{name = ?l2b(Name)}.
-
-create_partition_map(DbName, N, Q, Nodes) ->
-    create_partition_map(DbName, N, Q, Nodes, "").
-
-create_partition_map(DbName, N, Q, Nodes, Suffix) ->
-    UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
-    Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
-    Shards1 = attach_nodes(Shards0, [], Nodes, []),
-    [name_shard(S#shard{dbname=DbName}, Suffix) || S <- Shards1].
-
-make_key_ranges(_, CurrentPos, Acc) when CurrentPos >= ?RINGTOP ->
-    Acc;
-make_key_ranges(Increment, Start, Acc) ->
-    case Start + 2*Increment of
-    X when X > ?RINGTOP ->
-        End = ?RINGTOP - 1;
-    _ ->
-        End = Start + Increment - 1
-    end,
-    make_key_ranges(Increment, End+1, [#shard{range=[Start, End]} | Acc]).
-
-attach_nodes([], Acc, _, _) ->
-    lists:reverse(Acc);
-attach_nodes(Shards, Acc, [], UsedNodes) ->
-    attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
-attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
-    attach_nodes(Rest, [S#shard{node=Node} | Acc], Nodes, [Node | UsedNodes]).
-
-open_db_doc(DocId) ->
-    DbName = ?l2b(config:get("mem3", "shard_db", "dbs")),
-    {ok, Db} = couch_db:open(DbName, []),
-    try couch_db:open_doc(Db, DocId, [ejson_body]) after couch_db:close(Db) end.
-
-write_db_doc(Doc) ->
-    DbName = ?l2b(config:get("mem3", "shard_db", "dbs")),
-    write_db_doc(DbName, Doc, true).
-
-write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
-    {ok, Db} = couch_db:open(DbName, []),
-    try couch_db:open_doc(Db, Id, [ejson_body]) of
-    {ok, #doc{body = Body}} ->
-        % the doc is already in the desired state, we're done here
-        ok;
-    {not_found, _} when ShouldMutate ->
-        try couch_db:update_doc(Db, Doc, []) of
-        {ok, _} ->
-            ok
-        catch conflict ->
-            % check to see if this was a replication race or a different edit
-            write_db_doc(DbName, Doc, false)
-        end;
-    _ ->
-        % the doc already exists in a different state
-        conflict
-    after
-        couch_db:close(Db)
-    end.
-
-delete_db_doc(DocId) ->
-    gen_server:cast(mem3_shards, {cache_remove, DocId}),
-    DbName = ?l2b(config:get("mem3", "shard_db", "dbs")),
-    delete_db_doc(DbName, DocId, true).
-
-delete_db_doc(DbName, DocId, ShouldMutate) ->
-    {ok, Db} = couch_db:open(DbName, []),
-    {ok, Revs} = couch_db:open_doc_revs(Db, DocId, all, []),
-    try [Doc#doc{deleted=true} || {ok, #doc{deleted=false}=Doc} <- Revs] of
-    [] ->
-        not_found;
-    Docs when ShouldMutate ->
-        try couch_db:update_docs(Db, Docs, []) of
-        {ok, _} ->
-            ok
-        catch conflict ->
-            % check to see if this was a replication race or if leaves survived
-            delete_db_doc(DbName, DocId, false)
-        end;
-    _ ->
-        % we have live leaves that we aren't allowed to delete. let's bail
-        conflict
-    after
-        couch_db:close(Db)
-    end.
-
-build_shards(DbName, DocProps) ->
-    {ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
-    Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
-    lists:flatmap(fun({Node, Ranges}) ->
-        lists:map(fun(Range) ->
-            [B,E] = string:tokens(?b2l(Range), "-"),
-            Beg = httpd_util:hexlist_to_integer(B),
-            End = httpd_util:hexlist_to_integer(E),
-            name_shard(#shard{
-                dbname = DbName,
-                node = to_atom(Node),
-                range = [Beg, End]
-            }, Suffix)
-        end, Ranges)
-    end, ByNode).
-
-to_atom(Node) when is_binary(Node) ->
-    list_to_atom(binary_to_list(Node));
-to_atom(Node) when is_atom(Node) ->
-    Node.
-
-to_integer(N) when is_integer(N) ->
-    N;
-to_integer(N) when is_binary(N) ->
-    list_to_integer(binary_to_list(N));
-to_integer(N) when is_list(N) ->
-    list_to_integer(N).
-
-n_val(undefined, NodeCount) ->
-    n_val(config:get("cluster", "n", "3"), NodeCount);
-n_val(N, NodeCount) when is_list(N) ->
-    n_val(list_to_integer(N), NodeCount);
-n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
-    twig:log(error, "Request to create N=~p DB but only ~p node(s)", [N, NodeCount]),
-    NodeCount;
-n_val(N, _) when N < 1 ->
-    1;
-n_val(N, _) ->
-    N.
-
-shard_info(DbName) ->
-    [{n, mem3:n(DbName)},
-     {q, length(mem3:shards(DbName)) div mem3:n(DbName)}].
-
-ensure_exists(DbName) when is_list(DbName) ->
-    ensure_exists(list_to_binary(DbName));
-ensure_exists(DbName) ->
-    Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
-    case couch_db:open(DbName, Options) of
-    {ok, Db} ->
-        {ok, Db};
-    _ ->
-        couch_server:create(DbName, Options)
-    end.
-
-
-owner(DbName, DocId) ->
-    Shards = mem3:shards(DbName, DocId),
-    Nodes = [node()|nodes()],
-    LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, Nodes)],
-    [#shard{node=Node}] = lists:usort(fun(#shard{name=A}, #shard{name=B}) ->
-                                              A =< B  end, LiveShards),
-    node() =:= Node.
-
-is_deleted(Change) ->
-    case couch_util:get_value(<<"deleted">>, Change) of
-    undefined ->
-        % keep backwards compatibility for a while
-        couch_util:get_value(deleted, Change, false);
-    Else ->
-        Else
-    end.

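Tying hash/1 and build_shards/2 together: a document belongs to the shard whose [B,E] range brackets the CRC32 of its id, which is exactly the comprehension mem3_shards runs in load_shards_from_disk/2 earlier in this mail. A standalone version (the function name is made up):

%% Select the ranges covering DocId; with non-overlapping ranges that
%% tile the 32-bit space, exactly one range matches.
owner_ranges(DocId, Ranges) when is_binary(DocId) ->
    Key = erlang:crc32(DocId),      % mem3_util:hash/1 for binaries
    [R || [B, E] = R <- Ranges, B =< Key, Key =< E].

%% With Q=2 over the 2^32 key space:
%% owner_ranges(<<"mydoc">>, [[0, 2147483647], [2147483648, 4294967295]])
%% returns the single matching range.
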
http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/test/01-config-default.ini
----------------------------------------------------------------------
diff --git a/src/mem3/test/01-config-default.ini b/src/mem3/test/01-config-default.ini
deleted file mode 100644
index 757f783..0000000
--- a/src/mem3/test/01-config-default.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[cluster]
-n=3

http://git-wip-us.apache.org/repos/asf/couchdb/blob/4cac46af/src/mem3/test/mem3_util_test.erl
----------------------------------------------------------------------
diff --git a/src/mem3/test/mem3_util_test.erl b/src/mem3/test/mem3_util_test.erl
deleted file mode 100644
index e289282..0000000
--- a/src/mem3/test/mem3_util_test.erl
+++ /dev/null
@@ -1,152 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util_test).
-
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-hash_test() ->
-    ?assertEqual(1624516141,mem3_util:hash(0)),
-    ?assertEqual(3816901808,mem3_util:hash("0")),
-    ?assertEqual(3523407757,mem3_util:hash(<<0>>)),
-    ?assertEqual(4108050209,mem3_util:hash(<<"0">>)),
-    ?assertEqual(3094724072,mem3_util:hash(zero)),
-    ok.
-
-name_shard_test() ->
-    Shard1 = #shard{},
-    ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")),
-
-    Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]},
-    #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"),
-    ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2),
-
-    ok.
-
-create_partition_map_test() ->
-    {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]},
-    Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
-    ?assertEqual(12, length(Map1)),
-
-    {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]},
-    [#shard{name=Name2,node=Node2}] = Map2 =
-        mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"),
-    ?assertEqual(1, length(Map2)),
-    ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2),
-    ?assertEqual(a, Node2),
-    ok.
-
-build_shards_test() ->
-    DocProps1 =
-         [{<<"changelog">>,
-            [[<<"add">>,<<"00000000-1fffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"20000000-3fffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"40000000-5fffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"60000000-7fffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"80000000-9fffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"a0000000-bfffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"c0000000-dfffffff">>,
-              <<"bigcouch@node.local">>],
-             [<<"add">>,<<"e0000000-ffffffff">>,
-              <<"bigcouch@node.local">>]]},
-           {<<"by_node">>,
-            {[{<<"bigcouch@node.local">>,
-               [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>,
-                <<"40000000-5fffffff">>,<<"60000000-7fffffff">>,
-                <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>,
-                <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}},
-           {<<"by_range">>,
-            {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]},
-              {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}],
-    Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
-    ExpectedShards1 =
-        [{shard,<<"shards/00000000-1fffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [0,536870911],
-          undefined},
-         {shard,<<"shards/20000000-3fffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [536870912,1073741823],
-          undefined},
-         {shard,<<"shards/40000000-5fffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [1073741824,1610612735],
-          undefined},
-         {shard,<<"shards/60000000-7fffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [1610612736,2147483647],
-          undefined},
-         {shard,<<"shards/80000000-9fffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [2147483648,2684354559],
-          undefined},
-         {shard,<<"shards/a0000000-bfffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [2684354560,3221225471],
-          undefined},
-         {shard,<<"shards/c0000000-dfffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [3221225472,3758096383],
-          undefined},
-         {shard,<<"shards/e0000000-ffffffff/testdb1">>,
-          'bigcouch@node.local',<<"testdb1">>,
-          [3758096384,4294967295],
-          undefined}],
-    ?assertEqual(ExpectedShards1, Shards1),
-    ok.
-
-
-%% n_val tests
-
-nval_test() ->
-    ?assertEqual(2, mem3_util:n_val(2,4)),
-    ?assertEqual(1, mem3_util:n_val(-1,4)),
-    ?assertEqual(4, mem3_util:n_val(6,4)),
-    ok.
-
-config_01_setup() ->
-    Ini = filename:join([code:lib_dir(mem3, test), "01-config-default.ini"]),
-    {ok, Pid} = config:start_link([Ini]),
-    Pid.
-
-config_teardown(_Pid) ->
-    config:stop().
-
-n_val_test_() ->
-    {"n_val tests",
-     [
-      {setup,
-       fun config_01_setup/0,
-       fun config_teardown/1,
-       fun(Pid) ->
-           {with, Pid, [
-               fun n_val_1/1
-            ]}
-       end}
-     ]
-    }.
-
-n_val_1(_Pid) ->
-    ?assertEqual(3, mem3_util:n_val(undefined, 4)).


[37/49] Remove src/mochiweb

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochinum.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochinum.erl b/src/mochiweb/src/mochinum.erl
deleted file mode 100644
index 3c96b13..0000000
--- a/src/mochiweb/src/mochinum.erl
+++ /dev/null
@@ -1,354 +0,0 @@
-%% @copyright 2007 Mochi Media, Inc.
-%% @author Bob Ippolito <bo...@mochimedia.com>
-
-%% @doc Useful numeric algorithms for floats that cover some deficiencies
-%% in the math module. More interesting is digits/1, which implements
-%% the algorithm from:
-%% http://www.cs.indiana.edu/~burger/fp/index.html
-%% See also "Printing Floating-Point Numbers Quickly and Accurately"
-%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
-%% Design and Implementation.
-
--module(mochinum).
--author("Bob Ippolito <bo...@mochimedia.com>").
--export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
-
-%% IEEE 754 Float exponent bias
--define(FLOAT_BIAS, 1022).
--define(MIN_EXP, -1074).
--define(BIG_POW, 4503599627370496).
-
-%% External API
-
-%% @spec digits(number()) -> string()
-%% @doc  Returns a string that accurately represents the given integer or float
-%%       using a conservative amount of digits. Great for generating
-%%       human-readable output, or compact ASCII serializations for floats.
-digits(N) when is_integer(N) ->
-    integer_to_list(N);
-digits(0.0) ->
-    "0.0";
-digits(Float) ->
-    {Frac1, Exp1} = frexp_int(Float),
-    [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
-    {Place, Digits} = transform_digits(Place0, Digits0),
-    R = insert_decimal(Place, Digits),
-    case Float < 0 of
-        true ->
-            [$- | R];
-        _ ->
-            R
-    end.
-
-%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
-%% @doc  Return the fractional and exponent part of an IEEE 754 double,
-%%       equivalent to the libc function of the same name.
-%%       F = Frac * pow(2, Exp).
-frexp(F) ->
-    frexp1(unpack(F)).
-
-%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
-%% @doc  Moderately efficient way to exponentiate integers.
-%%       int_pow(10, 2) = 100.
-int_pow(_X, 0) ->
-    1;
-int_pow(X, N) when N > 0 ->
-    int_pow(X, N, 1).
-
-%% @spec int_ceil(F::float()) -> integer()
-%% @doc  Return the ceiling of F as an integer. The ceiling is defined as
-%%       F when F == trunc(F);
-%%       trunc(F) when F &lt; 0;
-%%       trunc(F) + 1 when F &gt; 0.
-int_ceil(X) ->
-    T = trunc(X),
-    case (X - T) of
-        Pos when Pos > 0 -> T + 1;
-        _ -> T
-    end.
-
-
-%% Internal API
-
-int_pow(X, N, R) when N < 2 ->
-    R * X;
-int_pow(X, N, R) ->
-    int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
-
-insert_decimal(0, S) ->
-    "0." ++ S;
-insert_decimal(Place, S) when Place > 0 ->
-    L = length(S),
-    case Place - L of
-         0 ->
-            S ++ ".0";
-        N when N < 0 ->
-            {S0, S1} = lists:split(L + N, S),
-            S0 ++ "." ++ S1;
-        N when N < 6 ->
-            %% More places than digits
-            S ++ lists:duplicate(N, $0) ++ ".0";
-        _ ->
-            insert_decimal_exp(Place, S)
-    end;
-insert_decimal(Place, S) when Place > -6 ->
-    "0." ++ lists:duplicate(abs(Place), $0) ++ S;
-insert_decimal(Place, S) ->
-    insert_decimal_exp(Place, S).
-
-insert_decimal_exp(Place, S) ->
-    [C | S0] = S,
-    S1 = case S0 of
-             [] ->
-                 "0";
-             _ ->
-                 S0
-         end,
-    Exp = case Place < 0 of
-              true ->
-                  "e-";
-              false ->
-                  "e+"
-          end,
-    [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
-
-
-digits1(Float, Exp, Frac) ->
-    Round = ((Frac band 1) =:= 0),
-    case Exp >= 0 of
-        true ->
-            BExp = 1 bsl Exp,
-            case (Frac =/= ?BIG_POW) of
-                true ->
-                    scale((Frac * BExp * 2), 2, BExp, BExp,
-                          Round, Round, Float);
-                false ->
-                    scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
-                          Round, Round, Float)
-            end;
-        false ->
-            case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
-                true ->
-                    scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
-                          Round, Round, Float);
-                false ->
-                    scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
-                          Round, Round, Float)
-            end
-    end.
-
-scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
-    Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
-    %% Note that the Scheme implementation uses a 326 element look-up table
-    %% for int_pow(10, N) where we do not.
-    case Est >= 0 of
-        true ->
-            fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
-                  LowOk, HighOk);
-        false ->
-            Scale = int_pow(10, -Est),
-            fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
-                  LowOk, HighOk)
-    end.
-
-fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
-    TooLow = case HighOk of
-                 true ->
-                     (R + MPlus) >= S;
-                 false ->
-                     (R + MPlus) > S
-             end,
-    case TooLow of
-        true ->
-            [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
-        false ->
-            [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
-    end.
-
-generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
-    D = R0 div S,
-    R = R0 rem S,
-    TC1 = case LowOk of
-              true ->
-                  R =< MMinus;
-              false ->
-                  R < MMinus
-          end,
-    TC2 = case HighOk of
-              true ->
-                  (R + MPlus) >= S;
-              false ->
-                  (R + MPlus) > S
-          end,
-    case TC1 of
-        false ->
-            case TC2 of
-                false ->
-                    [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
-                                  LowOk, HighOk)];
-                true ->
-                    [D + 1]
-            end;
-        true ->
-            case TC2 of
-                false ->
-                    [D];
-                true ->
-                    case R * 2 < S of
-                        true ->
-                            [D];
-                        false ->
-                            [D + 1]
-                    end
-            end
-    end.
-
-unpack(Float) ->
-    <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
-    {Sign, Exp, Frac}.
-
-frexp1({_Sign, 0, 0}) ->
-    {0.0, 0};
-frexp1({Sign, 0, Frac}) ->
-    Exp = log2floor(Frac),
-    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
-    {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
-frexp1({Sign, Exp, Frac}) ->
-    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
-    {Frac1, Exp - ?FLOAT_BIAS}.
-
-log2floor(Int) ->
-    log2floor(Int, 0).
-
-log2floor(0, N) ->
-    N;
-log2floor(Int, N) ->
-    log2floor(Int bsr 1, 1 + N).
-
-
-transform_digits(Place, [0 | Rest]) ->
-    transform_digits(Place, Rest);
-transform_digits(Place, Digits) ->
-    {Place, [$0 + D || D <- Digits]}.
-
-
-frexp_int(F) ->
-    case unpack(F) of
-        {_Sign, 0, Frac} ->
-            {Frac, ?MIN_EXP};
-        {_Sign, Exp, Frac} ->
-            {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-int_ceil_test() ->
-    ?assertEqual(1, int_ceil(0.0001)),
-    ?assertEqual(0, int_ceil(0.0)),
-    ?assertEqual(1, int_ceil(0.99)),
-    ?assertEqual(1, int_ceil(1.0)),
-    ?assertEqual(-1, int_ceil(-1.5)),
-    ?assertEqual(-2, int_ceil(-2.0)),
-    ok.
-
-int_pow_test() ->
-    ?assertEqual(1, int_pow(1, 1)),
-    ?assertEqual(1, int_pow(1, 0)),
-    ?assertEqual(1, int_pow(10, 0)),
-    ?assertEqual(10, int_pow(10, 1)),
-    ?assertEqual(100, int_pow(10, 2)),
-    ?assertEqual(1000, int_pow(10, 3)),
-    ok.
-
-digits_test() ->
-    ?assertEqual("0",
-                 digits(0)),
-    ?assertEqual("0.0",
-                 digits(0.0)),
-    ?assertEqual("1.0",
-                 digits(1.0)),
-    ?assertEqual("-1.0",
-                 digits(-1.0)),
-    ?assertEqual("0.1",
-                 digits(0.1)),
-    ?assertEqual("0.01",
-                 digits(0.01)),
-    ?assertEqual("0.001",
-                 digits(0.001)),
-    ?assertEqual("1.0e+6",
-                 digits(1000000.0)),
-    ?assertEqual("0.5",
-                 digits(0.5)),
-    ?assertEqual("4503599627370496.0",
-                 digits(4503599627370496.0)),
-    %% small denormalized number
-    %% 4.94065645841246544177e-324 =:= 5.0e-324
-    <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
-    ?assertEqual("5.0e-324",
-                 digits(SmallDenorm)),
-    ?assertEqual(SmallDenorm,
-                 list_to_float(digits(SmallDenorm))),
-    %% large denormalized number
-    %% 2.22507385850720088902e-308
-    <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
-    ?assertEqual("2.225073858507201e-308",
-                 digits(BigDenorm)),
-    ?assertEqual(BigDenorm,
-                 list_to_float(digits(BigDenorm))),
-    %% small normalized number
-    %% 2.22507385850720138309e-308
-    <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
-    ?assertEqual("2.2250738585072014e-308",
-                 digits(SmallNorm)),
-    ?assertEqual(SmallNorm,
-                 list_to_float(digits(SmallNorm))),
-    %% large normalized number
-    %% 1.79769313486231570815e+308
-    <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
-    ?assertEqual("1.7976931348623157e+308",
-                 digits(LargeNorm)),
-    ?assertEqual(LargeNorm,
-                 list_to_float(digits(LargeNorm))),
-    %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
-    ?assertEqual("5.0e-324",
-                 digits(math:pow(2, -1074))),
-    ok.
-
-frexp_test() ->
-    %% zero
-    ?assertEqual({0.0, 0}, frexp(0.0)),
-    %% one
-    ?assertEqual({0.5, 1}, frexp(1.0)),
-    %% negative one
-    ?assertEqual({-0.5, 1}, frexp(-1.0)),
-    %% small denormalized number
-    %% 4.94065645841246544177e-324
-    <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
-    ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
-    %% large denormalized number
-    %% 2.22507385850720088902e-308
-    <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
-    ?assertEqual(
-       {0.99999999999999978, -1022},
-       frexp(BigDenorm)),
-    %% small normalized number
-    %% 2.22507385850720138309e-308
-    <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
-    ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
-    %% large normalized number
-    %% 1.79769313486231570815e+308
-    <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
-    ?assertEqual(
-        {0.99999999999999989, 1024},
-        frexp(LargeNorm)),
-    %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
-    ?assertEqual(
-       {0.5, -1073},
-       frexp(math:pow(2, -1074))),
-    ok.
-
--endif.

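The property digits_test/0 keeps exercising above is worth stating once: digits/1 emits a shortest decimal string that parses back to the identical IEEE 754 double. As a one-line check (assuming mochinum is on the code path):

%% Round-trip: parsing the printed form recovers the same bits.
roundtrip(F) when is_float(F) ->
    F =:= list_to_float(mochinum:digits(F)).

%% roundtrip(0.1) =:= true even though 0.1 has no exact binary
%% representation: "0.1" is simply the shortest decimal that maps
%% back to the same double.
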
http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochitemp.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochitemp.erl b/src/mochiweb/src/mochitemp.erl
deleted file mode 100644
index bb23d2a..0000000
--- a/src/mochiweb/src/mochitemp.erl
+++ /dev/null
@@ -1,310 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2010 Mochi Media, Inc.
-
-%% @doc Create temporary files and directories. Requires crypto to be started.
-
--module(mochitemp).
--export([gettempdir/0]).
--export([mkdtemp/0, mkdtemp/3]).
--export([rmtempdir/1]).
-%% -export([mkstemp/4]).
--define(SAFE_CHARS, {$a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
-                     $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
-                     $A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L, $M,
-                     $N, $O, $P, $Q, $R, $S, $T, $U, $V, $W, $X, $Y, $Z,
-                     $0, $1, $2, $3, $4, $5, $6, $7, $8, $9, $_}).
--define(TMP_MAX, 10000).
-
--include_lib("kernel/include/file.hrl").
-
-%% TODO: An ugly wrapper over the mktemp tool with open_port and sadness?
-%%       We can't implement this race-free in Erlang without the ability
-%%       to issue O_CREAT|O_EXCL. I suppose we could hack something with
-%%       mkdtemp, del_dir, open.
-%% mkstemp(Suffix, Prefix, Dir, Options) ->
-%%    ok.
-
-rmtempdir(Dir) ->
-    case file:del_dir(Dir) of
-        {error, eexist} ->
-            ok = rmtempdirfiles(Dir),
-            ok = file:del_dir(Dir);
-        ok ->
-            ok
-    end.
-
-rmtempdirfiles(Dir) ->
-    {ok, Files} = file:list_dir(Dir),
-    ok = rmtempdirfiles(Dir, Files).
-
-rmtempdirfiles(_Dir, []) ->
-    ok;
-rmtempdirfiles(Dir, [Basename | Rest]) ->
-    Path = filename:join([Dir, Basename]),
-    case filelib:is_dir(Path) of
-        true ->
-            ok = rmtempdir(Path);
-        false ->
-            ok = file:delete(Path)
-    end,
-    rmtempdirfiles(Dir, Rest).
-
-mkdtemp() ->
-    mkdtemp("", "tmp", gettempdir()).
-
-mkdtemp(Suffix, Prefix, Dir) ->
-    mkdtemp_n(rngpath_fun(Suffix, Prefix, Dir), ?TMP_MAX).
-
-
-
-mkdtemp_n(RngPath, 1) ->
-    make_dir(RngPath());
-mkdtemp_n(RngPath, N) ->
-    try make_dir(RngPath())
-    catch throw:{error, eexist} ->
-            mkdtemp_n(RngPath, N - 1)
-    end.
-
-make_dir(Path) ->
-    case file:make_dir(Path) of
-        ok ->
-            ok;
-        E={error, eexist} ->
-            throw(E)
-    end,
-    %% Small window for a race condition here because dir is created 777
-    ok = file:write_file_info(Path, #file_info{mode=8#0700}),
-    Path.
-
-rngpath_fun(Suffix, Prefix, Dir) ->
-    fun () ->
-            filename:join([Dir, Prefix ++ rngchars(6) ++ Suffix])
-    end.
-
-rngchars(0) ->
-    "";
-rngchars(N) ->
-    [rngchar() | rngchars(N - 1)].
-
-rngchar() ->
-    rngchar(crypto:rand_uniform(0, tuple_size(?SAFE_CHARS))).
-
-rngchar(C) ->
-    element(1 + C, ?SAFE_CHARS).
-
-%% @spec gettempdir() -> string()
-%% @doc Get a usable temporary directory using the first of these that is a directory:
-%%      $TMPDIR, $TMP, $TEMP, "/tmp", "/var/tmp", "/usr/tmp", ".".
-gettempdir() ->
-    gettempdir(gettempdir_checks(), fun normalize_dir/1).
-
-gettempdir_checks() ->
-    [{fun os:getenv/1, ["TMPDIR", "TMP", "TEMP"]},
-     {fun gettempdir_identity/1, ["/tmp", "/var/tmp", "/usr/tmp"]},
-     {fun gettempdir_cwd/1, [cwd]}].
-
-gettempdir_identity(L) ->
-    L.
-
-gettempdir_cwd(cwd) ->
-    {ok, L} = file:get_cwd(),
-    L.
-
-gettempdir([{_F, []} | RestF], Normalize) ->
-    gettempdir(RestF, Normalize);
-gettempdir([{F, [L | RestL]} | RestF], Normalize) ->
-    case Normalize(F(L)) of
-        false ->
-            gettempdir([{F, RestL} | RestF], Normalize);
-        Dir ->
-            Dir
-    end.
-
-normalize_dir(False) when False =:= false orelse False =:= "" ->
-    %% Erlang doesn't have an unsetenv, wtf.
-    false;
-normalize_dir(L) ->
-    Dir = filename:absname(L),
-    case filelib:is_dir(Dir) of
-        false ->
-            false;
-        true ->
-            Dir
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-pushenv(L) ->
-    [{K, os:getenv(K)} || K <- L].
-popenv(L) ->
-    F = fun ({K, false}) ->
-                %% Erlang doesn't have an unsetenv, wtf.
-                os:putenv(K, "");
-            ({K, V}) ->
-                os:putenv(K, V)
-        end,
-    lists:foreach(F, L).
-
-gettempdir_fallback_test() ->
-    ?assertEqual(
-       "/",
-       gettempdir([{fun gettempdir_identity/1, ["/--not-here--/"]},
-                   {fun gettempdir_identity/1, ["/"]}],
-                  fun normalize_dir/1)),
-    ?assertEqual(
-       "/",
-       %% simulate a true os:getenv unset env
-       gettempdir([{fun gettempdir_identity/1, [false]},
-                   {fun gettempdir_identity/1, ["/"]}],
-                  fun normalize_dir/1)),
-    ok.
-
-gettempdir_identity_test() ->
-    ?assertEqual(
-       "/",
-       gettempdir([{fun gettempdir_identity/1, ["/"]}], fun normalize_dir/1)),
-    ok.
-
-gettempdir_cwd_test() ->
-    {ok, Cwd} = file:get_cwd(),
-    ?assertEqual(
-       normalize_dir(Cwd),
-       gettempdir([{fun gettempdir_cwd/1, [cwd]}], fun normalize_dir/1)),
-    ok.
-
-rngchars_test() ->
-    crypto:start(),
-    ?assertEqual(
-       "",
-       rngchars(0)),
-    ?assertEqual(
-       10,
-       length(rngchars(10))),
-    ok.
-
-rngchar_test() ->
-    ?assertEqual(
-       $a,
-       rngchar(0)),
-    ?assertEqual(
-       $A,
-       rngchar(26)),
-    ?assertEqual(
-       $_,
-       rngchar(62)),
-    ok.
-
-mkdtemp_n_failonce_test() ->
-    crypto:start(),
-    D = mkdtemp(),
-    Path = filename:join([D, "testdir"]),
-    %% Toggle the existence of a dir so that it fails
-    %% the first time and succeeds the second.
-    F = fun () ->
-                case filelib:is_dir(Path) of
-                    true ->
-                        file:del_dir(Path);
-                    false ->
-                        file:make_dir(Path)
-                end,
-                Path
-        end,
-    try
-        %% Fails the first time
-        ?assertThrow(
-           {error, eexist},
-           mkdtemp_n(F, 1)),
-        %% Reset state
-        file:del_dir(Path),
-        %% Succeeds the second time
-        ?assertEqual(
-           Path,
-           mkdtemp_n(F, 2))
-    after rmtempdir(D)
-    end,
-    ok.
-
-mkdtemp_n_fail_test() ->
-    {ok, Cwd} = file:get_cwd(),
-    ?assertThrow(
-       {error, eexist},
-       mkdtemp_n(fun () -> Cwd end, 1)),
-    ?assertThrow(
-       {error, eexist},
-       mkdtemp_n(fun () -> Cwd end, 2)),
-    ok.
-
-make_dir_fail_test() ->
-    {ok, Cwd} = file:get_cwd(),
-    ?assertThrow(
-      {error, eexist},
-      make_dir(Cwd)),
-    ok.
-
-mkdtemp_test() ->
-    crypto:start(),
-    D = mkdtemp(),
-    ?assertEqual(
-       true,
-       filelib:is_dir(D)),
-    ?assertEqual(
-       ok,
-       file:del_dir(D)),
-    ok.
-
-rmtempdir_test() ->
-    crypto:start(),
-    D1 = mkdtemp(),
-    ?assertEqual(
-       true,
-       filelib:is_dir(D1)),
-    ?assertEqual(
-       ok,
-       rmtempdir(D1)),
-    D2 = mkdtemp(),
-    ?assertEqual(
-       true,
-       filelib:is_dir(D2)),
-    ok = file:write_file(filename:join([D2, "foo"]), <<"bytes">>),
-    D3 = mkdtemp("suffix", "prefix", D2),
-    ?assertEqual(
-       true,
-       filelib:is_dir(D3)),
-    ok = file:write_file(filename:join([D3, "foo"]), <<"bytes">>),
-    ?assertEqual(
-       ok,
-       rmtempdir(D2)),
-    ?assertEqual(
-       {error, enoent},
-       file:consult(D3)),
-    ?assertEqual(
-       {error, enoent},
-       file:consult(D2)),
-    ok.
-
-gettempdir_env_test() ->
-    Env = pushenv(["TMPDIR", "TEMP", "TMP"]),
-    FalseEnv = [{"TMPDIR", false}, {"TEMP", false}, {"TMP", false}],
-    try
-        popenv(FalseEnv),
-        popenv([{"TMPDIR", "/"}]),
-        ?assertEqual(
-           "/",
-           os:getenv("TMPDIR")),
-        ?assertEqual(
-           "/",
-           gettempdir()),
-        {ok, Cwd} = file:get_cwd(),
-        popenv(FalseEnv),
-        popenv([{"TMP", Cwd}]),
-        ?assertEqual(
-           normalize_dir(Cwd),
-           gettempdir())
-    after popenv(Env)
-    end,
-    ok.
-
--endif.
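
For reference, a minimal usage sketch of the temp-directory helpers deleted above (mochiweb's mochitemp module), assuming its usual exports (mkdtemp/0, mkdtemp/3, rmtempdir/1, gettempdir/0); directory names and shell output are illustrative:

    %% Create a temp dir under the detected temp root, use it, remove it.
    1> Dir = mochitemp:mkdtemp().
    "/tmp/tmpQ2xV7a"
    2> ok = file:write_file(filename:join(Dir, "scratch"), <<"bytes">>).
    ok
    3> ok = mochitemp:rmtempdir(Dir).
    ok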

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiutf8.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiutf8.erl b/src/mochiweb/src/mochiutf8.erl
deleted file mode 100644
index 206e118..0000000
--- a/src/mochiweb/src/mochiutf8.erl
+++ /dev/null
@@ -1,316 +0,0 @@
-%% @copyright 2010 Mochi Media, Inc.
-%% @author Bob Ippolito <bo...@mochimedia.com>
-
-%% @doc Algorithm to convert any binary to a valid UTF-8 sequence by ignoring
-%%      invalid bytes.
-
--module(mochiutf8).
--export([valid_utf8_bytes/1, codepoint_to_bytes/1, bytes_to_codepoints/1]).
--export([bytes_foldl/3, codepoint_foldl/3, read_codepoint/1, len/1]).
-
-%% External API
-
--type unichar_low() :: 0..16#d7ff.
--type unichar_high() :: 16#e000..16#10ffff.
--type unichar() :: unichar_low() | unichar_high().
-
--spec codepoint_to_bytes(unichar()) -> binary().
-%% @doc Convert a unicode codepoint to UTF-8 bytes.
-codepoint_to_bytes(C) when (C >= 16#00 andalso C =< 16#7f) ->
-    %% U+0000 - U+007F - 7 bits
-    <<C>>;
-codepoint_to_bytes(C) when (C >= 16#080 andalso C =< 16#07FF) ->
-    %% U+0080 - U+07FF - 11 bits
-    <<0:5, B1:5, B0:6>> = <<C:16>>,
-    <<2#110:3, B1:5,
-      2#10:2, B0:6>>;
-codepoint_to_bytes(C) when (C >= 16#0800 andalso C =< 16#FFFF) andalso
-                           (C < 16#D800 orelse C > 16#DFFF) ->
-    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
-    <<B2:4, B1:6, B0:6>> = <<C:16>>,
-    <<2#1110:4, B2:4,
-      2#10:2, B1:6,
-      2#10:2, B0:6>>;
-codepoint_to_bytes(C) when (C >= 16#010000 andalso C =< 16#10FFFF) ->
-    %% U+10000 - U+10FFFF - 21 bits
-    <<0:3, B3:3, B2:6, B1:6, B0:6>> = <<C:24>>,
-    <<2#11110:5, B3:3,
-      2#10:2, B2:6,
-      2#10:2, B1:6,
-      2#10:2, B0:6>>.
-
--spec codepoints_to_bytes([unichar()]) -> binary().
-%% @doc Convert a list of codepoints to a UTF-8 binary.
-codepoints_to_bytes(L) ->
-    <<<<(codepoint_to_bytes(C))/binary>> || C <- L>>.
-
--spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
-read_codepoint(Bin = <<2#0:1, C:7, Rest/binary>>) ->
-    %% U+0000 - U+007F - 7 bits
-    <<B:1/binary, _/binary>> = Bin,
-    {C, B, Rest};
-read_codepoint(Bin = <<2#110:3, B1:5,
-                       2#10:2, B0:6,
-                       Rest/binary>>) ->
-    %% U+0080 - U+07FF - 11 bits
-    case <<B1:5, B0:6>> of
-        <<C:11>> when C >= 16#80 ->
-            <<B:2/binary, _/binary>> = Bin,
-            {C, B, Rest}
-    end;
-read_codepoint(Bin = <<2#1110:4, B2:4,
-                       2#10:2, B1:6,
-                       2#10:2, B0:6,
-                       Rest/binary>>) ->
-    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
-    case <<B2:4, B1:6, B0:6>> of
-        <<C:16>> when (C >= 16#0800 andalso C =< 16#FFFF) andalso
-                      (C < 16#D800 orelse C > 16#DFFF) ->
-            <<B:3/binary, _/binary>> = Bin,
-            {C, B, Rest}
-    end;
-read_codepoint(Bin = <<2#11110:5, B3:3,
-                       2#10:2, B2:6,
-                       2#10:2, B1:6,
-                       2#10:2, B0:6,
-                       Rest/binary>>) ->
-    %% U+10000 - U+10FFFF - 21 bits
-    case <<B3:3, B2:6, B1:6, B0:6>> of
-        <<C:21>> when (C >= 16#010000 andalso C =< 16#10FFFF) ->
-            <<B:4/binary, _/binary>> = Bin,
-            {C, B, Rest}
-    end.
-
--spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
-codepoint_foldl(F, Acc, <<>>) when is_function(F, 2) ->
-    Acc;
-codepoint_foldl(F, Acc, Bin) ->
-    {C, _, Rest} = read_codepoint(Bin),
-    codepoint_foldl(F, F(C, Acc), Rest).
-
--spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
-bytes_foldl(F, Acc, <<>>) when is_function(F, 2) ->
-    Acc;
-bytes_foldl(F, Acc, Bin) ->
-    {_, B, Rest} = read_codepoint(Bin),
-    bytes_foldl(F, F(B, Acc), Rest).
-
--spec bytes_to_codepoints(binary()) -> [unichar()].
-bytes_to_codepoints(B) ->
-    lists:reverse(codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], B)).
-
--spec len(binary()) -> non_neg_integer().
-len(<<>>) ->
-    0;
-len(B) ->
-    {_, _, Rest} = read_codepoint(B),
-    1 + len(Rest).
-
--spec valid_utf8_bytes(B::binary()) -> binary().
-%% @doc Return only the bytes in B that represent valid UTF-8. Uses
-%%      the following recursive algorithm: skip one byte if B does not
-%%      follow UTF-8 syntax (a 1-4 byte encoding of some number),
-%%      skip a sequence of 2-4 bytes if it represents an overlong encoding
-%%      or bad code point (surrogate U+D800 - U+DFFF or > U+10FFFF).
-valid_utf8_bytes(B) when is_binary(B) ->
-    binary_skip_bytes(B, invalid_utf8_indexes(B)).
-
-%% Internal API
-
--spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
-%% @doc Return B, but skipping the 0-based indexes in L.
-binary_skip_bytes(B, []) ->
-    B;
-binary_skip_bytes(B, L) ->
-    binary_skip_bytes(B, L, 0, []).
-
-%% @private
--spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
-binary_skip_bytes(B, [], _N, Acc) ->
-    iolist_to_binary(lists:reverse([B | Acc]));
-binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
-    binary_skip_bytes(RestB, RestL, 1 + N, Acc);
-binary_skip_bytes(<<C, RestB/binary>>, L, N, Acc) ->
-    binary_skip_bytes(RestB, L, 1 + N, [C | Acc]).
-
--spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
-%% @doc Return the 0-based indexes in B that are not valid UTF-8.
-invalid_utf8_indexes(B) ->
-    invalid_utf8_indexes(B, 0, []).
-
-%% @private
--spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
-invalid_utf8_indexes(<<C, Rest/binary>>, N, Acc) when C < 16#80 ->
-    %% U+0000 - U+007F - 7 bits
-    invalid_utf8_indexes(Rest, 1 + N, Acc);
-invalid_utf8_indexes(<<C1, C2, Rest/binary>>, N, Acc)
-  when C1 band 16#E0 =:= 16#C0,
-       C2 band 16#C0 =:= 16#80 ->
-    %% U+0080 - U+07FF - 11 bits
-    case ((C1 band 16#1F) bsl 6) bor (C2 band 16#3F) of
-        C when C < 16#80 ->
-            %% Overlong encoding.
-            invalid_utf8_indexes(Rest, 2 + N, [1 + N, N | Acc]);
-        _ ->
-            %% Upper bound U+07FF does not need to be checked
-            invalid_utf8_indexes(Rest, 2 + N, Acc)
-    end;
-invalid_utf8_indexes(<<C1, C2, C3, Rest/binary>>, N, Acc)
-  when C1 band 16#F0 =:= 16#E0,
-       C2 band 16#C0 =:= 16#80,
-       C3 band 16#C0 =:= 16#80 ->
-    %% U+0800 - U+FFFF - 16 bits
-    case ((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
-        (C3 band 16#3F) of
-        C when (C < 16#800) orelse (C >= 16#D800 andalso C =< 16#DFFF) ->
-            %% Overlong encoding or surrogate.
-            invalid_utf8_indexes(Rest, 3 + N, [2 + N, 1 + N, N | Acc]);
-        _ ->
-            %% Upper bound U+FFFF does not need to be checked
-            invalid_utf8_indexes(Rest, 3 + N, Acc)
-    end;
-invalid_utf8_indexes(<<C1, C2, C3, C4, Rest/binary>>, N, Acc)
-  when C1 band 16#F8 =:= 16#F0,
-       C2 band 16#C0 =:= 16#80,
-       C3 band 16#C0 =:= 16#80,
-       C4 band 16#C0 =:= 16#80 ->
-    %% U+10000 - U+10FFFF - 21 bits
-    case ((((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
-           (C3 band 16#3F)) bsl 6) bor (C4 band 16#3F) of
-        C when (C < 16#10000) orelse (C > 16#10FFFF) ->
-            %% Overlong encoding or invalid code point.
-            invalid_utf8_indexes(Rest, 4 + N, [3 + N, 2 + N, 1 + N, N | Acc]);
-        _ ->
-            invalid_utf8_indexes(Rest, 4 + N, Acc)
-    end;
-invalid_utf8_indexes(<<_, Rest/binary>>, N, Acc) ->
-    %% Invalid char
-    invalid_utf8_indexes(Rest, 1 + N, [N | Acc]);
-invalid_utf8_indexes(<<>>, _N, Acc) ->
-    lists:reverse(Acc).
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-binary_skip_bytes_test() ->
-    ?assertEqual(<<"foo">>,
-                 binary_skip_bytes(<<"foo">>, [])),
-    ?assertEqual(<<"foobar">>,
-                 binary_skip_bytes(<<"foo bar">>, [3])),
-    ?assertEqual(<<"foo">>,
-                 binary_skip_bytes(<<"foo bar">>, [3, 4, 5, 6])),
-    ?assertEqual(<<"oo bar">>,
-                 binary_skip_bytes(<<"foo bar">>, [0])),
-    ok.
-
-invalid_utf8_indexes_test() ->
-    ?assertEqual(
-       [],
-       invalid_utf8_indexes(<<"unicode snowman for you: ", 226, 152, 131>>)),
-    ?assertEqual(
-       [0],
-       invalid_utf8_indexes(<<128>>)),
-    ?assertEqual(
-       [57,59,60,64,66,67],
-       invalid_utf8_indexes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
-                              167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
-    ok.
-
-codepoint_to_bytes_test() ->
-    %% U+0000 - U+007F - 7 bits
-    %% U+0080 - U+07FF - 11 bits
-    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
-    %% U+10000 - U+10FFFF - 21 bits
-    ?assertEqual(
-       <<"a">>,
-       codepoint_to_bytes($a)),
-    ?assertEqual(
-       <<16#c2, 16#80>>,
-       codepoint_to_bytes(16#80)),
-    ?assertEqual(
-       <<16#df, 16#bf>>,
-       codepoint_to_bytes(16#07ff)),
-    ?assertEqual(
-       <<16#ef, 16#bf, 16#bf>>,
-       codepoint_to_bytes(16#ffff)),
-    ?assertEqual(
-       <<16#f4, 16#8f, 16#bf, 16#bf>>,
-       codepoint_to_bytes(16#10ffff)),
-    ok.
-
-bytes_foldl_test() ->
-    ?assertEqual(
-       <<"abc">>,
-       bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>, <<"abc">>)),
-    ?assertEqual(
-       <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>,
-       bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>,
-                   <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
-    ok.
-
-bytes_to_codepoints_test() ->
-    ?assertEqual(
-       "abc" ++ [16#2603, 16#4e2d, 16#85, 16#10ffff],
-       bytes_to_codepoints(<<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
-    ok.
-
-codepoint_foldl_test() ->
-    ?assertEqual(
-       "cba",
-       codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], <<"abc">>)),
-    ?assertEqual(
-       [16#10ffff, 16#85, 16#4e2d, 16#2603 | "cba"],
-       codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [],
-                       <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
-    ok.
-
-len_test() ->
-    ?assertEqual(
-       29,
-       len(<<"unicode snowman for you: ", 226, 152, 131, 228, 184, 173, 194, 133, 244, 143, 191, 191>>)),
-    ok.
-
-codepoints_to_bytes_test() ->
-    ?assertEqual(
-       iolist_to_binary(lists:map(fun codepoint_to_bytes/1, lists:seq(1, 1000))),
-       codepoints_to_bytes(lists:seq(1, 1000))),
-    ok.
-
-valid_utf8_bytes_test() ->
-    ?assertEqual(
-       <<"invalid U+11ffff: ">>,
-       valid_utf8_bytes(<<"invalid U+11ffff: ", 244, 159, 191, 191>>)),
-    ?assertEqual(
-       <<"U+10ffff: ", 244, 143, 191, 191>>,
-       valid_utf8_bytes(<<"U+10ffff: ", 244, 143, 191, 191>>)),
-    ?assertEqual(
-       <<"overlong 2-byte encoding (a): ">>,
-       valid_utf8_bytes(<<"overlong 2-byte encoding (a): ", 2#11000001, 2#10100001>>)),
-    ?assertEqual(
-       <<"overlong 2-byte encoding (!): ">>,
-       valid_utf8_bytes(<<"overlong 2-byte encoding (!): ", 2#11000000, 2#10100001>>)),
-    ?assertEqual(
-       <<"mu: ", 194, 181>>,
-       valid_utf8_bytes(<<"mu: ", 194, 181>>)),
-    ?assertEqual(
-       <<"bad coding bytes: ">>,
-       valid_utf8_bytes(<<"bad coding bytes: ", 2#10011111, 2#10111111, 2#11111111>>)),
-    ?assertEqual(
-       <<"low surrogate (unpaired): ">>,
-       valid_utf8_bytes(<<"low surrogate (unpaired): ", 237, 176, 128>>)),
-    ?assertEqual(
-       <<"high surrogate (unpaired): ">>,
-       valid_utf8_bytes(<<"high surrogate (unpaired): ", 237, 191, 191>>)),
-    ?assertEqual(
-       <<"unicode snowman for you: ", 226, 152, 131>>,
-       valid_utf8_bytes(<<"unicode snowman for you: ", 226, 152, 131>>)),
-    ?assertEqual(
-       <<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (AISPW))">>,
-       valid_utf8_bytes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
-                          167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
-    ok.
-
--endif.
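
A short shell sketch of the mochiutf8 calls exercised by the tests above; output is shown informally:

    %% Drop invalid bytes, then inspect what remains.
    1> B = mochiutf8:valid_utf8_bytes(<<"ok: ", 226, 152, 131, 255>>).
    <<111,107,58,32,226,152,131>>
    2> mochiutf8:len(B).
    5
    3> mochiutf8:bytes_to_codepoints(B).
    [111,107,58,32,9731]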

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb.app.src
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb.app.src b/src/mochiweb/src/mochiweb.app.src
deleted file mode 100644
index c240102..0000000
--- a/src/mochiweb/src/mochiweb.app.src
+++ /dev/null
@@ -1,8 +0,0 @@
-{application, mochiweb,
- [{description, "MochiMedia Web Server"},
-  {vsn, git},
-  {modules, []},
-  {registered, []},
-  {mod, {mochiweb_app, []}},
-  {env, []},
-  {applications, [kernel, stdlib, crypto, inets]}]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb.erl b/src/mochiweb/src/mochiweb.erl
deleted file mode 100644
index 3118028..0000000
--- a/src/mochiweb/src/mochiweb.erl
+++ /dev/null
@@ -1,289 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Start and stop the MochiWeb server.
-
--module(mochiweb).
--author('bob@mochimedia.com').
-
--export([start/0, stop/0]).
--export([new_request/1, new_response/1]).
--export([all_loaded/0, all_loaded/1, reload/0]).
-
-%% @spec start() -> ok
-%% @doc Start the MochiWeb server.
-start() ->
-    ensure_started(crypto),
-    application:start(mochiweb).
-
-%% @spec stop() -> ok
-%% @doc Stop the MochiWeb server.
-stop() ->
-    Res = application:stop(mochiweb),
-    application:stop(crypto),
-    Res.
-
-reload() ->
-    [c:l(Module) || Module <- all_loaded()].
-
-all_loaded() ->
-    all_loaded(filename:dirname(code:which(?MODULE))).
-
-all_loaded(Base) when is_atom(Base) ->
-    [];
-all_loaded(Base) ->
-    FullBase = Base ++ "/",
-    F = fun ({_Module, Loaded}, Acc) when is_atom(Loaded) ->
-                Acc;
-            ({Module, Loaded}, Acc) ->
-                case lists:prefix(FullBase, Loaded) of
-                    true ->
-                        [Module | Acc];
-                    false ->
-                        Acc
-                end
-        end,
-    lists:foldl(F, [], code:all_loaded()).
-
-
-%% @spec new_request({Socket, Request, Headers}) -> MochiWebRequest
-%% @doc Return a mochiweb_request data structure.
-new_request({Socket, {Method, {abs_path, Uri}, Version}, Headers}) ->
-    mochiweb_request:new(Socket,
-                         Method,
-                         Uri,
-                         Version,
-                         mochiweb_headers:make(Headers));
-% this case probably doesn't "exist".
-new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Uri},
-                      Version}, Headers}) ->
-    mochiweb_request:new(Socket,
-                         Method,
-                         Uri,
-                         Version,
-                         mochiweb_headers:make(Headers));
-%% Request-URI is "*"
-%% From http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
-new_request({Socket, {Method, '*'=Uri, Version}, Headers}) ->
-    mochiweb_request:new(Socket,
-                         Method,
-                         Uri,
-                         Version,
-                         mochiweb_headers:make(Headers)).
-
-%% @spec new_response({Request, integer(), Headers}) -> MochiWebResponse
-%% @doc Return a mochiweb_response data structure.
-new_response({Request, Code, Headers}) ->
-    mochiweb_response:new(Request,
-                          Code,
-                          mochiweb_headers:make(Headers)).
-
-%% Internal API
-
-ensure_started(App) ->
-    case application:start(App) of
-        ok ->
-            ok;
-        {error, {already_started, App}} ->
-            ok
-    end.
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
--record(treq, {path, body= <<>>, xreply= <<>>}).
-
-ssl_cert_opts() ->
-    EbinDir = filename:dirname(code:which(?MODULE)),
-    CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
-    CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
-    KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
-    [{certfile, CertFile}, {keyfile, KeyFile}].
-
-with_server(Transport, ServerFun, ClientFun) ->
-    ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
-    ServerOpts = case Transport of
-        plain ->
-            ServerOpts0;
-        ssl ->
-            ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
-    end,
-    {ok, Server} = mochiweb_http:start(ServerOpts),
-    Port = mochiweb_socket_server:get(Server, port),
-    Res = (catch ClientFun(Transport, Port)),
-    mochiweb_http:stop(Server),
-    Res.
-
-request_test() ->
-    R = mochiweb_request:new(z, z, "/foo/bar/baz%20wibble+quux?qs=2", z, []),
-    "/foo/bar/baz wibble quux" = R:get(path),
-    ok.
-
-single_http_GET_test() ->
-    do_GET(plain, 1).
-
-single_https_GET_test() ->
-    do_GET(ssl, 1).
-
-multiple_http_GET_test() ->
-    do_GET(plain, 3).
-
-multiple_https_GET_test() ->
-    do_GET(ssl, 3).
-
-hundred_http_GET_test() ->
-    do_GET(plain, 100).
-
-hundred_https_GET_test() ->
-    do_GET(ssl, 100).
-
-single_128_http_POST_test() ->
-    do_POST(plain, 128, 1).
-
-single_128_https_POST_test() ->
-    do_POST(ssl, 128, 1).
-
-single_2k_http_POST_test() ->
-    do_POST(plain, 2048, 1).
-
-single_2k_https_POST_test() ->
-    do_POST(ssl, 2048, 1).
-
-single_100k_http_POST_test() ->
-    do_POST(plain, 102400, 1).
-
-single_100k_https_POST_test() ->
-    do_POST(ssl, 102400, 1).
-
-multiple_100k_http_POST_test() ->
-    do_POST(plain, 102400, 3).
-
-multiple_100k_https_POST_test() ->
-    do_POST(ssl, 102400, 3).
-
-hundred_128_http_POST_test() ->
-    do_POST(plain, 128, 100).
-
-hundred_128_https_POST_test() ->
-    do_POST(ssl, 128, 100).
-
-do_GET(Transport, Times) ->
-    PathPrefix = "/whatever/",
-    ReplyPrefix = "You requested: ",
-    ServerFun = fun (Req) ->
-                        Reply = ReplyPrefix ++ Req:get(path),
-                        Req:ok({"text/plain", Reply})
-                end,
-    TestReqs = [begin
-                    Path = PathPrefix ++ integer_to_list(N),
-                    ExpectedReply = list_to_binary(ReplyPrefix ++ Path),
-                    #treq{path=Path, xreply=ExpectedReply}
-                end || N <- lists:seq(1, Times)],
-    ClientFun = new_client_fun('GET', TestReqs),
-    ok = with_server(Transport, ServerFun, ClientFun),
-    ok.
-
-do_POST(Transport, Size, Times) ->
-    ServerFun = fun (Req) ->
-                        Body = Req:recv_body(),
-                        Headers = [{"Content-Type", "application/octet-stream"}],
-                        Req:respond({201, Headers, Body})
-                end,
-    TestReqs = [begin
-                    Path = "/stuff/" ++ integer_to_list(N),
-                    Body = crypto:rand_bytes(Size),
-                    #treq{path=Path, body=Body, xreply=Body}
-                end || N <- lists:seq(1, Times)],
-    ClientFun = new_client_fun('POST', TestReqs),
-    ok = with_server(Transport, ServerFun, ClientFun),
-    ok.
-
-new_client_fun(Method, TestReqs) ->
-    fun (Transport, Port) ->
-            client_request(Transport, Port, Method, TestReqs)
-    end.
-
-client_request(Transport, Port, Method, TestReqs) ->
-    Opts = [binary, {active, false}, {packet, http}],
-    SockFun = case Transport of
-        plain ->
-            {ok, Socket} = gen_tcp:connect("127.0.0.1", Port, Opts),
-            fun (recv) ->
-                    gen_tcp:recv(Socket, 0);
-                ({recv, Length}) ->
-                    gen_tcp:recv(Socket, Length);
-                ({send, Data}) ->
-                    gen_tcp:send(Socket, Data);
-                ({setopts, L}) ->
-                    inet:setopts(Socket, L)
-            end;
-        ssl ->
-            {ok, Socket} = ssl:connect("127.0.0.1", Port, [{ssl_imp, new} | Opts]),
-            fun (recv) ->
-                    ssl:recv(Socket, 0);
-                ({recv, Length}) ->
-                    ssl:recv(Socket, Length);
-                ({send, Data}) ->
-                    ssl:send(Socket, Data);
-                ({setopts, L}) ->
-                    ssl:setopts(Socket, L)
-            end
-    end,
-    client_request(SockFun, Method, TestReqs).
-
-client_request(SockFun, _Method, []) ->
-    {the_end, {error, closed}} = {the_end, SockFun(recv)},
-    ok;
-client_request(SockFun, Method,
-               [#treq{path=Path, body=Body, xreply=ExReply} | Rest]) ->
-    Request = [atom_to_list(Method), " ", Path, " HTTP/1.1\r\n",
-               client_headers(Body, Rest =:= []),
-               "\r\n",
-               Body],
-    ok = SockFun({send, Request}),
-    case Method of
-        'GET' ->
-            {ok, {http_response, {1,1}, 200, "OK"}} = SockFun(recv);
-        'POST' ->
-            {ok, {http_response, {1,1}, 201, "Created"}} = SockFun(recv)
-    end,
-    ok = SockFun({setopts, [{packet, httph}]}),
-    {ok, {http_header, _, 'Server', _, "MochiWeb" ++ _}} = SockFun(recv),
-    {ok, {http_header, _, 'Date', _, _}} = SockFun(recv),
-    {ok, {http_header, _, 'Content-Type', _, _}} = SockFun(recv),
-    {ok, {http_header, _, 'Content-Length', _, ConLenStr}} = SockFun(recv),
-    ContentLength = list_to_integer(ConLenStr),
-    {ok, http_eoh} = SockFun(recv),
-    ok = SockFun({setopts, [{packet, raw}]}),
-    {payload, ExReply} = {payload, drain_reply(SockFun, ContentLength, <<>>)},
-    ok = SockFun({setopts, [{packet, http}]}),
-    client_request(SockFun, Method, Rest).
-
-client_headers(Body, IsLastRequest) ->
-    ["Host: localhost\r\n",
-     case Body of
-        <<>> ->
-            "";
-        _ ->
-            ["Content-Type: application/octet-stream\r\n",
-             "Content-Length: ", integer_to_list(byte_size(Body)), "\r\n"]
-     end,
-     case IsLastRequest of
-         true ->
-             "Connection: close\r\n";
-         false ->
-             ""
-     end].
-
-drain_reply(_SockFun, 0, Acc) ->
-    Acc;
-drain_reply(SockFun, Length, Acc) ->
-    Sz = erlang:min(Length, 1024),
-    {ok, B} = SockFun({recv, Sz}),
-    drain_reply(SockFun, Length - Sz, <<Acc/bytes, B/bytes>>).
-
--endif.
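
The with_server/3 helper above doubles as a usage recipe; a minimal standalone sketch (the handler body is illustrative):

    %% Start an HTTP listener on an ephemeral port, serve, then stop.
    Loop = fun (Req) -> Req:ok({"text/plain", "hello"}) end,
    {ok, Server} = mochiweb_http:start([{ip, "127.0.0.1"}, {port, 0},
                                        {loop, Loop}]),
    Port = mochiweb_socket_server:get(Server, port),
    %% ... drive requests against 127.0.0.1:Port ...
    mochiweb_http:stop(Server).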

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_acceptor.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_acceptor.erl b/src/mochiweb/src/mochiweb_acceptor.erl
deleted file mode 100644
index 893f99b..0000000
--- a/src/mochiweb/src/mochiweb_acceptor.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2010 Mochi Media, Inc.
-
-%% @doc MochiWeb acceptor.
-
--module(mochiweb_acceptor).
--author('bob@mochimedia.com').
-
--include("internal.hrl").
-
--export([start_link/3, init/3]).
-
-start_link(Server, Listen, Loop) ->
-    proc_lib:spawn_link(?MODULE, init, [Server, Listen, Loop]).
-
-init(Server, Listen, Loop) ->
-    T1 = now(),
-    case catch mochiweb_socket:accept(Listen) of
-        {ok, Socket} ->
-            gen_server:cast(Server, {accepted, self(), timer:now_diff(now(), T1)}),
-            case mochiweb_socket:after_accept(Socket) of
-                ok -> call_loop(Loop, Socket);
-                {error, _} -> exit(normal)
-            end;
-        {error, closed} ->
-            exit(normal);
-        {error, timeout} ->
-            init(Server, Listen, Loop);
-        Other ->
-            error_logger:error_report(
-              [{application, mochiweb},
-               "Accept failed error",
-               lists:flatten(io_lib:format("~p", [Other]))]),
-            exit({error, accept_failed})
-    end.
-
-call_loop({M, F}, Socket) ->
-    M:F(Socket);
-call_loop({M, F, A}, Socket) ->
-    erlang:apply(M, F, [Socket | A]);
-call_loop(Loop, Socket) ->
-    Loop(Socket).
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
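
call_loop/2 above dispatches on three loop shapes; a short sketch, with my_handler as a hypothetical callback module:

    %% Any of these can be supplied as the acceptor's Loop argument:
    Loop1 = {my_handler, handle},               %% my_handler:handle(Socket)
    Loop2 = {my_handler, handle, [extra_arg]},  %% my_handler:handle(Socket, extra_arg)
    Loop3 = fun (Socket) -> my_handler:handle(Socket) end.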

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_app.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_app.erl b/src/mochiweb/src/mochiweb_app.erl
deleted file mode 100644
index 5d67787..0000000
--- a/src/mochiweb/src/mochiweb_app.erl
+++ /dev/null
@@ -1,27 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Callbacks for the mochiweb application.
-
--module(mochiweb_app).
--author('bob@mochimedia.com').
-
--behaviour(application).
--export([start/2,stop/1]).
-
-%% @spec start(_Type, _StartArgs) -> ServerRet
-%% @doc application start callback for mochiweb.
-start(_Type, _StartArgs) ->
-    mochiweb_sup:start_link().
-
-%% @spec stop(_State) -> ServerRet
-%% @doc application stop callback for mochiweb.
-stop(_State) ->
-    ok.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_charref.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_charref.erl b/src/mochiweb/src/mochiweb_charref.erl
deleted file mode 100644
index 99cd550..0000000
--- a/src/mochiweb/src/mochiweb_charref.erl
+++ /dev/null
@@ -1,308 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Converts HTML 4 charrefs and entities to codepoints.
--module(mochiweb_charref).
--export([charref/1]).
-
-%% External API.
-
-%% @spec charref(S) -> integer() | undefined
-%% @doc Convert a decimal charref, hex charref, or html entity to a unicode
-%%      codepoint, or return undefined on failure.
-%%      The input should not include an ampersand or semicolon.
-%%      charref("#38") = 38, charref("#x26") = 38, charref("amp") = 38.
-charref(B) when is_binary(B) ->
-    charref(binary_to_list(B));
-charref([$#, C | L]) when C =:= $x orelse C =:= $X ->
-    try erlang:list_to_integer(L, 16)
-    catch
-        error:badarg -> undefined
-    end;
-charref([$# | L]) ->
-    try list_to_integer(L)
-    catch
-        error:badarg -> undefined
-    end;
-charref(L) ->
-    entity(L).
-
-%% Internal API.
-
-entity("nbsp") -> 160;
-entity("iexcl") -> 161;
-entity("cent") -> 162;
-entity("pound") -> 163;
-entity("curren") -> 164;
-entity("yen") -> 165;
-entity("brvbar") -> 166;
-entity("sect") -> 167;
-entity("uml") -> 168;
-entity("copy") -> 169;
-entity("ordf") -> 170;
-entity("laquo") -> 171;
-entity("not") -> 172;
-entity("shy") -> 173;
-entity("reg") -> 174;
-entity("macr") -> 175;
-entity("deg") -> 176;
-entity("plusmn") -> 177;
-entity("sup2") -> 178;
-entity("sup3") -> 179;
-entity("acute") -> 180;
-entity("micro") -> 181;
-entity("para") -> 182;
-entity("middot") -> 183;
-entity("cedil") -> 184;
-entity("sup1") -> 185;
-entity("ordm") -> 186;
-entity("raquo") -> 187;
-entity("frac14") -> 188;
-entity("frac12") -> 189;
-entity("frac34") -> 190;
-entity("iquest") -> 191;
-entity("Agrave") -> 192;
-entity("Aacute") -> 193;
-entity("Acirc") -> 194;
-entity("Atilde") -> 195;
-entity("Auml") -> 196;
-entity("Aring") -> 197;
-entity("AElig") -> 198;
-entity("Ccedil") -> 199;
-entity("Egrave") -> 200;
-entity("Eacute") -> 201;
-entity("Ecirc") -> 202;
-entity("Euml") -> 203;
-entity("Igrave") -> 204;
-entity("Iacute") -> 205;
-entity("Icirc") -> 206;
-entity("Iuml") -> 207;
-entity("ETH") -> 208;
-entity("Ntilde") -> 209;
-entity("Ograve") -> 210;
-entity("Oacute") -> 211;
-entity("Ocirc") -> 212;
-entity("Otilde") -> 213;
-entity("Ouml") -> 214;
-entity("times") -> 215;
-entity("Oslash") -> 216;
-entity("Ugrave") -> 217;
-entity("Uacute") -> 218;
-entity("Ucirc") -> 219;
-entity("Uuml") -> 220;
-entity("Yacute") -> 221;
-entity("THORN") -> 222;
-entity("szlig") -> 223;
-entity("agrave") -> 224;
-entity("aacute") -> 225;
-entity("acirc") -> 226;
-entity("atilde") -> 227;
-entity("auml") -> 228;
-entity("aring") -> 229;
-entity("aelig") -> 230;
-entity("ccedil") -> 231;
-entity("egrave") -> 232;
-entity("eacute") -> 233;
-entity("ecirc") -> 234;
-entity("euml") -> 235;
-entity("igrave") -> 236;
-entity("iacute") -> 237;
-entity("icirc") -> 238;
-entity("iuml") -> 239;
-entity("eth") -> 240;
-entity("ntilde") -> 241;
-entity("ograve") -> 242;
-entity("oacute") -> 243;
-entity("ocirc") -> 244;
-entity("otilde") -> 245;
-entity("ouml") -> 246;
-entity("divide") -> 247;
-entity("oslash") -> 248;
-entity("ugrave") -> 249;
-entity("uacute") -> 250;
-entity("ucirc") -> 251;
-entity("uuml") -> 252;
-entity("yacute") -> 253;
-entity("thorn") -> 254;
-entity("yuml") -> 255;
-entity("fnof") -> 402;
-entity("Alpha") -> 913;
-entity("Beta") -> 914;
-entity("Gamma") -> 915;
-entity("Delta") -> 916;
-entity("Epsilon") -> 917;
-entity("Zeta") -> 918;
-entity("Eta") -> 919;
-entity("Theta") -> 920;
-entity("Iota") -> 921;
-entity("Kappa") -> 922;
-entity("Lambda") -> 923;
-entity("Mu") -> 924;
-entity("Nu") -> 925;
-entity("Xi") -> 926;
-entity("Omicron") -> 927;
-entity("Pi") -> 928;
-entity("Rho") -> 929;
-entity("Sigma") -> 931;
-entity("Tau") -> 932;
-entity("Upsilon") -> 933;
-entity("Phi") -> 934;
-entity("Chi") -> 935;
-entity("Psi") -> 936;
-entity("Omega") -> 937;
-entity("alpha") -> 945;
-entity("beta") -> 946;
-entity("gamma") -> 947;
-entity("delta") -> 948;
-entity("epsilon") -> 949;
-entity("zeta") -> 950;
-entity("eta") -> 951;
-entity("theta") -> 952;
-entity("iota") -> 953;
-entity("kappa") -> 954;
-entity("lambda") -> 955;
-entity("mu") -> 956;
-entity("nu") -> 957;
-entity("xi") -> 958;
-entity("omicron") -> 959;
-entity("pi") -> 960;
-entity("rho") -> 961;
-entity("sigmaf") -> 962;
-entity("sigma") -> 963;
-entity("tau") -> 964;
-entity("upsilon") -> 965;
-entity("phi") -> 966;
-entity("chi") -> 967;
-entity("psi") -> 968;
-entity("omega") -> 969;
-entity("thetasym") -> 977;
-entity("upsih") -> 978;
-entity("piv") -> 982;
-entity("bull") -> 8226;
-entity("hellip") -> 8230;
-entity("prime") -> 8242;
-entity("Prime") -> 8243;
-entity("oline") -> 8254;
-entity("frasl") -> 8260;
-entity("weierp") -> 8472;
-entity("image") -> 8465;
-entity("real") -> 8476;
-entity("trade") -> 8482;
-entity("alefsym") -> 8501;
-entity("larr") -> 8592;
-entity("uarr") -> 8593;
-entity("rarr") -> 8594;
-entity("darr") -> 8595;
-entity("harr") -> 8596;
-entity("crarr") -> 8629;
-entity("lArr") -> 8656;
-entity("uArr") -> 8657;
-entity("rArr") -> 8658;
-entity("dArr") -> 8659;
-entity("hArr") -> 8660;
-entity("forall") -> 8704;
-entity("part") -> 8706;
-entity("exist") -> 8707;
-entity("empty") -> 8709;
-entity("nabla") -> 8711;
-entity("isin") -> 8712;
-entity("notin") -> 8713;
-entity("ni") -> 8715;
-entity("prod") -> 8719;
-entity("sum") -> 8721;
-entity("minus") -> 8722;
-entity("lowast") -> 8727;
-entity("radic") -> 8730;
-entity("prop") -> 8733;
-entity("infin") -> 8734;
-entity("ang") -> 8736;
-entity("and") -> 8743;
-entity("or") -> 8744;
-entity("cap") -> 8745;
-entity("cup") -> 8746;
-entity("int") -> 8747;
-entity("there4") -> 8756;
-entity("sim") -> 8764;
-entity("cong") -> 8773;
-entity("asymp") -> 8776;
-entity("ne") -> 8800;
-entity("equiv") -> 8801;
-entity("le") -> 8804;
-entity("ge") -> 8805;
-entity("sub") -> 8834;
-entity("sup") -> 8835;
-entity("nsub") -> 8836;
-entity("sube") -> 8838;
-entity("supe") -> 8839;
-entity("oplus") -> 8853;
-entity("otimes") -> 8855;
-entity("perp") -> 8869;
-entity("sdot") -> 8901;
-entity("lceil") -> 8968;
-entity("rceil") -> 8969;
-entity("lfloor") -> 8970;
-entity("rfloor") -> 8971;
-entity("lang") -> 9001;
-entity("rang") -> 9002;
-entity("loz") -> 9674;
-entity("spades") -> 9824;
-entity("clubs") -> 9827;
-entity("hearts") -> 9829;
-entity("diams") -> 9830;
-entity("quot") -> 34;
-entity("amp") -> 38;
-entity("lt") -> 60;
-entity("gt") -> 62;
-entity("OElig") -> 338;
-entity("oelig") -> 339;
-entity("Scaron") -> 352;
-entity("scaron") -> 353;
-entity("Yuml") -> 376;
-entity("circ") -> 710;
-entity("tilde") -> 732;
-entity("ensp") -> 8194;
-entity("emsp") -> 8195;
-entity("thinsp") -> 8201;
-entity("zwnj") -> 8204;
-entity("zwj") -> 8205;
-entity("lrm") -> 8206;
-entity("rlm") -> 8207;
-entity("ndash") -> 8211;
-entity("mdash") -> 8212;
-entity("lsquo") -> 8216;
-entity("rsquo") -> 8217;
-entity("sbquo") -> 8218;
-entity("ldquo") -> 8220;
-entity("rdquo") -> 8221;
-entity("bdquo") -> 8222;
-entity("dagger") -> 8224;
-entity("Dagger") -> 8225;
-entity("permil") -> 8240;
-entity("lsaquo") -> 8249;
-entity("rsaquo") -> 8250;
-entity("euro") -> 8364;
-entity(_) -> undefined.
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-exhaustive_entity_test() ->
-    T = mochiweb_cover:clause_lookup_table(?MODULE, entity),
-    [?assertEqual(V, entity(K)) || {K, V} <- T].
-
-charref_test() ->
-    1234 = charref("#1234"),
-    255 = charref("#xfF"),
-    255 = charref(<<"#XFf">>),
-    38 = charref("amp"),
-    38 = charref(<<"amp">>),
-    undefined = charref("not_an_entity"),
-    undefined = charref("#not_an_entity"),
-    undefined = charref("#xnot_an_entity"),
-    ok.
-
--endif.
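
The charref/1 contract above, exercised from the shell:

    1> mochiweb_charref:charref("amp").
    38
    2> mochiweb_charref:charref("#x2603").
    9731
    3> mochiweb_charref:charref("bogus").
    undefined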

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_cookies.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_cookies.erl b/src/mochiweb/src/mochiweb_cookies.erl
deleted file mode 100644
index ee91d0c..0000000
--- a/src/mochiweb/src/mochiweb_cookies.erl
+++ /dev/null
@@ -1,309 +0,0 @@
-%% @author Emad El-Haraty <em...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc HTTP Cookie parsing and generating (RFC 2109, RFC 2965).
-
--module(mochiweb_cookies).
--export([parse_cookie/1, cookie/3, cookie/2]).
-
--define(QUOTE, $\").
-
--define(IS_WHITESPACE(C),
-        (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
-
-%% RFC 2616 separators (called tspecials in RFC 2068)
--define(IS_SEPARATOR(C),
-        (C < 32 orelse
-         C =:= $\s orelse C =:= $\t orelse
-         C =:= $( orelse C =:= $) orelse C =:= $< orelse C =:= $> orelse
-         C =:= $@ orelse C =:= $, orelse C =:= $; orelse C =:= $: orelse
-         C =:= $\\ orelse C =:= $\" orelse C =:= $/ orelse
-         C =:= $[ orelse C =:= $] orelse C =:= $? orelse C =:= $= orelse
-         C =:= ${ orelse C =:= $})).
-
-%% @type proplist() = [{Key::string(), Value::string()}].
-%% @type header() = {Name::string(), Value::string()}.
-
-%% @spec cookie(Key::string(), Value::string()) -> header()
-%% @doc Short-hand for <code>cookie(Key, Value, [])</code>.
-cookie(Key, Value) ->
-    cookie(Key, Value, []).
-
-%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
-%% where Option = {max_age, integer()} | {local_time, {date(), time()}}
-%%                | {domain, string()} | {path, string()}
-%%                | {secure, true | false} | {http_only, true | false}
-%%
-%% @doc Generate a Set-Cookie header field tuple.
-cookie(Key, Value, Options) ->
-    Cookie = [any_to_list(Key), "=", quote(Value), "; Version=1"],
-    %% Set-Cookie:
-    %%    Comment, Domain, Max-Age, Path, Secure, Version
-    %% Set-Cookie2:
-    %%    Comment, CommentURL, Discard, Domain, Max-Age, Path, Port, Secure,
-    %%    Version
-    ExpiresPart =
-        case proplists:get_value(max_age, Options) of
-            undefined ->
-                "";
-            RawAge ->
-                When = case proplists:get_value(local_time, Options) of
-                           undefined ->
-                               calendar:universal_time();
-                           LocalTime ->
-                               erlang:localtime_to_universaltime(LocalTime)
-                       end,
-                Age = case RawAge < 0 of
-                          true ->
-                              0;
-                          false ->
-                              RawAge
-                      end,
-                ["; Expires=", age_to_cookie_date(Age, When),
-                 "; Max-Age=", quote(Age)]
-        end,
-    SecurePart =
-        case proplists:get_value(secure, Options) of
-            true ->
-                "; Secure";
-            _ ->
-                ""
-        end,
-    DomainPart =
-        case proplists:get_value(domain, Options) of
-            undefined ->
-                "";
-            Domain ->
-                ["; Domain=", quote(Domain)]
-        end,
-    PathPart =
-        case proplists:get_value(path, Options) of
-            undefined ->
-                "";
-            Path ->
-                ["; Path=", quote(Path)]
-        end,
-    HttpOnlyPart =
-        case proplists:get_value(http_only, Options) of
-            true ->
-                "; HttpOnly";
-            _ ->
-                ""
-        end,
-    CookieParts = [Cookie, ExpiresPart, SecurePart, DomainPart, PathPart, HttpOnlyPart],
-    {"Set-Cookie", lists:flatten(CookieParts)}.
-
-
-%% Every major browser incorrectly handles quoted strings in a
-%% different and (worse) incompatible manner.  Instead of wasting time
-%% writing redundant code for each browser, we restrict cookies to
-%% only contain characters that browsers handle compatibly.
-%%
-%% By replacing the definition of quote with this, we generate
-%% RFC-compliant cookies:
-%%
-%%     quote(V) ->
-%%         Fun = fun(?QUOTE, Acc) -> [$\\, ?QUOTE | Acc];
-%%                  (Ch, Acc) -> [Ch | Acc]
-%%               end,
-%%         [?QUOTE | lists:foldr(Fun, [?QUOTE], V)].
-
-%% Convert to a string and raise an error if quoting is required.
-quote(V0) ->
-    V = any_to_list(V0),
-    lists:all(fun(Ch) -> Ch =:= $/ orelse not ?IS_SEPARATOR(Ch) end, V)
-        orelse erlang:error({cookie_quoting_required, V}),
-    V.
-
-add_seconds(Secs, UniversalTime) ->
-    Greg = calendar:datetime_to_gregorian_seconds(UniversalTime),
-    calendar:gregorian_seconds_to_datetime(Greg + Secs).
-
-age_to_cookie_date(Age, UniversalTime) ->
-    couch_util:rfc1123_date(add_seconds(Age, UniversalTime)).
-
-%% @spec parse_cookie(string()) -> [{K::string(), V::string()}]
-%% @doc Parse the contents of a Cookie header field, ignoring cookie
-%% attributes, and return a simple property list.
-parse_cookie("") ->
-    [];
-parse_cookie(Cookie) ->
-    parse_cookie(Cookie, []).
-
-%% Internal API
-
-parse_cookie([], Acc) ->
-    lists:reverse(Acc);
-parse_cookie(String, Acc) ->
-    {{Token, Value}, Rest} = read_pair(String),
-    Acc1 = case Token of
-               "" ->
-                   Acc;
-               "$" ++ _ ->
-                   Acc;
-               _ ->
-                   [{Token, Value} | Acc]
-           end,
-    parse_cookie(Rest, Acc1).
-
-read_pair(String) ->
-    {Token, Rest} = read_token(skip_whitespace(String)),
-    {Value, Rest1} = read_value(skip_whitespace(Rest)),
-    {{Token, Value}, skip_past_separator(Rest1)}.
-
-read_value([$= | Value]) ->
-    Value1 = skip_whitespace(Value),
-    case Value1 of
-        [?QUOTE | _] ->
-            read_quoted(Value1);
-        _ ->
-            read_token(Value1)
-    end;
-read_value(String) ->
-    {"", String}.
-
-read_quoted([?QUOTE | String]) ->
-    read_quoted(String, []).
-
-read_quoted([], Acc) ->
-    {lists:reverse(Acc), []};
-read_quoted([?QUOTE | Rest], Acc) ->
-    {lists:reverse(Acc), Rest};
-read_quoted([$\\, Any | Rest], Acc) ->
-    read_quoted(Rest, [Any | Acc]);
-read_quoted([C | Rest], Acc) ->
-    read_quoted(Rest, [C | Acc]).
-
-skip_whitespace(String) ->
-    F = fun (C) -> ?IS_WHITESPACE(C) end,
-    lists:dropwhile(F, String).
-
-read_token(String) ->
-    F = fun (C) -> not ?IS_SEPARATOR(C) end,
-    lists:splitwith(F, String).
-
-skip_past_separator([]) ->
-    [];
-skip_past_separator([$; | Rest]) ->
-    Rest;
-skip_past_separator([$, | Rest]) ->
-    Rest;
-skip_past_separator([_ | Rest]) ->
-    skip_past_separator(Rest).
-
-any_to_list(V) when is_list(V) ->
-    V;
-any_to_list(V) when is_atom(V) ->
-    atom_to_list(V);
-any_to_list(V) when is_binary(V) ->
-    binary_to_list(V);
-any_to_list(V) when is_integer(V) ->
-    integer_to_list(V).
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-quote_test() ->
-    %% ?assertError eunit macro is not compatible with coverage module
-    try quote(":wq")
-    catch error:{cookie_quoting_required, ":wq"} -> ok
-    end,
-    ?assertEqual(
-       "foo",
-       quote(foo)),
-    ok.
-
-parse_cookie_test() ->
-    %% RFC example
-    C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
-    Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
-    Shipping=\"FedEx\"; $Path=\"/acme\"",
-    ?assertEqual(
-       [{"Customer","WILE_E_COYOTE"},
-        {"Part_Number","Rocket_Launcher_0001"},
-        {"Shipping","FedEx"}],
-       parse_cookie(C1)),
-    %% Potential edge cases
-    ?assertEqual(
-       [{"foo", "x"}],
-       parse_cookie("foo=\"\\x\"")),
-    ?assertEqual(
-       [],
-       parse_cookie("=")),
-    ?assertEqual(
-       [{"foo", ""}, {"bar", ""}],
-       parse_cookie("  foo ; bar  ")),
-    ?assertEqual(
-       [{"foo", ""}, {"bar", ""}],
-       parse_cookie("foo=;bar=")),
-    ?assertEqual(
-       [{"foo", "\";"}, {"bar", ""}],
-       parse_cookie("foo = \"\\\";\";bar ")),
-    ?assertEqual(
-       [{"foo", "\";bar"}],
-       parse_cookie("foo=\"\\\";bar")),
-    ?assertEqual(
-       [],
-       parse_cookie([])),
-    ?assertEqual(
-       [{"foo", "bar"}, {"baz", "wibble"}],
-       parse_cookie("foo=bar , baz=wibble ")),
-    ok.
-
-domain_test() ->
-    ?assertEqual(
-       {"Set-Cookie",
-        "Customer=WILE_E_COYOTE; "
-        "Version=1; "
-        "Domain=acme.com; "
-        "HttpOnly"},
-       cookie("Customer", "WILE_E_COYOTE",
-              [{http_only, true}, {domain, "acme.com"}])),
-    ok.
-
-local_time_test() ->
-    {"Set-Cookie", S} = cookie("Customer", "WILE_E_COYOTE",
-                               [{max_age, 111}, {secure, true}]),
-    ?assertMatch(
-       ["Customer=WILE_E_COYOTE",
-        " Version=1",
-        " Expires=" ++ _,
-        " Max-Age=111",
-        " Secure"],
-       string:tokens(S, ";")),
-    ok.
-
-cookie_test() ->
-    C1 = {"Set-Cookie",
-          "Customer=WILE_E_COYOTE; "
-          "Version=1; "
-          "Path=/acme"},
-    C1 = cookie("Customer", "WILE_E_COYOTE", [{path, "/acme"}]),
-    C1 = cookie("Customer", "WILE_E_COYOTE",
-                [{path, "/acme"}, {badoption, "negatory"}]),
-    C1 = cookie('Customer', 'WILE_E_COYOTE', [{path, '/acme'}]),
-    C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),
-
-    {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey", []),
-    {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey"),
-    LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
-    C2 = {"Set-Cookie",
-          "Customer=WILE_E_COYOTE; "
-          "Version=1; "
-          "Expires=Tue, 15 May 2007 13:45:33 GMT; "
-          "Max-Age=0"},
-    C2 = cookie("Customer", "WILE_E_COYOTE",
-                [{max_age, -111}, {local_time, LocalTime}]),
-    C3 = {"Set-Cookie",
-          "Customer=WILE_E_COYOTE; "
-          "Version=1; "
-          "Expires=Wed, 16 May 2007 13:45:50 GMT; "
-          "Max-Age=86417"},
-    C3 = cookie("Customer", "WILE_E_COYOTE",
-                [{max_age, 86417}, {local_time, LocalTime}]),
-    ok.
-
--endif.
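
A round-trip sketch of the cookie API above; names and values are illustrative:

    1> mochiweb_cookies:cookie("sid", "abc123", [{path, "/"}, {http_only, true}]).
    {"Set-Cookie","sid=abc123; Version=1; Path=/; HttpOnly"}
    2> mochiweb_cookies:parse_cookie("sid=abc123; theme=dark").
    [{"sid","abc123"},{"theme","dark"}]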

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_cover.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_cover.erl b/src/mochiweb/src/mochiweb_cover.erl
deleted file mode 100644
index 6a14ef5..0000000
--- a/src/mochiweb/src/mochiweb_cover.erl
+++ /dev/null
@@ -1,75 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2010 Mochi Media, Inc.
-
-%% @doc Workarounds for various cover deficiencies.
--module(mochiweb_cover).
--export([get_beam/1, get_abstract_code/1,
-         get_clauses/2, clause_lookup_table/1]).
--export([clause_lookup_table/2]).
-
-%% Internal
-
-get_beam(Module) ->
-    {Module, Beam, _Path} = code:get_object_code(Module),
-    Beam.
-
-get_abstract_code(Beam) ->
-    {ok, {_Module,
-          [{abstract_code,
-            {raw_abstract_v1, L}}]}} = beam_lib:chunks(Beam, [abstract_code]),
-    L.
-
-get_clauses(Function, Code) ->
-    [L] = [Clauses || {function, _, FName, _, Clauses}
-                          <- Code, FName =:= Function],
-    L.
-
-clause_lookup_table(Module, Function) ->
-    clause_lookup_table(
-      get_clauses(Function,
-                  get_abstract_code(get_beam(Module)))).
-
-clause_lookup_table(Clauses) ->
-    lists:foldr(fun clause_fold/2, [], Clauses).
-
-clause_fold({clause, _,
-             [InTerm],
-             _Guards=[],
-             [OutTerm]},
-            Acc) ->
-    try [{erl_parse:normalise(InTerm), erl_parse:normalise(OutTerm)} | Acc]
-    catch error:_ -> Acc
-    end;
-clause_fold(_, Acc) ->
-    Acc.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-foo_table(a) -> b;
-foo_table("a") -> <<"b">>;
-foo_table(123) -> {4, 3, 2};
-foo_table([list]) -> [];
-foo_table([list1, list2]) -> [list1, list2, list3];
-foo_table(ignored) -> some, code, ignored;
-foo_table(Var) -> Var.
-
-foo_table_test() ->
-    T = clause_lookup_table(?MODULE, foo_table),
-    [?assertEqual(V, foo_table(K)) || {K, V} <- T].
-
-clause_lookup_table_test() ->
-    ?assertEqual(b, foo_table(a)),
-    ?assertEqual(ignored, foo_table(ignored)),
-    ?assertEqual('Var', foo_table('Var')),
-    ?assertEqual(
-       [{a, b},
-        {"a", <<"b">>},
-        {123, {4, 3, 2}},
-        {[list], []},
-        {[list1, list2], [list1, list2, list3]}],
-       clause_lookup_table(?MODULE, foo_table)).
-
--endif.
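
clause_lookup_table/2 above is what the charref test relies on; note that the target module must be compiled with debug_info so the abstract_code chunk is available:

    %% Map each literal clause of mochiweb_charref:entity/1 to its result.
    T = mochiweb_cover:clause_lookup_table(mochiweb_charref, entity),
    160 = proplists:get_value("nbsp", T).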

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_echo.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_echo.erl b/src/mochiweb/src/mochiweb_echo.erl
deleted file mode 100644
index 6f7872b..0000000
--- a/src/mochiweb/src/mochiweb_echo.erl
+++ /dev/null
@@ -1,38 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Simple and stupid echo server to demo mochiweb_socket_server.
-
--module(mochiweb_echo).
--author('bob@mochimedia.com').
--export([start/0, stop/0, loop/1]).
-
-stop() ->
-    mochiweb_socket_server:stop(?MODULE).
-
-start() ->
-    mochiweb_socket_server:start([{name, ?MODULE},
-                                  {port, 6789},
-                                  {ip, "127.0.0.1"},
-                                  {max, 1},
-                                  {loop, {?MODULE, loop}}]).
-
-loop(Socket) ->
-    case mochiweb_socket:recv(Socket, 0, 30000) of
-        {ok, Data} ->
-            case mochiweb_socket:send(Socket, Data) of
-                ok ->
-                    loop(Socket);
-                _ ->
-                    exit(normal)
-            end;
-        _Other ->
-            exit(normal)
-    end.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.
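
One way to poke the echo server above from a shell; the gen_tcp options are illustrative:

    1> mochiweb_echo:start().
    2> {ok, S} = gen_tcp:connect("127.0.0.1", 6789, [binary, {active, false}]).
    3> ok = gen_tcp:send(S, <<"ping">>).
    4> gen_tcp:recv(S, 0).
    {ok,<<"ping">>}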

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_headers.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_headers.erl b/src/mochiweb/src/mochiweb_headers.erl
deleted file mode 100644
index 4fce983..0000000
--- a/src/mochiweb/src/mochiweb_headers.erl
+++ /dev/null
@@ -1,299 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Case preserving (but case insensitive) HTTP Header dictionary.
-
--module(mochiweb_headers).
--author('bob@mochimedia.com').
--export([empty/0, from_list/1, insert/3, enter/3, get_value/2, lookup/2]).
--export([delete_any/2, get_primary_value/2]).
--export([default/3, enter_from_list/2, default_from_list/2]).
--export([to_list/1, make/1]).
--export([from_binary/1]).
-
-%% @type headers().
-%% @type key() = atom() | binary() | string().
-%% @type value() = atom() | binary() | string() | integer().
-
-%% @spec empty() -> headers()
-%% @doc Create an empty headers structure.
-empty() ->
-    gb_trees:empty().
-
-%% @spec make(headers() | [{key(), value()}]) -> headers()
-%% @doc Construct a headers() from the given list.
-make(L) when is_list(L) ->
-    from_list(L);
-%% assume a tuple is already mochiweb_headers.
-make(T) when is_tuple(T) ->
-    T.
-
-%% @spec from_binary(iolist()) -> headers()
-%% @doc Transforms a raw HTTP header into a mochiweb headers structure.
-%%
-%%      The given raw HTTP header can be one of the following:
-%%
-%%      1) A string or a binary representing a full HTTP header ending with
-%%         double CRLF.
-%%         Examples:
-%%         ```
-%%         "Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n"
-%%         <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>'''
-%%
-%%      2) A list of binaries or strings where each element represents a raw
-%%         HTTP header line ending with a single CRLF.
-%%         Examples:
-%%         ```
-%%         [<<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">>]
-%%         ["Content-Length: 47\r\n", "Content-Type: text/plain\r\n"]
-%%         ["Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">>]'''
-%%
-from_binary(RawHttpHeader) when is_binary(RawHttpHeader) ->
-    from_binary(RawHttpHeader, []);
-from_binary(RawHttpHeaderList) ->
-    from_binary(list_to_binary([RawHttpHeaderList, "\r\n"])).
-
-from_binary(RawHttpHeader, Acc) ->
-    case erlang:decode_packet(httph, RawHttpHeader, []) of
-        {ok, {http_header, _, H, _, V}, Rest} ->
-            from_binary(Rest, [{H, V} | Acc]);
-        _ ->
-            make(Acc)
-    end.
-
-%% @spec from_list([{key(), value()}]) -> headers()
-%% @doc Construct a headers() from the given list.
-from_list(List) ->
-    lists:foldl(fun ({K, V}, T) -> insert(K, V, T) end, empty(), List).
-
-%% @spec enter_from_list([{key(), value()}], headers()) -> headers()
-%% @doc Insert pairs into the headers, replacing any values for existing keys.
-enter_from_list(List, T) ->
-    lists:foldl(fun ({K, V}, T1) -> enter(K, V, T1) end, T, List).
-
-%% @spec default_from_list([{key(), value()}], headers()) -> headers()
-%% @doc Insert pairs into the headers for keys that do not already exist.
-default_from_list(List, T) ->
-    lists:foldl(fun ({K, V}, T1) -> default(K, V, T1) end, T, List).
-
-%% @spec to_list(headers()) -> [{key(), string()}]
-%% @doc Return the contents of the headers. The keys will be the exact key
-%%      that was first inserted (e.g. may be an atom or binary, case is
-%%      preserved).
-to_list(T) ->
-    F = fun ({K, {array, L}}, Acc) ->
-                L1 = lists:reverse(L),
-                lists:foldl(fun (V, Acc1) -> [{K, V} | Acc1] end, Acc, L1);
-            (Pair, Acc) ->
-                [Pair | Acc]
-        end,
-    lists:reverse(lists:foldl(F, [], gb_trees:values(T))).
-
-%% @spec get_value(key(), headers()) -> string() | undefined
-%% @doc Return the value of the given header using a case insensitive search.
-%%      undefined will be returned for keys that are not present.
-get_value(K, T) ->
-    case lookup(K, T) of
-        {value, {_, V}} ->
-            expand(V);
-        none ->
-            undefined
-    end.
-
-%% @spec get_primary_value(key(), headers()) -> string() | undefined
-%% @doc Return the value of the given header up to the first semicolon using
-%%      a case insensitive search. undefined will be returned for keys
-%%      that are not present.
-get_primary_value(K, T) ->
-    case get_value(K, T) of
-        undefined ->
-            undefined;
-        V ->
-            lists:takewhile(fun (C) -> C =/= $; end, V)
-    end.
-
-%% @spec lookup(key(), headers()) -> {value, {key(), string()}} | none
-%% @doc Return the case preserved key and value for the given header using
-%%      a case insensitive search. none will be returned for keys that are
-%%      not present.
-lookup(K, T) ->
-    case gb_trees:lookup(normalize(K), T) of
-        {value, {K0, V}} ->
-            {value, {K0, expand(V)}};
-        none ->
-            none
-    end.
-
-%% @spec default(key(), value(), headers()) -> headers()
-%% @doc Insert the pair into the headers if it does not already exist.
-default(K, V, T) ->
-    K1 = normalize(K),
-    V1 = any_to_list(V),
-    try gb_trees:insert(K1, {K, V1}, T)
-    catch
-        error:{key_exists, _} ->
-            T
-    end.
-
-%% @spec enter(key(), value(), headers()) -> headers()
-%% @doc Insert the pair into the headers, replacing any pre-existing key.
-enter(K, V, T) ->
-    K1 = normalize(K),
-    V1 = any_to_list(V),
-    gb_trees:enter(K1, {K, V1}, T).
-
-%% @spec insert(key(), value(), headers()) -> headers()
-%% @doc Insert the pair into the headers, merging with any pre-existing key.
-%%      A merge is done with Value = V0 ++ ", " ++ V1.
-insert(K, V, T) ->
-    K1 = normalize(K),
-    V1 = any_to_list(V),
-    try gb_trees:insert(K1, {K, V1}, T)
-    catch
-        error:{key_exists, _} ->
-            {K0, V0} = gb_trees:get(K1, T),
-            V2 = merge(K1, V1, V0),
-            gb_trees:update(K1, {K0, V2}, T)
-    end.
-
-%% @spec delete_any(key(), headers()) -> headers()
-%% @doc Delete the header corresponding to key if it is present.
-delete_any(K, T) ->
-    K1 = normalize(K),
-    gb_trees:delete_any(K1, T).
-
-%% Internal API
-
-expand({array, L}) ->
-    mochiweb_util:join(lists:reverse(L), ", ");
-expand(V) ->
-    V.
-
-merge("set-cookie", V1, {array, L}) ->
-    {array, [V1 | L]};
-merge("set-cookie", V1, V0) ->
-    {array, [V1, V0]};
-merge(_, V1, V0) ->
-    V0 ++ ", " ++ V1.
-
-normalize(K) when is_list(K) ->
-    string:to_lower(K);
-normalize(K) when is_atom(K) ->
-    normalize(atom_to_list(K));
-normalize(K) when is_binary(K) ->
-    normalize(binary_to_list(K)).
-
-any_to_list(V) when is_list(V) ->
-    V;
-any_to_list(V) when is_atom(V) ->
-    atom_to_list(V);
-any_to_list(V) when is_binary(V) ->
-    binary_to_list(V);
-any_to_list(V) when is_integer(V) ->
-    integer_to_list(V).
-
-%%
-%% Tests.
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-make_test() ->
-    Identity = make([{hdr, foo}]),
-    ?assertEqual(
-       Identity,
-       make(Identity)).
-
-enter_from_list_test() ->
-    H = make([{hdr, foo}]),
-    ?assertEqual(
-       [{baz, "wibble"}, {hdr, "foo"}],
-       to_list(enter_from_list([{baz, wibble}], H))),
-    ?assertEqual(
-       [{hdr, "bar"}],
-       to_list(enter_from_list([{hdr, bar}], H))),
-    ok.
-
-default_from_list_test() ->
-    H = make([{hdr, foo}]),
-    ?assertEqual(
-       [{baz, "wibble"}, {hdr, "foo"}],
-       to_list(default_from_list([{baz, wibble}], H))),
-    ?assertEqual(
-       [{hdr, "foo"}],
-       to_list(default_from_list([{hdr, bar}], H))),
-    ok.
-
-get_primary_value_test() ->
-    H = make([{hdr, foo}, {baz, <<"wibble;taco">>}]),
-    ?assertEqual(
-       "foo",
-       get_primary_value(hdr, H)),
-    ?assertEqual(
-       undefined,
-       get_primary_value(bar, H)),
-    ?assertEqual(
-       "wibble",
-       get_primary_value(<<"baz">>, H)),
-    ok.
-
-set_cookie_test() ->
-    H = make([{"set-cookie", foo}, {"set-cookie", bar}, {"set-cookie", baz}]),
-    ?assertEqual(
-       [{"set-cookie", "foo"}, {"set-cookie", "bar"}, {"set-cookie", "baz"}],
-       to_list(H)),
-    ok.
-
-headers_test() ->
-    H = ?MODULE:make([{hdr, foo}, {"Hdr", "bar"}, {'Hdr', 2}]),
-    [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
-    H1 = ?MODULE:insert(taco, grande, H),
-    [{hdr, "foo, bar, 2"}, {taco, "grande"}] = ?MODULE:to_list(H1),
-    H2 = ?MODULE:make([{"Set-Cookie", "foo"}]),
-    [{"Set-Cookie", "foo"}] = ?MODULE:to_list(H2),
-    H3 = ?MODULE:insert("Set-Cookie", "bar", H2),
-    [{"Set-Cookie", "foo"}, {"Set-Cookie", "bar"}] = ?MODULE:to_list(H3),
-    "foo, bar" = ?MODULE:get_value("set-cookie", H3),
-    {value, {"Set-Cookie", "foo, bar"}} = ?MODULE:lookup("set-cookie", H3),
-    undefined = ?MODULE:get_value("shibby", H3),
-    none = ?MODULE:lookup("shibby", H3),
-    H4 = ?MODULE:insert("content-type",
-                        "application/x-www-form-urlencoded; charset=utf8",
-                        H3),
-    "application/x-www-form-urlencoded" = ?MODULE:get_primary_value(
-                                             "content-type", H4),
-    H4 = ?MODULE:delete_any("nonexistent-header", H4),
-    H3 = ?MODULE:delete_any("content-type", H4),
-    HB = <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>,
-    H_HB = ?MODULE:from_binary(HB),
-    H_HB = ?MODULE:from_binary(binary_to_list(HB)),
-    "47" = ?MODULE:get_value("Content-Length", H_HB),
-    "text/plain" = ?MODULE:get_value("Content-Type", H_HB),
-    L_H_HB = ?MODULE:to_list(H_HB),
-    2 = length(L_H_HB),
-    true = lists:member({'Content-Length', "47"}, L_H_HB),
-    true = lists:member({'Content-Type', "text/plain"}, L_H_HB),
-    HL = [ <<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">> ],
-    HL2 = [ "Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">> ],
-    HL3 = [ <<"Content-Length: 47\r\n">>, "Content-Type: text/plain\r\n" ],
-    H_HL = ?MODULE:from_binary(HL),
-    H_HL = ?MODULE:from_binary(HL2),
-    H_HL = ?MODULE:from_binary(HL3),
-    "47" = ?MODULE:get_value("Content-Length", H_HL),
-    "text/plain" = ?MODULE:get_value("Content-Type", H_HL),
-    L_H_HL = ?MODULE:to_list(H_HL),
-    2 = length(L_H_HL),
-    true = lists:member({'Content-Length', "47"}, L_H_HL),
-    true = lists:member({'Content-Type', "text/plain"}, L_H_HL),
-    [] = ?MODULE:to_list(?MODULE:from_binary(<<>>)),
-    [] = ?MODULE:to_list(?MODULE:from_binary(<<"">>)),
-    [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n">>)),
-    [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n\r\n">>)),
-    [] = ?MODULE:to_list(?MODULE:from_binary("")),
-    [] = ?MODULE:to_list(?MODULE:from_binary([<<>>])),
-    [] = ?MODULE:to_list(?MODULE:from_binary([<<"">>])),
-    [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n">>])),
-    [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n\r\n">>])),
-    ok.
-
--endif.
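
For reference, a minimal sketch of how this case-insensitive header table
behaves (the module name mochiweb_headers matches the calls made from the
replicator code further below):

    example() ->
        H0 = mochiweb_headers:make([{"Content-Type", "text/plain"}]),
        %% Lookups are case insensitive; the stored key casing is kept.
        "text/plain" = mochiweb_headers:get_value("content-type", H0),
        %% insert/3 merges values for duplicate keys with ", " -- with
        %% set-cookie as the one exception, kept as separate values.
        H1 = mochiweb_headers:insert("Accept", "gzip", H0),
        H2 = mochiweb_headers:insert("accept", "deflate", H1),
        "gzip, deflate" = mochiweb_headers:get_value("Accept", H2).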


[20/49] Remove src/couch_replicator

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_httpc.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
deleted file mode 100644
index 10409c4..0000000
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ /dev/null
@@ -1,297 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpc).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
--include("couch_replicator_api_wrap.hrl").
-
--export([setup/1]).
--export([send_req/3]).
--export([full_url/2]).
-
--import(couch_util, [
-    get_value/2,
-    get_value/3
-]).
-
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
--define(MAX_WAIT, 5 * 60 * 1000).
-
-
-setup(#httpdb{httpc_pool = nil, url = Url, http_connections = MaxConns} = Db) ->
-    {ok, Pid} = couch_replicator_httpc_pool:start_link(Url, [{max_connections, MaxConns}]),
-    {ok, Db#httpdb{httpc_pool = Pid}}.
-
-
-send_req(HttpDb, Params1, Callback) ->
-    Params2 = ?replace(Params1, qs,
-        [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]),
-    Params = ?replace(Params2, ibrowse_options,
-        lists:keysort(1, get_value(ibrowse_options, Params2, []))),
-    {Worker, Response} = send_ibrowse_req(HttpDb, Params),
-    process_response(Response, Worker, HttpDb, Params, Callback).
-
-
-send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb, Params) ->
-    Method = get_value(method, Params, get),
-    UserHeaders = lists:keysort(1, get_value(headers, Params, [])),
-    Headers1 = lists:ukeymerge(1, UserHeaders, BaseHeaders),
-    Headers2 = oauth_header(HttpDb, Params) ++ Headers1,
-    Url = full_url(HttpDb, Params),
-    Body = get_value(body, Params, []),
-    case get_value(path, Params) of
-    "_changes" ->
-        {ok, Worker} = ibrowse:spawn_link_worker_process(Url);
-    _ ->
-        {ok, Worker} = couch_replicator_httpc_pool:get_worker(HttpDb#httpdb.httpc_pool)
-    end,
-    IbrowseOptions = [
-        {response_format, binary}, {inactivity_timeout, HttpDb#httpdb.timeout} |
-        lists:ukeymerge(1, get_value(ibrowse_options, Params, []),
-            HttpDb#httpdb.ibrowse_options)
-    ],
-    Response = ibrowse:send_req_direct(
-        Worker, Url, Headers2, Method, Body, IbrowseOptions, infinity),
-    {Worker, Response}.
-
-
-process_response({error, sel_conn_closed}, _Worker, HttpDb, Params, Callback) ->
-    send_req(HttpDb, Params, Callback);
-
-process_response({error, {'EXIT', {normal, _}}}, _Worker, HttpDb, Params, Cb) ->
-    % ibrowse worker terminated because remote peer closed the socket
-    % -> not an error
-    send_req(HttpDb, Params, Cb);
-
-process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
-    process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
-
-process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
-    release_worker(Worker, HttpDb),
-    case list_to_integer(Code) of
-    Ok when (Ok >= 200 andalso Ok < 300) ; (Ok >= 400 andalso Ok < 500) ->
-        EJson = case Body of
-        <<>> ->
-            null;
-        Json ->
-            ?JSON_DECODE(Json)
-        end,
-        Callback(Ok, Headers, EJson);
-    R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
-        do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
-    Error ->
-        maybe_retry({code, Error}, Worker, HttpDb, Params, Callback)
-    end;
-
-process_response(Error, Worker, HttpDb, Params, Callback) ->
-    maybe_retry(Error, Worker, HttpDb, Params, Callback).
-
-
-process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
-    receive
-    {ibrowse_async_headers, ReqId, Code, Headers} ->
-        case list_to_integer(Code) of
-        Ok when (Ok >= 200 andalso Ok < 300) ; (Ok >= 400 andalso Ok < 500) ->
-            StreamDataFun = fun() ->
-                stream_data_self(HttpDb, Params, Worker, ReqId, Callback)
-            end,
-            ibrowse:stream_next(ReqId),
-            try
-                Ret = Callback(Ok, Headers, StreamDataFun),
-                release_worker(Worker, HttpDb),
-                clean_mailbox_req(ReqId),
-                Ret
-            catch throw:{maybe_retry_req, Err} ->
-                clean_mailbox_req(ReqId),
-                maybe_retry(Err, Worker, HttpDb, Params, Callback)
-            end;
-        R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
-            do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
-        Error ->
-            report_error(Worker, HttpDb, Params, {code, Error})
-        end;
-    {ibrowse_async_response, ReqId, {error, _} = Error} ->
-        maybe_retry(Error, Worker, HttpDb, Params, Callback)
-    after HttpDb#httpdb.timeout + 500 ->
-        % Note: ibrowse should always deliver a reply, even on timeout,
-        % but this doesn't always hold when there's a very high rate of
-        % requests and many open connections.
-        maybe_retry(timeout, Worker, HttpDb, Params, Callback)
-    end.
-
-
-clean_mailbox_req(ReqId) ->
-    receive
-    {ibrowse_async_response, ReqId, _} ->
-        clean_mailbox_req(ReqId);
-    {ibrowse_async_response_end, ReqId} ->
-        clean_mailbox_req(ReqId)
-    after 0 ->
-        ok
-    end.
-
-
-release_worker(Worker, #httpdb{httpc_pool = Pool}) ->
-    ok = couch_replicator_httpc_pool:release_worker(Pool, Worker).
-
-
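-% Retries use exponential backoff: the wait interval doubles on every
-% attempt, capped at ?MAX_WAIT (five minutes).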
-maybe_retry(Error, Worker, #httpdb{retries = 0} = HttpDb, Params, _Cb) ->
-    report_error(Worker, HttpDb, Params, {error, Error});
-
-maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
-    Params, Cb) ->
-    release_worker(Worker, HttpDb),
-    Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
-    Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
-    twig:log(notice,"Retrying ~s request to ~s in ~p seconds due to error ~s",
-        [Method, Url, Wait / 1000, error_cause(Error)]),
-    ok = timer:sleep(Wait),
-    Wait2 = erlang:min(Wait * 2, ?MAX_WAIT),
-    send_req(HttpDb#httpdb{retries = Retries - 1, wait = Wait2}, Params, Cb).
-
-
-report_error(Worker, HttpDb, Params, Error) ->
-    Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
-    Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
-    do_report_error(Url, Method, Error),
-    release_worker(Worker, HttpDb),
-    exit({http_request_failed, Method, Url, Error}).
-
-
-do_report_error(Url, Method, {code, Code}) ->
-    twig:log(error,"Replicator, request ~s to ~p failed. The received "
-        "HTTP error code is ~p", [Method, Url, Code]);
-
-do_report_error(FullUrl, Method, Error) ->
-    twig:log(error,"Replicator, request ~s to ~p failed due to error ~s",
-        [Method, FullUrl, error_cause(Error)]).
-
-
-error_cause({error, Cause}) ->
-    lists:flatten(io_lib:format("~p", [Cause]));
-error_cause(Cause) ->
-    lists:flatten(io_lib:format("~p", [Cause])).
-
-
-stream_data_self(#httpdb{timeout = T} = HttpDb, Params, Worker, ReqId, Cb) ->
-    case accumulate_messages(ReqId, [], T + 500) of
-    {Data, ibrowse_async_response} ->
-        ibrowse:stream_next(ReqId),
-        {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
-    {Data, ibrowse_async_response_end} ->
-        {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
-    end.
-
-accumulate_messages(ReqId, Acc, Timeout) ->
-    receive
-    {ibrowse_async_response, ReqId, {error, Error}} ->
-        throw({maybe_retry_req, Error});
-    {ibrowse_async_response, ReqId, <<>>} ->
-        accumulate_messages(ReqId, Acc, Timeout);
-    {ibrowse_async_response, ReqId, Data} ->
-        accumulate_messages(ReqId, [Data | Acc], 0);
-    {ibrowse_async_response_end, ReqId} ->
-        {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
-    after Timeout ->
-        % Note: ibrowse should always deliver a reply, even on timeout,
-        % but this doesn't always hold when there's a very high rate of
-        % requests and many open connections.
-        if Acc =:= [] ->
-            throw({maybe_retry_req, timeout});
-        true ->
-            {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
-        end
-    end.
-
-
-full_url(#httpdb{url = BaseUrl}, Params) ->
-    Path = get_value(path, Params, []),
-    QueryArgs = get_value(qs, Params, []),
-    BaseUrl ++ Path ++ query_args_to_string(QueryArgs, []).
-
-
-query_args_to_string([], []) ->
-    "";
-query_args_to_string([], Acc) ->
-    "?" ++ string:join(lists:reverse(Acc), "&");
-query_args_to_string([{K, V} | Rest], Acc) ->
-    query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-
-
-oauth_header(#httpdb{oauth = nil}, _ConnParams) ->
-    [];
-oauth_header(#httpdb{url = BaseUrl, oauth = OAuth}, ConnParams) ->
-    Consumer = {
-        OAuth#oauth.consumer_key,
-        OAuth#oauth.consumer_secret,
-        OAuth#oauth.signature_method
-    },
-    Method = case get_value(method, ConnParams, get) of
-    get -> "GET";
-    post -> "POST";
-    put -> "PUT";
-    head -> "HEAD"
-    end,
-    QSL = get_value(qs, ConnParams, []),
-    OAuthParams = oauth:sign(Method,
-        BaseUrl ++ get_value(path, ConnParams, []),
-        QSL, Consumer, OAuth#oauth.token, OAuth#oauth.token_secret) -- QSL,
-    [{"Authorization",
-        "OAuth " ++ oauth:header_params_encode(OAuthParams)}].
-
-
-do_redirect(Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, Cb) ->
-    release_worker(Worker, HttpDb),
-    RedirectUrl = redirect_url(Headers, Url),
-    {HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
-    send_req(HttpDb2, Params2, Cb).
-
-
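-% Rebuild an absolute URL from the Location response header, carrying
-% over any basic-auth credentials from the original URL and re-bracketing
-% IPv6 hosts.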
-redirect_url(RespHeaders, OrigUrl) ->
-    MochiHeaders = mochiweb_headers:make(RespHeaders),
-    RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
-    #url{
-        host = Host,
-        host_type = HostType,
-        port = Port,
-        path = Path,  % includes query string
-        protocol = Proto
-    } = ibrowse_lib:parse_url(RedUrl),
-    #url{
-        username = User,
-        password = Passwd
-    } = ibrowse_lib:parse_url(OrigUrl),
-    Creds = case is_list(User) andalso is_list(Passwd) of
-    true ->
-        User ++ ":" ++ Passwd ++ "@";
-    false ->
-        []
-    end,
-    HostPart = case HostType of
-    ipv6_address ->
-        "[" ++ Host ++ "]";
-    _ ->
-        Host
-    end,
-    atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
-        integer_to_list(Port) ++ Path.
-
-after_redirect(RedirectUrl, 303, HttpDb, Params) ->
-    after_redirect(RedirectUrl, HttpDb, ?replace(Params, method, get));
-after_redirect(RedirectUrl, _Code, HttpDb, Params) ->
-    after_redirect(RedirectUrl, HttpDb, Params).
-
-after_redirect(RedirectUrl, HttpDb, Params) ->
-    Params2 = lists:keydelete(path, 1, lists:keydelete(qs, 1, Params)),
-    {HttpDb#httpdb{url = RedirectUrl}, Params2}.
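
As a usage sketch (assuming a #httpdb{} record that has already been
through setup/1), the caller supplies request parameters and a callback;
2xx and 4xx responses reach the callback with the decoded JSON body,
while other errors are retried with backoff and ultimately exit:

    get_db_info(HttpDb) ->
        couch_replicator_httpc:send_req(HttpDb, [{path, ""}],
            fun(200, _Headers, {Props}) ->
                %% A real caller would also match 4xx codes here.
                {ok, Props}
            end).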

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_httpc_pool.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
deleted file mode 100644
index 0a42284..0000000
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ /dev/null
@@ -1,194 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpc_pool).
--behaviour(gen_server).
-
-% public API
--export([start_link/2, stop/1]).
--export([get_worker/1, release_worker/2]).
-
-% gen_server API
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_util, [
-    get_value/2
-]).
-
--record(state, {
-    url,
-    limit,                  % max # of workers allowed
-    free = [],              % free workers (connections)
-    busy = [],              % busy workers (connections)
-    waiting = queue:new(),  % blocked clients waiting for a worker
-    callers = []            % clients who've been given a worker
-}).
-
-
-start_link(Url, Options) ->
-    gen_server:start_link(?MODULE, {Url, Options}, []).
-
-stop(Pool) ->
-    ok = gen_server:call(Pool, stop, infinity).
-
-
-get_worker(Pool) ->
-    {ok, _Worker} = gen_server:call(Pool, get_worker, infinity).
-
-
-release_worker(Pool, Worker) ->
-    ok = gen_server:cast(Pool, {release_worker, Worker}).
-
-
-init({Url, Options}) ->
-    process_flag(trap_exit, true),
-    State = #state{
-        url = Url,
-        limit = get_value(max_connections, Options)
-    },
-    {ok, State}.
-
-
-handle_call(get_worker, From, State) ->
-    #state{
-        waiting = Waiting,
-        callers = Callers,
-        url = Url,
-        limit = Limit,
-        busy = Busy,
-        free = Free
-    } = State,
-    case length(Busy) >= Limit of
-    true ->
-        {noreply, State#state{waiting = queue:in(From, Waiting)}};
-    false ->
-        case Free of
-        [] ->
-            {ok, Worker} = ibrowse:spawn_link_worker_process(Url),
-            Free2 = Free;
-        [Worker | Free2] ->
-            ok
-        end,
-        NewState = State#state{
-            free = Free2,
-            busy = [Worker | Busy],
-            callers = monitor_client(Callers, Worker, From)
-        },
-        {reply, {ok, Worker}, NewState}
-    end;
-
-handle_call(stop, _From, State) ->
-    {stop, normal, ok, State}.
-
-
-handle_cast({release_worker, Worker}, State) ->
-    #state{waiting = Waiting, callers = Callers} = State,
-    NewCallers0 = demonitor_client(Callers, Worker),
-    case is_process_alive(Worker) andalso
-        lists:member(Worker, State#state.busy) of
-    true ->
-        case queue:out(Waiting) of
-        {empty, Waiting2} ->
-            NewCallers1 = NewCallers0,
-            Busy2 = State#state.busy -- [Worker],
-            Free2 = [Worker | State#state.free];
-        {{value, From}, Waiting2} ->
-            NewCallers1 = monitor_client(NewCallers0, Worker, From),
-            gen_server:reply(From, {ok, Worker}),
-            Busy2 = State#state.busy,
-            Free2 = State#state.free
-        end,
-        NewState = State#state{
-            busy = Busy2,
-            free = Free2,
-            waiting = Waiting2,
-            callers = NewCallers1
-        },
-        {noreply, NewState};
-    false ->
-        {noreply, State#state{callers = NewCallers0}}
-    end.
-
-handle_info({'EXIT', Pid, _Reason}, State) ->
-    #state{
-        url = Url,
-        busy = Busy,
-        free = Free,
-        waiting = Waiting,
-        callers = Callers
-    } = State,
-    NewCallers0 = demonitor_client(Callers, Pid),
-    case Free -- [Pid] of
-    Free ->
-        case Busy -- [Pid] of
-        Busy ->
-            {noreply, State#state{callers = NewCallers0}};
-        Busy2 ->
-            case queue:out(Waiting) of
-            {empty, _} ->
-                {noreply, State#state{busy = Busy2, callers = NewCallers0}};
-            {{value, From}, Waiting2} ->
-                {ok, Worker} = ibrowse:spawn_link_worker_process(Url),
-                NewCallers1 = monitor_client(NewCallers0, Worker, From),
-                gen_server:reply(From, {ok, Worker}),
-                NewState = State#state{
-                    busy = [Worker | Busy2],
-                    waiting = Waiting2,
-                    callers = NewCallers1
-                },
-                {noreply, NewState}
-            end
-        end;
-    Free2 ->
-        {noreply, State#state{free = Free2, callers = NewCallers0}}
-    end;
-
-handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
-    case lists:keysearch(Ref, 2, Callers) of
-        {value, {Worker, Ref}} ->
-            handle_cast({release_worker, Worker}, State);
-        false ->
-            {noreply, State}
-    end.
-
-code_change(_OldVsn, OldState, _Extra) when tuple_size(OldState) =:= 7 ->
-    case element(7, OldState) of
-        EtsTable when is_integer(EtsTable) ->
-            NewState = setelement(7, OldState, ets:tab2list(EtsTable)),
-            ets:delete(EtsTable),
-            {ok, NewState};
-        Callers when is_list(Callers) ->
-            % Already upgraded
-            {ok, OldState}
-    end;
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-terminate(_Reason, State) ->
-    lists:foreach(fun ibrowse_http_client:stop/1, State#state.free),
-    lists:foreach(fun ibrowse_http_client:stop/1, State#state.busy).
-
-monitor_client(Callers, Worker, {ClientPid, _}) ->
-    [{Worker, erlang:monitor(process, ClientPid)} | Callers].
-
-demonitor_client(Callers, Worker) ->
-    case lists:keysearch(Worker, 1, Callers) of
-        {value, {Worker, MonRef}} ->
-            erlang:demonitor(MonRef, [flush]),
-            lists:keydelete(Worker, 1, Callers);
-        false ->
-            Callers
-    end.
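
A usage sketch of the pool (it assumes the ibrowse application is
running, since the pooled workers are ibrowse connection processes):

    demo() ->
        Url = "http://localhost:5984/",
        {ok, Pool} = couch_replicator_httpc_pool:start_link(
            Url, [{max_connections, 20}]),
        {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
        %% ... issue requests with ibrowse:send_req_direct/7 ...
        ok = couch_replicator_httpc_pool:release_worker(Pool, Worker),
        couch_replicator_httpc_pool:stop(Pool).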

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_httpd.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
deleted file mode 100644
index f84152c..0000000
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_httpd).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(couch_httpd, [
-    send_json/2,
-    send_json/3,
-    send_method_not_allowed/2
-]).
-
--import(couch_util, [
-    to_binary/1
-]).
-
--export([handle_req/1]).
-
-
-handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    RepDoc = {Props} = couch_httpd:json_body_obj(Req),
-    validate_rep_props(Props),
-    case couch_replicator:replicate(RepDoc, UserCtx) of
-    {error, {Error, Reason}} ->
-        send_json(
-            Req, 404,
-            {[{error, to_binary(Error)}, {reason, to_binary(Reason)}]});
-    {error, not_found} ->
-        % Tried to cancel a replication that didn't exist.
-        send_json(Req, 404, {[{error, <<"not found">>}]});
-    {error, Reason} ->
-        send_json(Req, 500, {[{error, to_binary(Reason)}]});
-    {ok, {cancelled, RepId}} ->
-        send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {continuous, RepId}} ->
-        send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {HistoryResults}} ->
-        send_json(Req, {[{ok, true} | HistoryResults]})
-    end;
-
-handle_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-validate_rep_props([]) ->
-    ok;
-validate_rep_props([{<<"query_params">>, {Params}}|Rest]) ->
-    lists:foreach(fun
-        ({_,V}) when is_binary(V) -> ok;
-        ({K,_}) -> throw({bad_request,
-            <<K/binary," value must be a string.">>})
-        end, Params),
-    validate_rep_props(Rest);
-validate_rep_props([_|Rest]) ->
-    validate_rep_props(Rest).
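
For illustration, the decoded EJSON body of a POST to this handler that
passes validate_rep_props/1 (every value under query_params must be a
JSON string; the remaining fields are validated further down the stack by
couch_replicator:replicate/2):

    RepDoc = {[
        {<<"source">>, <<"http://localhost:5984/foo">>},
        {<<"target">>, <<"bar">>},
        {<<"query_params">>, {[{<<"key">>, <<"value">>}]}}
    ]}.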

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_job_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_job_sup.erl b/src/couch_replicator/src/couch_replicator_job_sup.erl
deleted file mode 100644
index 3cce46c..0000000
--- a/src/couch_replicator/src/couch_replicator_job_sup.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_job_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
-    supervisor:start_link({local,?MODULE}, ?MODULE, []).
-
-%%=============================================================================
-%% supervisor callbacks
-%%=============================================================================
-
-init([]) ->
-    {ok, {{one_for_one, 3, 10}, []}}.
-
-%%=============================================================================
-%% internal functions
-%%=============================================================================
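
The supervisor starts with no static children; couch_replicator attaches
replication jobs dynamically. A rough sketch of the shape involved (the
exact MFA lives in couch_replicator and is assumed here; the child id is
the replication id and the spec is temporary, as the manager's use of
supervisor:delete_child/2 suggests):

    ChildSpec = {RepIdStr,
        {gen_server, start_link, [couch_replicator, Rep, []]},
        temporary, 250, worker, [couch_replicator]},
    {ok, _Pid} = supervisor:start_child(couch_replicator_job_sup, ChildSpec).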

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_js_functions.hrl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_js_functions.hrl b/src/couch_replicator/src/couch_replicator_js_functions.hrl
deleted file mode 100644
index 3f1db7c..0000000
--- a/src/couch_replicator/src/couch_replicator_js_functions.hrl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(REP_DB_DOC_VALIDATE_FUN, <<"
-    function(newDoc, oldDoc, userCtx) {
-        function reportError(error_msg) {
-            log('Error writing document `' + newDoc._id +
-                '\\' to the replicator database: ' + error_msg);
-            throw({forbidden: error_msg});
-        }
-
-        function validateEndpoint(endpoint, fieldName) {
-            if ((typeof endpoint !== 'string') &&
-                ((typeof endpoint !== 'object') || (endpoint === null))) {
-
-                reportError('The `' + fieldName + '\\' property must exist' +
-                    ' and be either a string or an object.');
-            }
-
-            if (typeof endpoint === 'object') {
-                if ((typeof endpoint.url !== 'string') || !endpoint.url) {
-                    reportError('The url property must exist in the `' +
-                        fieldName + '\\' field and must be a non-empty string.');
-                }
-
-                if ((typeof endpoint.auth !== 'undefined') &&
-                    ((typeof endpoint.auth !== 'object') ||
-                        endpoint.auth === null)) {
-
-                    reportError('`' + fieldName +
-                        '.auth\\' must be a non-null object.');
-                }
-
-                if ((typeof endpoint.headers !== 'undefined') &&
-                    ((typeof endpoint.headers !== 'object') ||
-                        endpoint.headers === null)) {
-
-                    reportError('`' + fieldName +
-                        '.headers\\' must be a non-null object.');
-                }
-            }
-        }
-
-        var isReplicator = (userCtx.roles.indexOf('_replicator') >= 0);
-        var isAdmin = (userCtx.roles.indexOf('_admin') >= 0);
-
-        if (oldDoc && !newDoc._deleted && !isReplicator &&
-            (oldDoc._replication_state === 'triggered')) {
-            reportError('Only the replicator can edit replication documents ' +
-                'that are in the triggered state.');
-        }
-
-        if (!newDoc._deleted) {
-            validateEndpoint(newDoc.source, 'source');
-            validateEndpoint(newDoc.target, 'target');
-
-            if ((typeof newDoc.create_target !== 'undefined') &&
-                (typeof newDoc.create_target !== 'boolean')) {
-
-                reportError('The `create_target\\' field must be a boolean.');
-            }
-
-            if ((typeof newDoc.continuous !== 'undefined') &&
-                (typeof newDoc.continuous !== 'boolean')) {
-
-                reportError('The `continuous\\' field must be a boolean.');
-            }
-
-            if ((typeof newDoc.doc_ids !== 'undefined') &&
-                !isArray(newDoc.doc_ids)) {
-
-                reportError('The `doc_ids\\' field must be an array of strings.');
-            }
-
-            if ((typeof newDoc.filter !== 'undefined') &&
-                ((typeof newDoc.filter !== 'string') || !newDoc.filter)) {
-
-                reportError('The `filter\\' field must be a non-empty string.');
-            }
-
-            if ((typeof newDoc.query_params !== 'undefined') &&
-                ((typeof newDoc.query_params !== 'object') ||
-                    newDoc.query_params === null)) {
-
-                reportError('The `query_params\\' field must be an object.');
-            }
-
-            if (newDoc.user_ctx) {
-                var user_ctx = newDoc.user_ctx;
-
-                if ((typeof user_ctx !== 'object') || (user_ctx === null)) {
-                    reportError('The `user_ctx\\' property must be a ' +
-                        'non-null object.');
-                }
-
-                if (!(user_ctx.name === null ||
-                    (typeof user_ctx.name === 'undefined') ||
-                    ((typeof user_ctx.name === 'string') &&
-                        user_ctx.name.length > 0))) {
-
-                    reportError('The `user_ctx.name\\' property must be a ' +
-                        'non-empty string or null.');
-                }
-
-                if (!isAdmin && (user_ctx.name !== userCtx.name)) {
-                    reportError('The given `user_ctx.name\\' is not valid');
-                }
-
-                if (user_ctx.roles && !isArray(user_ctx.roles)) {
-                    reportError('The `user_ctx.roles\\' property must be ' +
-                        'an array of strings.');
-                }
-
-                if (!isAdmin && user_ctx.roles) {
-                    for (var i = 0; i < user_ctx.roles.length; i++) {
-                        var role = user_ctx.roles[i];
-
-                        if (typeof role !== 'string' || role.length === 0) {
-                            reportError('Roles must be non-empty strings.');
-                        }
-                        if (userCtx.roles.indexOf(role) === -1) {
-                            reportError('Invalid role (`' + role +
-                                '\\') in the `user_ctx\\'');
-                        }
-                    }
-                }
-            } else {
-                if (!isAdmin) {
-                    reportError('The `user_ctx\\' property is missing (it is ' +
-                       'optional for admins only).');
-                }
-            }
-        } else {
-            if (!isAdmin) {
-                if (!oldDoc.user_ctx || (oldDoc.user_ctx.name !== userCtx.name)) {
-                    reportError('Replication documents can only be deleted by ' +
-                        'admins or by the users who created them.');
-                }
-            }
-        }
-    }
-">>).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_manager.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_manager.erl b/src/couch_replicator/src/couch_replicator_manager.erl
deleted file mode 100644
index 047f573..0000000
--- a/src/couch_replicator/src/couch_replicator_manager.erl
+++ /dev/null
@@ -1,889 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_manager).
--behaviour(gen_server).
--behaviour(config_listener).
-
-% public API
--export([replication_started/1, replication_completed/2, replication_error/2]).
-
--export([before_doc_update/2, after_doc_read/2]).
-
-% gen_server callbacks
--export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
-% config_listener callback
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_replicator.hrl").
--include("couch_replicator_js_functions.hrl").
-
--define(DOC_TO_REP, couch_rep_doc_id_to_rep_id).
--define(REP_TO_STATE, couch_rep_id_to_rep_state).
--define(INITIAL_WAIT, 2.5). % seconds
--define(MAX_WAIT, 600).     % seconds
--define(OWNER, <<"owner">>).
-
--define(DB_TO_SEQ, db_to_seq).
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
-
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
--record(rep_state, {
-    dbname,
-    rep,
-    starting,
-    retries_left,
-    max_retries,
-    wait = ?INITIAL_WAIT
-}).
-
--import(couch_util, [
-    to_binary/1
-]).
-
--record(state, {
-    db_notifier = nil,
-    scan_pid = nil,
-    rep_start_pids = [],
-    max_retries
-}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-replication_started(#rep{id = {BaseId, _} = RepId}) ->
-    case rep_state(RepId) of
-    nil ->
-        ok;
-    #rep_state{dbname = DbName, rep = #rep{doc_id = DocId}} ->
-        update_rep_doc(DbName, DocId, [
-            {<<"_replication_state">>, <<"triggered">>},
-            {<<"_replication_id">>, ?l2b(BaseId)},
-            {<<"_replication_stats">>, undefined}]),
-        ok = gen_server:call(?MODULE, {rep_started, RepId}, infinity),
-        twig:log(notice, "Document `~s` triggered replication `~s`",
-            [DocId, pp_rep_id(RepId)])
-    end.
-
-
-replication_completed(#rep{id = RepId}, Stats) ->
-    case rep_state(RepId) of
-    nil ->
-        ok;
-    #rep_state{dbname = DbName, rep = #rep{doc_id = DocId}} ->
-        update_rep_doc(DbName, DocId, [
-            {<<"_replication_state">>, <<"completed">>},
-            {<<"_replication_stats">>, {Stats}}]),
-        ok = gen_server:call(?MODULE, {rep_complete, RepId}, infinity),
-        twig:log(notice, "Replication `~s` finished (triggered by document `~s`)",
-            [pp_rep_id(RepId), DocId])
-    end.
-
-
-replication_error(#rep{id = {BaseId, _} = RepId}, Error) ->
-    case rep_state(RepId) of
-    nil ->
-        ok;
-    #rep_state{dbname = DbName, rep = #rep{doc_id = DocId}} ->
-        % TODO: maybe add error reason to replication document
-        update_rep_doc(DbName, DocId, [
-            {<<"_replication_state">>, <<"error">>},
-            {<<"_replication_id">>, ?l2b(BaseId)}]),
-        ok = gen_server:call(?MODULE, {rep_error, RepId, Error}, infinity)
-    end.
-
-
-handle_config_change("replicator", "db", _, _, S) ->
-    ok = gen_server:call(S, rep_db_changed),
-    remove_handler;
-handle_config_change("replicator", "max_replication_retry_count", V, _, S) ->
-    ok = gen_server:cast(S, {set_max_retries, retries_value(V)}),
-    {ok, S};
-handle_config_change(_, _, _, _, S) ->
-    {ok, S}.
-
-
-init(_) ->
-    process_flag(trap_exit, true),
-    net_kernel:monitor_nodes(true),
-    ?DOC_TO_REP = ets:new(?DOC_TO_REP, [named_table, set, public]),
-    ?REP_TO_STATE = ets:new(?REP_TO_STATE, [named_table, set, public]),
-    ?DB_TO_SEQ = ets:new(?DB_TO_SEQ, [named_table, set, public]),
-    Server = self(),
-    ok = config:listen_for_changes(?MODULE, Server),
-    ScanPid = spawn_link(fun() -> scan_all_dbs(Server) end),
-    % Automatically start node local changes feed loop
-    LocalRepDb = ?l2b(config:get("replicator", "db", "_replicator")),
-    Pid = changes_feed_loop(LocalRepDb, 0),
-    {ok, #state{
-        db_notifier = db_update_notifier(),
-        scan_pid = ScanPid,
-        max_retries = retries_value(
-            config:get("replicator", "max_replication_retry_count", "10")),
-        rep_start_pids = [Pid]
-    }}.
-
-
-handle_call({rep_db_update, DbName, {ChangeProps} = Change}, _From, State) ->
-    NewState = try
-        process_update(State, DbName, Change)
-    catch
-    _Tag:Error ->
-        {RepProps} = get_json_value(doc, ChangeProps),
-        DocId = get_json_value(<<"_id">>, RepProps),
-        rep_db_update_error(Error, DbName, DocId),
-        State
-    end,
-    {reply, ok, NewState};
-
-
-handle_call({rep_started, RepId}, _From, State) ->
-    case rep_state(RepId) of
-    nil ->
-        ok;
-    RepState ->
-        NewRepState = RepState#rep_state{
-            starting = false,
-            retries_left = State#state.max_retries,
-            max_retries = State#state.max_retries,
-            wait = ?INITIAL_WAIT
-        },
-        true = ets:insert(?REP_TO_STATE, {RepId, NewRepState})
-    end,
-    {reply, ok, State};
-
-handle_call({rep_complete, RepId}, _From, State) ->
-    true = ets:delete(?REP_TO_STATE, RepId),
-    {reply, ok, State};
-
-handle_call({rep_error, RepId, Error}, _From, State) ->
-    {reply, ok, replication_error(State, RepId, Error)};
-
-handle_call({resume_scan, DbName}, _From, State) ->
-    Since = case ets:lookup(?DB_TO_SEQ, DbName) of
-        [] -> 0;
-        [{DbName, EndSeq}] -> EndSeq
-    end,
-    Pid = changes_feed_loop(DbName, Since),
-    twig:log(debug, "Scanning ~s from update_seq ~p", [DbName, Since]),
-    {reply, ok, State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}};
-
-handle_call({rep_db_checkpoint, DbName, EndSeq}, _From, State) ->
-    true = ets:insert(?DB_TO_SEQ, {DbName, EndSeq}),
-    {reply, ok, State};
-
-handle_call(rep_db_changed, _From, State) ->
-    {stop, shutdown, ok, State};
-
-handle_call(Msg, From, State) ->
-    twig:log(error, "Replication manager received unexpected call ~p from ~p",
-        [Msg, From]),
-    {stop, {error, {unexpected_call, Msg}}, State}.
-
-handle_cast({set_max_retries, MaxRetries}, State) ->
-    {noreply, State#state{max_retries = MaxRetries}};
-
-handle_cast(Msg, State) ->
-    twig:log(error, "Replication manager received unexpected cast ~p", [Msg]),
-    {stop, {error, {unexpected_cast, Msg}}, State}.
-
-handle_info({nodeup, _Node}, State) ->
-    {noreply, rescan(State)};
-
-handle_info({nodedown, _Node}, State) ->
-    {noreply, rescan(State)};
-
-handle_info({'EXIT', From, normal}, #state{scan_pid = From} = State) ->
-    twig:log(debug, "Background scan has completed.", []),
-    {noreply, State#state{scan_pid=nil}};
-
-handle_info({'EXIT', From, Reason}, #state{scan_pid = From} = State) ->
-    twig:log(error, "Background scanner died. Reason: ~p", [Reason]),
-    {stop, {scanner_died, Reason}, State};
-
-handle_info({'EXIT', From, Reason}, #state{db_notifier = From} = State) ->
-    twig:log(error, "Database update notifier died. Reason: ~p", [Reason]),
-    {stop, {db_update_notifier_died, Reason}, State};
-
-handle_info({'EXIT', From, normal}, #state{rep_start_pids = Pids} = State) ->
-    % one of the replication start processes terminated successfully
-    {noreply, State#state{rep_start_pids = Pids -- [From]}};
-
-handle_info({'DOWN', _Ref, _, _, _}, State) ->
-    % From a db monitor created by a replication process. Ignore.
-    {noreply, State};
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, self()),
-    {noreply, State};
-
-handle_info(shutdown, State) ->
-    {stop, shutdown, State};
-
-handle_info(Msg, State) ->
-    twig:log(error,"Replication manager received unexpected message ~p", [Msg]),
-    {stop, {unexpected_msg, Msg}, State}.
-
-
-terminate(_Reason, State) ->
-    #state{
-        scan_pid = ScanPid,
-        rep_start_pids = StartPids,
-        db_notifier = DbNotifier
-    } = State,
-    stop_all_replications(),
-    lists:foreach(
-        fun(Pid) ->
-            catch unlink(Pid),
-            catch exit(Pid, stop)
-        end,
-        [ScanPid | StartPids]),
-    true = ets:delete(?REP_TO_STATE),
-    true = ets:delete(?DOC_TO_REP),
-    true = ets:delete(?DB_TO_SEQ),
-    couch_db_update_notifier:stop(DbNotifier).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-changes_feed_loop(<<"shards/", _/binary>>=DbName, Since) ->
-    Server = self(),
-    Pid = spawn_link(
-        fun() ->
-            fabric:changes(DbName, fun
-            ({change, Change}, Acc) ->
-                case has_valid_rep_id(Change) of
-                true ->
-                    ok = gen_server:call(
-                        Server, {rep_db_update, DbName, Change}, infinity);
-                false ->
-                    ok
-                end,
-                {ok, Acc};
-            ({stop, EndSeq}, Acc) ->
-                ok = gen_server:call(Server, {rep_db_checkpoint, DbName, EndSeq}, infinity),
-                {ok, Acc};
-            (_, Acc) ->
-                {ok, Acc}
-            end,
-            nil,
-            #changes_args{
-                include_docs = true,
-                feed = "longpoll",
-                since = Since,
-                filter = main_only,
-                timeout = infinity
-                }
-            )
-        end),
-    Pid;
-changes_feed_loop(DbName, Since) ->
-    ensure_rep_db_exists(DbName),
-    Server = self(),
-    spawn_link(fun() ->
-        UserCtx = #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]},
-        DbOpenOptions = [{user_ctx, UserCtx}, sys_db],
-        {ok, Db} = couch_db:open_int(DbName, DbOpenOptions),
-        ChangesFeedFun = couch_changes:handle_changes(
-            #changes_args{
-                include_docs = true,
-                since = Since,
-                feed = "continuous",
-                timeout = infinity
-            },
-            {json_req, null},
-            Db
-        ),
-        EnumFun = fun
-        ({change, Change, _}, _) ->
-            case has_valid_rep_id(Change) of
-                true ->
-                    Msg = {rep_db_update, DbName, Change},
-                    ok = gen_server:call(Server, Msg, infinity);
-                false ->
-                    ok
-            end;
-        (_, _) ->
-            ok
-        end,
-        ChangesFeedFun(EnumFun)
-    end).
-
-
-has_valid_rep_id({Change}) ->
-    has_valid_rep_id(get_json_value(<<"id">>, Change));
-has_valid_rep_id(<<?DESIGN_DOC_PREFIX, _Rest/binary>>) ->
-    false;
-has_valid_rep_id(_Else) ->
-    true.
-
-db_update_notifier() ->
-    Server = self(),
-    IsReplicatorDbFun = is_replicator_db_fun(),
-    {ok, Notifier} = couch_db_update_notifier:start_link(fun
-        ({Event, ShardDbName})
-                when Event == created; Event == updated; Event == deleted ->
-            DbName = mem3:dbname(ShardDbName),
-            IsRepDb = IsReplicatorDbFun(DbName),
-            case Event of
-                created when IsRepDb ->
-                    ensure_rep_ddoc_exists(DbName);
-                updated when IsRepDb ->
-                    ensure_rep_ddoc_exists(DbName),
-                    Msg = {resume_scan, DbName},
-                    ok = gen_server:call(Server, Msg, infinity);
-                deleted when IsRepDb ->
-                    clean_up_replications(DbName);
-                _ ->
-                    ok
-            end;
-        (_Event) ->
-            ok
-        end
-    ),
-    Notifier.
-
-rescan(#state{scan_pid = nil} = State) ->
-    true = ets:delete_all_objects(?DB_TO_SEQ),
-    Server = self(),
-    NewScanPid = spawn_link(fun() -> scan_all_dbs(Server) end),
-    State#state{scan_pid = NewScanPid};
-rescan(#state{scan_pid = ScanPid} = State) ->
-    unlink(ScanPid),
-    exit(ScanPid, exit),
-    rescan(State#state{scan_pid = nil}).
-
-process_update(State, DbName, {Change}) ->
-    {RepProps} = JsonRepDoc = get_json_value(doc, Change),
-    DocId = get_json_value(<<"_id">>, RepProps),
-    case {is_owner(DbName, DocId), get_json_value(deleted, Change, false)} of
-    {false, _} ->
-        replication_complete(DbName, DocId),
-        State;
-    {true, true} ->
-        rep_doc_deleted(DbName, DocId),
-        State;
-    {true, false} ->
-        case get_json_value(<<"_replication_state">>, RepProps) of
-        undefined ->
-            maybe_start_replication(State, DbName, DocId, JsonRepDoc);
-        <<"triggered">> ->
-            maybe_start_replication(State, DbName, DocId, JsonRepDoc);
-        <<"completed">> ->
-            replication_complete(DbName, DocId),
-            State;
-        <<"error">> ->
-            case ets:lookup(?DOC_TO_REP, {DbName, DocId}) of
-            [] ->
-                maybe_start_replication(State, DbName, DocId, JsonRepDoc);
-            _ ->
-                State
-            end
-        end
-    end.
-
-
-is_owner(<<"shards/", _/binary>>=DbName, DocId) ->
-    mem3_util:owner(DbName, DocId);
-is_owner(_, _) ->
-    true.
-
-
-rep_db_update_error(Error, DbName, DocId) ->
-    case Error of
-    {bad_rep_doc, Reason} ->
-        ok;
-    _ ->
-        Reason = to_binary(Error)
-    end,
-    twig:log(error,"Replication manager, error processing document `~s`: ~s",
-        [DocId, Reason]),
-    update_rep_doc(DbName, DocId, [{<<"_replication_state">>, <<"error">>}]).
-
-
-rep_user_ctx({RepDoc}) ->
-    case get_json_value(<<"user_ctx">>, RepDoc) of
-    undefined ->
-        #user_ctx{};
-    {UserCtx} ->
-        #user_ctx{
-            name = get_json_value(<<"name">>, UserCtx, null),
-            roles = get_json_value(<<"roles">>, UserCtx, [])
-        }
-    end.
-
-
-maybe_start_replication(State, DbName, DocId, RepDoc) ->
-    #rep{id = {BaseId, _} = RepId} = Rep = parse_rep_doc(RepDoc),
-    case rep_state(RepId) of
-    nil ->
-        RepState = #rep_state{
-            dbname = DbName,
-            rep = Rep,
-            starting = true,
-            retries_left = State#state.max_retries,
-            max_retries = State#state.max_retries
-        },
-        true = ets:insert(?REP_TO_STATE, {RepId, RepState}),
-        true = ets:insert(?DOC_TO_REP, {{DbName, DocId}, RepId}),
-        twig:log(notice,"Attempting to start replication `~s` (document `~s`).",
-            [pp_rep_id(RepId), DocId]),
-        Pid = spawn_link(fun() -> start_replication(Rep, 0) end),
-        State#state{rep_start_pids = [Pid | State#state.rep_start_pids]};
-    #rep_state{rep = #rep{doc_id = DocId}} ->
-        State;
-    #rep_state{starting = false, dbname = DbName, rep = #rep{doc_id = OtherDocId}} ->
-        twig:log(notice, "The replication specified by the document `~s` was already"
-            " triggered by the document `~s`", [DocId, OtherDocId]),
-        maybe_tag_rep_doc(DbName, DocId, RepDoc, ?l2b(BaseId)),
-        State;
-    #rep_state{starting = true, dbname = DbName, rep = #rep{doc_id = OtherDocId}} ->
-        twig:log(notice, "The replication specified by the document `~s` is already"
-            " being triggered by the document `~s`", [DocId, OtherDocId]),
-        maybe_tag_rep_doc(DbName, DocId, RepDoc, ?l2b(BaseId)),
-        State
-    end.
-
-
-parse_rep_doc(RepDoc) ->
-    {ok, Rep} = try
-        couch_replicator_utils:parse_rep_doc(RepDoc, rep_user_ctx(RepDoc))
-    catch
-    throw:{error, Reason} ->
-        throw({bad_rep_doc, Reason});
-    Tag:Err ->
-        throw({bad_rep_doc, to_binary({Tag, Err})})
-    end,
-    Rep.
-
-
-maybe_tag_rep_doc(DbName, DocId, {RepProps}, RepId) ->
-    case get_json_value(<<"_replication_id">>, RepProps) of
-    RepId ->
-        ok;
-    _ ->
-        update_rep_doc(DbName, DocId, [{<<"_replication_id">>, RepId}])
-    end.
-
-%% Note: this differs markedly from mem3_rep_manager.
-start_replication(Rep, Wait) ->
-    ok = timer:sleep(Wait * 1000),
-    case (catch couch_replicator:async_replicate(Rep)) of
-    {ok, _} ->
-        ok;
-    Error ->
-        replication_error(Rep, Error)
-    end.
-
-replication_complete(DbName, DocId) ->
-    case ets:lookup(?DOC_TO_REP, {DbName, DocId}) of
-    [{{DbName, DocId}, {BaseId, Ext} = RepId}] ->
-        case rep_state(RepId) of
-        nil ->
-            % Prior to OTP R14B02, temporary child specs remain in
-            % the supervisor after a worker finishes - remove them.
-            % We want to be able to start the same replication but with
-            % eventually different values for parameters that don't
-            % contribute to its ID calculation.
-            case erlang:system_info(otp_release) < "R14B02" of
-            true ->
-                spawn(fun() ->
-                    _ = supervisor:delete_child(couch_replicator_job_sup, BaseId ++ Ext)
-                end);
-            false ->
-                ok
-            end;
-        #rep_state{} ->
-            ok
-        end,
-        true = ets:delete(?DOC_TO_REP, {DbName, DocId});
-    _ ->
-        ok
-    end.
-
-
-rep_doc_deleted(DbName, DocId) ->
-    case ets:lookup(?DOC_TO_REP, {DbName, DocId}) of
-    [{{DbName, DocId}, RepId}] ->
-        couch_replicator:cancel_replication(RepId),
-        true = ets:delete(?REP_TO_STATE, RepId),
-        true = ets:delete(?DOC_TO_REP, {DbName, DocId}),
-        twig:log(notice, "Stopped replication `~s` because replication document `~s`"
-            " was deleted", [pp_rep_id(RepId), DocId]);
-    [] ->
-        ok
-    end.
-
-
-replication_error(State, RepId, Error) ->
-    case rep_state(RepId) of
-    nil ->
-        State;
-    RepState ->
-        maybe_retry_replication(RepState, Error, State)
-    end.
-
-maybe_retry_replication(#rep_state{retries_left = 0} = RepState, Error, State) ->
-    #rep_state{
-        dbname = DbName,
-        rep = #rep{id = RepId, doc_id = DocId},
-        max_retries = MaxRetries
-    } = RepState,
-    couch_replicator:cancel_replication(RepId),
-    true = ets:delete(?REP_TO_STATE, RepId),
-    true = ets:delete(?DOC_TO_REP, {DbName, DocId}),
-    twig:log(error, "Error in replication `~s` (triggered by document `~s`): ~s"
-        "~nReached maximum retry attempts (~p).",
-        [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), MaxRetries]),
-    State;
-
-maybe_retry_replication(RepState, Error, State) ->
-    #rep_state{
-        rep = #rep{id = RepId, doc_id = DocId} = Rep
-    } = RepState,
-    #rep_state{wait = Wait} = NewRepState = state_after_error(RepState),
-    true = ets:insert(?REP_TO_STATE, {RepId, NewRepState}),
-    twig:log(error, "Error in replication `~s` (triggered by document `~s`): ~s"
-        "~nRestarting replication in ~p seconds.",
-        [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), Wait]),
-    Pid = spawn_link(fun() -> start_replication(Rep, Wait) end),
-    State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}.
-
-
-stop_all_replications() ->
-    twig:log(notice, "Stopping all ongoing replications because the replicator"
-        " database was deleted or changed", []),
-    ets:foldl(
-        fun({_, RepId}, _) ->
-            couch_replicator:cancel_replication(RepId)
-        end,
-        ok, ?DOC_TO_REP),
-    true = ets:delete_all_objects(?REP_TO_STATE),
-    true = ets:delete_all_objects(?DOC_TO_REP),
-    true = ets:delete_all_objects(?DB_TO_SEQ).
-
-clean_up_replications(DbName) ->
-    ets:foldl(
-        fun({{Name, DocId}, RepId}, _) when Name =:= DbName ->
-            couch_replicator:cancel_replication(RepId),
-            ets:delete(?DOC_TO_REP,{Name, DocId}),
-            ets:delete(?REP_TO_STATE, RepId);
-           ({_,_}, _) ->
-            ok
-        end,
-        ok, ?DOC_TO_REP),
-    ets:delete(?DB_TO_SEQ,DbName).
-
-
-update_rep_doc(RepDbName, RepDocId, KVs) when is_binary(RepDocId) ->
-    try
-        case open_rep_doc(RepDbName, RepDocId) of
-            {ok, LastRepDoc} ->
-                update_rep_doc(RepDbName, LastRepDoc, KVs);
-            _ ->
-                ok
-        end
-    catch
-        throw:conflict ->
-            Msg = "Conflict when updating replication document `~s`. Retrying.",
-            twig:log(error, Msg, [RepDocId]),
-            ok = timer:sleep(5),
-            update_rep_doc(RepDbName, RepDocId, KVs)
-    end;
-update_rep_doc(RepDbName, #doc{body = {RepDocBody}} = RepDoc, KVs) ->
-    NewRepDocBody = lists:foldl(
-        fun({K, undefined}, Body) ->
-                lists:keydelete(K, 1, Body);
-           ({<<"_replication_state">> = K, State} = KV, Body) ->
-                case get_json_value(K, Body) of
-                State ->
-                    Body;
-                _ ->
-                    Body1 = lists:keystore(K, 1, Body, KV),
-                    lists:keystore(
-                        <<"_replication_state_time">>, 1, Body1,
-                        {<<"_replication_state_time">>, timestamp()})
-                end;
-            ({K, _V} = KV, Body) ->
-                lists:keystore(K, 1, Body, KV)
-        end,
-        RepDocBody, KVs),
-    case NewRepDocBody of
-    RepDocBody ->
-        ok;
-    _ ->
-        % Might not succeed - when the replication doc is deleted right
-        % before this update (not an error, ignore).
-        save_rep_doc(RepDbName, RepDoc#doc{body = {NewRepDocBody}})
-    end.
-
-
-open_rep_doc(<<"shards/", _/binary>>=ShardDbName, DocId) ->
-    defer_call(fun() ->
-        fabric:open_doc(mem3:dbname(ShardDbName), DocId, [])
-    end);
-open_rep_doc(DbName, DocId) ->
-    {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
-    try
-        couch_db:open_doc(Db, DocId, [ejson_body])
-    after
-        couch_db:close(Db)
-    end.
-
-save_rep_doc(<<"shards/", _/binary>>=DbName, Doc) ->
-    defer_call(fun() ->
-        fabric:update_doc(DbName, Doc, [?CTX])
-    end);
-save_rep_doc(DbName, Doc) ->
-    {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
-    try
-        couch_db:update_doc(Db, Doc, [])
-    after
-        couch_db:close(Db)
-    end.
-
-defer_call(Fun) ->
-    {Pid, Ref} = erlang:spawn_monitor(fun() ->
-        try
-            exit({exit_ok, Fun()})
-        catch
-            Type:Reason ->
-                exit({exit_err, Type, Reason})
-        end
-    end),
-    receive
-        {'DOWN', Ref, process, Pid, {exit_ok, Resp}} ->
-            Resp;
-        {'DOWN', Ref, process, Pid, {exit_err, throw, Error}} ->
-            throw(Error);
-        {'DOWN', Ref, process, Pid, {exit_err, error, Error}} ->
-            erlang:error(Error);
-        {'DOWN', Ref, process, Pid, {exit_err, exit, Error}} ->
-            exit(Error)
-    end.
-
-
-% RFC 3339 timestamps.
-% Note: fractional seconds are omitted (RFC 3339 makes them optional).
-timestamp() ->
-    {{Year, Month, Day}, {Hour, Min, Sec}} = calendar:now_to_local_time(now()),
-    UTime = erlang:universaltime(),
-    LocalTime = calendar:universal_time_to_local_time(UTime),
-    DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) -
-        calendar:datetime_to_gregorian_seconds(UTime),
-    iolist_to_binary(
-        io_lib:format("~4..0w-~2..0w-~2..0wT~2..0w:~2..0w:~2..0w~s",
-            [Year, Month, Day, Hour, Min, Sec,
-                zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60)])).
-
-zone(Hr, Min) when Hr >= 0, Min >= 0 ->
-    io_lib:format("+~2..0w:~2..0w", [Hr, Min]);
-zone(Hr, Min) ->
-    io_lib:format("-~2..0w:~2..0w", [abs(Hr), abs(Min)]).
-
-
-ensure_rep_db_exists(DbName) ->
-    Db = case couch_db:open_int(DbName, [?CTX, sys_db, nologifmissing]) of
-        {ok, Db0} ->
-            Db0;
-        _Error ->
-            {ok, Db0} = couch_db:create(DbName, [?CTX, sys_db]),
-            Db0
-    end,
-    ensure_rep_ddoc_exists(DbName),
-    {ok, Db}.
-
-
-ensure_rep_ddoc_exists(RepDb) ->
-    DDocId = <<"_design/_replicator">>,
-    case open_rep_doc(RepDb, DDocId) of
-        {ok, _Doc} ->
-            ok;
-        _ ->
-            DDoc = couch_doc:from_json_obj({[
-                {<<"_id">>, DDocId},
-                {<<"language">>, <<"javascript">>},
-                {<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN}
-            ]}),
-            try
-                {ok, _} = save_rep_doc(RepDb, DDoc)
-            catch
-                throw:conflict ->
-                    % A conflict means the ddoc was created concurrently;
-                    % nothing useful to do other than not kill the process.
-                    ok
-            end
-    end.
-
-
-% pretty-print replication id
-pp_rep_id(#rep{id = RepId}) ->
-    pp_rep_id(RepId);
-pp_rep_id({Base, Extension}) ->
-    Base ++ Extension.
-
-
-rep_state(RepId) ->
-    case ets:lookup(?REP_TO_STATE, RepId) of
-    [{RepId, RepState}] ->
-        RepState;
-    [] ->
-        nil
-    end.
-
-
-error_reason({error, Reason}) ->
-    Reason;
-error_reason(Reason) ->
-    Reason.
-
-
-retries_value("infinity") ->
-    infinity;
-retries_value(Value) ->
-    list_to_integer(Value).
-
-
-state_after_error(#rep_state{retries_left = Left, wait = Wait} = State) ->
-    Wait2 = erlang:min(trunc(Wait * 2), ?MAX_WAIT),
-    case Left of
-    infinity ->
-        State#rep_state{wait = Wait2};
-    _ ->
-        State#rep_state{retries_left = Left - 1, wait = Wait2}
-    end.
-
-
-before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
-    Doc;
-before_doc_update(#doc{body = {Body}} = Doc, #db{user_ctx=UserCtx} = Db) ->
-    #user_ctx{roles = Roles, name = Name} = UserCtx,
-    case lists:member(<<"_replicator">>, Roles) of
-    true ->
-        Doc;
-    false ->
-        case couch_util:get_value(?OWNER, Body) of
-        undefined ->
-            Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
-        Name ->
-            Doc;
-        Other ->
-            case (catch couch_db:check_is_admin(Db)) of
-            ok when Other =:= null ->
-                Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
-            ok ->
-                Doc;
-            _ ->
-                throw({forbidden, <<"Can't update replication documents",
-                    " from other users.">>})
-            end
-        end
-    end.
-
-
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
-    Doc;
-after_doc_read(#doc{body = {Body}} = Doc, #db{user_ctx=UserCtx} = Db) ->
-    #user_ctx{name = Name} = UserCtx,
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        Doc;
-    _ ->
-        case couch_util:get_value(?OWNER, Body) of
-        Name ->
-            Doc;
-        _Other ->
-            Source = strip_credentials(couch_util:get_value(<<"source">>, Body)),
-            Target = strip_credentials(couch_util:get_value(<<"target">>, Body)),
-            NewBody0 = ?replace(Body, <<"source">>, Source),
-            NewBody = ?replace(NewBody0, <<"target">>, Target),
-            #doc{revs = {Pos, [_ | Revs]}} = Doc,
-            NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
-            NewRevId = couch_db:new_revid(NewDoc),
-            NewDoc#doc{revs = {Pos, [NewRevId | Revs]}}
-        end
-    end.
-
-strip_credentials(Url) when is_binary(Url) ->
-    re:replace(Url,
-        "http(s)?://(?:[^:]+):[^@]+@(.*)$",
-        "http\\1://\\2",
-        [{return, binary}]);
-strip_credentials({Props}) ->
-    {lists:keydelete(<<"oauth">>, 1, Props)}.
-
-scan_all_dbs(Server) when is_pid(Server) ->
-    {ok, Db} = mem3_util:ensure_exists(config:get("mem3", "shard_db", "dbs")),
-    ChangesFun = couch_changes:handle_changes(#changes_args{}, nil, Db),
-    IsReplicatorDbFun = is_replicator_db_fun(),
-    ChangesFun(fun({change, {Change}, _}, _) ->
-        DbName = get_json_value(<<"id">>, Change),
-        case DbName of <<"_design/", _/binary>> -> ok; _Else ->
-            case couch_replicator_utils:is_deleted(Change) of
-            true ->
-                ok;
-            false ->
-                case IsReplicatorDbFun(DbName) of
-                    true ->
-                        ensure_rep_ddoc_exists(DbName),
-                        gen_server:call(Server, {resume_scan, DbName});
-                    false ->
-                        ok
-                end
-            end
-        end;
-        (_, _) -> ok
-    end),
-    couch_db:close(Db).
-
-is_replicator_db_fun() ->
-    {ok, RegExp} = re:compile("^([a-z][a-z0-9\\_\\$()\\+\\-\\/]*/)?_replicator$"),
-    fun
-        (<<"shards/", _/binary>>=DbName) ->
-            match =:= re:run(mem3:dbname(DbName), RegExp, [{capture,none}]);
-        (DbName) ->
-            LocalRepDb = ?l2b(config:get("replicator", "db", "_replicator")),
-            DbName == LocalRepDb
-    end.
-
-get_json_value(Key, Props) ->
-    get_json_value(Key, Props, undefined).
-
-get_json_value(Key, Props, Default) when is_atom(Key) ->
-    Ref = make_ref(),
-    case couch_util:get_value(Key, Props, Ref) of
-        Ref ->
-            couch_util:get_value(?l2b(atom_to_list(Key)), Props, Default);
-        Else ->
-            Else
-    end;
-get_json_value(Key, Props, Default) when is_binary(Key) ->
-    Ref = make_ref(),
-    case couch_util:get_value(Key, Props, Ref) of
-        Ref ->
-            couch_util:get_value(list_to_atom(?b2l(Key)), Props, Default);
-        Else ->
-            Else
-    end.
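
For reference, defer_call/1 above is a small spawn_monitor trampoline: the
fun runs in a throwaway process and its return value, throw, error, or exit
is re-raised in the caller, so a crashing fabric call cannot take the
manager down through a link. A minimal usage sketch (the fun body here is
hypothetical):

    Result = defer_call(fun() ->
        %% anything that may throw or exit, e.g. a fabric request
        {ok, <<"made it">>}
    end),
    %% Result =:= {ok, <<"made it">>}; a throw inside the fun is
    %% re-thrown in the calling process instead.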

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_notifier.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_notifier.erl b/src/couch_replicator/src/couch_replicator_notifier.erl
deleted file mode 100644
index 39fd68b..0000000
--- a/src/couch_replicator/src/couch_replicator_notifier.erl
+++ /dev/null
@@ -1,57 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_notifier).
-
--behaviour(gen_event).
-
-% public API
--export([start_link/1, stop/1, notify/1]).
-
-% gen_event callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_event/2, handle_call/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link(FunAcc) ->
-    couch_event_sup:start_link(couch_replication,
-        {couch_replicator_notifier, make_ref()}, FunAcc).
-
-notify(Event) ->
-    gen_event:notify(couch_replication, Event).
-
-stop(Pid) ->
-    couch_event_sup:stop(Pid).
-
-
-init(FunAcc) ->
-    {ok, FunAcc}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_event(Event, Fun) when is_function(Fun, 1) ->
-    Fun(Event),
-    {ok, Fun};
-handle_event(Event, {Fun, Acc}) when is_function(Fun, 2) ->
-    Acc2 = Fun(Event, Acc),
-    {ok, {Fun, Acc2}}.
-
-handle_call(_Msg, State) ->
-    {reply, ok, State}.
-
-handle_info(_Msg, State) ->
-    {ok, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
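
The handler state above is either a 1-arity fun or a {Fun, Acc} pair, so
callers can choose between fire-and-forget callbacks and folding an
accumulator over events. A hedged usage sketch (the event tuple shape is
illustrative only):

    {ok, Pid} = couch_replicator_notifier:start_link(
        fun({finished, RepId, _Result}) ->
                io:format("replication ~p finished~n", [RepId]);
           (_Other) ->
                ok
        end),
    couch_replicator_notifier:notify({finished, {"base", "+ext"}, ok}),
    couch_replicator_notifier:stop(Pid).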

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_sup.erl b/src/couch_replicator/src/couch_replicator_sup.erl
deleted file mode 100644
index bad9747..0000000
--- a/src/couch_replicator/src/couch_replicator_sup.erl
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
-    Children = [
-        {couch_replication_event,
-            {gen_event, start_link, [{local, couch_replication}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replicator_manager,
-            {couch_replicator_manager, start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_replicator_manager]},
-        {couch_replicator_job_sup,
-            {couch_replicator_job_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_replicator_job_sup]}
-    ],
-    {ok, {{one_for_one,10,3600}, Children}}.
-
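
The {one_for_one, 10, 3600} strategy above restarts each crashed child on
its own and takes the whole supervisor down if more than 10 restarts happen
within 3600 seconds. For comparison only, the same policy in the map syntax
of newer OTP releases:

    #{strategy => one_for_one, intensity => 10, period => 3600}

The event-manager child declares dynamic for its modules entry because
gen_event handlers are installed at runtime and cannot be listed statically.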

http://git-wip-us.apache.org/repos/asf/couchdb/blob/550e8202/src/couch_replicator/src/couch_replicator_utils.erl
----------------------------------------------------------------------
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
deleted file mode 100644
index 9b9dd74..0000000
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ /dev/null
@@ -1,432 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_utils).
-
--export([parse_rep_doc/2]).
--export([open_db/1, close_db/1]).
--export([start_db_compaction_notifier/2, stop_db_compaction_notifier/1]).
--export([replication_id/2]).
--export([sum_stats/2, is_deleted/1]).
--export([mp_parse_doc/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
--include("couch_replicator_api_wrap.hrl").
--include("couch_replicator.hrl").
-
--import(couch_util, [
-    get_value/2,
-    get_value/3
-]).
-
-
-parse_rep_doc({Props}, UserCtx) ->
-    ProxyParams = parse_proxy_params(get_value(<<"proxy">>, Props, <<>>)),
-    Options = make_options(Props),
-    case get_value(cancel, Options, false) andalso
-        (get_value(id, Options, nil) =/= nil) of
-    true ->
-        {ok, #rep{options = Options, user_ctx = UserCtx}};
-    false ->
-        Source = parse_rep_db(get_value(<<"source">>, Props), ProxyParams, Options),
-        Target = parse_rep_db(get_value(<<"target">>, Props), ProxyParams, Options),
-        Rep = #rep{
-            source = Source,
-            target = Target,
-            options = Options,
-            user_ctx = UserCtx,
-            doc_id = get_value(<<"_id">>, Props, null)
-        },
-        {ok, Rep#rep{id = replication_id(Rep)}}
-    end.
-
-
-replication_id(#rep{options = Options} = Rep) ->
-    BaseId = replication_id(Rep, ?REP_ID_VERSION),
-    {BaseId, maybe_append_options([continuous, create_target], Options)}.
-
-
-% Versioned clauses for generating replication IDs.
-% If a change is made to how replications are identified,
-% please add a new clause and increase ?REP_ID_VERSION.
-
-replication_id(#rep{user_ctx = UserCtx} = Rep, 3) ->
-    UUID = couch_server:get_uuid(),
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
-    maybe_append_filters([UUID, Src, Tgt], Rep);
-
-replication_id(#rep{user_ctx = UserCtx} = Rep, 2) ->
-    {ok, HostName} = inet:gethostname(),
-    Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
-    P when is_number(P) ->
-        P;
-    _ ->
-        % On restart we might be called before the couch_httpd process is
-        % started.
-        % TODO: we might be under an SSL socket server only, or both under
-        % SSL and a non-SSL socket.
-        % ... mochiweb_socket_server:get(https, port)
-        list_to_integer(config:get("httpd", "port", "5984"))
-    end,
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
-    maybe_append_filters([HostName, Port, Src, Tgt], Rep);
-
-replication_id(#rep{user_ctx = UserCtx} = Rep, 1) ->
-    {ok, HostName} = inet:gethostname(),
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
-    maybe_append_filters([HostName, Src, Tgt], Rep).
-
-
-maybe_append_filters(Base,
-        #rep{source = Source, user_ctx = UserCtx, options = Options}) ->
-    Base2 = Base ++
-        case get_value(filter, Options) of
-        undefined ->
-            case get_value(doc_ids, Options) of
-            undefined ->
-                [];
-            DocIds ->
-                [DocIds]
-            end;
-        Filter ->
-            [filter_code(Filter, Source, UserCtx),
-                get_value(query_params, Options, {[]})]
-        end,
-    couch_util:to_hex(couch_util:md5(term_to_binary(Base2))).
-
-
-filter_code(Filter, Source, UserCtx) ->
-    {DDocName, FilterName} =
-    case re:run(Filter, "(.*?)/(.*)", [{capture, [1, 2], binary}]) of
-    {match, [DDocName0, FilterName0]} ->
-        {DDocName0, FilterName0};
-    _ ->
-        throw({error, <<"Invalid filter. Must match `ddocname/filtername`.">>})
-    end,
-    Db = case (catch couch_replicator_api_wrap:db_open(Source, [{user_ctx, UserCtx}])) of
-    {ok, Db0} ->
-        Db0;
-    DbError ->
-        DbErrorMsg = io_lib:format("Could not open source database `~s`: ~s",
-           [couch_replicator_api_wrap:db_uri(Source), couch_util:to_binary(DbError)]),
-        throw({error, iolist_to_binary(DbErrorMsg)})
-    end,
-    try
-        Body = case (catch couch_replicator_api_wrap:open_doc(
-            Db, <<"_design/", DDocName/binary>>, [ejson_body])) of
-        {ok, #doc{body = Body0}} ->
-            Body0;
-        DocError ->
-            DocErrorMsg = io_lib:format(
-                "Couldn't open document `_design/~s` from source "
-                "database `~s`: ~s", [DDocName, couch_replicator_api_wrap:db_uri(Source),
-                    couch_util:to_binary(DocError)]),
-            throw({error, iolist_to_binary(DocErrorMsg)})
-        end,
-        Code = couch_util:get_nested_json_value(
-            Body, [<<"filters">>, FilterName]),
-        re:replace(Code, [$^, "\s*(.*?)\s*", $$], "\\1", [{return, binary}])
-    after
-        couch_replicator_api_wrap:db_close(Db)
-    end.
-
-
-maybe_append_options(Options, RepOptions) ->
-    lists:foldl(fun(Option, Acc) ->
-        Acc ++
-        case get_value(Option, RepOptions, false) of
-        true ->
-            "+" ++ atom_to_list(Option);
-        false ->
-            ""
-        end
-    end, [], Options).
-
-
-get_rep_endpoint(_UserCtx, #httpdb{url=Url, headers=Headers, oauth=OAuth}) ->
-    DefaultHeaders = (#httpdb{})#httpdb.headers,
-    case OAuth of
-    nil ->
-        {remote, Url, Headers -- DefaultHeaders};
-    #oauth{} ->
-        {remote, Url, Headers -- DefaultHeaders, OAuth}
-    end;
-get_rep_endpoint(UserCtx, <<DbName/binary>>) ->
-    {local, DbName, UserCtx}.
-
-
-parse_rep_db({Props}, ProxyParams, Options) ->
-    Url = maybe_add_trailing_slash(get_value(<<"url">>, Props)),
-    {AuthProps} = get_value(<<"auth">>, Props, {[]}),
-    {BinHeaders} = get_value(<<"headers">>, Props, {[]}),
-    Headers = lists:ukeysort(1, [{?b2l(K), ?b2l(V)} || {K, V} <- BinHeaders]),
-    DefaultHeaders = (#httpdb{})#httpdb.headers,
-    OAuth = case get_value(<<"oauth">>, AuthProps) of
-    undefined ->
-        nil;
-    {OauthProps} ->
-        #oauth{
-            consumer_key = ?b2l(get_value(<<"consumer_key">>, OauthProps)),
-            token = ?b2l(get_value(<<"token">>, OauthProps)),
-            token_secret = ?b2l(get_value(<<"token_secret">>, OauthProps)),
-            consumer_secret = ?b2l(get_value(<<"consumer_secret">>, OauthProps)),
-            signature_method =
-                case get_value(<<"signature_method">>, OauthProps) of
-                undefined ->        hmac_sha1;
-                <<"PLAINTEXT">> ->  plaintext;
-                <<"HMAC-SHA1">> ->  hmac_sha1;
-                <<"RSA-SHA1">> ->   rsa_sha1
-                end
-        }
-    end,
-    #httpdb{
-        url = Url,
-        oauth = OAuth,
-        headers = lists:ukeymerge(1, Headers, DefaultHeaders),
-        ibrowse_options = lists:keysort(1,
-            [{socket_options, get_value(socket_options, Options)} |
-                ProxyParams ++ ssl_params(Url)]),
-        timeout = get_value(connection_timeout, Options),
-        http_connections = get_value(http_connections, Options),
-        retries = get_value(retries, Options)
-    };
-parse_rep_db(<<"http://", _/binary>> = Url, ProxyParams, Options) ->
-    parse_rep_db({[{<<"url">>, Url}]}, ProxyParams, Options);
-parse_rep_db(<<"https://", _/binary>> = Url, ProxyParams, Options) ->
-    parse_rep_db({[{<<"url">>, Url}]}, ProxyParams, Options);
-parse_rep_db(<<DbName/binary>>, _ProxyParams, _Options) ->
-    DbName.
-
-
-maybe_add_trailing_slash(Url) when is_binary(Url) ->
-    maybe_add_trailing_slash(?b2l(Url));
-maybe_add_trailing_slash(Url) ->
-    case lists:last(Url) of
-    $/ ->
-        Url;
-    _ ->
-        Url ++ "/"
-    end.
-
-
-make_options(Props) ->
-    Options = lists:ukeysort(1, convert_options(Props)),
-    DefWorkers = config:get("replicator", "worker_processes", "4"),
-    DefBatchSize = config:get("replicator", "worker_batch_size", "500"),
-    DefConns = config:get("replicator", "http_connections", "20"),
-    DefTimeout = config:get("replicator", "connection_timeout", "30000"),
-    DefRetries = config:get("replicator", "retries_per_request", "10"),
-    UseCheckpoints = config:get("replicator", "use_checkpoints", "true"),
-    {ok, DefSocketOptions} = couch_util:parse_term(
-        config:get("replicator", "socket_options",
-            "[{keepalive, true}, {nodelay, false}]")),
-    lists:ukeymerge(1, Options, lists:keysort(1, [
-        {connection_timeout, list_to_integer(DefTimeout)},
-        {retries, list_to_integer(DefRetries)},
-        {http_connections, list_to_integer(DefConns)},
-        {socket_options, DefSocketOptions},
-        {worker_batch_size, list_to_integer(DefBatchSize)},
-        {worker_processes, list_to_integer(DefWorkers)},
-        {use_checkpoints, list_to_existing_atom(UseCheckpoints)}
-    ])).
-
-
-convert_options([])->
-    [];
-convert_options([{<<"cancel">>, V} | R]) ->
-    [{cancel, V} | convert_options(R)];
-convert_options([{IdOpt, V} | R]) when IdOpt =:= <<"_local_id">>;
-        IdOpt =:= <<"replication_id">>; IdOpt =:= <<"id">> ->
-    Id = lists:splitwith(fun(X) -> X =/= $+ end, ?b2l(V)),
-    [{id, Id} | convert_options(R)];
-convert_options([{<<"create_target">>, V} | R]) ->
-    [{create_target, V} | convert_options(R)];
-convert_options([{<<"continuous">>, V} | R]) ->
-    [{continuous, V} | convert_options(R)];
-convert_options([{<<"filter">>, V} | R]) ->
-    [{filter, V} | convert_options(R)];
-convert_options([{<<"query_params">>, V} | R]) ->
-    [{query_params, V} | convert_options(R)];
-convert_options([{<<"doc_ids">>, null} | R]) ->
-    convert_options(R);
-convert_options([{<<"doc_ids">>, V} | R]) ->
-    % Ensure same behaviour as old replicator: accept a list of percent
-    % encoded doc IDs.
-    DocIds = [?l2b(couch_httpd:unquote(Id)) || Id <- V],
-    [{doc_ids, DocIds} | convert_options(R)];
-convert_options([{<<"worker_processes">>, V} | R]) ->
-    [{worker_processes, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"worker_batch_size">>, V} | R]) ->
-    [{worker_batch_size, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"http_connections">>, V} | R]) ->
-    [{http_connections, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"connection_timeout">>, V} | R]) ->
-    [{connection_timeout, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"retries_per_request">>, V} | R]) ->
-    [{retries, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([{<<"socket_options">>, V} | R]) ->
-    {ok, SocketOptions} = couch_util:parse_term(V),
-    [{socket_options, SocketOptions} | convert_options(R)];
-convert_options([{<<"since_seq">>, V} | R]) ->
-    [{since_seq, V} | convert_options(R)];
-convert_options([{<<"use_checkpoints">>, V} | R]) ->
-    [{use_checkpoints, V} | convert_options(R)];
-convert_options([_ | R]) -> % skip unknown option
-    convert_options(R).
-
-
-parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) ->
-    parse_proxy_params(?b2l(ProxyUrl));
-parse_proxy_params([]) ->
-    [];
-parse_proxy_params(ProxyUrl) ->
-    #url{
-        host = Host,
-        port = Port,
-        username = User,
-        password = Passwd
-    } = ibrowse_lib:parse_url(ProxyUrl),
-    [{proxy_host, Host}, {proxy_port, Port}] ++
-        case is_list(User) andalso is_list(Passwd) of
-        false ->
-            [];
-        true ->
-            [{proxy_user, User}, {proxy_password, Passwd}]
-        end.
-
-
-ssl_params(Url) ->
-    case ibrowse_lib:parse_url(Url) of
-    #url{protocol = https} ->
-        Depth = list_to_integer(
-            config:get("replicator", "ssl_certificate_max_depth", "3")
-        ),
-        VerifyCerts = config:get("replicator", "verify_ssl_certificates"),
-        CertFile = config:get("replicator", "cert_file", nil),
-        KeyFile = config:get("replicator", "key_file", nil),
-        Password = config:get("replicator", "password", nil),
-        SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts =:= "true")],
-        SslOpts1 = case CertFile /= nil andalso KeyFile /= nil of
-            true ->
-                case Password of
-                    nil ->
-                        [{certfile, CertFile}, {keyfile, KeyFile}] ++ SslOpts;
-                    _ ->
-                        [{certfile, CertFile}, {keyfile, KeyFile},
-                            {password, Password}] ++ SslOpts
-                end;
-            false -> SslOpts
-        end,
-        [{is_ssl, true}, {ssl_options, SslOpts1}];
-    #url{protocol = http} ->
-        []
-    end.
-
-ssl_verify_options(Value) ->
-    ssl_verify_options(Value, erlang:system_info(otp_release)).
-
-ssl_verify_options(true, OTPVersion) when OTPVersion >= "R14" ->
-    CAFile = config:get("replicator", "ssl_trusted_certificates_file"),
-    [{verify, verify_peer}, {cacertfile, CAFile}];
-ssl_verify_options(false, OTPVersion) when OTPVersion >= "R14" ->
-    [{verify, verify_none}];
-ssl_verify_options(true, _OTPVersion) ->
-    CAFile = config:get("replicator", "ssl_trusted_certificates_file"),
-    [{verify, 2}, {cacertfile, CAFile}];
-ssl_verify_options(false, _OTPVersion) ->
-    [{verify, 0}].
-
-
-%% The new #db record no longer has an Options field; this wrapper eases
-%% the dbcore migration.
-open_db(#db{name = Name, user_ctx = UserCtx}) ->
-    {ok, Db} = couch_db:open(Name, [{user_ctx, UserCtx}]),
-    Db;
-open_db(HttpDb) ->
-    HttpDb.
-
-
-close_db(#db{} = Db) ->
-    couch_db:close(Db);
-close_db(_HttpDb) ->
-    ok.
-
-
-start_db_compaction_notifier(#db{name = DbName}, Server) ->
-    {ok, Notifier} = couch_db_update_notifier:start_link(
-        fun({compacted, DbName1}) when DbName1 =:= DbName ->
-                ok = gen_server:cast(Server, {db_compacted, DbName});
-            (_) ->
-                ok
-        end),
-    Notifier;
-start_db_compaction_notifier(_, _) ->
-    nil.
-
-
-stop_db_compaction_notifier(nil) ->
-    ok;
-stop_db_compaction_notifier(Notifier) ->
-    couch_db_update_notifier:stop(Notifier).
-
-
-sum_stats(#rep_stats{} = S1, #rep_stats{} = S2) ->
-    #rep_stats{
-        missing_checked =
-            S1#rep_stats.missing_checked + S2#rep_stats.missing_checked,
-        missing_found = S1#rep_stats.missing_found + S2#rep_stats.missing_found,
-        docs_read = S1#rep_stats.docs_read + S2#rep_stats.docs_read,
-        docs_written = S1#rep_stats.docs_written + S2#rep_stats.docs_written,
-        doc_write_failures =
-            S1#rep_stats.doc_write_failures + S2#rep_stats.doc_write_failures
-    }.
-
-mp_parse_doc({headers, H}, []) ->
-    case couch_util:get_value("content-type", H) of
-    {"application/json", _} ->
-        fun (Next) ->
-            mp_parse_doc(Next, [])
-        end
-    end;
-mp_parse_doc({body, Bytes}, AccBytes) ->
-    fun (Next) ->
-        mp_parse_doc(Next, [Bytes | AccBytes])
-    end;
-mp_parse_doc(body_end, AccBytes) ->
-    receive {get_doc_bytes, Ref, From} ->
-        From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
-    end,
-    fun mp_parse_atts/1.
-
-mp_parse_atts(eof) ->
-    ok;
-mp_parse_atts({headers, _H}) ->
-    fun mp_parse_atts/1;
-mp_parse_atts({body, Bytes}) ->
-    receive {get_bytes, Ref, From} ->
-        From ! {bytes, Ref, Bytes}
-    end,
-    fun mp_parse_atts/1;
-mp_parse_atts(body_end) ->
-    fun mp_parse_atts/1.
-
-is_deleted(Change) ->
-    case couch_util:get_value(<<"deleted">>, Change) of
-    undefined ->
-        % keep backwards compatibility for a while
-        couch_util:get_value(deleted, Change, false);
-    Else ->
-        Else
-    end.


[13/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/spawnkillable/couchspawnkillable_win.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/spawnkillable/couchspawnkillable_win.c b/src/couch/priv/spawnkillable/couchspawnkillable_win.c
deleted file mode 100644
index 0678231..0000000
--- a/src/couch/priv/spawnkillable/couchspawnkillable_win.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License.  You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Do what the 2 lines of shell script in couchspawnkillable do...
-// * Create a new suspended process with the same (duplicated) standard 
-//   handles as us.
-// * Write a line to stdout, consisting of the path to ourselves, plus
-//   '--kill {pid}' where {pid} is the PID of the newly created process.
-// * Un-suspend the new process.
-// * Wait for the process to terminate.
-// * Terminate with the child's exit-code.
-
-// Later, couch will call us with --kill and the PID, so we dutifully
-// terminate the specified PID.
-
-#include <stdlib.h>
-#include "windows.h"
-
-char *get_child_cmdline(int argc, char **argv)
-{
-    // make a new command-line, but skipping me.
-    // XXX - todo - spaces etc in args???
-    int i;
-    char *p, *cmdline;
-    int nchars = 0;
-    int nthis = 1;
-    for (i=1;i<argc;i++)
-        nchars += strlen(argv[i])+1;
-    cmdline = p = malloc(nchars+1);
-    if (!cmdline)
-        return NULL;
-    for (i=1;i<argc;i++) {
-        nthis = strlen(argv[i]);
-        strncpy(p, argv[i], nthis);
-        p[nthis] = ' ';
-        p += nthis+1;
-    }
-    // Replace the last space we added above with a '\0'
-    cmdline[nchars-1] = '\0';
-    return cmdline;
-}
-
-// create the child process, returning 0, or the exit-code we will
-// terminate with.
-int create_child(int argc, char **argv, PROCESS_INFORMATION *pi)
-{
-    DWORD dwcreate;
-    STARTUPINFO si;
-    char *cmdline;
-    if (argc < 2)
-        return 1;
-    cmdline = get_child_cmdline(argc, argv);
-    if (!cmdline)
-        return 2;
-
-    memset(&si, 0, sizeof(si));
-    si.cb = sizeof(si);
-    // depending on how *our* parent is started, we may or may not have
-    // a valid stderr stream - so although we try and duplicate it, only
-    // failing to duplicate stdin and stdout are considered fatal.
-    if (!DuplicateHandle(GetCurrentProcess(),
-                       GetStdHandle(STD_INPUT_HANDLE),
-                       GetCurrentProcess(),
-                       &si.hStdInput,
-                       0,
-                       TRUE, // inheritable
-                       DUPLICATE_SAME_ACCESS) ||
-       !DuplicateHandle(GetCurrentProcess(),
-                       GetStdHandle(STD_OUTPUT_HANDLE),
-                       GetCurrentProcess(),
-                       &si.hStdOutput,
-                       0,
-                       TRUE, // inheritable
-                       DUPLICATE_SAME_ACCESS)) {
-        return 3;
-    }
-    DuplicateHandle(GetCurrentProcess(),
-                   GetStdHandle(STD_ERROR_HANDLE),
-                   GetCurrentProcess(),
-                   &si.hStdError,
-                   0,
-                   TRUE, // inheritable
-                   DUPLICATE_SAME_ACCESS);
-
-    si.dwFlags = STARTF_USESTDHANDLES;
-    dwcreate = CREATE_SUSPENDED;
-    if (!CreateProcess( NULL, cmdline,
-                        NULL,
-                        NULL,
-                        TRUE, // inherit handles
-                        dwcreate,
-                        NULL, // environ
-                        NULL, // cwd
-                        &si,
-                        pi))
-        return 4;
-    return 0;
-}
-
-// and here we go...
-int main(int argc, char **argv)
-{
-    char out_buf[1024];
-    int rc;
-    DWORD cbwritten;
-    DWORD exitcode;
-    PROCESS_INFORMATION pi;
-    if (argc==3 && strcmp(argv[1], "--kill")==0) {
-        HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, atoi(argv[2]));
-        if (!h)
-            return 1;
-        if (!TerminateProcess(h, 0))
-            return 2;
-        CloseHandle(h);
-        return 0;
-    }
-    // spawn the new suspended process
-    rc = create_child(argc, argv, &pi);
-    if (rc)
-        return rc;
-    // Write the 'terminate' command, which includes this PID, back to couch.
-    // *sob* - what about spaces etc?
-    sprintf_s(out_buf, sizeof(out_buf), "%s --kill %d\n", 
-              argv[0], pi.dwProcessId);
-    WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), out_buf, strlen(out_buf), 
-              &cbwritten, NULL);
-    // Let the child process go...
-    ResumeThread(pi.hThread);
-    // Wait for the process to terminate so we can reflect the exit code
-    // back to couch.
-    WaitForSingleObject(pi.hProcess, INFINITE);
-    if (!GetExitCodeProcess(pi.hProcess, &exitcode))
-        return 6;
-    return exitcode;
-}
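
From the Erlang side the handshake above is consumed roughly like this: open
the wrapper through a port, read the first line it prints (the ready-made
kill command), and run that command later if the child must die. A hedged
sketch with illustrative names, not the actual couch_os_process code:

    Port = open_port({spawn, "couchspawnkillable ./myprog arg"},
                     [{line, 1024}]),
    KillCmd = receive
        {Port, {data, {eol, Line}}} -> Line
    end,
    %% ... later, when the child must be terminated:
    os:cmd(KillCmd).   % runs "couchspawnkillable --kill <pid>"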

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/stat_descriptions.cfg
----------------------------------------------------------------------
diff --git a/src/couch/priv/stat_descriptions.cfg b/src/couch/priv/stat_descriptions.cfg
deleted file mode 100644
index b80d768..0000000
--- a/src/couch/priv/stat_descriptions.cfg
+++ /dev/null
@@ -1,50 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{couchdb, database_writes, "number of times a database was changed"}.
-{couchdb, database_reads, "number of times a document was read from a database"}.
-{couchdb, open_databases, "number of open databases"}.
-{couchdb, open_os_files, "number of file descriptors CouchDB has open"}.
-{couchdb, request_time, "length of a request inside CouchDB without MochiWeb"}.
-{couchdb, auth_cache_hits, "number of authentication cache hits"}.
-{couchdb, auth_cache_misses, "number of authentication cache misses"}.
-
-{httpd, bulk_requests, "number of bulk requests"}.
-{httpd, requests, "number of HTTP requests"}.
-{httpd, temporary_view_reads, "number of temporary view reads"}.
-{httpd, view_reads, "number of view reads"}.
-{httpd, clients_requesting_changes, "number of clients for continuous _changes"}.
-
-{httpd_request_methods, 'COPY', "number of HTTP COPY requests"}.
-{httpd_request_methods, 'DELETE', "number of HTTP DELETE requests"}.
-{httpd_request_methods, 'GET', "number of HTTP GET requests"}.
-{httpd_request_methods, 'HEAD', "number of HTTP HEAD requests"}.
-{httpd_request_methods, 'POST', "number of HTTP POST requests"}.
-{httpd_request_methods, 'PUT', "number of HTTP PUT requests"}.
-
-{httpd_status_codes, '200', "number of HTTP 200 OK responses"}.
-{httpd_status_codes, '201', "number of HTTP 201 Created responses"}.
-{httpd_status_codes, '202', "number of HTTP 202 Accepted responses"}.
-{httpd_status_codes, '301', "number of HTTP 301 Moved Permanently responses"}.
-{httpd_status_codes, '304', "number of HTTP 304 Not Modified responses"}.
-{httpd_status_codes, '400', "number of HTTP 400 Bad Request responses"}.
-{httpd_status_codes, '401', "number of HTTP 401 Unauthorized responses"}.
-{httpd_status_codes, '403', "number of HTTP 403 Forbidden responses"}.
-{httpd_status_codes, '404', "number of HTTP 404 Not Found responses"}.
-{httpd_status_codes, '405', "number of HTTP 405 Method Not Allowed responses"}.
-{httpd_status_codes, '409', "number of HTTP 409 Conflict responses"}.
-{httpd_status_codes, '412', "number of HTTP 412 Precondition Failed responses"}.
-{httpd_status_codes, '500', "number of HTTP 500 Internal Server Error responses"}.
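
Because the file above is plain Erlang terms, it can be read back with
file:consult/1; the stats machinery loads it roughly this way at startup
(a sketch, not the exact couch_stats_aggregator code):

    {ok, Descs} = file:consult("priv/stat_descriptions.cfg"),
    [Desc] = [D || {couchdb, database_writes, D} <- Descs],
    %% the counters themselves are bumped at the call sites, e.g.:
    couch_stats_collector:increment({couchdb, database_writes}).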

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/rebar.config.script
----------------------------------------------------------------------
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
deleted file mode 100644
index 9053485..0000000
--- a/src/couch/rebar.config.script
+++ /dev/null
@@ -1,70 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-CouchJSName = case os:type() of
-    {win32, _} ->
-        "couchjs.exe";
-    _ ->
-        "couchjs"
-end,
-CouchJSPath = filename:join(["priv", CouchJSName]),
-Version = string:strip(os:cmd("git describe --always"), right, $\n),
-
-
-ConfigH = [
-    {"SM185", ""},
-    {"HAVE_JS_GET_STRING_CHARS_AND_LENGTH", "1"},
-    {"JSSCRIPT_TYPE", "JSObject*"},
-    {"COUCHJS_NAME", "\"" ++ CouchJSName++ "\""},
-    {"PACKAGE", "\"apache-couchdb\""},
-    {"PACKAGE_BUGREPORT", "\"https://issues.apache.org/jira/browse/COUCHDB\""},
-    {"PACKAGE_NAME", "\"Apache CouchDB\""},
-    {"PACKAGE_STRING", "\"Apache CouchDB " ++ Version ++ "\""},
-    {"PACKAGE_VERSION", "\"" ++ Version ++ "\""}
-],
-
-ConfigSrc = [["#define ", K, " ", V, $\n] || {K, V} <- ConfigH],
-ok = file:write_file("priv/couch_js/config.h", ConfigSrc),
-
-JS_LDFLAGS = "-lmozjs185 -DWITHOUTCURL",
-CouchJSSrc = ["priv/couch_js/{help,http,main,utf8,util}.c"],
-
-BaseSpecs = [
-        %% couchjs
-        {"darwin", CouchJSPath, CouchJSSrc, [{env, [{"CFLAGS", "-DXP_UNIX -I/usr/local/include/js"}, {"LDFLAGS", JS_LDFLAGS}]}]},
-        {"linux",  CouchJSPath, CouchJSSrc, [{env, [{"CFLAGS", "-DXP_UNIX -I/usr/include/js"}, {"LDFLAGS", JS_LDFLAGS ++ " -lm"}]}]},
-        {"unix",   CouchJSPath, CouchJSSrc, [{env, [{"CFLAGS", "-DXP_UNIX -I/usr/local/include/js}"}, {"LDFLAGS", JS_LDFLAGS ++ " -lm"}]}]},
-        {"win32",  CouchJSPath, CouchJSSrc, [{env, [{"CFLAGS", "-DXP_WIN -I/usr/include/js"}, {"LDFLAGS", JS_LDFLAGS}]}]},
-        % ICU
-        {"", "priv/couch_icu_driver.so", ["priv/icu_driver/*.c"], [{env, [
-            {"DRV_CFLAGS",  "$DRV_CFLAGS -DPIC -O2 -fno-common"},
-            {"DRV_LDFLAGS", "$DRV_LDFLAGS -lm -licuuc -licudata -licui18n -lpthread"}]}]},
-        % ejson_compare
-        {"priv/couch_ejson_compare.so", ["priv/couch_ejson_compare/*.c"]}
-],
-
-SpawnSpec = [
-    {"priv/couchspawnkillable", ["priv/spawnkillable/*.c"]}
-],
-
-PortSpecs = case os:type() of
-    {win32, _} ->
-        BaseSpecs ++ SpawnSpec;
-    _ ->
-        {ok, _} = file:copy("priv/spawnkillable/couchspawnkillable.sh",
-                            "priv/couchspawnkillable"),
-        os:cmd("chmod +x priv/couchspawnkillable"),
-        BaseSpecs
-end,
-
-
-[{port_specs, PortSpecs}].
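
rebar evaluates a *.config.script file with file:script/2, binding the plain
rebar.config terms to CONFIG; whatever the last expression returns becomes
the effective config. A sketch of evaluating it by hand (the empty CONFIG
binding is an assumption):

    Bindings = erl_eval:add_binding('CONFIG', [], erl_eval:new_bindings()),
    {ok, Config} = file:script("src/couch/rebar.config.script", Bindings),
    proplists:get_value(port_specs, Config).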

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch.app.src
----------------------------------------------------------------------
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
deleted file mode 100644
index bb1e527..0000000
--- a/src/couch/src/couch.app.src
+++ /dev/null
@@ -1,22 +0,0 @@
-{application, couch, [
-    {description, "Apache CouchDB"},
-    {vsn, git},
-    {registered, [
-        couch_db_update,
-        couch_db_update_notifier_sup,
-        couch_external_manager,
-        couch_httpd,
-        couch_log,
-        couch_primary_services,
-        couch_proc_manager,
-        couch_secondary_services,
-        couch_server,
-        couch_sup,
-        couch_stats_aggregator,
-        couch_stats_collector,
-        couch_task_status
-    ]},
-    {mod, {couch_app, []}},
-    {applications, [kernel, stdlib, crypto, sasl, inets, oauth, ibrowse,
-        mochiweb, ssl, twig]}
-]}.
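
The {vsn, git} placeholder above is substituted by rebar at build time with
the output of git describe. Once loaded, the resource data is queryable; a
small sketch:

    application:load(couch),   %% ok | {error, {already_loaded, couch}}
    {ok, Registered} = application:get_key(couch, registered),
    true = lists:member(couch_server, Registered).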

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
deleted file mode 100644
index 15cffbc..0000000
--- a/src/couch/src/couch.erl
+++ /dev/null
@@ -1,65 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch).
-
--compile(export_all).
-
-
-deps() ->
-    [
-        sasl,
-        inets,
-        os_mon,
-        crypto,
-        public_key,
-        ssl,
-        oauth,
-        ibrowse,
-        mochiweb,
-        config,
-        twig
-    ].
-
-
-start() ->
-    catch erlang:system_flag(scheduler_bind_type, default_bind),
-    case start_apps(deps()) of
-        ok ->
-            ok = application:start(couch);
-        Else ->
-            throw(Else)
-    end.
-
-
-stop() ->
-    application:stop(couch).
-
-
-restart() ->
-    init:restart().
-
-
-start_apps([]) ->
-    ok;
-start_apps([App|Rest]) ->
-    case application:start(App) of
-    ok ->
-       start_apps(Rest);
-    {error, {already_started, App}} ->
-       start_apps(Rest);
-    {error, _Reason} when App =:= public_key ->
-       % ignore on R12B5
-       start_apps(Rest);
-    {error, _Reason} ->
-       {error, {app_would_not_start, App}}
-    end.
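
The start_apps/1 loop above predates application:ensure_all_started/1; on
OTP releases that provide it, the manual dependency dance compresses to a
one-liner (sketch):

    {ok, _Started} = application:ensure_all_started(couch).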

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_app.erl b/src/couch/src/couch_app.erl
deleted file mode 100644
index d284c2b..0000000
--- a/src/couch/src/couch_app.erl
+++ /dev/null
@@ -1,31 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_app).
-
--behaviour(application).
-
--include_lib("couch/include/couch_db.hrl").
-
--export([start/2, stop/1]).
-
-start(_Type, _) ->
-    case couch_sup:start_link() of
-        {ok, _} = Resp ->
-            Resp;
-        Else ->
-            throw(Else)
-    end.
-
-stop(_) ->
-    ok.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_auth_cache.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
deleted file mode 100644
index 1650495..0000000
--- a/src/couch/src/couch_auth_cache.erl
+++ /dev/null
@@ -1,437 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache).
--behaviour(gen_server).
--behaviour(config_listener).
-
-% public API
--export([get_user_creds/1]).
-
-% gen_server API
--export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
--include("couch_js_functions.hrl").
-
--define(STATE, auth_state_ets).
--define(BY_USER, auth_by_user_ets).
--define(BY_ATIME, auth_by_atime_ets).
-
--record(state, {
-    max_cache_size = 0,
-    cache_size = 0,
-    db_notifier = nil,
-    db_mon_ref = nil
-}).
-
-
--spec get_user_creds(UserName::string() | binary()) ->
-    Credentials::list() | nil.
-
-get_user_creds(UserName) when is_list(UserName) ->
-    get_user_creds(?l2b(UserName));
-
-get_user_creds(UserName) ->
-    UserCreds = case config:get("admins", ?b2l(UserName)) of
-    "-hashed-" ++ HashedPwdAndSalt ->
-        % the name is an admin, now check to see if there is a user doc
-        % which has a matching name, salt, and password_sha
-        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
-        case get_from_cache(UserName) of
-        nil ->
-            make_admin_doc(HashedPwd, Salt, []);
-        UserProps when is_list(UserProps) ->
-            make_admin_doc(HashedPwd, Salt, couch_util:get_value(<<"roles">>, UserProps))
-        end;
-    "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
-        [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
-        case get_from_cache(UserName) of
-        nil ->
-            make_admin_doc(HashedPwd, Salt, Iterations, []);
-        UserProps when is_list(UserProps) ->
-            make_admin_doc(HashedPwd, Salt, Iterations, couch_util:get_value(<<"roles">>, UserProps))
-        end;
-    _Else ->
-        get_from_cache(UserName)
-    end,
-    validate_user_creds(UserCreds).
-
-make_admin_doc(HashedPwd, Salt, ExtraRoles) ->
-    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
-     {<<"salt">>, ?l2b(Salt)},
-     {<<"password_scheme">>, <<"simple">>},
-     {<<"password_sha">>, ?l2b(HashedPwd)}].
-
-make_admin_doc(DerivedKey, Salt, Iterations, ExtraRoles) ->
-    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
-     {<<"salt">>, ?l2b(Salt)},
-     {<<"iterations">>, list_to_integer(Iterations)},
-     {<<"password_scheme">>, <<"pbkdf2">>},
-     {<<"derived_key">>, ?l2b(DerivedKey)}].
-
-get_from_cache(UserName) ->
-    exec_if_auth_db(
-        fun(_AuthDb) ->
-            maybe_refresh_cache(),
-            case ets:lookup(?BY_USER, UserName) of
-            [] ->
-                gen_server:call(?MODULE, {fetch, UserName}, infinity);
-            [{UserName, {Credentials, _ATime}}] ->
-                couch_stats_collector:increment({couchdb, auth_cache_hits}),
-                gen_server:cast(?MODULE, {cache_hit, UserName}),
-                Credentials
-            end
-        end,
-        nil
-    ).
-
-
-validate_user_creds(nil) ->
-    nil;
-validate_user_creds(UserCreds) ->
-    case couch_util:get_value(<<"_conflicts">>, UserCreds) of
-    undefined ->
-        ok;
-    _ConflictList ->
-        throw({unauthorized,
-            <<"User document conflicts must be resolved before the document",
-              " is used for authentication purposes.">>
-        })
-    end,
-    UserCreds.
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
-    ?STATE = ets:new(?STATE, [set, protected, named_table]),
-    ?BY_USER = ets:new(?BY_USER, [set, protected, named_table]),
-    ?BY_ATIME = ets:new(?BY_ATIME, [ordered_set, private, named_table]),
-    process_flag(trap_exit, true),
-    ok = config:listen_for_changes(?MODULE, nil),
-    {ok, Notifier} = couch_db_update_notifier:start_link(fun handle_db_event/1),
-    State = #state{
-        db_notifier = Notifier,
-        max_cache_size = list_to_integer(
-            config:get("couch_httpd_auth", "auth_cache_size", "50")
-        )
-    },
-    {ok, reinit_cache(State)}.
-
-
-handle_db_event({Event, DbName}) ->
-    [{auth_db_name, AuthDbName}] = ets:lookup(?STATE, auth_db_name),
-    case DbName =:= AuthDbName of
-    true ->
-        case Event of
-        created -> gen_server:call(?MODULE, reinit_cache, infinity);
-        compacted -> gen_server:call(?MODULE, auth_db_compacted, infinity);
-        _Else   -> ok
-        end;
-    false ->
-        ok
-    end.
-
-
-handle_call(reinit_cache, _From, State) ->
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    {reply, ok, reinit_cache(State)};
-
-handle_call(auth_db_compacted, _From, State) ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            true = ets:insert(?STATE, {auth_db, reopen_auth_db(AuthDb)})
-        end
-    ),
-    {reply, ok, State};
-
-handle_call({new_max_cache_size, NewSize},
-        _From, #state{cache_size = Size} = State) when NewSize >= Size ->
-    {reply, ok, State#state{max_cache_size = NewSize}};
-
-handle_call({new_max_cache_size, NewSize}, _From, State) ->
-    free_mru_cache_entries(State#state.cache_size - NewSize),
-    {reply, ok, State#state{max_cache_size = NewSize, cache_size = NewSize}};
-
-handle_call({fetch, UserName}, _From, State) ->
-    {Credentials, NewState} = case ets:lookup(?BY_USER, UserName) of
-    [{UserName, {Creds, ATime}}] ->
-        couch_stats_collector:increment({couchdb, auth_cache_hits}),
-        cache_hit(UserName, Creds, ATime),
-        {Creds, State};
-    [] ->
-        couch_stats_collector:increment({couchdb, auth_cache_misses}),
-        Creds = get_user_props_from_db(UserName),
-        State1 = add_cache_entry(UserName, Creds, erlang:now(), State),
-        {Creds, State1}
-    end,
-    {reply, Credentials, NewState};
-
-handle_call(refresh, _From, State) ->
-    exec_if_auth_db(fun refresh_entries/1),
-    {reply, ok, State}.
-
-
-handle_cast({cache_hit, UserName}, State) ->
-    case ets:lookup(?BY_USER, UserName) of
-    [{UserName, {Credentials, ATime}}] ->
-        cache_hit(UserName, Credentials, ATime);
-    _ ->
-        ok
-    end,
-    {noreply, State}.
-
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info({'DOWN', Ref, _, _, _Reason}, #state{db_mon_ref = Ref} = State) ->
-    {noreply, reinit_cache(State)}.
-
-
-terminate(_Reason, #state{db_notifier = Notifier}) ->
-    couch_db_update_notifier:stop(Notifier),
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    true = ets:delete(?BY_USER),
-    true = ets:delete(?BY_ATIME),
-    true = ets:delete(?STATE).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("couch_httpd_auth", "auth_cache_size", SizeList, _, _) ->
-    Size = list_to_integer(SizeList),
-    {ok, gen_server:call(?MODULE, {new_max_cache_size, Size}, infinity)};
-handle_config_change("couch_httpd_auth", "authentication_db", _DbName, _, _) ->
-    {ok, gen_server:call(?MODULE, reinit_cache, infinity)};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-clear_cache(State) ->
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    true = ets:delete_all_objects(?BY_USER),
-    true = ets:delete_all_objects(?BY_ATIME),
-    State#state{cache_size = 0}.
-
-
-reinit_cache(State) ->
-    NewState = clear_cache(State),
-    AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
-    true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
-    AuthDb = open_auth_db(),
-    true = ets:insert(?STATE, {auth_db, AuthDb}),
-    NewState#state{db_mon_ref = erlang:monitor(process, AuthDb#db.main_pid)}.
-
-
-add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) ->
-    State;
-add_cache_entry(UserName, Credentials, ATime, State) ->
-    case State#state.cache_size >= State#state.max_cache_size of
-    true ->
-        free_mru_cache_entry();
-    false ->
-        ok
-    end,
-    true = ets:insert(?BY_ATIME, {ATime, UserName}),
-    true = ets:insert(?BY_USER, {UserName, {Credentials, ATime}}),
-    State#state{cache_size = couch_util:get_value(size, ets:info(?BY_USER))}.
-
-free_mru_cache_entries(0) ->
-    ok;
-free_mru_cache_entries(N) when N > 0 ->
-    free_mru_cache_entry(),
-    free_mru_cache_entries(N - 1).
-
-free_mru_cache_entry() ->
-    MruTime = ets:last(?BY_ATIME),
-    [{MruTime, UserName}] = ets:lookup(?BY_ATIME, MruTime),
-    true = ets:delete(?BY_ATIME, MruTime),
-    true = ets:delete(?BY_USER, UserName).
-
-
-cache_hit(UserName, Credentials, ATime) ->
-    NewATime = erlang:now(),
-    true = ets:delete(?BY_ATIME, ATime),
-    true = ets:insert(?BY_ATIME, {NewATime, UserName}),
-    true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}).
-
-
-refresh_entries(AuthDb) ->
-    case reopen_auth_db(AuthDb) of
-    nil ->
-        ok;
-    AuthDb2 ->
-        case AuthDb2#db.update_seq > AuthDb#db.update_seq of
-        true ->
-            {ok, _, _} = couch_db:enum_docs_since(
-                AuthDb2,
-                AuthDb#db.update_seq,
-                fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
-                AuthDb#db.update_seq,
-                []
-            ),
-            true = ets:insert(?STATE, {auth_db, AuthDb2});
-        false ->
-            ok
-        end
-    end.
-
-
-refresh_entry(Db, #full_doc_info{} = FDI) ->
-    refresh_entry(Db, couch_doc:to_doc_info(FDI));
-refresh_entry(Db, #doc_info{high_seq = DocSeq} = DocInfo) ->
-    case is_user_doc(DocInfo) of
-    {true, UserName} ->
-        case ets:lookup(?BY_USER, UserName) of
-        [] ->
-            ok;
-        [{UserName, {_OldCreds, ATime}}] ->
-            {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted]),
-            NewCreds = user_creds(Doc),
-            true = ets:insert(?BY_USER, {UserName, {NewCreds, ATime}})
-        end;
-    false ->
-        ok
-    end,
-    {ok, DocSeq}.
-
-
-user_creds(#doc{deleted = true}) ->
-    nil;
-user_creds(#doc{} = Doc) ->
-    {Creds} = couch_doc:to_json_obj(Doc, []),
-    Creds.
-
-
-is_user_doc(#doc_info{id = <<"org.couchdb.user:", UserName/binary>>}) ->
-    {true, UserName};
-is_user_doc(_) ->
-    false.
-
-
-maybe_refresh_cache() ->
-    case cache_needs_refresh() of
-    true ->
-        ok = gen_server:call(?MODULE, refresh, infinity);
-    false ->
-        ok
-    end.
-
-
-cache_needs_refresh() ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            case reopen_auth_db(AuthDb) of
-            nil ->
-                false;
-            AuthDb2 ->
-                AuthDb2#db.update_seq > AuthDb#db.update_seq
-            end
-        end,
-        false
-    ).
-
-
-reopen_auth_db(AuthDb) ->
-    case (catch couch_db:reopen(AuthDb)) of
-    {ok, AuthDb2} ->
-        AuthDb2;
-    _ ->
-        nil
-    end.
-
-
-exec_if_auth_db(Fun) ->
-    exec_if_auth_db(Fun, ok).
-
-exec_if_auth_db(Fun, DefRes) ->
-    case ets:lookup(?STATE, auth_db) of
-    [{auth_db, #db{} = AuthDb}] ->
-        Fun(AuthDb);
-    _ ->
-        DefRes
-    end.
-
-
-open_auth_db() ->
-    [{auth_db_name, DbName}] = ets:lookup(?STATE, auth_db_name),
-    {ok, AuthDb} = ensure_users_db_exists(DbName, [sys_db]),
-    AuthDb.
-
-
-get_user_props_from_db(UserName) ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            Db = reopen_auth_db(AuthDb),
-            DocId = <<"org.couchdb.user:", UserName/binary>>,
-            try
-                {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
-                {DocProps} = couch_doc:to_json_obj(Doc, []),
-                DocProps
-            catch
-            _:_Error ->
-                nil
-            end
-        end,
-        nil
-    ).
-
-ensure_users_db_exists(DbName, Options) ->
-    Options1 = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}, nologifmissing | Options],
-    case couch_db:open(DbName, Options1) of
-    {ok, Db} ->
-        ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        {ok, Db};
-    _Error ->
-        {ok, Db} = couch_db:create(DbName, Options1),
-        ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        {ok, Db}
-    end.
-
-ensure_auth_ddoc_exists(Db, DDocId) ->
-    case couch_db:open_doc(Db, DDocId) of
-    {not_found, _Reason} ->
-        {ok, AuthDesign} = auth_design_doc(DDocId),
-        {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
-    {ok, Doc} ->
-        {Props} = couch_doc:to_json_obj(Doc, []),
-        case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
-            ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
-                ok;
-            _ ->
-                Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
-                    {<<"validate_doc_update">>,
-                    ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
-                couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
-        end
-    end,
-    ok.
-
-auth_design_doc(DocId) ->
-    DocProps = [
-        {<<"_id">>, DocId},
-        {<<"language">>,<<"javascript">>},
-        {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
-    ],
-    {ok, couch_doc:from_json_obj({DocProps})}.

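The cache logic removed above keeps two ETS tables in step: ?BY_USER maps a user name to {Credentials, ATime} for constant-time lookup, while ?BY_ATIME is an ordered_set keyed on access time so eviction can walk entries in time order. A minimal, self-contained sketch of that two-table pattern follows; the module, function, and table names here are illustrative stand-ins, not CouchDB's, and erlang:unique_integer/1 stands in for the erlang:now/0 calls above (both yield strictly increasing, collision-free keys on a node).

-module(atime_cache_sketch).
-export([new/0, put_creds/3, get_creds/2, evict_oldest/1]).

%% Two tables, as in couch_auth_cache: a set for Name -> {Creds, ATime}
%% lookups and an ordered_set keyed on ATime for time-ordered eviction.
new() ->
    {ets:new(by_user, [set, public]),
     ets:new(by_atime, [ordered_set, public])}.

put_creds({ByUser, ByATime}, UserName, Creds) ->
    %% Strictly increasing, so ATime keys never collide (assumption:
    %% this mirrors the uniqueness guarantee of erlang:now/0 above).
    ATime = erlang:unique_integer([monotonic]),
    true = ets:insert(ByATime, {ATime, UserName}),
    true = ets:insert(ByUser, {UserName, {Creds, ATime}}),
    ok.

get_creds({ByUser, ByATime}, UserName) ->
    case ets:lookup(ByUser, UserName) of
        [{UserName, {Creds, ATime}}] ->
            %% A hit refreshes the access time, mirroring cache_hit/3 above.
            NewATime = erlang:unique_integer([monotonic]),
            true = ets:delete(ByATime, ATime),
            true = ets:insert(ByATime, {NewATime, UserName}),
            true = ets:insert(ByUser, {UserName, {Creds, NewATime}}),
            {ok, Creds};
        [] ->
            not_found
    end.

%% ets:first/1 on the ordered_set yields the smallest (oldest) ATime.
%% Note the removed code's free_mru_cache_entry/0 uses ets:last/1, i.e.
%% it evicts the most recently used entry instead.
evict_oldest({ByUser, ByATime}) ->
    case ets:first(ByATime) of
        '$end_of_table' ->
            empty;
        ATime ->
            [{ATime, UserName}] = ets:lookup(ByATime, ATime),
            true = ets:delete(ByATime, ATime),
            true = ets:delete(ByUser, UserName),
            ok
    end.
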
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_btree.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
deleted file mode 100644
index 9caceb8..0000000
--- a/src/couch/src/couch_btree.erl
+++ /dev/null
@@ -1,731 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([less/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
-extract(#btree{extract_kv=undefined}, Value) ->
-    Value;
-extract(#btree{extract_kv=Extract}, Value) ->
-    Extract(Value).
-
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
-    {Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
-    Assemble(Key, Value).
-
-less(#btree{less=undefined}, A, B) ->
-    A < B;
-less(#btree{less=Less}, A, B) ->
-    Less(A, B).
-
-% Pass in 'nil' for State when opening a new btree.
-open(State, Fd) ->
-    {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
-    Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
-    set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
-    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
-    set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
-    set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
-    set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
-    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
-get_state(#btree{root=Root}) ->
-    Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
-    final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
-    Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
-    Red;
-final_reduce(Reduce, {[], Reductions}) ->
-    Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
-    Red = Reduce(reduce, KVs),
-    final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    StartKey = couch_util:get_value(start_key, Options),
-    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
-    KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end),
-    try
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
-            KeyGroupFun, Fun, Acc),
-        if GroupedKey2 == undefined ->
-            {ok, Acc2};
-        true ->
-            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
-            {ok, Acc3} -> {ok, Acc3};
-            {stop, Acc3} -> {ok, Acc3}
-            end
-        end
-    catch
-        throw:{stop, AccDone} -> {ok, AccDone}
-    end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
-    {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
-    {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
-    0;
-size(#btree{root = {_P, _Red}}) ->
-    % pre 1.2 format
-    nil;
-size(#btree{root = {_P, _Red, Size}}) ->
-    Size.
-
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
-    fun
-        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
-    fun
-        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
-    Fun.    % Already arity 4
-
-make_key_in_end_range_function(Bt, fwd, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, LastKey, Key) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, Key, EndKey) end
-    end;
-make_key_in_end_range_function(Bt, rev, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not less(Bt, Key, LastKey) end
-        end;
-    EndKey ->
-        fun(Key) -> less(Bt, EndKey, Key) end
-    end.
-
-
-foldl(Bt, Fun, Acc) ->
-    fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
-    fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
-    {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    InRange = make_key_in_end_range_function(Bt, Dir, Options),
-    Result =
-    case couch_util:get_value(start_key, Options) of
-    undefined ->
-        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
-                convert_fun_arity(Fun), Acc);
-    StartKey ->
-        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
-                convert_fun_arity(Fun), Acc)
-    end,
-    case Result of
-    {ok, Acc2}->
-        FullReduction = element(2, Root),
-        {ok, {[], [FullReduction]}, Acc2};
-    {stop, LastReduction, Acc2} ->
-        {ok, LastReduction, Acc2}
-    end.
-
-add(Bt, InsertKeyValues) ->
-    add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
-    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
-    {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
-    #btree{root=Root} = Bt,
-    InsertActions = lists:map(
-        fun(KeyValue) ->
-            {Key, Value} = extract(Bt, KeyValue),
-            {insert, Key, Value}
-        end, InsertValues),
-    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
-    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
-    SortFun =
-        fun({OpA, A, _}, {OpB, B, _}) ->
-            case A == B of
-            % A and B are equal, sort by op.
-            true -> op_order(OpA) < op_order(OpB);
-            false ->
-                less(Bt, A, B)
-            end
-        end,
-    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
-    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
-    {ok, NewRoot} = complete_root(Bt, KeyPointers),
-    {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
-    SortedKeys = case Less of
-        undefined -> lists:sort(Keys);
-        _ -> lists:sort(Less, Keys)
-    end,
-    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
-    % We want to return the results in the same order as the keys were input
-    % but we may have changed the order when we sorted. So we need to put the
-    % order back into the results.
-    couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
-    {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
-    kv_node ->
-        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
-    end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
-    {Key, PointerInfo} = element(N, NodeTuple),
-    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
-    case lists:splitwith(SplitFun, LookupKeys) of
-    {[], GreaterQueries} ->
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
-    {LessEqQueries, GreaterQueries} ->
-        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
-    end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    % keys not found
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
-    {Key, Value} = element(N, NodeTuple),
-    case less(Bt, LookupKey, Key) of
-    true ->
-        % LookupKey is less than Key
-        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
-    false ->
-        case less(Bt, Key, LookupKey) of
-        true ->
-            % LookupKey is greater than Key
-            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
-        false ->
-            % LookupKey is equal to Key
-            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
-        end
-    end.
-
-
-complete_root(_Bt, []) ->
-    {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
-    {ok, PointerInfo};
-complete_root(Bt, KPs) ->
-    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
-    complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate, as it does not account for compression when blocks are
-% written. Plus, with the "case byte_size(term_to_binary(InList)) of" code,
-% it's probably really inefficient.
-
-chunkify(InList) ->
-    BaseChunkSize = get_chunk_size(),
-    case ?term_size(InList) of
-    Size when Size > BaseChunkSize ->
-        NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
-        ChunkThreshold = Size div NumberOfChunksLikely,
-        chunkify(InList, ChunkThreshold, [], 0, []);
-    _Else ->
-        [InList]
-    end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
-    lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
-    lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
-    case ?term_size(InElement) of
-    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
-        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
-    Size ->
-        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
-    end.
-
--compile({inline,[get_chunk_size/0]}).
-get_chunk_size() ->
-    try
-        list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
-    catch error:badarg ->
-        1279
-    end.
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
-    case RootPointerInfo of
-    nil ->
-        NodeType = kv_node,
-        NodeList = [];
-    _Tuple ->
-        Pointer = element(1, RootPointerInfo),
-        {NodeType, NodeList} = get_node(Bt, Pointer)
-    end,
-    NodeTuple = list_to_tuple(NodeList),
-
-    {ok, NewNodeList, QueryOutput2} =
-    case NodeType of
-    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
-    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
-    end,
-    case NewNodeList of
-    [] ->  % no nodes remain
-        {ok, [], QueryOutput2};
-    NodeList ->  % nothing changed
-        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
-    _Else2 ->
-        {ok, ResultList} = write_node(Bt, NodeType, NewNodeList),
-        {ok, ResultList, QueryOutput2}
-    end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
-    [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
-    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
-    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
-    NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
-    NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
-    % pre 1.2 format
-    nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
-    nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
-    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
-    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
-    {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
-    % split up nodes into smaller sizes
-    NodeListList = chunkify(NodeList),
-    % now write out each chunk and return the KeyPointer pairs for those nodes
-    ResultList = [
-        begin
-            {ok, Pointer, Size} = couch_file:append_term(
-                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
-            {LastKey, _} = lists:last(ANodeList),
-            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
-            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
-        end
-    ||
-        ANodeList <- NodeListList
-    ],
-    {ok, ResultList}.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
-    modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
-        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
-    Sz = tuple_size(NodeTuple),
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
-    case N =:= Sz of
-    true  ->
-        % perform remaining actions on last node
-        {_, PointerInfo} = element(Sz, NodeTuple),
-        {ok, ChildKPs, QueryOutput2} =
-            modify_node(Bt, PointerInfo, Actions, QueryOutput),
-        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            Sz - 1, ChildKPs)),
-        {ok, NodeList, QueryOutput2};
-    false ->
-        {NodeKey, PointerInfo} = element(N, NodeTuple),
-        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
-                not less(Bt, NodeKey, ActionKey)
-            end,
-        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
-        {ok, ChildKPs, QueryOutput2} =
-                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
-        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
-                LowerBound, N - 1, ResultNode)),
-        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
-    end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
-    Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
-    lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
-    End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
-    Mid = Start + ((End - Start) div 2),
-    {TupleKey, _} = element(Mid, Tuple),
-    case less(Bt, TupleKey, Key) of
-    true ->
-        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
-    false ->
-        find_first_gteq(Bt, Tuple, Start, Mid, Key)
-    end.
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
-    case ActionType of
-    insert ->
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-    remove ->
-        % just drop the action
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
-    fetch ->
-        % the key/value must not exist in the tree
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-    end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
-    {Key, Value} = element(N, NodeTuple),
-    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
-    case less(Bt, ActionKey, Key) of
-    true ->
-        case ActionType of
-        insert ->
-            % ActionKey is less than the Key, so insert
-            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-        remove ->
-            % ActionKey is less than the Key, just drop the action
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
-        fetch ->
-            % ActionKey is less than the Key, the key/value must not exist in the tree
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-        end;
-    false ->
-        % ActionKey and Key are maybe equal.
-        case less(Bt, Key, ActionKey) of
-        false ->
-            case ActionType of
-            insert ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-            remove ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
-            fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
-                % since an identical action key can follow it.
-                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
-            end;
-        true ->
-            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
-        end
-    end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    P = element(1, Node),
-    case get_node(Bt, P) of
-    {kp_node, NodeList} ->
-        NodeList2 = adjust_dir(Dir, NodeList),
-        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
-    {kv_node, KVs} ->
-        KVs2 = adjust_dir(Dir, KVs),
-        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
-    end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-
-    GTEKeyStartKVs =
-    case KeyStart of
-    undefined ->
-        KVs;
-    _ ->
-        DropFun = case Dir of
-        fwd ->
-            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
-        rev ->
-            fun({Key, _}) -> less(Bt, KeyStart, Key) end
-        end,
-        lists:dropwhile(DropFun, KVs)
-    end,
-    KVs2 = lists:takewhile(
-        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
-    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-        _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    case GroupedKey of
-    undefined ->
-        reduce_stream_kv_node2(Bt, RestKVs, Key,
-                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
-    _ ->
-
-        case KeyGroupFun(GroupedKey, Key) of
-        true ->
-            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
-                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
-                Fun, Acc);
-        false ->
-            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
-            {ok, Acc2} ->
-                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
-                    [], KeyGroupFun, Fun, Acc2);
-            {stop, Acc2} ->
-                throw({stop, Acc2})
-            end
-        end
-    end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-    Nodes =
-    case KeyStart of
-    undefined ->
-        NodeList;
-    _ ->
-        case Dir of
-        fwd ->
-            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
-        rev ->
-            RevKPs = lists:reverse(NodeList),
-            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
-            {_Before, []} ->
-                NodeList;
-            {Before, [FirstAfter | _]} ->
-                [FirstAfter | lists:reverse(Before)]
-            end
-        end
-    end,
-    {InRange, MaybeInRange} = lists:splitwith(
-        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
-    NodesInRange = case MaybeInRange of
-    [FirstMaybeInRange | _] when Dir =:= fwd ->
-        InRange ++ [FirstMaybeInRange];
-    _ ->
-        InRange
-    end,
-    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
-                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
-    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
-                [], [], KeyGroupFun, Fun, Acc),
-    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
-            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
-        KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} =
-    case Grouped0 of
-    [] ->
-        {Grouped0, Ungrouped0};
-    _ ->
-        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
-        {RestGrouped, [FirstGrouped | Ungrouped0]}
-    end,
-    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
-    case UngroupedNodes of
-    [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
-        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
-                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-    [] ->
-        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
-    end.
-
-adjust_dir(fwd, List) ->
-    List;
-adjust_dir(rev, List) ->
-    lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
-    end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
-    end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
-    Red = element(2, Node),
-    case Fun(traverse, Key, Red, Acc) of
-    {ok, Acc2} ->
-        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
-        {ok, Acc3} ->
-            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
-        {stop, LastReds, Acc3} ->
-            {stop, LastReds, Acc3}
-        end;
-    {skip, Acc2} ->
-        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2)
-    end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
-    {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
-    case less(Bt, NodeKey, StartKey) of
-    true ->
-        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
-    false ->
-        {Reds, [{NodeKey, Node} | RestKPs]}
-    end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
-    {NewReds, NodesToStream} =
-    case Dir of
-    fwd ->
-        % drop all nodes sorting before the key
-        drop_nodes(Bt, Reds, StartKey, KPs);
-    rev ->
-        % keep all nodes sorting before the key, AND the first node to sort after
-        RevKPs = lists:reverse(KPs),
-         case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
-        {_RevsBefore, []} ->
-            % everything sorts before it
-            {Reds, KPs};
-        {RevBefore, [FirstAfter | Drop]} ->
-            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
-                 [FirstAfter | lists:reverse(RevBefore)]}
-        end
-    end,
-    case NodesToStream of
-    [] ->
-        {ok, Acc};
-    [{_Key, Node} | Rest] ->
-        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
-        {ok, Acc2} ->
-            Red = element(2, Node),
-            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
-        {stop, LastReds, Acc2} ->
-            {stop, LastReds, Acc2}
-        end
-    end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
-    DropFun =
-    case Dir of
-    fwd ->
-        fun({Key, _}) -> less(Bt, Key, StartKey) end;
-    rev ->
-        fun({Key, _}) -> less(Bt, StartKey, Key) end
-    end,
-    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
-    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
-    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
-    case InRange(K) of
-    false ->
-        {stop, {PrevKVs, Reds}, Acc};
-    true ->
-        AssembledKV = assemble(Bt, K, V),
-        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
-        {ok, Acc2} ->
-            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
-        {stop, Acc2} ->
-            {stop, {PrevKVs, Reds}, Acc2}
-        end
-    end.

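The module removed above is the append-only B+tree that backs CouchDB's database and view files. As a hedged usage sketch, based only on the exports and comments visible in this diff: open a fresh tree (nil root) on a couch_file fd, batch-insert pairs with add_remove/3, look one up, and fold. The file path is illustrative, and the couch_file:open/2 options are an assumption about that companion module.

-module(btree_demo_sketch).
-export([run/0]).

run() ->
    {ok, Fd} = couch_file:open("/tmp/btree_demo.couch", [create, overwrite]),
    %% nil root = brand new btree, per the comment on open/2 above
    {ok, Bt0} = couch_btree:open(nil, Fd),
    {ok, Bt1} = couch_btree:add_remove(Bt0, [{1, a}, {2, b}, {3, c}], []),
    %% lookup/2 returns results in the same order the keys were given
    [{2, {ok, {2, b}}}] = couch_btree:lookup(Bt1, [2]),
    %% a 2-arity fold fun is wrapped to arity 4 by convert_fun_arity/1
    {ok, _Reds, Acc} =
        couch_btree:foldl(Bt1, fun(KV, A) -> {ok, [KV | A]} end, []),
    lists:reverse(Acc).   % [{1,a},{2,b},{3,c}]
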
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_changes.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
deleted file mode 100644
index 4346109..0000000
--- a/src/couch/src/couch_changes.erl
+++ /dev/null
@@ -1,583 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include_lib("couch/include/couch_db.hrl").
-
--export([
-    handle_changes/3,
-    get_changes_timeout/2,
-    wait_db_updated/3,
-    get_rest_db_updated/1,
-    configure_filter/4,
-    filter/3
-]).
-
--export([changes_enumerator/2]).
-
-% For the builtin filter _doc_ids, this is the maximum number
-% of documents for which we trigger the optimized code path.
--define(MAX_DOC_IDS, 100).
-
--record(changes_acc, {
-    db,
-    seq,
-    prepend,
-    filter,
-    callback,
-    user_acc,
-    resp_type,
-    limit,
-    include_docs,
-    conflicts,
-    timeout,
-    timeout_fun
-}).
-
-%% @type Req -> #httpd{} | {json_req, JsonObj()}
-handle_changes(Args1, Req, Db0) ->
-    #changes_args{
-        style = Style,
-        filter = FilterName,
-        feed = Feed,
-        dir = Dir,
-        since = Since
-    } = Args1,
-    Filter = configure_filter(FilterName, Style, Req, Db0),
-    Args = Args1#changes_args{filter_fun = Filter},
-    Start = fun() ->
-        {ok, Db} = couch_db:reopen(Db0),
-        StartSeq = case Dir of
-        rev ->
-            couch_db:get_update_seq(Db);
-        fwd ->
-            Since
-        end,
-        {Db, StartSeq}
-    end,
-    % begin timer to deal with heartbeat when filter function fails
-    case Args#changes_args.heartbeat of
-    undefined ->
-        erlang:erase(last_changes_heartbeat);
-    Val when is_integer(Val); Val =:= true ->
-        put(last_changes_heartbeat, now())
-    end,
-
-    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
-    true ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            Self = self(),
-            {ok, Notify} = couch_db_update_notifier:start_link(
-                fun({_, DbName}) when Db0#db.name == DbName ->
-                    Self ! db_updated;
-                (_) ->
-                    ok
-                end
-            ),
-            {Db, StartSeq} = Start(),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
-                             <<"">>, Timeout, TimeoutFun),
-            try
-                keep_sending_changes(
-                    Args#changes_args{dir=fwd},
-                    Acc0,
-                    true)
-            after
-                couch_db_update_notifier:stop(Notify),
-                get_rest_db_updated(ok) % clean out any remaining update messages
-            end
-        end;
-    false ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            {Db, StartSeq} = Start(),
-            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
-                             UserAcc2, Db, StartSeq, <<>>, Timeout, TimeoutFun),
-            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
-                send_changes(
-                    Args#changes_args{feed="normal"},
-                    Acc0,
-                    true),
-            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
-        end
-    end.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
-    Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
-    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-
-configure_filter("_doc_ids", Style, Req, _Db) ->
-    {doc_ids, Style, get_doc_ids(Req)};
-configure_filter("_design", Style, _Req, _Db) ->
-    {design_docs, Style};
-configure_filter("_view", Style, Req, Db) ->
-    ViewName = couch_httpd:qs_value(Req, "view", ""),
-    if ViewName /= "" -> ok; true ->
-        throw({bad_request, "`view` filter parameter is not provided."})
-    end,
-    ViewNameParts = string:tokens(ViewName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
-        [DName, VName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"views">>, VName]),
-            {view, Style, DDoc, VName};
-        [] ->
-            Msg = "`view` must be of the form `designname/viewname`",
-            throw({bad_request, Msg})
-    end;
-configure_filter([$_ | _], _Style, _Req, _Db) ->
-    throw({bad_request, "unknown builtin filter name"});
-configure_filter("", main_only, _Req, _Db) ->
-    {default, main_only};
-configure_filter("", all_docs, _Req, _Db) ->
-    {default, all_docs};
-configure_filter(FilterName, Style, Req, Db) ->
-    FilterNameParts = string:tokens(FilterName, "/"),
-    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
-        [DName, FName] ->
-            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
-            check_member_exists(DDoc, [<<"filters">>, FName]),
-            {custom, Style, Req, DDoc, FName};
-        [] ->
-            {default, Style};
-        _Else ->
-            Msg = "`filter` must be of the form `designname/filtername`",
-            throw({bad_request, Msg})
-    end.
-
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
-    filter(Db, couch_doc:to_doc_info(FDI), Filter);
-filter(_Db, DocInfo, {default, Style}) ->
-    apply_style(DocInfo, Style);
-filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
-    case lists:member(DocInfo#doc_info.id, DocIds) of
-        true ->
-            apply_style(DocInfo, Style);
-        false ->
-            []
-    end;
-filter(_Db, DocInfo, {design_docs, Style}) ->
-    case DocInfo#doc_info.id of
-        <<"_design", _/binary>> ->
-            apply_style(DocInfo, Style);
-        _ ->
-            []
-    end;
-filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
-    filter_revs(Passes, Docs);
-filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
-    Req = case Req0 of
-        {json_req, _} -> Req0;
-        #httpd{} -> {json_req, couch_httpd_external:json_req_obj(Req0, Db)}
-    end,
-    Docs = open_revs(Db, DocInfo, Style),
-    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
-    filter_revs(Passes, Docs).
-
-
-get_doc_ids({json_req, {Props}}) ->
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
-    {Props} = couch_httpd:json_body_obj(Req),
-    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
-    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
-    check_docids(DocIds);
-get_doc_ids(_) ->
-    throw({bad_request, no_doc_ids_provided}).
-
-
-check_docids(DocIds) when is_list(DocIds) ->
-    lists:foreach(fun
-        (DocId) when not is_binary(DocId) ->
-            Msg = "`doc_ids` filter parameter is not a list of binaries.",
-            throw({bad_request, Msg});
-        (_) -> ok
-    end, DocIds),
-    DocIds;
-check_docids(_) ->
-    Msg = "`doc_ids` filter parameter is not a list of binaries.",
-    throw({bad_request, Msg}).
-
-
-open_ddoc(#db{name= <<"shards/", _/binary>> =ShardName}, DDocId) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(fabric:open_doc(mem3:dbname(ShardName), DDocId, []))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, _}=Response} ->
-            Response;
-        {'DOWN', Ref, _, _, Response} ->
-            throw(Response)
-    end;
-open_ddoc(Db, DDocId) ->
-    case couch_db:open_doc(Db, DDocId, [ejson_body]) of
-        {ok, _} = Resp -> Resp;
-        Else -> throw(Else)
-    end.
-
-
-check_member_exists(#doc{body={Props}}, Path) ->
-    couch_util:get_nested_json_value({Props}, Path).
-
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
-    [#rev_info{rev=Rev} | _] = Revs,
-    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
-    [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
-
-open_revs(Db, DocInfo, Style) ->
-    DocInfos = case Style of
-        main_only -> [DocInfo];
-        all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
-    end,
-    OpenOpts = [deleted, conflicts],
-    % Relying on list comprehensions to silence errors
-    OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
-    [Doc || {ok, Doc} <- OpenResults].
-
-
-filter_revs(Passes, Docs) ->
-    lists:flatmap(fun
-        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
-            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
-            Change = {[{<<"rev">>, RevStr}]},
-            [Change];
-        (_) ->
-            []
-    end, lists:zip(Passes, Docs)).
-
-
-get_changes_timeout(Args, Callback) ->
-    #changes_args{
-        heartbeat = Heartbeat,
-        timeout = Timeout,
-        feed = ResponseType
-    } = Args,
-    DefaultTimeout = list_to_integer(
-        config:get("httpd", "changes_timeout", "60000")
-    ),
-    case Heartbeat of
-    undefined ->
-        case Timeout of
-        undefined ->
-            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
-        infinity ->
-            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
-        _ ->
-            {lists:min([DefaultTimeout, Timeout]),
-                fun(UserAcc) -> {stop, UserAcc} end}
-        end;
-    true ->
-        {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
-    _ ->
-        {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
-    end.
-
-start_sending_changes(_Callback, UserAcc, ResponseType)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
-    Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        conflicts = Conflicts,
-        limit = Limit,
-        feed = ResponseType,
-        filter_fun = Filter
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        prepend = Prepend,
-        filter = Filter,
-        callback = Callback,
-        user_acc = UserAcc,
-        resp_type = ResponseType,
-        limit = Limit,
-        include_docs = IncludeDocs,
-        conflicts = Conflicts,
-        timeout = Timeout,
-        timeout_fun = TimeoutFun
-    }.
-
-send_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        dir = Dir
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        filter = Filter
-    } = Acc0,
-    EnumFun = fun ?MODULE:changes_enumerator/2,
-    case can_optimize(FirstRound, Filter) of
-        {true, Fun} ->
-            Fun(Db, StartSeq, Dir, EnumFun, Acc0, Filter);
-        _ ->
-            couch_db:changes_since(Db, StartSeq, EnumFun, [{dir, Dir}], Acc0)
-    end.
-
-
-can_optimize(true, {doc_ids, _Style, DocIds})
-        when length(DocIds) =< ?MAX_DOC_IDS ->
-    {true, fun send_changes_doc_ids/6};
-can_optimize(true, {design_docs, _Style}) ->
-    {true, fun send_changes_design_docs/6};
-can_optimize(_, _) ->
-    false.
-
-
-send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
-    Lookups = couch_btree:lookup(Db#db.id_tree, DocIds),
-    FullInfos = lists:foldl(fun
-        ({ok, FDI}, Acc) -> [FDI | Acc];
-        (not_found, Acc) -> Acc
-    end, [], Lookups),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
-    FoldFun = fun(FullDocInfo, _, Acc) ->
-        {ok, [FullDocInfo | Acc]}
-    end,
-    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
-    {ok, _, FullInfos} = couch_btree:fold(Db#db.id_tree, FoldFun, [], KeyOpts),
-    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
-    FoldFun = case Dir of
-        fwd -> fun lists:foldl/3;
-        rev -> fun lists:foldr/3
-    end,
-    GreaterFun = case Dir of
-        fwd -> fun(A, B) -> A > B end;
-        rev -> fun(A, B) -> A =< B end
-    end,
-    DocInfos = lists:foldl(fun(FDI, Acc) ->
-        DI = couch_doc:to_doc_info(FDI),
-        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
-            true -> [DI | Acc];
-            false -> Acc
-        end
-    end, [], FullDocInfos),
-    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
-    FinalAcc = try
-        FoldFun(fun(DocInfo, Acc) ->
-            case Fun(DocInfo, Acc) of
-                {ok, NewAcc} ->
-                    NewAcc;
-                {stop, NewAcc} ->
-                    throw({stop, NewAcc})
-            end
-        end, Acc0, SortedDocInfos)
-    catch
-        {stop, Acc} -> Acc
-    end,
-    case Dir of
-        fwd -> {ok, FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)}};
-        rev -> {ok, FinalAcc}
-    end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        feed = ResponseType,
-        limit = Limit,
-        db_open_options = DbOptions
-    } = Args,
-
-    {ok, ChangesAcc} = send_changes(
-        Args#changes_args{dir=fwd},
-        Acc0,
-        FirstRound),
-    #changes_acc{
-        db = Db, callback = Callback, timeout = Timeout, timeout_fun = TimeoutFun,
-        seq = EndSeq, prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
-    } = ChangesAcc,
-
-    couch_db:close(Db),
-    if Limit > NewLimit, ResponseType == "longpoll" ->
-        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
-    true ->
-        case wait_db_updated(Timeout, TimeoutFun, UserAcc2) of
-        {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
-            case couch_db:open(Db#db.name, DbOptions1) of
-            {ok, Db2} ->
-                keep_sending_changes(
-                  Args#changes_args{limit=NewLimit},
-                  ChangesAcc#changes_acc{
-                    db = Db2,
-                    user_acc = UserAcc4,
-                    seq = EndSeq,
-                    prepend = Prepend2,
-                    timeout = Timeout,
-                    timeout_fun = TimeoutFun},
-                  false);
-            _Else ->
-                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
-            end;
-        {stop, UserAcc4} ->
-            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
-        end
-    end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
-    Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(DocInfo, #changes_acc{resp_type = ResponseType} = Acc)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    #changes_acc{
-        filter = Filter, callback = Callback,
-        user_acc = UserAcc, limit = Limit, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = Acc,
-    #doc_info{high_seq = Seq} = DocInfo,
-    Results0 = filter(Db, DocInfo, Filter),
-    Results = [Result || Result <- Results0, Result /= null],
-    %% TODO: I'm thinking this should be < 1 and not =< 1
-    Go = if Limit =< 1 -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        ChangesRow = changes_row(Results, DocInfo, Acc),
-        UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
-        reset_heartbeat(),
-        {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}}
-    end;
-changes_enumerator(DocInfo, Acc) ->
-    #changes_acc{
-        filter = Filter, callback = Callback, prepend = Prepend,
-        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = Acc,
-    #doc_info{high_seq = Seq} = DocInfo,
-    Results0 = filter(Db, DocInfo, Filter),
-    Results = [Result || Result <- Results0, Result /= null],
-    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        ChangesRow = changes_row(Results, DocInfo, Acc),
-        UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
-        reset_heartbeat(),
-        {Go, Acc#changes_acc{
-            seq = Seq, prepend = <<",\n">>,
-            user_acc = UserAcc2, limit = Limit - 1}}
-    end.
-
-
-changes_row(Results, DocInfo, Acc) ->
-    #doc_info{
-        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
-    } = DocInfo,
-    #changes_acc{db = Db, include_docs = IncDoc, conflicts = Conflicts} = Acc,
-    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
-        deleted_item(Del) ++ case IncDoc of
-            true ->
-                Opts = case Conflicts of
-                    true -> [deleted, conflicts];
-                    false -> [deleted]
-                end,
-                Doc = couch_index_util:load_doc(Db, DocInfo, Opts),
-                case Doc of
-                    null -> [{doc, null}];
-                    _ ->  [{doc, couch_doc:to_json_obj(Doc, [])}]
-                end;
-            false ->
-                []
-        end}.
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% Waits for a db_updated msg; if there are multiple msgs, collects them.
-wait_db_updated(Timeout, TimeoutFun, UserAcc) ->
-    receive
-    db_updated ->
-        get_rest_db_updated(UserAcc)
-    after Timeout ->
-        {Go, UserAcc2} = TimeoutFun(UserAcc),
-        case Go of
-        ok ->
-            wait_db_updated(Timeout, TimeoutFun, UserAcc2);
-        stop ->
-            {stop, UserAcc2}
-        end
-    end.
-
-get_rest_db_updated(UserAcc) ->
-    receive
-    db_updated ->
-        get_rest_db_updated(UserAcc)
-    after 0 ->
-        {updated, UserAcc}
-    end.
-
-reset_heartbeat() ->
-    case get(last_changes_heartbeat) of
-    undefined ->
-        ok;
-    _ ->
-        put(last_changes_heartbeat, now())
-    end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
-    Before = get(last_changes_heartbeat),
-    case Before of
-    undefined ->
-        {ok, Acc};
-    _ ->
-        Now = now(),
-        case timer:now_diff(Now, Before) div 1000 >= Timeout of
-        true ->
-            Acc2 = TimeoutFun(Acc),
-            put(last_changes_heartbeat, Now),
-            Acc2;
-        false ->
-            {ok, Acc}
-        end
-    end.

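handle_changes/3 above returns a closure over the request: callers hand it a {Callback, UserAcc} pair, and the 3-arity callback receives start, {change, Row, Prepend}, timeout, and {stop, EndSeq} events with the feed type as the second argument. A hedged sketch of a one-shot ("normal") feed consumer follows; the #changes_args defaults and the {json_req, null} request stand-in are assumptions here, not confirmed by this diff.

-module(changes_sketch).
-export([collect_changes/1]).

-include_lib("couch/include/couch_db.hrl").

%% Collect every change row from a "normal" feed into a list.
collect_changes(Db) ->
    Args = #changes_args{feed = "normal", since = 0},
    ChangesFun = couch_changes:handle_changes(Args, {json_req, null}, Db),
    Callback = fun
        (start, _Feed, Acc)                   -> Acc;
        ({change, Row, _Prepend}, _Feed, Acc) -> [Row | Acc];
        (timeout, _Feed, Acc)                 -> Acc;
        ({stop, _EndSeq}, _Feed, Acc)         -> lists:reverse(Acc)
    end,
    %% get_callback_acc/1 above accepts this {Callback, InitialAcc} pair.
    ChangesFun({Callback, []}).
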

[27/49] Remove src/fabric

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_view_changes.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
deleted file mode 100644
index b0a3628..0000000
--- a/src/fabric/src/fabric_view_changes.erl
+++ /dev/null
@@ -1,422 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_changes).
-
--export([go/5, pack_seqs/1, unpack_seqs/2]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--import(fabric_db_update_listener, [wait_db_updated/1]).
-
-go(DbName, Feed, Options, Callback, Acc0) when Feed == "continuous" orelse
-        Feed == "longpoll" ->
-    Args = make_changes_args(Options),
-    Since = get_start_seq(DbName, Args),
-    case validate_start_seq(DbName, Since) of
-    ok ->
-        {ok, Acc} = Callback(start, Acc0),
-        {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
-        Ref = make_ref(),
-        Parent = self(),
-        UpdateListener = {spawn_link(fabric_db_update_listener, go,
-                                     [Parent, Ref, DbName, Timeout]),
-                          Ref},
-        try
-            keep_sending_changes(
-                DbName,
-                Args,
-                Callback,
-                Since,
-                Acc,
-                Timeout,
-                UpdateListener,
-                os:timestamp()
-            )
-        after
-            fabric_db_update_listener:stop(UpdateListener)
-        end;
-    Error ->
-        Callback(Error, Acc0)
-    end;
-
-go(DbName, "normal", Options, Callback, Acc0) ->
-    Args = make_changes_args(Options),
-    Since = get_start_seq(DbName, Args),
-    case validate_start_seq(DbName, Since) of
-    ok ->
-        {ok, Acc} = Callback(start, Acc0),
-        {ok, #collector{counters=Seqs, user_acc=AccOut}} = send_changes(
-            DbName,
-            Args,
-            Callback,
-            Since,
-            Acc,
-            5000
-        ),
-        Callback({stop, pack_seqs(Seqs)}, AccOut);
-    Error ->
-        Callback(Error, Acc0)
-    end.
-
-keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen, T0) ->
-    #changes_args{limit=Limit, feed=Feed, heartbeat=Heartbeat} = Args,
-    {ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn, Timeout),
-    #collector{limit=Limit2, counters=NewSeqs, user_acc=AccOut} = Collector,
-    LastSeq = pack_seqs(NewSeqs),
-    if Limit > Limit2, Feed == "longpoll" ->
-        Callback({stop, LastSeq}, AccOut);
-    true ->
-        WaitForUpdate = wait_db_updated(UpListen),
-        AccumulatedTime = timer:now_diff(os:timestamp(), T0) div 1000,
-        Max = case config:get("fabric", "changes_duration") of
-        undefined ->
-            infinity;
-        MaxStr ->
-            list_to_integer(MaxStr)
-        end,
-        case {Heartbeat, AccumulatedTime > Max, WaitForUpdate} of
-        {undefined, _, timeout} ->
-            Callback({stop, LastSeq}, AccOut);
-        {_, true, timeout} ->
-            Callback({stop, LastSeq}, AccOut);
-        _ ->
-            {ok, AccTimeout} = Callback(timeout, AccOut),
-            keep_sending_changes(
-                DbName,
-                Args#changes_args{limit=Limit2},
-                Callback,
-                LastSeq,
-                AccTimeout,
-                Timeout,
-                UpListen,
-                T0
-            )
-        end
-    end.
-
-send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
-    LiveNodes = [node() | nodes()],
-    AllLiveShards = mem3:live_shards(DbName, LiveNodes),
-    Seqs = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, Seq}) ->
-        case lists:member(Shard, AllLiveShards) of
-        true ->
-            Ref = rexi:cast(N, {fabric_rpc, changes, [Name,ChangesArgs,Seq]}),
-            [{Shard#shard{ref = Ref}, Seq}];
-        false ->
-            % Find some replacement shards to cover the missing range
-            % TODO It's possible in rare cases of shard merging to end up
-            % with overlapping shard ranges from this technique
-            lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
-                Ref = rexi:cast(N2, {fabric_rpc, changes, [Name2,ChangesArgs,0]}),
-                {NewShard#shard{ref = Ref}, 0}
-            end, find_replacement_shards(Shard, AllLiveShards))
-        end
-    end, unpack_seqs(PackedSeqs, DbName)),
-    {Workers, _} = lists:unzip(Seqs),
-    RexiMon = fabric_util:create_monitors(Workers),
-    State = #collector{
-        query_args = ChangesArgs,
-        callback = Callback,
-        counters = orddict:from_list(Seqs),
-        user_acc = AccIn,
-        limit = ChangesArgs#changes_args.limit,
-        rows = Seqs % store sequence positions instead
-    },
-    %% TODO: errors need to be handled here
-    try
-        receive_results(Workers, State, Timeout, Callback)
-    after
-        rexi_monitor:stop(RexiMon),
-        fabric_util:cleanup(Workers)
-    end.
-
-receive_results(Workers, State, Timeout, Callback) ->
-    case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State,
-            infinity, Timeout) of
-    {timeout, NewState0} ->
-        {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
-        NewState = NewState0#collector{user_acc = AccOut},
-        receive_results(Workers, NewState, Timeout, Callback);
-    {_, NewState} ->
-        {ok, NewState}
-    end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, nil, State) ->
-    fabric_view:remove_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
-    #collector{
-        callback=Callback,
-        counters=Counters0,
-        rows = Seqs0,
-        user_acc=Acc
-    } = State,
-    Counters = fabric_dict:erase(Worker, Counters0),
-    Seqs = fabric_dict:erase(Worker, Seqs0),
-    case fabric_view:is_progress_possible(Counters) of
-    true ->
-        {ok, State#collector{counters = Counters, rows=Seqs}};
-    false ->
-        {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
-        {error, Resp}
-    end;
-
-handle_message(_, _, #collector{limit=0} = State) ->
-    {stop, State};
-
-handle_message(#change{key=Seq} = Row0, {Worker, From}, St) ->
-    #collector{
-        query_args = #changes_args{include_docs=IncludeDocs},
-        callback = Callback,
-        counters = S0,
-        limit = Limit,
-        user_acc = AccIn
-    } = St,
-    case fabric_dict:lookup_element(Worker, S0) of
-    undefined ->
-        % this worker lost the race with other partition copies, terminate it
-        gen_server:reply(From, stop),
-        {ok, St};
-    _ ->
-        S1 = fabric_dict:store(Worker, Seq, S0),
-        S2 = fabric_view:remove_overlapping_shards(Worker, S1),
-        % this check should not be necessary at all, as holes in the ranges
-        % created from DOWN messages would have led to errors
-        case fabric_view:is_progress_possible(S2) of
-        true ->
-            Row = Row0#change{key = pack_seqs(S2)},
-            {Go, Acc} = Callback(changes_row(Row, IncludeDocs), AccIn),
-            gen_server:reply(From, Go),
-            {Go, St#collector{counters=S2, limit=Limit-1, user_acc=Acc}};
-        false ->
-            Reason = {range_not_covered, <<"progress not possible">>},
-            Callback({error, Reason}, AccIn),
-            gen_server:reply(From, stop),
-            {stop, St#collector{counters=S2}}
-        end
-    end;
-
-handle_message({complete, EndSeq}, Worker, State) ->
-    #collector{
-        callback = Callback,
-        counters = S0,
-        total_rows = Completed, % field repurposed as completed-worker count
-        user_acc = Acc
-    } = State,
-    case fabric_dict:lookup_element(Worker, S0) of
-    undefined ->
-        {ok, State};
-    _ ->
-        S1 = fabric_dict:store(Worker, EndSeq, S0),
-        % unlikely to have overlaps here, but possible w/ filters
-        S2 = fabric_view:remove_overlapping_shards(Worker, S1),
-        NewState = State#collector{counters=S2, total_rows=Completed+1},
-        case fabric_dict:size(S2) =:= (Completed+1) of
-        true ->
-            % check ranges are covered, again this should not be necessary
-            % as any holes in the ranges due to DOWN messages would have errored
-            % out sooner
-            case fabric_view:is_progress_possible(S2) of
-            true ->
-                {stop, NewState};
-            false ->
-                Reason = {range_not_covered, <<"progress not possible">>},
-                Callback({error, Reason}, Acc),
-                {stop, NewState}
-            end;
-        false ->
-            {ok, NewState}
-        end
-    end.
-
-make_changes_args(#changes_args{style=Style, filter=undefined}=Args) ->
-    Args#changes_args{filter = Style};
-make_changes_args(Args) ->
-    Args.
-
-get_start_seq(_DbName, #changes_args{dir=fwd, since=Since}) ->
-    Since;
-get_start_seq(DbName, #changes_args{dir=rev}) ->
-    Shards = mem3:shards(DbName),
-    Workers = fabric_util:submit_jobs(Shards, get_update_seq, []),
-    {ok, Since} = fabric_util:recv(Workers, #shard.ref,
-        fun collect_update_seqs/3, fabric_dict:init(Workers, -1)),
-    Since.
-
-collect_update_seqs(Seq, Shard, Counters) when is_integer(Seq) ->
-    case fabric_dict:lookup_element(Shard, Counters) of
-    undefined ->
-        % already heard from someone else in this range
-        {ok, Counters};
-    -1 ->
-        C1 = fabric_dict:store(Shard, Seq, Counters),
-        C2 = fabric_view:remove_overlapping_shards(Shard, C1),
-        case fabric_dict:any(-1, C2) of
-        true ->
-            {ok, C2};
-        false ->
-            {stop, pack_seqs(C2)}
-        end
-    end.
-
-pack_seqs(Workers) ->
-    SeqList = [{N,R,S} || {#shard{node=N, range=R}, S} <- Workers],
-    SeqSum = lists:sum(element(2, lists:unzip(Workers))),
-    Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
-    [SeqSum, Opaque].
-
-unpack_seqs(0, DbName) ->
-    fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs("0", DbName) ->
-    fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs([_SeqNum, Opaque], DbName) ->
-    do_unpack_seqs(Opaque, DbName);
-
-unpack_seqs(Packed, DbName) ->
-    NewPattern = "^\\[[0-9]+\s*,\s*\"(?<opaque>.*)\"\\]$",
-    OldPattern = "^\"?([0-9]+-)?(?<opaque>.*?)\"?$",
-    Options = [{capture, [opaque], binary}],
-    Opaque = case re:run(Packed, NewPattern, Options) of
-    {match, Match} ->
-        Match;
-    nomatch ->
-        {match, Match} = re:run(Packed, OldPattern, Options),
-        Match
-    end,
-    do_unpack_seqs(Opaque, DbName).
-
-do_unpack_seqs(Opaque, DbName) ->
-    % A preventative fix for FB 13533 to remove duplicate shards.
-    % This just picks each unique shard and keeps the largest seq
-    % value recorded.
-    Decoded = binary_to_term(couch_util:decodeBase64Url(Opaque)),
-    DedupDict = lists:foldl(fun({Node, [A, B], Seq}, Acc) ->
-        dict:append({Node, [A, B]}, Seq, Acc)
-    end, dict:new(), Decoded),
-    Deduped = lists:map(fun({{Node, [A, B]}, SeqList}) ->
-        {Node, [A, B], lists:max(SeqList)}
-    end, dict:to_list(DedupDict)),
-
-    % Create a fabric_dict of {Shard, Seq} entries
-    % TODO relies on internal structure of fabric_dict as keylist
-    Unpacked = lists:flatmap(fun({Node, [A,B], Seq}) ->
-        case mem3:get_shard(DbName, Node, [A,B]) of
-        {ok, Shard} ->
-            [{Shard, Seq}];
-        {error, not_found} ->
-            []
-        end
-    end, Deduped),
-
-    % Fill holes in the since sequence. If/when we ever start
-    % using overlapping shard ranges this will need to be updated
-    % to not include shard ranges that overlap entries in Unpacked.
-    % A quick and dirty approach would be like such:
-    %
-    %   lists:foldl(fun(S, Acc) ->
-    %       fabric_view:remove_overlapping_shards(S, Acc)
-    %   end, mem3:shards(DbName), Unpacked)
-    %
-    % Unfortunately remove_overlapping_shards isn't reusable because
-    % of its calls to rexi:kill/2. When we get to overlapping
-    % shard ranges and have to rewrite shard range management
-    % we can revisit this simpler algorithm.
-    case fabric_view:is_progress_possible(Unpacked) of
-        true ->
-            Unpacked;
-        false ->
-            Ranges = lists:usort([R || #shard{range=R} <- Unpacked]),
-            Filter = fun(S) -> not lists:member(S#shard.range, Ranges) end,
-            Replacements = lists:filter(Filter, mem3:shards(DbName)),
-            Unpacked ++ [{R, 0} || R <- Replacements]
-    end.
-
-changes_row(#change{key=Seq, id=Id, value=Value, deleted=true, doc=Doc}, true) ->
-    {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}, {doc, Doc}]}};
-changes_row(#change{key=Seq, id=Id, value=Value, deleted=true}, false) ->
-    {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}]}};
-changes_row(#change{key=Seq, id=Id, value=Value, doc={error,Reason}}, true) ->
-    {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {error,Reason}]}};
-changes_row(#change{key=Seq, id=Id, value=Value, doc=Doc}, true) ->
-    {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {doc,Doc}]}};
-changes_row(#change{key=Seq, id=Id, value=Value}, false) ->
-    {change, {[{seq,Seq}, {id,Id}, {changes,Value}]}}.
-
-find_replacement_shards(#shard{range=Range}, AllShards) ->
-    % TODO make this better -- we might have split or merged the partition
-    [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
-
-validate_start_seq(DbName, Seq) ->
-    try unpack_seqs(Seq, DbName) of _Any ->
-        ok
-    catch
-        error:database_does_not_exist ->
-            {error, database_does_not_exist};
-        _:_ ->
-            Reason = <<"Malformed sequence supplied in 'since' parameter.">>,
-            {error, {bad_request, Reason}}
-    end.
-
-unpack_seqs_test() ->
-    meck:new(mem3),
-    meck:new(fabric_view),
-    meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
-    meck:expect(fabric_view, is_progress_possible, fun(_) -> true end),
-
-    % BigCouch 0.3 style.
-    assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
-
-    % BigCouch 0.4 style.
-    assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
-
-    % BigCouch 0.4 style (as string).
-    assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-    assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-    assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-    assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-
-    % with internal hyphen
-    assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-    "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-    "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
-    assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-    "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-    "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
-
-    % CouchDB 1.2 style
-    assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-    "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-    "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\""),
-
-    meck:unload(fabric_view),
-    meck:unload(mem3).
-
-assert_shards(Packed) ->
-    ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
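
For context on the sequence format handled above: pack_seqs/1 sums the
per-shard sequence numbers into a rough progress counter and hides the
real per-shard state in an opaque, base64url-encoded, compressed external
term. A minimal round-trip sketch (the node names and ranges are made up):

    SeqList = [{'node1@host', [0, 2147483647], 10},
               {'node2@host', [2147483648, 4294967295], 12}],
    Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
    Packed = [22, Opaque],  % 22 = 10 + 12, what clients see as the update seq
    % do_unpack_seqs/2 reverses the encoding before deduplication
    SeqList = binary_to_term(couch_util:decodeBase64Url(Opaque)).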

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_view_map.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
deleted file mode 100644
index 9e41c11..0000000
--- a/src/fabric/src/fabric_view_map.erl
+++ /dev/null
@@ -1,147 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_map).
-
--export([go/6]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
-    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
-    go(DbName, DDoc, View, Args, Callback, Acc0);
-
-go(DbName, DDoc, View, Args, Callback, Acc0) ->
-    Shards = fabric_view:get_shards(DbName, Args),
-    Workers = fabric_util:submit_jobs(Shards, map_view, [DDoc, View, Args]),
-    #mrargs{limit = Limit, skip = Skip, keys = Keys} = Args,
-    State = #collector{
-        db_name=DbName,
-        query_args = Args,
-        callback = Callback,
-        counters = fabric_dict:init(Workers, 0),
-        skip = Skip,
-        limit = Limit,
-        keys = fabric_view:keydict(Keys),
-        sorted = Args#mrargs.sorted,
-        user_acc = Acc0
-    },
-    RexiMon = fabric_util:create_monitors(Workers),
-    try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
-        State, infinity, 1000 * 60 * 60) of
-    {ok, NewState} ->
-        {ok, NewState#collector.user_acc};
-    {timeout, NewState} ->
-        Callback({error, timeout}, NewState#collector.user_acc);
-    {error, Resp} ->
-        {ok, Resp}
-    after
-        rexi_monitor:stop(RexiMon),
-        fabric_util:cleanup(Workers)
-    end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
-    fabric_view:remove_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
-    #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
-    Counters = fabric_dict:erase(Worker, Counters0),
-    case fabric_view:is_progress_possible(Counters) of
-    true ->
-        {ok, State#collector{counters = Counters}};
-    false ->
-        {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
-        {error, Resp}
-    end;
-
-handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
-    #collector{
-        callback = Callback,
-        counters = Counters0,
-        total_rows = Total0,
-        offset = Offset0,
-        user_acc = AccIn
-    } = State,
-    case fabric_dict:lookup_element(Worker, Counters0) of
-    undefined ->
-        % this worker lost the race with other partition copies, terminate
-        gen_server:reply(From, stop),
-        {ok, State};
-    0 ->
-        gen_server:reply(From, ok),
-        Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
-        Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
-        Total = Total0 + Tot,
-        Offset = Offset0 + Off,
-        case fabric_dict:any(0, Counters2) of
-        true ->
-            {ok, State#collector{
-                counters = Counters2,
-                total_rows = Total,
-                offset = Offset
-            }};
-        false ->
-            FinalOffset = erlang:min(Total, Offset+State#collector.skip),
-            {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
-            {Go, State#collector{
-                counters = fabric_dict:decrement_all(Counters2),
-                total_rows = Total,
-                offset = FinalOffset,
-                user_acc = Acc
-            }}
-        end
-    end;
-
-handle_message(#view_row{}, {_, _}, #collector{limit=0} = State) ->
-    #collector{callback=Callback} = State,
-    {_, Acc} = Callback(complete, State#collector.user_acc),
-    {stop, State#collector{user_acc=Acc}};
-
-handle_message(#view_row{} = Row, {_,From}, #collector{sorted=false} = St) ->
-    #collector{callback=Callback, user_acc=AccIn, limit=Limit} = St,
-    {Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
-    rexi:stream_ack(From),
-    {Go, St#collector{user_acc=Acc, limit=Limit-1}};
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
-    #collector{
-        query_args = #mrargs{direction=Dir},
-        counters = Counters0,
-        rows = Rows0,
-        keys = KeyDict
-    } = State,
-    Rows = merge_row(Dir, KeyDict, Row#view_row{worker={Worker, From}}, Rows0),
-    Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
-    State1 = State#collector{rows=Rows, counters=Counters1},
-    fabric_view:maybe_send_row(State1);
-
-handle_message(complete, Worker, State) ->
-    Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
-    fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
-merge_row(fwd, undefined, Row, Rows) ->
-    lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
-        couch_view:less_json([KeyA, IdA], [KeyB, IdB])
-    end, [Row], Rows);
-merge_row(rev, undefined, Row, Rows) ->
-    lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
-        couch_view:less_json([KeyB, IdB], [KeyA, IdA])
-    end, [Row], Rows);
-merge_row(_, KeyDict, Row, Rows) ->
-    lists:merge(fun(#view_row{key=A, id=IdA}, #view_row{key=B, id=IdB}) ->
-        if A =:= B -> IdA < IdB; true ->
-            dict:fetch(A, KeyDict) < dict:fetch(B, KeyDict)
-        end
-    end, [Row], Rows).
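
For context, merge_row/4 above leans on each shard streaming its rows in
sorted order: merging a one-element list into the accumulator with
lists:merge/3 is effectively an ordered insert under view collation. A
small sketch with plain {Key, Id} tuples standing in for #view_row{}:

    Less = fun({KA, IdA}, {KB, IdB}) ->
        couch_view:less_json([KA, IdA], [KB, IdB])
    end,
    Rows0 = [{1, <<"a">>}, {2, <<"b">>}],
    % a duplicate key falls back to ordering by doc id
    [{1, <<"a">>}, {1, <<"aa">>}, {2, <<"b">>}] =
        lists:merge(Less, [{1, <<"aa">>}], Rows0).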

http://git-wip-us.apache.org/repos/asf/couchdb/blob/753e7462/src/fabric/src/fabric_view_reduce.erl
----------------------------------------------------------------------
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
deleted file mode 100644
index c922a7f..0000000
--- a/src/fabric/src/fabric_view_reduce.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_reduce).
-
--export([go/6]).
-
--include_lib("fabric/include/fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
-    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
-    go(DbName, DDoc, View, Args, Callback, Acc0);
-
-go(DbName, DDoc, VName, Args, Callback, Acc0) ->
-    Group = couch_view_group:design_doc_to_view_group(DDoc),
-    Lang = couch_view_group:get_language(Group),
-    Views = couch_view_group:get_views(Group),
-    {NthRed, View} = fabric_view:extract_view(nil, VName, Views, reduce),
-    {VName, RedSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
-    Workers = lists:map(fun(#shard{name=Name, node=N} = Shard) ->
-        Ref = rexi:cast(N, {fabric_rpc, reduce_view, [Name,DDoc,VName,Args]}),
-        Shard#shard{ref = Ref}
-    end, fabric_view:get_shards(DbName, Args)),
-    RexiMon = fabric_util:create_monitors(Workers),
-    #mrargs{limit = Limit, skip = Skip} = Args,
-    OsProc = case os_proc_needed(RedSrc) of
-        true -> couch_query_servers:get_os_process(Lang);
-        _ -> nil
-    end,
-    State = #collector{
-        db_name = DbName,
-        query_args = Args,
-        callback = Callback,
-        counters = fabric_dict:init(Workers, 0),
-        keys = Args#mrargs.keys,
-        skip = Skip,
-        limit = Limit,
-        lang = Lang,
-        os_proc = OsProc,
-        reducer = RedSrc,
-        rows = dict:new(),
-        user_acc = Acc0
-    },
-    try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
-        State, infinity, 1000 * 60 * 60) of
-    {ok, NewState} ->
-        {ok, NewState#collector.user_acc};
-    {timeout, NewState} ->
-        Callback({error, timeout}, NewState#collector.user_acc);
-    {error, Resp} ->
-        {ok, Resp}
-    after
-        rexi_monitor:stop(RexiMon),
-        fabric_util:cleanup(Workers),
-        case State#collector.os_proc of
-            nil -> ok;
-            OsProc -> catch couch_query_servers:ret_os_process(OsProc)
-        end
-    end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
-    fabric_view:remove_down_shards(State, NodeRef);
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
-    #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
-    Counters = fabric_dict:erase(Worker, Counters0),
-    case fabric_view:is_progress_possible(Counters) of
-    true ->
-        {ok, State#collector{counters = Counters}};
-    false ->
-        {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
-        {error, Resp}
-    end;
-
-handle_message(#view_row{key=Key} = Row, {Worker, From}, State) ->
-    #collector{counters = Counters0, rows = Rows0} = State,
-    case fabric_dict:lookup_element(Worker, Counters0) of
-    undefined ->
-        % this worker lost the race with other partition copies, terminate it
-        gen_server:reply(From, stop),
-        {ok, State};
-    _ ->
-        Rows = dict:append(Key, Row#view_row{worker={Worker, From}}, Rows0),
-        C1 = fabric_dict:update_counter(Worker, 1, Counters0),
-        % TODO time this call, if slow don't do it every time
-        C2 = fabric_view:remove_overlapping_shards(Worker, C1),
-        State1 = State#collector{rows=Rows, counters=C2},
-        fabric_view:maybe_send_row(State1)
-    end;
-
-handle_message(complete, Worker, #collector{counters = Counters0} = State) ->
-    case fabric_dict:lookup_element(Worker, Counters0) of
-    undefined ->
-        {ok, State};
-    _ ->
-        C1 = fabric_dict:update_counter(Worker, 1, Counters0),
-        C2 = fabric_view:remove_overlapping_shards(Worker, C1),
-        fabric_view:maybe_send_row(State#collector{counters = C2})
-    end.
-
-complete_worker_test() ->
-    Shards =
-        mem3_util:create_partition_map("foo",3,3,[node(),node(),node()]),
-    Workers = lists:map(fun(#shard{} = Shard) ->
-                            Ref = make_ref(),
-                            Shard#shard{ref = Ref}
-                        end,
-                        Shards),
-    State = #collector{counters=fabric_dict:init(Workers,0)},
-    {ok, NewState} = handle_message(complete, lists:nth(2,Workers), State),
-    ?assertEqual(orddict:size(NewState#collector.counters),length(Workers) - 2).
-
-os_proc_needed(<<"_", _/binary>>) -> false;
-os_proc_needed(_) -> true.
-


[26/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/ets_lru


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/ed8c2fb2
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/ed8c2fb2
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/ed8c2fb2

Branch: refs/heads/1843-feature-bigcouch
Commit: ed8c2fb2f50dda679aff79996667d8e2ffe8ad54
Parents: 191a9b4
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:40:55 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:40:55 2014 -0600

----------------------------------------------------------------------
 src/ets_lru/src/ets_lru.app.src         |  24 --
 src/ets_lru/src/ets_lru.erl             | 311 --------------
 src/ets_lru/test/01-basic-behavior.t    |  91 ----
 src/ets_lru/test/02-lru-options.t       |  61 ---
 src/ets_lru/test/03-limit-max-objects.t |  26 --
 src/ets_lru/test/04-limit-max-size.t    |  26 --
 src/ets_lru/test/05-limit-lifetime.t    |  23 -
 src/ets_lru/test/etap.erl               | 612 ---------------------------
 src/ets_lru/test/tutil.erl              |  29 --
 9 files changed, 1203 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/src/ets_lru.app.src
----------------------------------------------------------------------
diff --git a/src/ets_lru/src/ets_lru.app.src b/src/ets_lru/src/ets_lru.app.src
deleted file mode 100644
index 2573a0f..0000000
--- a/src/ets_lru/src/ets_lru.app.src
+++ /dev/null
@@ -1,24 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, ets_lru, [
-    {description, "ETS Base LRU Cache"},
-    {vsn, git},
-    {modules, [
-        ets_lru
-    ]},
-    {registered, []},
-    {applications, [
-        kernel,
-        stdlib
-    ]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/src/ets_lru.erl
----------------------------------------------------------------------
diff --git a/src/ets_lru/src/ets_lru.erl b/src/ets_lru/src/ets_lru.erl
deleted file mode 100644
index 5880bd5..0000000
--- a/src/ets_lru/src/ets_lru.erl
+++ /dev/null
@@ -1,311 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(ets_lru).
--behavior(gen_server).
-
-
--export([
-    start_link/2,
-    stop/1,
-
-    insert/3,
-    lookup/2,
-    remove/2,
-    clear/1,
-
-    % Dirty functions read straight from
-    % the ETS tables which means there are
-    % race conditions with concurrent access.
-    lookup_d/2
-]).
-
--export([
-    init/1,
-    terminate/2,
-
-    handle_call/3,
-    handle_cast/2,
-    handle_info/2,
-
-    code_change/3
-]).
-
-
--record(entry, {
-    key,
-    val,
-    atime,
-    ctime
-}).
-
--record(st, {
-    objects,
-    atimes,
-    ctimes,
-
-    max_objs,
-    max_size,
-    max_lifetime
-}).
-
-
-start_link(Name, Options) when is_atom(Name) ->
-    gen_server:start_link({local, Name}, ?MODULE, {Name, Options}, []).
-
-
-stop(LRU) ->
-    gen_server:cast(LRU, stop).
-
-
-lookup(LRU, Key) ->
-    gen_server:call(LRU, {lookup, Key}).
-
-
-insert(LRU, Key, Val) ->
-    gen_server:call(LRU, {insert, Key, Val}).
-
-
-remove(LRU, Key) ->
-    gen_server:call(LRU, {remove, Key}).
-
-
-clear(LRU) ->
-    gen_server:call(LRU, clear).
-
-
-lookup_d(Name, Key) when is_atom(Name) ->
-    case ets:lookup(obj_table(Name), Key) of
-        [#entry{val=Val}] ->
-            gen_server:cast(Name, {accessed, Key}),
-            {ok, Val};
-        [] ->
-            not_found
-    end.
-
-
-init({Name, Options}) ->
-    St = set_options(#st{}, Options),
-    ObjOpts = [set, named_table, protected, {keypos, #entry.key}],
-    TimeOpts = [ordered_set, named_table, protected],
-
-    {ok, St#st{
-        objects = ets:new(obj_table(Name), ObjOpts),
-        atimes = ets:new(at_table(Name), TimeOpts),
-        ctimes = ets:new(ct_table(Name), TimeOpts)
-    }}.
-
-
-terminate(_Reason, St) ->
-    true = ets:delete(St#st.objects),
-    true = ets:delete(St#st.atimes),
-    true = ets:delete(St#st.ctimes),
-    ok.
-
-
-handle_call({lookup, Key}, _From, St) ->
-    Reply = case ets:lookup(St#st.objects, Key) of
-        [#entry{val=Val}] ->
-            accessed(Key, St),
-            {ok, Val};
-        [] ->
-            not_found
-    end,
-    {reply, Reply, St, 0};
-
-handle_call({insert, Key, Val}, _From, St) ->
-    NewATime = erlang:now(),
-    Pattern = #entry{key=Key, atime='$1', _='_'},
-    case ets:match(St#st.objects, Pattern) of
-        [[ATime]] ->
-            Update = {#entry.val, Val},
-            true = ets:update_element(St#st.objects, Key, Update),
-            true = ets:delete(St#st.atimes, ATime),
-            true = ets:insert(St#st.atimes, {NewATime, Key});
-        [] ->
-            Entry = #entry{key=Key, val=Val, atime=NewATime, ctime=NewATime},
-            true = ets:insert(St#st.objects, Entry),
-            true = ets:insert(St#st.atimes, {NewATime, Key}),
-            true = ets:insert(St#st.ctimes, {NewATime, Key})
-    end,
-    {reply, ok, St, 0};
-
-handle_call({remove, Key}, _From, St) ->
-    Pattern = #entry{key=Key, atime='$1', ctime='$2', _='_'},
-    Reply = case ets:match(St#st.objects, Pattern) of
-        [[ATime, CTime]] ->
-            true = ets:delete(St#st.objects, Key),
-            true = ets:delete(St#st.atimes, ATime),
-            true = ets:delete(St#st.ctimes, CTime),
-            ok;
-        [] ->
-            not_found
-    end,
-    {reply, Reply, St, 0};
-
-handle_call(clear, _From, St) ->
-    true = ets:delete_all_objects(St#st.objects),
-    true = ets:delete_all_objects(St#st.atimes),
-    true = ets:delete_all_objects(St#st.ctimes),
-    % No need to timeout here and evict cache
-    % entries because it's now empty.
-    {reply, ok, St};
-
-handle_call(Msg, _From, St) ->
-    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-
-handle_cast({accessed, Key}, St) ->
-    accessed(Key, St),
-    {noreply, St, 0};
-
-handle_cast(stop, St) ->
-    {stop, normal, St};
-
-handle_cast(Msg, St) ->
-    {stop, {invalid_cast, Msg}, St}.
-
-
-handle_info(timeout, St) ->
-    trim(St),
-    {noreply, St, next_timeout(St)};
-
-handle_info(Msg, St) ->
-    {stop, {invalid_info, Msg}, St}.
-
-
-code_change(_OldVsn, St, _Extra) ->
-    {ok, St}.
-
-
-accessed(Key, St) ->
-    Pattern = #entry{key=Key, atime='$1', _='_'},
-    case ets:match(St#st.objects, Pattern) of
-        [[ATime]] ->
-            NewATime = erlang:now(),
-            Update = {#entry.atime, NewATime},
-            true = ets:update_element(St#st.objects, Key, Update),
-            true = ets:delete(St#st.atimes, ATime),
-            true = ets:insert(St#st.atimes, {NewATime, Key}),
-            ok;
-        [] ->
-            ok
-    end.
-
-
-trim(St) ->
-    trim_count(St),
-    trim_size(St),
-    trim_lifetime(St).
-
-
-trim_count(#st{max_objs=undefined}) ->
-    ok;
-trim_count(#st{max_objs=Max}=St) ->
-    case ets:info(St#st.objects, size) > Max of
-        true ->
-            drop_lru(St, fun trim_count/1);
-        false ->
-            ok
-    end.
-
-
-trim_size(#st{max_size=undefined}) ->
-    ok;
-trim_size(#st{max_size=Max}=St) ->
-    case ets:info(St#st.objects, memory) > Max of
-        true ->
-            drop_lru(St, fun trim_size/1);
-        false ->
-            ok
-    end.
-
-
-trim_lifetime(#st{max_lifetime=undefined}) ->
-    ok;
-trim_lifetime(#st{max_lifetime=Max}=St) ->
-    Now = os:timestamp(),
-    case ets:first(St#st.ctimes) of
-        '$end_of_table' ->
-            ok;
-        CTime ->
-            DiffInMilli = timer:now_diff(Now, CTime) div 1000,
-            case DiffInMilli > Max of
-                true ->
-                    [{CTime, Key}] = ets:lookup(St#st.ctimes, CTime),
-                    Pattern = #entry{key=Key, atime='$1', _='_'},
-                    [[ATime]] = ets:match(St#st.objects, Pattern),
-                    true = ets:delete(St#st.objects, Key),
-                    true = ets:delete(St#st.atimes, ATime),
-                    true = ets:delete(St#st.ctimes, CTime),
-                    trim_lifetime(St);
-                false ->
-                    ok
-            end
-    end.
-
-
-drop_lru(St, Continue) ->
-    case ets:first(St#st.atimes) of
-        '$end_of_table' ->
-            empty;
-        ATime ->
-            [{ATime, Key}] = ets:lookup(St#st.atimes, ATime),
-            Pattern = #entry{key=Key, ctime='$1', _='_'},
-            [[CTime]] = ets:match(St#st.objects, Pattern),
-            true = ets:delete(St#st.objects, Key),
-            true = ets:delete(St#st.atimes, ATime),
-            true = ets:delete(St#st.ctimes, CTime),
-            Continue(St)
-    end.
-
-
-next_timeout(#st{max_lifetime=undefined}) ->
-    infinity;
-next_timeout(St) ->
-    case ets:first(St#st.ctimes) of
-        '$end_of_table' ->
-            infinity;
-        CTime ->
-            Now = os:timestamp(),
-            DiffInMilli = timer:now_diff(Now, CTime) div 1000,
-            erlang:max(St#st.max_lifetime - DiffInMilli, 0)
-    end.
-
-
-set_options(St, []) ->
-    St;
-set_options(St, [{max_objects, N} | Rest]) when is_integer(N), N > 0 ->
-    set_options(St#st{max_objs=N}, Rest);
-set_options(St, [{max_size, N} | Rest]) when is_integer(N), N > 0 ->
-    set_options(St#st{max_size=N}, Rest);
-set_options(St, [{max_lifetime, N} | Rest]) when is_integer(N), N > 0 ->
-    set_options(St#st{max_lifetime=N}, Rest);
-set_options(_, [Opt | _]) ->
-    throw({invalid_option, Opt}).
-
-
-obj_table(Name) ->
-    table_name(Name, "_objects").
-
-
-at_table(Name) ->
-    table_name(Name, "_atimes").
-
-
-ct_table(Name) ->
-    table_name(Name, "_ctimes").
-
-
-table_name(Name, Ext) ->
-    list_to_atom(atom_to_list(Name) ++ Ext).
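
A usage sketch for the module above (the name and limits are arbitrary).
Note the {reply, ..., St, 0} shape of the call handlers: every operation
arms a zero-length timeout, so trimming runs in handle_info(timeout, St)
right after the reply and next_timeout/1 schedules the following sweep:

    {ok, LRU} = ets_lru:start_link(my_cache, [{max_objects, 1000},
                                              {max_lifetime, 60000}]), % ms
    ok = ets_lru:insert(LRU, <<"key">>, <<"v1">>),
    {ok, <<"v1">>} = ets_lru:lookup(LRU, <<"key">>),
    % lookup_d/2 reads the protected ETS table directly by registered
    % name, trading the documented race for one less gen_server call
    {ok, <<"v1">>} = ets_lru:lookup_d(my_cache, <<"key">>),
    ok = ets_lru:stop(LRU).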

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/01-basic-behavior.t
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/01-basic-behavior.t b/src/ets_lru/test/01-basic-behavior.t
deleted file mode 100755
index 7e87a5d..0000000
--- a/src/ets_lru/test/01-basic-behavior.t
+++ /dev/null
@@ -1,91 +0,0 @@
-#! /usr/bin/env escript
-
--define(WITH_LRU(F), tutil:with_lru(fun(LRU) -> F(LRU) end)).
-
-main([]) ->
-    code:add_pathz("test"),
-    code:add_pathz("ebin"),
-
-    tutil:run(16, fun() -> test() end).
-
-
-test() ->
-    test_lifecycle(),
-    test_table_names(),
-    ?WITH_LRU(test_insert_lookup),
-    ?WITH_LRU(test_insert_overwrite),
-    ?WITH_LRU(test_insert_remove),
-    ?WITH_LRU(test_clear),
-
-    ok.
-
-
-test_lifecycle() ->
-    Resp = ets_lru:start_link(?MODULE, []),
-    etap:fun_is(
-        fun({ok, LRU}) when is_pid(LRU) -> true; (_) -> false end,
-        Resp,
-        "ets_lru:start_link/2 returned an LRU"
-    ),
-    {ok, LRU} = Resp,
-    etap:is(ok, ets_lru:stop(LRU), "Destroyed the LRU ok").
-
-
-test_table_names() ->
-    {ok, LRU} = ets_lru:start_link(foo, []),
-    Exists = fun(Name) -> ets:info(Name, size) == 0 end,
-    NExists = fun(Name) -> ets:info(Name, size) == undefined end,
-    etap:is(Exists(foo_objects), true, "foo_objects exists"),
-    etap:is(Exists(foo_atimes), true, "foo_atimes exists"),
-    etap:is(Exists(foo_ctimes), true, "foo_ctimes exists"),
-
-    Ref = erlang:monitor(process, LRU),
-    ets_lru:stop(LRU),
-
-    receive {'DOWN', Ref, process, LRU, Reason} -> ok end,
-    etap:is(Reason, normal, "LRU stopped normally"),
-
-    etap:is(NExists(foo_objects), true, "foo_objects doesn't exist"),
-    etap:is(NExists(foo_atimes), true, "foo_atimes doesn't exist"),
-    etap:is(NExists(foo_ctimes), true, "foo_ctimes doesn't exist"),
-
-    ok.
-
-
-test_insert_lookup(LRU) ->
-    ok = ets_lru:insert(LRU, foo, bar),
-    Resp = ets_lru:lookup(LRU, foo),
-    etap:is(Resp, {ok, bar}, "Lookup returned the inserted value").
-
-
-test_insert_lookup_d(LRU) ->
-    ok = ets_lru:insert(LRU, foo, bar),
-    Resp = ets_lru:lookup_d(test_lru, foo),
-    etap:is(Resp, {ok, bar}, "Dirty lookup returned the inserted value").
-
-
-test_insert_overwrite(LRU) ->
-    ok = ets_lru:insert(LRU, foo, bar),
-    Resp1 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp1, {ok, bar}, "Lookup returned the inserted value"),
-    ok = ets_lru:insert(LRU, foo, bam),
-    Resp2 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp2, {ok, bam}, "Lookup returned the newly inserted value").
-
-
-test_insert_remove(LRU) ->
-    ok = ets_lru:insert(LRU, foo, bar),
-    Resp1 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp1, {ok, bar}, "Lookup returned the inserted value"),
-    ok = ets_lru:remove(LRU, foo),
-    Resp2 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp2, not_found, "Lookup returned not_found for removed value").
-
-
-test_clear(LRU) ->
-    ok = ets_lru:insert(LRU, foo, bar),
-    Resp1 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp1, {ok, bar}, "Lookup returned the inserted value"),
-    ok = ets_lru:clear(LRU),
-    Resp2 = ets_lru:lookup(LRU, foo),
-    etap:is(Resp2, not_found, "Lookup returned not_found after a clear").
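
The tests above go through tutil:with_lru/1 and tutil:run/2; underneath,
the etap flow is simply plan, assert, end. A hypothetical standalone
script using the same etap module bundled below:

    #! /usr/bin/env escript
    main([]) ->
        code:add_pathz("test"),
        etap:plan(2),
        etap:is(1 + 1, 2, "arithmetic works"),
        etap:ok(is_list("abc"), "strings are lists"),
        etap:end_tests().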

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/02-lru-options.t
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/02-lru-options.t b/src/ets_lru/test/02-lru-options.t
deleted file mode 100755
index 59d0ba1..0000000
--- a/src/ets_lru/test/02-lru-options.t
+++ /dev/null
@@ -1,61 +0,0 @@
-#! /usr/bin/env escript
-
-main([]) ->
-    code:add_pathz("test"),
-    code:add_pathz("ebin"),
-
-    tutil:run(9, fun() -> test() end).
-
-
-test() ->
-    test_max_objects(),
-    test_max_size(),
-    test_lifetime(),
-    test_bad_option(),
-
-    ok.
-
-
-test_max_objects() ->
-    % See also: 03-limit-max-objects.t
-    test_good([{max_objects, 5}]),
-    test_good([{max_objects, 1}]),
-    test_good([{max_objects, 923928342098203942}]).
-
-
-test_max_size() ->
-    % See also: 04-limit-max-size.t
-    test_good([{max_size, 1}]),
-    test_good([{max_size, 5}]),
-    test_good([{max_size, 2342923423942309423094}]).
-
-
-test_lifetime() ->
-    % See also: 05-limit-lifetime.t
-    test_good([{max_lifetime, 1}]),
-    test_good([{max_lifetime, 5}]),
-    test_good([{max_lifetime, 1244209909180928348}]).
-
-
-test_bad_option() ->
-    % Figure out a test for these.
-    %test_bad([{bingo, bango}]),
-    %test_bad([12]),
-    %test_bad([true]).
-    ok.
-
-
-test_good(Options) ->
-    Msg = io_lib:format("LRU created ok with options: ~w", [Options]),
-    etap:fun_is(fun
-        ({ok, LRU}) when is_pid(LRU) -> ets_lru:stop(LRU), true;
-        (_) -> false
-    end, ets_lru:start_link(?MODULE, Options), lists:flatten(Msg)).
-
-
-% test_bad(Options) ->
-%     etap:fun_is(fun
-%         ({invalid_option, _}) -> true;
-%         ({ok, LRU}) -> ets_lru:stop(LRU), false;
-%         (_) -> false
-%     end, catch ets_lru:start_link(?MODULE, Options), "LRU bad options").

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/03-limit-max-objects.t
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/03-limit-max-objects.t b/src/ets_lru/test/03-limit-max-objects.t
deleted file mode 100755
index bd4e793..0000000
--- a/src/ets_lru/test/03-limit-max-objects.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /usr/bin/env escript
-
-objs() -> 25.
-
-main([]) ->
-    code:add_pathz("test"),
-    code:add_pathz("ebin"),
-
-    tutil:run(1, fun() -> test() end).
-
-
-test() ->
-    {ok, LRU} = ets_lru:start_link(lru, [{max_objects, objs()}]),
-    etap:is(insert_kvs(LRU, 100 * objs()), ok, "Max object count ok"),
-    ok = ets_lru:stop(LRU).
-
-
-insert_kvs(_LRU, 0) ->
-    ok;
-insert_kvs(LRU, Count) ->
-    ets_lru:insert(LRU, Count, bar),
-    case ets:info(lru_objects, size) > objs() of
-        true -> erlang:error(exceeded_max_objects);
-        false -> ok
-    end,
-    insert_kvs(LRU, Count-1).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/04-limit-max-size.t
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/04-limit-max-size.t b/src/ets_lru/test/04-limit-max-size.t
deleted file mode 100755
index 5cdf0ce..0000000
--- a/src/ets_lru/test/04-limit-max-size.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /usr/bin/env escript
-
-max_size() -> 1024.
-
-main([]) ->
-    code:add_pathz("test"),
-    code:add_pathz("ebin"),
-
-    tutil:run(1, fun() -> test() end).
-
-
-test() ->
-    {ok, LRU} = ets_lru:start_link(lru, [{max_size, max_size()}]),
-    etap:is(insert_kvs(LRU, 10000), ok, "Max size ok"),
-    ok = ets_lru:stop(LRU).
-
-
-insert_kvs(_LRU, 0) ->
-    ok;
-insert_kvs(LRU, Count) ->
-    ets_lru:insert(LRU, Count, 1.5234),
-    case ets:info(lru_objects, memory) > max_size() of
-        true -> erlang:error(exceeded_max_size);
-        false -> ok
-    end,
-    insert_kvs(LRU, Count-1).
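
One caveat when reading the limit above: ets:info(Table, memory) counts
machine words, not bytes, so {max_size, 1024} caps the object table at
1024 words (8 KiB on a 64-bit emulator). To aim for a byte budget, scale
by the word size; a sketch with a made-up cache name:

    WordSize = erlang:system_info(wordsize),  % bytes per word, 8 on 64-bit
    MaxBytes = 64 * 1024,
    {ok, LRU} = ets_lru:start_link(sized_cache,
                                   [{max_size, MaxBytes div WordSize}]).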

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/05-limit-lifetime.t
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/05-limit-lifetime.t b/src/ets_lru/test/05-limit-lifetime.t
deleted file mode 100755
index 95effb2..0000000
--- a/src/ets_lru/test/05-limit-lifetime.t
+++ /dev/null
@@ -1,23 +0,0 @@
-#! /usr/bin/env escript
-
-lifetime() -> 100.
-
-main([]) ->
-    code:add_pathz("test"),
-    code:add_pathz("ebin"),
-
-    tutil:run(2, fun() -> test() end).
-
-
-test() ->
-    {ok, LRU} = ets_lru:start_link(lru, [{max_lifetime, lifetime()}]),
-    ok = test_single_entry(LRU),
-    ok = ets_lru:stop(LRU).
-
-
-test_single_entry(LRU) ->
-    ets_lru:insert(LRU, foo, bar),
-    etap:is(ets_lru:lookup(LRU, foo), {ok, bar}, "Expire leaves new entries"),
-    timer:sleep(round(lifetime() * 1.5)),
-    etap:is(ets_lru:lookup(LRU, foo), not_found, "Entry was expired"),
-    ok.
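
The expiry arithmetic exercised above: ctimes keys are erlang:now()-style
{MegaSecs, Secs, MicroSecs} tuples, and timer:now_diff/2 returns
microseconds, hence the div 1000 in trim_lifetime/1. A worked example
with made-up timestamps:

    CTime = {1391, 553655, 120000},              % insert time
    Now   = {1391, 553655, 270000},              % 150 ms later
    AgeMs = timer:now_diff(Now, CTime) div 1000, % => 150
    true  = AgeMs > 100.                         % past a 100 ms max_lifetime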

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/etap.erl
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/etap.erl b/src/ets_lru/test/etap.erl
deleted file mode 100644
index 6924d09..0000000
--- a/src/ets_lru/test/etap.erl
+++ /dev/null
@@ -1,612 +0,0 @@
-%% Copyright (c) 2008-2009 Nick Gerakines <ni...@gerakines.net>
-%%
-%% Permission is hereby granted, free of charge, to any person
-%% obtaining a copy of this software and associated documentation
-%% files (the "Software"), to deal in the Software without
-%% restriction, including without limitation the rights to use,
-%% copy, modify, merge, publish, distribute, sublicense, and/or sell
-%% copies of the Software, and to permit persons to whom the
-%% Software is furnished to do so, subject to the following
-%% conditions:
-%%
-%% The above copyright notice and this permission notice shall be
-%% included in all copies or substantial portions of the Software.
-%%
-%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-%% OTHER DEALINGS IN THE SOFTWARE.
-%%
-%% @author Nick Gerakines <ni...@gerakines.net> [http://socklabs.com/]
-%% @author Jeremy Wall <je...@marzhillstudios.com>
-%% @version 0.3.4
-%% @copyright 2007-2008 Jeremy Wall, 2008-2009 Nick Gerakines
-%% @reference http://testanything.org/wiki/index.php/Main_Page
-%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
-%% @todo Finish implementing the skip directive.
-%% @todo Document the messages handled by this receive loop.
-%% @todo Explain in documentation why we use a process to handle test input.
-%% @doc etap is a TAP testing module for Erlang components and applications.
-%% This module allows developers to test their software using the TAP method.
-%%
-%% <blockquote cite="http://en.wikipedia.org/wiki/Test_Anything_Protocol"><p>
-%% TAP, the Test Anything Protocol, is a simple text-based interface between
-%% testing modules in a test harness. TAP started life as part of the test
-%% harness for Perl but now has implementations in C/C++, Python, PHP, Perl
-%% and probably others by the time you read this.
-%% </p></blockquote>
-%%
-%% The testing process begins by defining a plan using etap:plan/1, running
-%% a number of etap tests and then calling etap:end_tests/0. Please refer to
-%% the Erlang modules in the t directory of this project for example tests.
--module(etap).
--vsn("0.3.4").
-
--export([
-    ensure_test_server/0,
-    start_etap_server/0,
-    test_server/1,
-    msg/1, msg/2,
-    diag/1, diag/2,
-    expectation_mismatch_message/3,
-    plan/1,
-    end_tests/0,
-    not_ok/2, ok/2, is_ok/2, is/3, isnt/3, any/3, none/3,
-    fun_is/3, expect_fun/3, expect_fun/4,
-    is_greater/3,
-    skip/1, skip/2,
-    datetime/1,
-    skip/3,
-    bail/0, bail/1,
-    test_state/0, failure_count/0
-]).
-
--export([
-    contains_ok/3,
-    is_before/4
-]).
-
--export([
-    is_pid/2,
-    is_alive/2,
-    is_mfa/3
-]).
-
--export([
-    loaded_ok/2,
-    can_ok/2, can_ok/3,
-    has_attrib/2, is_attrib/3,
-    is_behaviour/2
-]).
-
--export([
-    dies_ok/2,
-    lives_ok/2,
-    throws_ok/3
-]).
-
-
--record(test_state, {
-    planned = 0,
-    count = 0,
-    pass = 0,
-    fail = 0,
-    skip = 0,
-    skip_reason = ""
-}).
-
-%% @spec plan(N) -> Result
-%%       N = unknown | skip | {skip, string()} | integer()
-%%       Result = ok
-%% @doc Create a test plan and boot strap the test server.
-plan(unknown) ->
-    ensure_test_server(),
-    etap_server ! {self(), plan, unknown},
-    ok;
-plan(skip) ->
-    io:format("1..0 # skip~n");
-plan({skip, Reason}) ->
-    io:format("1..0 # skip ~s~n", [Reason]);
-plan(N) when is_integer(N), N > 0 ->
-    ensure_test_server(),
-    etap_server ! {self(), plan, N},
-    ok.
-
-%% @spec end_tests() -> ok
-%% @doc End the current test plan and output test results.
-%% @todo This should probably be done in the test_server process.
-end_tests() ->
-    case whereis(etap_server) of
-        undefined -> self() ! true;
-        _ -> etap_server ! {self(), state}
-    end,
-    State = receive X -> X end,
-    if
-        State#test_state.planned == -1 ->
-            io:format("1..~p~n", [State#test_state.count]);
-        true ->
-            ok
-    end,
-    case whereis(etap_server) of
-        undefined -> ok;
-        _ -> etap_server ! done, ok
-    end.
-
-bail() ->
-    bail("").
-
-bail(Reason) ->
-    etap_server ! {self(), diag, "Bail out! " ++ Reason},
-    etap_server ! done,
-    ok.
-
-%% @spec test_state() -> Return
-%%       Return = test_state_record() | {error, string()}
-%% @doc Return the current test state
-test_state() ->
-    etap_server ! {self(), state},
-    receive
-        X when is_record(X, test_state) -> X
-    after
-        1000 -> {error, "Timed out waiting for etap server reply.~n"}
-    end.
-
-%% @spec failure_count() -> Return
-%%       Return = integer() | {error, string()}
-%% @doc Return the current failure count
-failure_count() ->
-    case test_state() of
-        #test_state{fail=FailureCount} -> FailureCount;
-        X -> X
-    end.
-
-%% @spec msg(S) -> ok
-%%       S = string()
-%% @doc Print a message in the test output.
-msg(S) -> etap_server ! {self(), diag, S}, ok.
-
-%% @spec msg(Format, Data) -> ok
-%%      Format = atom() | string() | binary()
-%%      Data = [term()]
-%%      UnicodeList = [Unicode]
-%%      Unicode = int()
-%% @doc Print a message in the test output.
-%% Function arguments are passed through io_lib:format/2.
-msg(Format, Data) -> msg(io_lib:format(Format, Data)).
-
-%% @spec diag(S) -> ok
-%%       S = string()
-%% @doc Print a debug/status message related to the test suite.
-diag(S) -> msg("# " ++ S).
-
-%% @spec diag(Format, Data) -> ok
-%%      Format = atom() | string() | binary()
-%%      Data = [term()]
-%%      UnicodeList = [Unicode]
-%%      Unicode = int()
-%% @doc Print a debug/status message related to the test suite.
-%% Function arguments are passed through io_lib:format/2.
-diag(Format, Data) -> diag(io_lib:format(Format, Data)).
-
-%% @spec expectation_mismatch_message(Got, Expected, Desc) -> ok
-%%       Got = any()
-%%       Expected = any()
-%%       Desc = string()
-%% @doc Print an expectation mismatch message in the test output.
-expectation_mismatch_message(Got, Expected, Desc) ->
-    msg("    ---"),
-    msg("    description: ~p", [Desc]),
-    msg("    found:       ~p", [Got]),
-    msg("    wanted:      ~p", [Expected]),
-    msg("    ..."),
-    ok.
-
-%% @spec evaluate(Pass, Got, Expected, Desc) -> Result
-%%       Pass = true | false
-%%       Got = any()
-%%       Expected = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Evaluate a test statement, printing an expectation mismatch message
-%%       if the test failed.
-evaluate(Pass, Got, Expected, Desc) ->
-    case mk_tap(Pass, Desc) of
-        false ->
-            expectation_mismatch_message(Got, Expected, Desc),
-            false;
-        true ->
-            true
-    end.
-
-%% @spec ok(Expr, Desc) -> Result
-%%       Expr = true | false
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that a statement is true.
-ok(Expr, Desc) -> evaluate(Expr == true, Expr, true, Desc).
-
-%% @spec not_ok(Expr, Desc) -> Result
-%%       Expr = true | false
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that a statement is false.
-not_ok(Expr, Desc) -> evaluate(Expr == false, Expr, false, Desc).
-
-%% @spec is_ok(Expr, Desc) -> Result
-%%       Expr = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that two values are the same.
-is_ok(Expr, Desc) -> evaluate(Expr == ok, Expr, ok, Desc).
-
-%% @spec is(Got, Expected, Desc) -> Result
-%%       Got = any()
-%%       Expected = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that two values are the same.
-is(Got, Expected, Desc) -> evaluate(Got == Expected, Got, Expected, Desc).
-
-%% @spec isnt(Got, Expected, Desc) -> Result
-%%       Got = any()
-%%       Expected = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that two values are not the same.
-isnt(Got, Expected, Desc) -> evaluate(Got /= Expected, Got, Expected, Desc).
-
-%% @spec is_greater(ValueA, ValueB, Desc) -> Result
-%%       ValueA = number()
-%%       ValueB = number()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that an integer is greater than another.
-is_greater(ValueA, ValueB, Desc) when is_integer(ValueA), is_integer(ValueB) ->
-    mk_tap(ValueA > ValueB, Desc).
-
-%% @spec any(Got, Items, Desc) -> Result
-%%       Got = any()
-%%       Items = [any()]
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that an item is in a list.
-any(Got, Items, Desc) when is_function(Got) ->
-    is(lists:any(Got, Items), true, Desc);
-any(Got, Items, Desc) ->
-    is(lists:member(Got, Items), true, Desc).
-
-%% @spec none(Got, Items, Desc) -> Result
-%%       Got = any()
-%%       Items = [any()]
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Assert that an item is not in a list.
-none(Got, Items, Desc) when is_function(Got) ->
-    is(lists:any(Got, Items), false, Desc);
-none(Got, Items, Desc) ->
-    is(lists:member(Got, Items), false, Desc).
-
-%% @spec fun_is(Fun, Expected, Desc) -> Result
-%%       Fun = function()
-%%       Expected = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Use an anonymous function to assert a pattern match.
-fun_is(Fun, Expected, Desc) when is_function(Fun) ->
-    is(Fun(Expected), true, Desc).
-
-%% @spec expect_fun(ExpectFun, Got, Desc) -> Result
-%%       ExpectFun = function()
-%%       Got = any()
-%%       Desc = string()
-%%       Result = true | false
-%% @doc Use an anonymous function to assert a pattern match, using actual
-%%       value as the argument to the function.
-expect_fun(ExpectFun, Got, Desc) ->
-    evaluate(ExpectFun(Got), Got, ExpectFun, Desc).
-
-%% @spec expect_fun(ExpectFun, Got, Desc, ExpectStr) -> Result
-%%       ExpectFun = function()
-%%       Got = any()
-%%       Desc = string()
-%%       ExpectStr = string()
-%%       Result = true | false
-%% @doc Use an anonymous function to assert a pattern match, using actual
-%%       value as the argument to the function.
-expect_fun(ExpectFun, Got, Desc, ExpectStr) ->
-    evaluate(ExpectFun(Got), Got, ExpectStr, Desc).
-
-%% @equiv skip(TestFun, "")
-skip(TestFun) when is_function(TestFun) ->
-    skip(TestFun, "").
-
-%% @spec skip(TestFun, Reason) -> ok
-%%       TestFun = function()
-%%       Reason = string()
-%% @doc Skip a test.
-skip(TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
-    begin_skip(Reason),
-    catch TestFun(),
-    end_skip(),
-    ok.
-
-%% @spec skip(Q, TestFun, Reason) -> ok
-%%       Q = true | false | function()
-%%       TestFun = function()
-%%       Reason = string()
-%% @doc Skips a test conditionally. The first argument to this function can
-%% either be the 'true' or 'false' atoms or a function that returns 'true' or
-%% 'false'.
-skip(QFun, TestFun, Reason) when is_function(QFun), is_function(TestFun), is_list(Reason) ->
-    case QFun() of
-        true -> begin_skip(Reason), TestFun(), end_skip();
-        _ -> TestFun()
-    end,
-    ok;
-
-skip(Q, TestFun, Reason) when is_function(TestFun), is_list(Reason), Q == true ->
-    begin_skip(Reason),
-    TestFun(),
-    end_skip(),
-    ok;
-
-skip(_, TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
-    TestFun(),
-    ok.
-
-%% @private
-begin_skip(Reason) ->
-    etap_server ! {self(), begin_skip, Reason}.
-
-%% @private
-end_skip() ->
-    etap_server ! {self(), end_skip}.
-
-%% @spec contains_ok(string(), string(), string()) -> true | false
-%% @doc Assert that a string is contained in another string.
-contains_ok(Source, String, Desc) ->
-    etap:isnt(
-        string:str(Source, String),
-        0,
-        Desc
-    ).
-
-%% @spec is_before(string(), string(), string(), string()) -> true | false
-%% @doc Assert that a string comes before another string within a larger body.
-is_before(Source, StringA, StringB, Desc) ->
-    etap:is_greater(
-        string:str(Source, StringB),
-        string:str(Source, StringA),
-        Desc
-    ).
-
-%% @doc Assert that a given variable is a pid.
-is_pid(Pid, Desc) when is_pid(Pid) -> etap:ok(true, Desc);
-is_pid(_, Desc) -> etap:ok(false, Desc).
-
-%% @doc Assert that a given process/pid is alive.
-is_alive(Pid, Desc) ->
-    etap:ok(erlang:is_process_alive(Pid), Desc).
-
-%% @doc Assert that the current function of a pid is a given {M, F, A} tuple.
-is_mfa(Pid, MFA, Desc) ->
-    etap:is({current_function, MFA}, erlang:process_info(Pid, current_function), Desc).
-
-%% @spec loaded_ok(atom(), string()) -> true | false
-%% @doc Assert that a module has been loaded successfully.
-loaded_ok(M, Desc) when is_atom(M) ->
-    etap:fun_is(fun({module, _}) -> true; (_) -> false end, code:load_file(M), Desc).
-
-%% @spec can_ok(atom(), atom()) -> true | false
-%% @doc Assert that a module exports a given function.
-can_ok(M, F) when is_atom(M), is_atom(F) ->
-    Matches = [X || {X, _} <- M:module_info(exports), X == F],
-    etap:ok(length(Matches) > 0, lists:concat([M, " can ", F])).
-
-%% @spec can_ok(atom(), atom(), integer()) -> true | false
-%% @doc Assert that a module exports a given function with a given arity.
-can_ok(M, F, A) when is_atom(M), is_atom(F), is_number(A) ->
-    Matches = [X || X <- M:module_info(exports), X == {F, A}],
-    etap:ok(length(Matches) > 0, lists:concat([M, " can ", F, "/", A])).
-
-%% @spec has_attrib(M, A) -> true | false
-%%       M = atom()
-%%       A = atom()
-%% @doc Asserts that a module has a given attribute.
-has_attrib(M, A) when is_atom(M), is_atom(A) ->
-    etap:isnt(
-        proplists:get_value(A, M:module_info(attributes), 'asdlkjasdlkads'),
-        'asdlkjasdlkads',
-        lists:concat([M, " has attribute ", A])
-    ).
-
-%% @spec is_attrib(M, A, V) -> true | false
-%%       M = atom()
-%%       A = atom()
-%%       V = any()
-%% @doc Asserts that a module has a given attribute with a given value.
-is_attrib(M, A, V) when is_atom(M) andalso is_atom(A) ->
-    etap:is(
-        proplists:get_value(A, M:module_info(attributes)),
-        [V],
-        lists:concat([M, "'s ", A, " is ", V])
-    ).
-
-%% @spec is_behaviour(M, B) -> true | false
-%%       M = atom()
-%%       B = atom()
-%% @doc Asserts that a given module has a specific behavior.
-is_behaviour(M, B) when is_atom(M) andalso is_atom(B) ->
-    is_attrib(M, behaviour, B).
-
-%% @doc Assert that an exception is raised when running a given function.
-dies_ok(F, Desc) ->
-    case (catch F()) of
-        {'EXIT', _} -> etap:ok(true, Desc);
-        _ -> etap:ok(false, Desc)
-    end.
-
-%% @doc Assert that an exception is not raised when running a given function.
-lives_ok(F, Desc) ->
-    etap:is(try_this(F), success, Desc).
-
-%% @doc Assert that the exception thrown by a function matches the given exception.
-throws_ok(F, Exception, Desc) ->
-    try F() of
-        _ -> etap:ok(nok, Desc)
-    catch
-        _:E ->
-            etap:is(E, Exception, Desc)
-    end.
-
-%% @private
-%% @doc Run a function and catch any exceptions.
-try_this(F) when is_function(F, 0) ->
-    try F() of
-        _ -> success
-    catch
-        throw:E -> {throw, E};
-        error:E -> {error, E};
-        exit:E -> {exit, E}
-    end.
-
-%% @private
-%% @doc Start the etap_server process if it is not running already.
-ensure_test_server() ->
-    case whereis(etap_server) of
-        undefined ->
-            proc_lib:start(?MODULE, start_etap_server,[]);
-        _ ->
-            diag("The test server is already running.")
-    end.
-
-%% @private
-%% @doc Start the etap_server loop and register itself as the etap_server
-%% process.
-start_etap_server() ->
-    catch register(etap_server, self()),
-    proc_lib:init_ack(ok),
-    etap:test_server(#test_state{
-        planned = 0,
-        count = 0,
-        pass = 0,
-        fail = 0,
-        skip = 0,
-        skip_reason = ""
-    }).
-
-
-%% @private
-%% @doc The main etap_server receive/run loop. The etap_server receive loop
-%% responds to several messages pertaining to the passing or failure of tests.
-%% It is also used to initiate the testing process with the {_, plan, _}
-%% message that clears the current test state.
-test_server(State) ->
-    NewState = receive
-        {_From, plan, unknown} ->
-            io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
-            io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
-            State#test_state{
-                planned = -1,
-                count = 0,
-                pass = 0,
-                fail = 0,
-                skip = 0,
-                skip_reason = ""
-            };
-        {_From, plan, N} ->
-            io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
-            io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
-            io:format("1..~p~n", [N]),
-            State#test_state{
-                planned = N,
-                count = 0,
-                pass = 0,
-                fail = 0,
-                skip = 0,
-                skip_reason = ""
-            };
-        {_From, begin_skip, Reason} ->
-            State#test_state{
-                skip = 1,
-                skip_reason = Reason
-            };
-        {_From, end_skip} ->
-            State#test_state{
-                skip = 0,
-                skip_reason = ""
-            };
-        {_From, pass, Desc} ->
-            FullMessage = skip_diag(
-                " - " ++ Desc,
-                State#test_state.skip,
-                State#test_state.skip_reason
-            ),
-            io:format("ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
-            State#test_state{
-                count = State#test_state.count + 1,
-                pass = State#test_state.pass + 1
-            };
-
-        {_From, fail, Desc} ->
-            FullMessage = skip_diag(
-                " - " ++ Desc,
-                State#test_state.skip,
-                State#test_state.skip_reason
-            ),
-            io:format("not ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
-            State#test_state{
-                count = State#test_state.count + 1,
-                fail = State#test_state.fail + 1
-            };
-        {From, state} ->
-            From ! State,
-            State;
-        {_From, diag, Message} ->
-            io:format("~s~n", [Message]),
-            State;
-        {From, count} ->
-            From ! State#test_state.count,
-            State;
-        {From, is_skip} ->
-            From ! State#test_state.skip,
-            State;
-        done ->
-            exit(normal)
-    end,
-    test_server(NewState).
-
-%% @private
-%% @doc Process the result of a test and send it to the etap_server process.
-mk_tap(Result, Desc) ->
-    IsSkip = lib:sendw(etap_server, is_skip),
-    case [IsSkip, Result] of
-        [_, true] ->
-            etap_server ! {self(), pass, Desc},
-            true;
-        [1, _] ->
-            etap_server ! {self(), pass, Desc},
-            true;
-        _ ->
-            etap_server ! {self(), fail, Desc},
-            false
-    end.
-
-%% @private
-%% @doc Format a date/time string.
-datetime(DateTime) ->
-    {{Year, Month, Day}, {Hour, Min, Sec}} = DateTime,
-    io_lib:format("~4.10.0B-~2.10.0B-~2.10.0B ~2.10.0B:~2.10.0B:~2.10.0B", [Year, Month, Day, Hour, Min, Sec]).
-
-%% @private
-%% @doc Craft an output message taking skip/todo into consideration.
-skip_diag(Message, 0, _) ->
-    Message;
-skip_diag(_Message, 1, "") ->
-    " # SKIP";
-skip_diag(_Message, 1, Reason) ->
-    " # SKIP : " ++ Reason.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed8c2fb2/src/ets_lru/test/tutil.erl
----------------------------------------------------------------------
diff --git a/src/ets_lru/test/tutil.erl b/src/ets_lru/test/tutil.erl
deleted file mode 100644
index 3e4bd68..0000000
--- a/src/ets_lru/test/tutil.erl
+++ /dev/null
@@ -1,29 +0,0 @@
--module(tutil).
-
--export([
-    run/2,
-    with_lru/1
-]).
-
-
-run(Plan, Fun) ->
-    etap:plan(Plan),
-    case (catch Fun()) of
-        ok ->
-            etap:end_tests();
-        Error ->
-            Msg = lists:flatten(io_lib:format("Error: ~p", [Error])),
-            etap:bail(Msg)
-    end.
-
-
-with_lru(Fun) ->
-    {ok, LRU} = ets_lru:start_link(test_lru, []),
-    Ref = erlang:monitor(process, LRU),
-    try
-        Fun(LRU)
-    after
-        ets_lru:stop(LRU),
-        receive {'DOWN', Ref, process, LRU, _} -> ok end
-    end.
-
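A sketch of how these two helpers compose in a test escript; the ets_lru calls
(insert/3, lookup/2) and their return shapes are assumptions about that API,
and the keys and plan count are made up:

    main(_) ->
        tutil:run(2, fun() ->
            tutil:with_lru(fun(LRU) ->
                % store one entry, then check a hit and a miss
                ok = ets_lru:insert(LRU, key, value),
                etap:is(ets_lru:lookup(LRU, key), {ok, value}, "lookup hit"),
                etap:is(ets_lru:lookup(LRU, missing), not_found, "lookup miss")
            end),
            ok
        end).
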


[12/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_compaction_daemon.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl
deleted file mode 100644
index 3251d5f..0000000
--- a/src/couch/src/couch_compaction_daemon.erl
+++ /dev/null
@@ -1,514 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compaction_daemon).
--behaviour(gen_server).
--behaviour(config_listener).
-
-% public API
--export([start_link/0]).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(CONFIG_ETS, couch_compaction_daemon_config).
-
--record(state, {
-    loop_pid
-}).
-
--record(config, {
-    db_frag = nil,
-    view_frag = nil,
-    period = nil,
-    cancel = false,
-    parallel_view_compact = false
-}).
-
--record(period, {
-    from = nil,
-    to = nil
-}).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
-    process_flag(trap_exit, true),
-    ?CONFIG_ETS = ets:new(?CONFIG_ETS, [named_table, set, protected]),
-    ok = config:listen_for_changes(?MODULE, nil),
-    load_config(),
-    Server = self(),
-    Loop = spawn_link(fun() -> compact_loop(Server) end),
-    {ok, #state{loop_pid = Loop}}.
-
-
-handle_cast({config_update, DbName, deleted}, State) ->
-    true = ets:delete(?CONFIG_ETS, ?l2b(DbName)),
-    {noreply, State};
-
-handle_cast({config_update, DbName, Config}, #state{loop_pid = Loop} = State) ->
-    case parse_config(DbName, Config) of
-    {ok, NewConfig} ->
-        WasEmpty = (ets:info(?CONFIG_ETS, size) =:= 0),
-        true = ets:insert(?CONFIG_ETS, {?l2b(DbName), NewConfig}),
-        case WasEmpty of
-        true ->
-            Loop ! {self(), have_config};
-        false ->
-            ok
-        end;
-    error ->
-        ok
-    end,
-    {noreply, State}.
-
-
-handle_call(Msg, _From, State) ->
-    {stop, {unexpected_call, Msg}, State}.
-
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info({'EXIT', Pid, Reason}, #state{loop_pid = Pid} = State) ->
-    {stop, {compaction_loop_died, Reason}, State}.
-
-
-terminate(_Reason, _State) ->
-    true = ets:delete(?CONFIG_ETS).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("compactions", DbName, Value, _, _) ->
-    {ok, gen_server:cast(?MODULE, {config_update, DbName, Value})};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-compact_loop(Parent) ->
-    {ok, _} = couch_server:all_databases(
-        fun(DbName, Acc) ->
-            case ets:info(?CONFIG_ETS, size) =:= 0 of
-            true ->
-                {stop, Acc};
-            false ->
-                case get_db_config(DbName) of
-                nil ->
-                    ok;
-                {ok, Config} ->
-                    case check_period(Config) of
-                    true ->
-                        maybe_compact_db(DbName, Config);
-                    false ->
-                        ok
-                    end
-                end,
-                {ok, Acc}
-            end
-        end, ok),
-    case ets:info(?CONFIG_ETS, size) =:= 0 of
-    true ->
-        receive {Parent, have_config} -> ok end;
-    false ->
-        PausePeriod = list_to_integer(
-            config:get("compaction_daemon", "check_interval", "300")),
-        ok = timer:sleep(PausePeriod * 1000)
-    end,
-    compact_loop(Parent).
-
-
-maybe_compact_db(DbName, Config) ->
-    etap:diag("~n~n~n~n################~nCOMPACTING: ~p~n#############~n~n",
-        [DbName]),
-    case (catch couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}])) of
-    {ok, Db} ->
-        DDocNames = db_ddoc_names(Db),
-        case can_db_compact(Config, Db) of
-        true ->
-            {ok, _} = couch_db:start_compact(Db),
-            TimeLeft = compact_time_left(Config),
-            case Config#config.parallel_view_compact of
-            true ->
-                ViewsCompactPid = spawn_link(fun() ->
-                    maybe_compact_views(DbName, DDocNames, Config)
-                end),
-                ViewsMonRef = erlang:monitor(process, ViewsCompactPid);
-            false ->
-                ViewsCompactPid = nil,
-                ViewsMonRef = nil
-            end,
-            case couch_db:wait_for_compaction(Db, TimeLeft) of
-                ok ->
-                    couch_db:close(Db),
-                    case Config#config.parallel_view_compact of
-                        true -> ok;
-                        false -> maybe_compact_views(DbName, DDocNames, Config)
-                    end;
-                {error, timeout} ->
-                    ?LOG_INFO("Compaction daemon - canceling compaction "
-                        "for databaes `~s` because exceeded the allowed time.",
-                        [DbName]),
-                    ok = couch_db:cancel_compact(Db),
-                    couch_db:close(Db);
-                {error, Reason} ->
-                    couch_db:close(Db),
-                    ?LOG_ERROR("Compaction daemon - an error ocurred while"
-                        " compacting the database `~s`: ~p", [DbName, Reason])
-            end,
-            case ViewsMonRef of
-            nil ->
-                ok;
-            _ ->
-                receive
-                {'DOWN', ViewsMonRef, process, _, _Reason} ->
-                    ok
-                after TimeLeft + 1000 ->
-                    % Under normal circumstances, the view compaction process
-                    % should have finished already.
-                    erlang:demonitor(ViewsMonRef, [flush]),
-                    unlink(ViewsCompactPid),
-                    exit(ViewsCompactPid, kill)
-                end
-            end;
-        false ->
-            couch_db:close(Db),
-            maybe_compact_views(DbName, DDocNames, Config)
-        end;
-    _ ->
-        ok
-    end.
-
-
-maybe_compact_views(_DbName, [], _Config) ->
-    ok;
-maybe_compact_views(DbName, [DDocName | Rest], Config) ->
-    case check_period(Config) of
-    true ->
-        case maybe_compact_view(DbName, DDocName, Config) of
-        ok ->
-            maybe_compact_views(DbName, Rest, Config);
-        timeout ->
-            ok
-        end;
-    false ->
-        ok
-    end.
-
-
-db_ddoc_names(Db) ->
-    {ok, _, DDocNames} = couch_db:enum_docs(
-        Db,
-        fun(#full_doc_info{id = <<"_design/", _/binary>>, deleted = true}, _, Acc) ->
-            {ok, Acc};
-        (#full_doc_info{id = <<"_design/", Id/binary>>}, _, Acc) ->
-            {ok, [Id | Acc]};
-        (_, _, Acc) ->
-            {stop, Acc}
-        end, [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
-    DDocNames.
-
-
-maybe_compact_view(DbName, GroupId, Config) ->
-    DDocId = <<"_design/", GroupId/binary>>,
-    case (catch couch_mrview:get_info(DbName, DDocId)) of
-    {ok, GroupInfo} ->
-        case can_view_compact(Config, DbName, GroupId, GroupInfo) of
-        true ->
-            {ok, MonRef} = couch_mrview:compact(DbName, DDocId, [monitor]),
-            TimeLeft = compact_time_left(Config),
-            receive
-            {'DOWN', MonRef, process, _, normal} ->
-                ok;
-            {'DOWN', MonRef, process, _, Reason} ->
-                ?LOG_ERROR("Compaction daemon - an error ocurred while compacting"
-                    " the view group `~s` from database `~s`: ~p",
-                    [GroupId, DbName, Reason]),
-                ok
-            after TimeLeft ->
-                ?LOG_INFO("Compaction daemon - canceling the compaction for the "
-                    "view group `~s` of the database `~s` because it's exceeding"
-                    " the allowed period.", [GroupId, DbName]),
-                erlang:demonitor(MonRef, [flush]),
-                ok = couch_mrview:cancel_compaction(DbName, DDocId),
-                timeout
-            end;
-        false ->
-            ok
-        end;
-    Error ->
-        ?LOG_ERROR("Error opening view group `~s` from database `~s`: ~p",
-            [GroupId, DbName, Error]),
-        ok
-    end.
-
-
-compact_time_left(#config{cancel = false}) ->
-    infinity;
-compact_time_left(#config{period = nil}) ->
-    infinity;
-compact_time_left(#config{period = #period{to = {ToH, ToM} = To}}) ->
-    {H, M, _} = time(),
-    case To > {H, M} of
-    true ->
-        ((ToH - H) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000);
-    false ->
-        ((24 - H + ToH) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000)
-    end.
-
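As a worked example of the arithmetic above: with cancel = true, a window
ending at {1, 15} (01:15) and a current time of 22:30, To > {22, 30} is false,
so the second clause applies:

    TimeLeft = ((24 - 22 + 1) * 60 * 60 * 1000)   % hours component
             + (abs(15 - 30) * 60 * 1000)         % minutes component
             = 10800000 + 900000 = 11700000 ms

Because the minutes term uses abs/1, this is a rough figure (3 h 15 min)
rather than the exact 2 h 45 min to the deadline.
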
-
-get_db_config(DbName) ->
-    case ets:lookup(?CONFIG_ETS, DbName) of
-    [] ->
-        case ets:lookup(?CONFIG_ETS, <<"_default">>) of
-        [] ->
-            nil;
-        [{<<"_default">>, Config}] ->
-            {ok, Config}
-        end;
-    [{DbName, Config}] ->
-        {ok, Config}
-    end.
-
-
-can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
-    case check_period(Config) of
-    false ->
-        false;
-    true ->
-        {ok, DbInfo} = couch_db:get_db_info(Db),
-        {Frag, SpaceRequired} = frag(DbInfo),
-        ?LOG_DEBUG("Fragmentation for database `~s` is ~p%, estimated space for"
-           " compaction is ~p bytes.", [Db#db.name, Frag, SpaceRequired]),
-        case check_frag(Threshold, Frag) of
-        false ->
-            false;
-        true ->
-            Free = free_space(config:get("couchdb", "database_dir")),
-            case Free >= SpaceRequired of
-            true ->
-                true;
-            false ->
-                ?LOG_WARN("Compaction daemon - skipping database `~s` "
-                    "compaction: the estimated necessary disk space is about ~p"
-                    " bytes but the currently available disk space is ~p bytes.",
-                   [Db#db.name, SpaceRequired, Free]),
-                false
-            end
-        end
-    end.
-
-can_view_compact(Config, DbName, GroupId, GroupInfo) ->
-    case check_period(Config) of
-    false ->
-        false;
-    true ->
-        case couch_util:get_value(updater_running, GroupInfo) of
-        true ->
-            false;
-        false ->
-            {Frag, SpaceRequired} = frag(GroupInfo),
-            ?LOG_DEBUG("Fragmentation for view group `~s` (database `~s`) is "
-                "~p%, estimated space for compaction is ~p bytes.",
-                [GroupId, DbName, Frag, SpaceRequired]),
-            case check_frag(Config#config.view_frag, Frag) of
-            false ->
-                false;
-            true ->
-                Free = free_space(couch_index_util:root_dir()),
-                case Free >= SpaceRequired of
-                true ->
-                    true;
-                false ->
-                    ?LOG_WARN("Compaction daemon - skipping view group `~s` "
-                        "compaction (database `~s`): the estimated necessary "
-                        "disk space is about ~p bytes but the currently available"
-                        " disk space is ~p bytes.",
-                        [GroupId, DbName, SpaceRequired, Free]),
-                    false
-                end
-            end
-        end
-    end.
-
-
-check_period(#config{period = nil}) ->
-    true;
-check_period(#config{period = #period{from = From, to = To}}) ->
-    {HH, MM, _} = erlang:time(),
-    case From < To of
-    true ->
-        ({HH, MM} >= From) andalso ({HH, MM} < To);
-    false ->
-        ({HH, MM} >= From) orelse ({HH, MM} < To)
-    end.
-
-
-check_frag(nil, _) ->
-    true;
-check_frag(Threshold, Frag) ->
-    Frag >= Threshold.
-
-
-frag(Props) ->
-    FileSize = couch_util:get_value(disk_size, Props),
-    MinFileSize = list_to_integer(
-        config:get("compaction_daemon", "min_file_size", "131072")),
-    case FileSize < MinFileSize of
-    true ->
-        {0, FileSize};
-    false ->
-        case couch_util:get_value(data_size, Props) of
-        null ->
-            {100, FileSize};
-        0 ->
-            {0, FileSize};
-        DataSize ->
-            Frag = round(((FileSize - DataSize) / FileSize * 100)),
-            {Frag, space_required(DataSize)}
-        end
-    end.
-
-% Rough, and pessimistic, estimation of necessary disk space to compact a
-% database or view index.
-space_required(DataSize) ->
-    round(DataSize * 2.0).
-
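For instance, a file with disk_size = 1048576 and data_size = 419430 gives
Frag = round((1048576 - 419430) / 1048576 * 100) = 60, so a fragmentation
threshold of "60%" or lower triggers compaction, and space_required =
round(419430 * 2.0) = 838860 bytes of free disk are demanded. Files smaller
than min_file_size (131072 bytes by default) always report zero fragmentation.
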
-
-load_config() ->
-    lists:foreach(
-        fun({DbName, ConfigString}) ->
-            case parse_config(DbName, ConfigString) of
-            {ok, Config} ->
-                true = ets:insert(?CONFIG_ETS, {?l2b(DbName), Config});
-            error ->
-                ok
-            end
-        end,
-        config:get("compactions")).
-
-parse_config(DbName, ConfigString) ->
-    case (catch do_parse_config(ConfigString)) of
-    {ok, Conf} ->
-        {ok, Conf};
-    incomplete_period ->
-        ?LOG_ERROR("Incomplete period ('to' or 'from' missing) in the compaction"
-            " configuration for database `~s`", [DbName]),
-        error;
-    _ ->
-        ?LOG_ERROR("Invalid compaction configuration for database "
-            "`~s`: `~s`", [DbName, ConfigString]),
-        error
-    end.
-
-do_parse_config(ConfigString) ->
-    {ok, ConfProps} = couch_util:parse_term(ConfigString),
-    {ok, #config{period = Period} = Conf} = config_record(ConfProps, #config{}),
-    case Period of
-    nil ->
-        {ok, Conf};
-    #period{from = From, to = To} when From =/= nil, To =/= nil ->
-        {ok, Conf};
-    #period{} ->
-        incomplete_period
-    end.
-
-config_record([], Config) ->
-    {ok, Config};
-
-config_record([{db_fragmentation, V} | Rest], Config) ->
-    [Frag] = string:tokens(V, "%"),
-    config_record(Rest, Config#config{db_frag = list_to_integer(Frag)});
-
-config_record([{view_fragmentation, V} | Rest], Config) ->
-    [Frag] = string:tokens(V, "%"),
-    config_record(Rest, Config#config{view_frag = list_to_integer(Frag)});
-
-config_record([{from, V} | Rest], #config{period = Period0} = Config) ->
-    Time = parse_time(V),
-    Period = case Period0 of
-    nil ->
-        #period{from = Time};
-    #period{} ->
-        Period0#period{from = Time}
-    end,
-    config_record(Rest, Config#config{period = Period});
-
-config_record([{to, V} | Rest], #config{period = Period0} = Config) ->
-    Time = parse_time(V),
-    Period = case Period0 of
-    nil ->
-        #period{to = Time};
-    #period{} ->
-        Period0#period{to = Time}
-    end,
-    config_record(Rest, Config#config{period = Period});
-
-config_record([{strict_window, true} | Rest], Config) ->
-    config_record(Rest, Config#config{cancel = true});
-
-config_record([{strict_window, false} | Rest], Config) ->
-    config_record(Rest, Config#config{cancel = false});
-
-config_record([{parallel_view_compaction, true} | Rest], Config) ->
-    config_record(Rest, Config#config{parallel_view_compact = true});
-
-config_record([{parallel_view_compaction, false} | Rest], Config) ->
-    config_record(Rest, Config#config{parallel_view_compact = false}).
-
-
-parse_time(String) ->
-    [HH, MM] = string:tokens(String, ":"),
-    {list_to_integer(HH), list_to_integer(MM)}.
-
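Tying the parsing clauses above together, a typical entry in the ini file's
[compactions] section looks like the following (the thresholds and window are
illustrative; the key may also be a specific database name instead of
_default):

    [compactions]
    _default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, false}]
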
-
-free_space(Path) ->
-    DiskData = lists:sort(
-        fun({PathA, _, _}, {PathB, _, _}) ->
-            length(filename:split(PathA)) > length(filename:split(PathB))
-        end,
-        disksup:get_disk_data()),
-    free_space_rec(abs_path(Path), DiskData).
-
-free_space_rec(_Path, []) ->
-    undefined;
-free_space_rec(Path, [{MountPoint0, Total, Usage} | Rest]) ->
-    MountPoint = abs_path(MountPoint0),
-    case MountPoint =:= string:substr(Path, 1, length(MountPoint)) of
-    false ->
-        free_space_rec(Path, Rest);
-    true ->
-        trunc(Total - (Total * (Usage / 100))) * 1024
-    end.
-
-abs_path(Path0) ->
-    Path = filename:absname(Path0),
-    case lists:last(Path) of
-    $/ ->
-        Path;
-    _ ->
-        Path ++ "/"
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_compress.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
deleted file mode 100644
index 6b47a7a..0000000
--- a/src/couch/src/couch_compress.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
-    case config:get("couchdb", "file_compression") of
-    undefined ->
-        ?DEFAULT_COMPRESSION;
-    Method1 ->
-        case string:tokens(Method1, "_") of
-        [Method] ->
-            list_to_existing_atom(Method);
-        [Method, Level] ->
-            {list_to_existing_atom(Method), list_to_integer(Level)}
-        end
-    end.
-
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
-    Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(Term, none) ->
-    ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
-    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
-    Bin = ?term_to_bin(Term),
-    try
-        {ok, CompressedBin} = snappy:compress(Bin),
-        case byte_size(CompressedBin) < byte_size(Bin) of
-        true ->
-            <<?SNAPPY_PREFIX, CompressedBin/binary>>;
-        false ->
-            Bin
-        end
-    catch exit:snappy_nif_not_loaded ->
-        Bin
-    end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, TermBin} = snappy:decompress(Rest),
-    binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    binary_to_term(Bin).
-
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
-    Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
-    true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
-    false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
-    Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
-    false.
-
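A quick shell sketch of the round trip, assuming the snappy NIF is loaded (for
terms that do not shrink, compress/2 falls back to the plain term encoding, so
a highly compressible term is used here):

    1> Term = binary:copy(<<"abc">>, 1000).
    2> Bin = couch_compress:compress(Term, snappy).
    3> couch_compress:is_compressed(Bin, snappy).
    true
    4> couch_compress:decompress(Bin) =:= Term.
    true
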

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_config.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_config.erl b/src/couch/src/couch_config.erl
deleted file mode 100644
index 5d13dff..0000000
--- a/src/couch/src/couch_config.erl
+++ /dev/null
@@ -1,251 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Reads CouchDB's ini files and is queried for configuration parameters.
-% This module is initialized with a list of ini files that it consecutively
-% reads Key/Value pairs from and saves them in an ets table. If more than one
-% ini file is specified, the last one is used to write back changes that are
-% made with set/3.
-
--module(couch_config).
--behaviour(gen_server).
-
--include_lib("couch/include/couch_db.hrl").
-
-
--export([start_link/1, stop/0]).
--export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
--export([register/1, register/2]).
--export([parse_ini_file/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--record(config, {
-    notify_funs=[],
-    write_filename=undefined
-}).
-
-
-start_link(IniFiles) ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-
-all() ->
-    lists:sort(gen_server:call(?MODULE, all, infinity)).
-
-
-get(Section) when is_binary(Section) ->
-    ?MODULE:get(?b2l(Section));
-get(Section) ->
-    Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
-    [{Key, Value} || [Key, Value] <- Matches].
-
-get(Section, Key) ->
-    ?MODULE:get(Section, Key, undefined).
-
-get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
-    ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
-get(Section, Key, Default) ->
-    case ets:lookup(?MODULE, {Section, Key}) of
-        [] -> Default;
-        [{_, Match}] -> Match
-    end.
-
-set(Section, Key, Value) ->
-    ?MODULE:set(Section, Key, Value, true).
-
-set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key)  ->
-    ?MODULE:set(?b2l(Section), ?b2l(Key), Value, Persist);
-set(Section, Key, Value, Persist) ->
-    gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).
-
-
-delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
-    delete(?b2l(Section), ?b2l(Key));
-delete(Section, Key) ->
-    delete(Section, Key, true).
-
-delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
-    delete(?b2l(Section), ?b2l(Key), Persist);
-delete(Section, Key, Persist) ->
-    gen_server:call(?MODULE, {delete, Section, Key, Persist}).
-
-
-register(Fun) ->
-    ?MODULE:register(Fun, self()).
-
-register(Fun, Pid) ->
-    gen_server:call(?MODULE, {register, Fun, Pid}).
-
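A short usage sketch (section and key names are illustrative; the final false
argument skips persisting the change to the ini file):

    ok = couch_config:set("httpd", "port", "5985", false),
    "5985" = couch_config:get("httpd", "port"),
    undefined = couch_config:get("httpd", "no_such_key"),
    ok = couch_config:register(fun(Sec, Key, Val) ->
        io:format("~s/~s changed to ~s~n", [Sec, Key, Val])
    end).
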
-
-init(IniFiles) ->
-    ets:new(?MODULE, [named_table, set, protected]),
-    try
-        lists:map(fun(IniFile) ->
-            {ok, ParsedIniValues} = parse_ini_file(IniFile),
-            ets:insert(?MODULE, ParsedIniValues)
-        end, IniFiles),
-        WriteFile = case IniFiles of
-            [_|_] -> lists:last(IniFiles);
-            _ -> undefined
-        end,
-        {ok, #config{write_filename = WriteFile}}
-    catch _Tag:Error ->
-        {stop, Error}
-    end.
-
-
-terminate(_Reason, _State) ->
-    ok.
-
-
-handle_call(all, _From, Config) ->
-    Resp = lists:sort((ets:tab2list(?MODULE))),
-    {reply, Resp, Config};
-handle_call({set, Sec, Key, Val, Persist}, From, Config) ->
-    Result = case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            couch_config_writer:save_to_file({{Sec, Key}, Val}, FileName);
-        _ ->
-            ok
-    end,
-    case Result of
-    ok ->
-        true = ets:insert(?MODULE, {{Sec, Key}, Val}),
-        spawn_link(fun() ->
-            [catch F(Sec, Key, Val, Persist) || {_Pid, F} <- Config#config.notify_funs],
-                gen_server:reply(From, ok)
-        end),
-        {noreply, Config};
-    _Error ->
-        {reply, Result, Config}
-    end;
-handle_call({delete, Sec, Key, Persist}, From, Config) ->
-    true = ets:delete(?MODULE, {Sec,Key}),
-    case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            couch_config_writer:save_to_file({{Sec, Key}, ""}, FileName);
-        _ ->
-            ok
-    end,
-    spawn_link(fun() ->
-        [catch F(Sec, Key, deleted, Persist) || {_Pid, F} <- Config#config.notify_funs],
-            gen_server:reply(From, ok)
-    end),
-    {noreply, Config};
-handle_call({register, Fun, Pid}, _From, #config{notify_funs=PidFuns}=Config) ->
-    erlang:monitor(process, Pid),
-    % convert 1 and 2 arity to 3 arity
-    Fun2 =
-    case Fun of
-        _ when is_function(Fun, 1) ->
-            fun(Section, _Key, _Value, _Persist) -> Fun(Section) end;
-        _ when is_function(Fun, 2) ->
-            fun(Section, Key, _Value, _Persist) -> Fun(Section, Key) end;
-        _ when is_function(Fun, 3) ->
-            fun(Section, Key, Value, _Persist) -> Fun(Section, Key, Value) end;
-        _ when is_function(Fun, 4) ->
-            Fun
-    end,
-    {reply, ok, Config#config{notify_funs=[{Pid, Fun2} | PidFuns]}}.
-
-
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({'DOWN', _, _, DownPid, _}, #config{notify_funs=PidFuns}=Config) ->
-    % remove any funs registered by the downed process
-    FilteredPidFuns = [{Pid,Fun} || {Pid,Fun} <- PidFuns, Pid /= DownPid],
-    {noreply, Config#config{notify_funs=FilteredPidFuns}}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-parse_ini_file(IniFile) ->
-    IniFilename = couch_util:abs_pathname(IniFile),
-    IniBin =
-    case file:read_file(IniFilename) of
-        {ok, IniBin0} ->
-            IniBin0;
-        {error, Reason} = Error ->
-            ?LOG_ERROR("Could not read server configuration file ~s: ~s",
-                [IniFilename, file:format_error(Reason)]),
-            throw(Error)
-    end,
-
-    Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
-    {_, ParsedIniValues} =
-    lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
-            case string:strip(Line) of
-            "[" ++ Rest ->
-                case re:split(Rest, "\\]", [{return, list}]) of
-                [NewSectionName, ""] ->
-                    {NewSectionName, AccValues};
-                _Else -> % end bracket not at end, ignore this line
-                    {AccSectionName, AccValues}
-                end;
-            ";" ++ _Comment ->
-                {AccSectionName, AccValues};
-            Line2 ->
-                case re:split(Line2, "\s*=\s*", [{return, list}]) of
-                [Value] ->
-                    MultiLineValuePart = case re:run(Line, "^ \\S", []) of
-                    {match, _} ->
-                        true;
-                    _ ->
-                        false
-                    end,
-                    case {MultiLineValuePart, AccValues} of
-                    {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
-                        % remove comment
-                        case re:split(Value, " ;|\t;", [{return, list}]) of
-                        [[]] ->
-                            % empty line
-                            {AccSectionName, AccValues};
-                        [LineValue | _Rest] ->
-                            E = {{AccSectionName, ValueName},
-                                PrevValue ++ " " ++ LineValue},
-                            {AccSectionName, [E | AccValuesRest]}
-                        end;
-                    _ ->
-                        {AccSectionName, AccValues}
-                    end;
-                [""|_LineValues] -> % line begins with "=", ignore
-                    {AccSectionName, AccValues};
-                [ValueName|LineValues] -> % yeehaw, got a line!
-                    RemainingLine = couch_util:implode(LineValues, "="),
-                    % removes comments
-                    case re:split(RemainingLine, " ;|\t;", [{return, list}]) of
-                    [[]] ->
-                        % empty line means delete this key
-                        ets:delete(?MODULE, {AccSectionName, ValueName}),
-                        {AccSectionName, AccValues};
-                    [LineValue | _Rest] ->
-                        {AccSectionName,
-                            [{{AccSectionName, ValueName}, LineValue} | AccValues]}
-                    end
-                end
-            end
-        end, {"", []}, Lines),
-    {ok, ParsedIniValues}.
-
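Concretely, given the ini fragment below (names and values are illustrative),
the fold yields the entries shown; the continuation line starts with a space,
which is what the "^ \\S" match above detects:

    [couchdb]
    database_dir = /var/lib/couchdb ; trailing comment is stripped
    [log]
    level = info
     to_file

parses to {{"couchdb", "database_dir"}, "/var/lib/couchdb"} and
{{"log", "level"}, "info to_file"}. A line with an empty value ("level =")
would instead delete that key from the ets table.
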

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_config_writer.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_config_writer.erl b/src/couch/src/couch_config_writer.erl
deleted file mode 100644
index f3c9cca..0000000
--- a/src/couch/src/couch_config_writer.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Saves a Key/Value pair to an ini file. The Key consists of a Section
-%%      and Option combination. If that combination is found in the ini file
-%%      the new value replaces the old value. If only the Section is found the
-%%      Option and value combination is appended to the Section. If the Section
-%%      does not yet exist in the ini file, it is added and the Option/Value
-%%      pair is appended.
-%% @see couch_config
-
--module(couch_config_writer).
-
--export([save_to_file/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
-%% @spec save_to_file(
-%%           Config::{{Section::string(), Option::string()}, Value::string()},
-%%           File::filename()) -> ok
-%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
-save_to_file({{Section, Key}, Value}, File) ->
-    {ok, OldFileContents} = file:read_file(File),
-    Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),
-
-    SectionLine = "[" ++ Section ++ "]",
-    {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),
-
-    NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
-    NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
-    case file:write_file(File, NewFileContents) of
-    ok ->
-        ok;
-    {error, Reason} = Error ->
-        ?LOG_ERROR("Could not write config file ~s: ~s",
-            [File, file:format_error(Reason)]),
-        Error
-    end.
-
-
-process_file_lines([Section|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_section_lines(Rest, [Section|SeenLines], Pattern, Key, Value);
-
-process_file_lines([Line|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_file_lines(Rest, [Line|SeenLines], Section, Pattern, Key, Value);
-
-process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
-    % Section wasn't found.  Append it with the option here.
-    [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].
-
-
-process_section_lines([Line|Rest], SeenLines, Pattern, Key, Value) ->
-    case re:run(Line, Pattern, [{capture, all_but_first}]) of
-    nomatch -> % Found nothing interesting. Move on.
-        process_section_lines(Rest, [Line|SeenLines], Pattern, Key, Value);
-    {match, []} -> % Found another section. Append the option here.
-        lists:reverse(Rest) ++
-        [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
-    {match, _} -> % Found the option itself. Replace it.
-        lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
-    end;
-
-process_section_lines([], SeenLines, _Pattern, Key, Value) ->
-    % Found end of file within the section. Append the option here.
-    [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
-
-
-reverse_and_add_newline([Line|Rest], Content) ->
-    reverse_and_add_newline(Rest, [Line, "\n", Content]);
-
-reverse_and_add_newline([], Content) ->
-    Content.
-
-
-strip_empty_lines(["" | Rest]) ->
-    strip_empty_lines(Rest);
-
-strip_empty_lines(All) ->
-    All.
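To illustrate the clauses above: saving {{"log", "level"}, "debug"} into a
file that already has a [log] section but no level key turns

    [log]
    file = couch.log

into

    [log]
    file = couch.log
    level = debug

while an existing "level = info" line would be replaced in place, and a
missing [log] section would be appended at the end of the file.
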

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
deleted file mode 100644
index 32a0049..0000000
--- a/src/couch/src/couch_db.erl
+++ /dev/null
@@ -1,1412 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
-
--export([open/2,open_int/2,close/1,create/2,get_db_info/1,get_design_docs/1]).
--export([start_compact/1, cancel_compact/1]).
--export([wait_for_compaction/1, wait_for_compaction/2]).
--export([is_idle/1,monitor/1,count_changes_since/2]).
--export([update_doc/3,update_doc/4,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
--export([get_doc_info/2,get_full_doc_info/2,get_full_doc_infos/2]).
--export([open_doc/2,open_doc/3,open_doc_revs/4]).
--export([set_revs_limit/2,get_revs_limit/1]).
--export([get_missing_revs/2,name/1,get_update_seq/1,get_committed_update_seq/1]).
--export([enum_docs/4,enum_docs_since/5]).
--export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
--export([increment_update_seq/1,get_purge_seq/1,purge_docs/2,get_last_purged/1]).
--export([start_link/3,open_doc_int/3,ensure_full_commit/1,ensure_full_commit/2]).
--export([set_security/2,get_security/1]).
--export([changes_since/4,changes_since/5,read_doc/2,new_revid/1]).
--export([check_is_admin/1, check_is_member/1, get_doc_count/1]).
--export([reopen/1, is_system_db/1, compression/1, make_doc/5]).
--export([load_validation_funs/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(VALID_DB_NAME, "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$").
-
-start_link(DbName, Filepath, Options) ->
-    case open_db_file(Filepath, Options) of
-    {ok, Fd} ->
-        {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {DbName,
-            Filepath, Fd, Options}, []),
-        unlink(Fd),
-        gen_server:call(UpdaterPid, get_db);
-    Else ->
-        Else
-    end.
-
-open_db_file(Filepath, Options) ->
-    case couch_file:open(Filepath, Options) of
-    {ok, Fd} ->
-        {ok, Fd};
-    {error, enoent} ->
-        % couldn't find file. is there a compact version? This can happen if
-        % the server crashed during the file switch.
-        case couch_file:open(Filepath ++ ".compact", [nologifmissing]) of
-        {ok, Fd} ->
-            ?LOG_INFO("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
-            ok = file:rename(Filepath ++ ".compact", Filepath),
-            ok = couch_file:sync(Fd),
-            {ok, Fd};
-        {error, enoent} ->
-            {not_found, no_db_file}
-        end;
-    Error ->
-        Error
-    end.
-
-
-create(DbName, Options) ->
-    couch_server:create(DbName, Options).
-
-% this is for opening a database for internal purposes like the replicator
-% or the view indexer. it never throws a reader error.
-open_int(DbName, Options) ->
-    couch_server:open(DbName, Options).
-
-% this should be called anytime an http request opens the database.
-% it ensures that the http userCtx is a valid reader
-open(DbName, Options) ->
-    case couch_server:open(DbName, Options) of
-        {ok, Db} ->
-            try
-                check_is_member(Db),
-                {ok, Db}
-            catch
-                throw:Error ->
-                    close(Db),
-                    throw(Error)
-            end;
-        Else -> Else
-    end.
-
-reopen(#db{main_pid = Pid, fd = Fd, fd_monitor = OldRef, user_ctx = UserCtx}) ->
-    {ok, #db{fd = NewFd} = NewDb} = gen_server:call(Pid, get_db, infinity),
-    case NewFd =:= Fd of
-    true ->
-        {ok, NewDb#db{user_ctx = UserCtx}};
-    false ->
-        erlang:demonitor(OldRef, [flush]),
-        NewRef = erlang:monitor(process, NewFd),
-        {ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}}
-    end.
-
-is_system_db(#db{options = Options}) ->
-    lists:member(sys_db, Options).
-
-ensure_full_commit(#db{main_pid=Pid, instance_start_time=StartTime}) ->
-    ok = gen_server:call(Pid, full_commit, infinity),
-    {ok, StartTime}.
-
-ensure_full_commit(Db, RequiredSeq) ->
-    #db{main_pid=Pid, instance_start_time=StartTime} = Db,
-    ok = gen_server:call(Pid, {full_commit, RequiredSeq}, infinity),
-    {ok, StartTime}.
-
-close(#db{fd_monitor=RefCntr}) ->
-    erlang:demonitor(RefCntr, [flush]),
-    ok.
-
-is_idle(#db{compactor_pid=nil, waiting_delayed_commit=nil} = Db) ->
-    case erlang:process_info(Db#db.fd, monitored_by) of
-    undefined ->
-        true;
-    {monitored_by, Pids} ->
-        (Pids -- [Db#db.main_pid, whereis(couch_stats_collector)]) =:= []
-    end;
-is_idle(_Db) ->
-    false.
-
-monitor(#db{main_pid=MainPid}) ->
-    erlang:monitor(process, MainPid).
-
-start_compact(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, start_compact).
-
-cancel_compact(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, cancel_compact).
-
-wait_for_compaction(Db) ->
-    wait_for_compaction(Db, infinity).
-
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
-    Start = erlang:now(),
-    case gen_server:call(Pid, compactor_pid) of
-        CPid when is_pid(CPid) ->
-            Ref = erlang:monitor(process, CPid),
-            receive
-                {'DOWN', Ref, _, _, normal} when Timeout == infinity ->
-                    wait_for_compaction(Db, Timeout);
-                {'DOWN', Ref, _, _, normal} ->
-                    Elapsed = timer:now_diff(now(), Start) div 1000,
-                    wait_for_compaction(Db, Timeout - Elapsed);
-                {'DOWN', Ref, _, _, Reason} ->
-                    {error, Reason}
-            after Timeout ->
-                erlang:demonitor(Ref, [flush]),
-                {error, Timeout}
-            end;
-        _ ->
-            ok
-    end.
-
-delete_doc(Db, Id, Revisions) ->
-    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
-    {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
-    open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
-    increment_stat(Db, {couchdb, database_reads}),
-    case open_doc_int(Db, Id, Options) of
-    {ok, #doc{deleted=true}=Doc} ->
-        case lists:member(deleted, Options) of
-        true ->
-            apply_open_options({ok, Doc},Options);
-        false ->
-            {not_found, deleted}
-        end;
-    Else ->
-        apply_open_options(Else,Options)
-    end.
-
-apply_open_options({ok, Doc},Options) ->
-    apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
-    Else.
-
-apply_open_options2(Doc,[]) ->
-    {ok, Doc};
-apply_open_options2(#doc{atts=Atts,revs=Revs}=Doc,
-        [{atts_since, PossibleAncestors}|Rest]) ->
-    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
-    apply_open_options2(Doc#doc{atts=[A#att{data=
-        if AttPos>RevPos -> Data; true -> stub end}
-        || #att{revpos=AttPos,data=Data}=A <- Atts]}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
-    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
-    apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
-    0;
-find_ancestor_rev_pos(_DocRevs, []) ->
-    0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
-    case lists:member({RevPos, RevId}, AttsSinceRevs) of
-    true ->
-        RevPos;
-    false ->
-        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
-    end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
-    increment_stat(Db, {couchdb, database_reads}),
-    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
-    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% Each returned result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% if no revs are missing, it's omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
-    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
-    {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
-    [];
-find_missing([{Id, Revs}|RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
-    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
-    [] ->
-        find_missing(RestIdRevs, RestLookupInfo);
-    MissingRevs ->
-        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-        % Find the revs that are possible parents of this rev
-        PossibleAncestors =
-        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-            % this leaf is a "possible ancenstor" of the missing
-            % revs if this LeafPos lessthan any of the missing revs
-            case lists:any(fun({MissingPos, _}) ->
-                    LeafPos < MissingPos end, MissingRevs) of
-            true ->
-                [{LeafPos, LeafRevId} | Acc];
-            false ->
-                Acc
-            end
-        end, [], LeafRevs),
-        [{Id, MissingRevs, PossibleAncestors} |
-                find_missing(RestIdRevs, RestLookupInfo)]
-    end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
-    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
-
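A sketch of the result shape (the doc id and rev ids are made up): requesting
a rev the database does not have returns it as missing, together with any leaf
revs at lower positions as possible ancestors:

    {ok, [{<<"doc1">>,
           [{3, <<"c3">>}],      % missing revs
           [{2, <<"b2">>}]}]}    % possible ancestors
        = couch_db:get_missing_revs(Db, [{<<"doc1">>, [{3, <<"c3">>}]}])
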
-get_doc_info(Db, Id) ->
-    case get_full_doc_info(Db, Id) of
-    {ok, DocInfo} ->
-        {ok, couch_doc:to_doc_info(DocInfo)};
-    Else ->
-        Else
-    end.
-
-%   returns {ok, DocInfo} or not_found
-get_full_doc_info(Db, Id) ->
-    [Result] = get_full_doc_infos(Db, [Id]),
-    Result.
-
-get_full_doc_infos(Db, Ids) ->
-    couch_btree:lookup(Db#db.id_tree, Ids).
-
-increment_update_seq(#db{main_pid=Pid}) ->
-    gen_server:call(Pid, increment_update_seq).
-
-purge_docs(#db{main_pid=Pid}, IdsRevs) ->
-    gen_server:call(Pid, {purge_docs, IdsRevs}).
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
-    Seq.
-
-get_update_seq(#db{update_seq=Seq})->
-    Seq.
-
-get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
-    PurgeSeq.
-
-get_last_purged(#db{header=#db_header{purged_docs=nil}}) ->
-    {ok, []};
-get_last_purged(#db{fd=Fd, header=#db_header{purged_docs=PurgedPointer}}) ->
-    couch_file:pread_term(Fd, PurgedPointer).
-
-get_doc_count(Db) ->
-    {ok, {Count, _, _}} = couch_btree:full_reduce(Db#db.id_tree),
-    {ok, Count}.
-
-get_db_info(Db) ->
-    #db{fd=Fd,
-        header=#db_header{disk_version=DiskVersion},
-        compactor_pid=Compactor,
-        update_seq=SeqNum,
-        name=Name,
-        instance_start_time=StartTime,
-        committed_update_seq=CommittedUpdateSeq,
-        id_tree = IdBtree,
-        seq_tree = SeqBtree,
-        local_tree = LocalBtree
-    } = Db,
-    {ok, Size} = couch_file:bytes(Fd),
-    {ok, DbReduction} = couch_btree:full_reduce(IdBtree),
-    InfoList = [
-        {db_name, Name},
-        {doc_count, element(1, DbReduction)},
-        {doc_del_count, element(2, DbReduction)},
-        {update_seq, SeqNum},
-        {purge_seq, couch_db:get_purge_seq(Db)},
-        {compact_running, Compactor/=nil},
-        {disk_size, Size},
-        {data_size, db_data_size(DbReduction, [SeqBtree, IdBtree, LocalBtree])},
-        {instance_start_time, StartTime},
-        {disk_format_version, DiskVersion},
-        {committed_update_seq, CommittedUpdateSeq}
-        ],
-    {ok, InfoList}.
-
-db_data_size({_Count, _DelCount}, _Trees) ->
-    % pre 1.2 format, upgraded on compaction
-    null;
-db_data_size({_Count, _DelCount, nil}, _Trees) ->
-    null;
-db_data_size({_Count, _DelCount, DocAndAttsSize}, Trees) ->
-    sum_tree_sizes(DocAndAttsSize, Trees).
-
-sum_tree_sizes(Acc, []) ->
-    Acc;
-sum_tree_sizes(Acc, [T | Rest]) ->
-    case couch_btree:size(T) of
-    nil ->
-        null;
-    Sz ->
-        sum_tree_sizes(Acc + Sz, Rest)
-    end.
-
-get_design_docs(#db{name = <<"shards/", _:18/binary, DbName/binary>>}) ->
-    {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
-    receive {'DOWN', Ref, _, _, Response} ->
-        Response
-    end;
-get_design_docs(#db{id_tree = IdBtree}) ->
-    FoldFun = skip_deleted(fun
-        (#full_doc_info{deleted = true}, _Reds, Acc) ->
-            {ok, Acc};
-        (#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, Acc) ->
-            {ok, [FullDocInfo | Acc]};
-        (_, _Reds, Acc) ->
-            {stop, Acc}
-    end),
-    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
-    {ok, _, Docs} = couch_btree:fold(IdBtree, FoldFun, [], KeyOpts),
-    {ok, Docs}.
-
-check_is_admin(#db{user_ctx=#user_ctx{name=Name,roles=Roles}}=Db) ->
-    {Admins} = get_admins(Db),
-    AdminRoles = [<<"_admin">> | couch_util:get_value(<<"roles">>, Admins, [])],
-    AdminNames = couch_util:get_value(<<"names">>, Admins,[]),
-    case AdminRoles -- Roles of
-    AdminRoles -> % same list, not an admin role
-        case AdminNames -- [Name] of
-        AdminNames -> % same names, not an admin
-            throw({unauthorized, <<"You are not a db or server admin.">>});
-        _ ->
-            ok
-        end;
-    _ ->
-        ok
-    end.
-
-check_is_member(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
-    case (catch check_is_admin(Db)) of
-    ok -> ok;
-    _ ->
-        {Members} = get_members(Db),
-        ReaderRoles = couch_util:get_value(<<"roles">>, Members,[]),
-        WithAdminRoles = [<<"_admin">> | ReaderRoles],
-        ReaderNames = couch_util:get_value(<<"names">>, Members,[]),
-        case ReaderRoles ++ ReaderNames of
-        [] -> ok; % no readers == public access
-        _Else ->
-            case WithAdminRoles -- Roles of
-            WithAdminRoles -> % same list, not a reader role
-                case ReaderNames -- [Name] of
-                ReaderNames -> % same names, not a reader
-                    ?LOG_DEBUG("Not a reader: UserCtx ~p vs Names ~p Roles ~p",[UserCtx, ReaderNames, WithAdminRoles]),
-                    throw({unauthorized, <<"You are not authorized to access this db.">>});
-                _ ->
-                    ok
-                end;
-            _ ->
-                ok
-            end
-        end
-    end.
-
-get_admins(#db{security=SecProps}) ->
-    couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
-    % we fall back to readers here for backwards compatibility
-    couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
-    {SecProps}.
-
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
-    check_is_admin(Db),
-    ok = validate_security_object(NewSecProps),
-    ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
-    {ok, _} = ensure_full_commit(Db),
-    ok;
-set_security(_, _) ->
-    throw(bad_request).
-
-validate_security_object(SecProps) ->
-    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
-    % we fall back to readers here for backwards compatibility
-    Members = couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})),
-    ok = validate_names_and_roles(Admins),
-    ok = validate_names_and_roles(Members),
-    ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
-    case couch_util:get_value(<<"names">>,Props,[]) of
-    Ns when is_list(Ns) ->
-            [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
-            Ns;
-    _ -> throw("names must be a JSON list of strings")
-    end,
-    case couch_util:get_value(<<"roles">>,Props,[]) of
-    Rs when is_list(Rs) ->
-        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
-        Rs;
-    _ -> throw("roles must be a JSON list of strings")
-    end,
-    ok.
-
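For reference, a security object that passes this validation looks like the
following (names and roles are illustrative); if both lists under members are
empty, check_is_member/1 above treats the database as publicly readable:

    {
        "admins":  {"names": ["bob"], "roles": ["ops"]},
        "members": {"names": ["alice"], "roles": ["reader"]}
    }
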
-get_revs_limit(#db{revs_limit=Limit}) ->
-    Limit.
-
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
-    throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
-    Name.
-
-compression(#db{compression=Compression}) ->
-    Compression.
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
-    case update_docs(Db, [Doc], Options, UpdateType) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        {Pos, [RevId | _]} = Doc#doc.revs,
-        {ok, {Pos, RevId}}
-    end.
-
-update_docs(Db, Docs) ->
-    update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
-    Sorted = lists:sort(fun({#doc{id=A},_},{#doc{id=B},_})-> A < B end, Docs),
-    group_alike_docs(Sorted, []).
-
-group_alike_docs([], Buckets) ->
-    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
-    group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([{Doc,Ref}|Rest], [Bucket|RestBuckets]) ->
-    [{#doc{id=BucketId},_Ref}|_] = Bucket,
-    case Doc#doc.id == BucketId of
-    true ->
-        % add to existing bucket
-        group_alike_docs(Rest, [[{Doc,Ref}|Bucket]|RestBuckets]);
-    false ->
-        % add to new bucket
-        group_alike_docs(Rest, [[{Doc,Ref}]|[Bucket|RestBuckets]])
-    end.
-
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
-    case catch check_is_admin(Db) of
-        ok -> validate_ddoc(Db#db.name, Doc);
-        Error -> Error
-    end;
-validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
-    ValidationFuns = load_validation_funs(Db),
-    validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
-    case get(io_priority) of
-        {internal_repl, _} ->
-            ok;
-        _ ->
-            validate_doc_update_int(Db, Doc, GetDiskDocFun)
-    end.
-
-validate_ddoc(DbName, DDoc) ->
-    try
-        couch_index_server:validate(DbName, couch_doc:with_ejson_body(DDoc))
-    catch
-        throw:Error ->
-            Error
-    end.
-
-validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
-    DiskDoc = GetDiskDocFun(),
-    JsonCtx = couch_util:json_user_ctx(Db),
-    SecObj = get_security(Db),
-    try [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
-            ok -> ok;
-            Error -> throw(Error)
-        end || Fun <- Db#db.validate_doc_funs],
-        ok
-    catch
-        throw:Error ->
-            Error
-    end.
-
-
-% to be safe, call ddoc_cache through a monitored middleman process
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
-    end),
-    receive
-        {'DOWN', Ref, _, _, {ok, Funs}} ->
-            gen_server:cast(Pid, {load_validation_funs, Funs}),
-            Funs;
-        {'DOWN', Ref, _, _, Reason} ->
-            ?LOG_ERROR("could not load validation funs ~p", [Reason]),
-            throw(internal_server_error)
-    end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
-    {ok, DDocInfos} = get_design_docs(Db),
-    OpenDocs = fun
-        (#full_doc_info{}=D) ->
-            {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
-            Doc
-    end,
-    DDocs = lists:map(OpenDocs, DDocInfos),
-    Funs = lists:flatmap(fun(DDoc) ->
-        case couch_doc:get_validate_doc_fun(DDoc) of
-            nil -> [];
-            Fun -> [Fun]
-        end
-    end, DDocs),
-    gen_server:cast(Pid, {load_validation_funs, Funs}),
-    Funs.
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
-        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
-    case Revs of
-    [PrevRev|_] ->
-        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
-        {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
-                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
-            false ->
-                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
-                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
-            end;
-        error when AllowConflict ->
-            couch_doc:merge_stubs(Doc, #doc{}), % will generate an error
-                                                % if there are stubs
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        error ->
-            {conflict, Doc}
-        end;
-    [] ->
-        % new doc, and we have existing revs.
-        % reuse existing deleted doc
-        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        true ->
-            {conflict, Doc}
-        end
-    end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
-        AccFatalErrors) ->
-   {AccPrepped, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun({#doc{revs=Revs}=Doc,Ref}, {AccBucket, AccErrors2}) ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-            false -> ok
-            end,
-            case Revs of
-            {0, []} ->
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[{Doc, Ref} | AccBucket], AccErrors2};
-                Error ->
-                    {AccBucket, [{Ref, Error} | AccErrors2]}
-                end;
-            _ ->
-                % old revs specified but none exist, a conflict
-                {AccBucket, [{Ref, conflict} | AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [lists:reverse(PreppedBucket) | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
-        [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
-    LeafRevsDict = dict:from_list([
-        {{Start, RevId}, {Leaf, Revs}} ||
-        {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
-    ]),
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun({Doc, Ref}, {Docs2Acc, AccErrors2}) ->
-            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
-                    LeafRevsDict, AllowConflict) of
-            {ok, Doc2} ->
-                {[{Doc2, Ref} | Docs2Acc], AccErrors2};
-            {Error, #doc{}} ->
-                % Record the error
-                {Docs2Acc, [{Ref, Error} |AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
-    update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
-            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
-    {lists:reverse(AccPrepped), lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
-    case OldInfo of
-    not_found ->
-        {ValidatedBucket, AccErrors3} = lists:foldl(
-            fun({Doc, Ref}, {AccPrepped2, AccErrors2}) ->
-                case couch_doc:has_stubs(Doc) of
-                true ->
-                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-                false -> ok
-                end,
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[{Doc, Ref} | AccPrepped2], AccErrors2};
-                Error ->
-                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
-    {ok, #full_doc_info{rev_tree=OldTree}} ->
-        OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
-        OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
-        NewRevTree = lists:foldl(
-            fun({NewDoc, _Ref}, AccTree) ->
-                {NewTree, _} = couch_key_tree:merge(AccTree,
-                    couch_doc:to_path(NewDoc), Db#db.revs_limit),
-                NewTree
-            end,
-            OldTree, Bucket),
-        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
-        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
-        {ValidatedBucket, AccErrors3} =
-        lists:foldl(
-            fun({#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, Ref}, {AccValidated, AccErrors2}) ->
-                IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
-                case dict:find({Pos, RevId}, LeafRevsFullDict) of
-                {ok, {Start, Path}} when not IsOldLeaf ->
-                    % our unflushed doc is a leaf node. Go back on the path
-                    % to find the previous rev that's on disk.
-
-                    LoadPrevRevFun = fun() ->
-                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
-                            end,
-
-                    case couch_doc:has_stubs(Doc) of
-                    true ->
-                        DiskDoc = case LoadPrevRevFun() of
-                            #doc{} = DiskDoc0 ->
-                                DiskDoc0;
-                            _ ->
-                                % Force a missing_stub exception
-                                couch_doc:merge_stubs(Doc, #doc{})
-                        end,
-                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                        GetDiskDocFun = fun() -> DiskDoc end;
-                    false ->
-                        Doc2 = Doc,
-                        GetDiskDocFun = LoadPrevRevFun
-                    end,
-
-                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
-                    ok ->
-                        {[{Doc2, Ref} | AccValidated], AccErrors2};
-                    Error ->
-                        {AccValidated, [{Doc, Error} | AccErrors2]}
-                    end;
-                _ ->
-                    % this doc isn't a leaf or it already exists in the
-                    % tree. Ignore it, but consider it a success.
-                    {AccValidated, AccErrors2}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
-                [ValidatedBucket | AccPrepped], AccErrors3)
-    end.
-
-
-
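-% Deterministically derive the next revision id: an md5 over the deleted
-% flag, the previous rev position and id, the body, and the attachment
-% md5s. Old-style attachments without md5s fall back to a random id.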
-new_revid(#doc{body=Body,revs={OldStart,OldRevs},
-        atts=Atts,deleted=Deleted}) ->
-    case [{N, T, M} || #att{name=N,type=T,md5=M} <- Atts, M =/= <<>>] of
-    Atts2 when length(Atts) =/= length(Atts2) ->
-        % We must have old style non-md5 attachments
-        ?l2b(integer_to_list(couch_util:rand32()));
-    Atts2 ->
-        OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
-        couch_util:md5(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2]))
-    end.
-
-new_revs([], OutBuckets, IdRevsAcc) ->
-    {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
-    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
-        fun({#doc{revs={Start, RevIds}}=Doc, Ref}, IdRevsAcc2)->
-        NewRevId = new_revid(Doc),
-        {{Doc#doc{revs={Start+1, [NewRevId | RevIds]}}, Ref},
-            [{Ref, {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
-    end, IdRevsAcc, Bucket),
-    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
-    Atts2 = lists:sort(fun(#att{name=N1}, #att{name=N2}) -> N1 < N2 end, Atts),
-    check_dup_atts2(Atts2),
-    Doc.
-
-check_dup_atts2([#att{name=N}, #att{name=N} | _]) ->
-    throw({bad_request, <<"Duplicate attachments">>});
-check_dup_atts2([_ | Rest]) ->
-    check_dup_atts2(Rest);
-check_dup_atts2(_) ->
-    ok.
-
-
-update_docs(Db, Docs, Options, replicated_changes) ->
-    increment_stat(Db, {couchdb, database_writes}),
-    % associate reference with each doc in order to track duplicates
-    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end, Docs),
-    DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)),
-    case (Db#db.validate_doc_funs /= []) orelse
-        lists:any(
-            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) -> true;
-            ({#doc{atts=Atts}, _Ref}) ->
-                Atts /= []
-            end, Docs2) of
-    true ->
-        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
-        ExistingDocs = get_full_doc_infos(Db, Ids),
-
-        {DocBuckets2, DocErrors} =
-                prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
-        DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
-    false ->
-        DocErrors = [],
-        DocBuckets3 = DocBuckets
-    end,
-    DocBuckets4 = [[{doc_flush_atts(check_dup_atts(Doc), Db#db.fd), Ref}
-            || {Doc, Ref} <- Bucket] || Bucket <- DocBuckets3],
-    {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
-    {ok, DocErrors};
-
-update_docs(Db, Docs, Options, interactive_edit) ->
-    increment_stat(Db, {couchdb, database_writes}),
-    AllOrNothing = lists:member(all_or_nothing, Options),
-    % first separate out the non-replicated (_local) documents, then
-    % generate new revision ids for the rest
-
-    % associate reference with each doc in order to track duplicates
-    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end, Docs),
-    {Docs3, NonRepDocs} = lists:foldl(
-         fun({#doc{id=Id},_Ref}=Doc, {DocsAcc, NonRepDocsAcc}) ->
-            case Id of
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                {DocsAcc, [Doc | NonRepDocsAcc]};
-            Id ->
-                {[Doc | DocsAcc], NonRepDocsAcc}
-            end
-        end, {[], []}, Docs2),
-
-    DocBuckets = before_docs_update(Db, group_alike_docs(Docs3)),
-
-    case (Db#db.validate_doc_funs /= []) orelse
-        lists:any(
-            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) ->
-                true;
-            ({#doc{atts=Atts}, _Ref}) ->
-                Atts /= []
-            end, Docs3) of
-    true ->
-        % lookup the doc by id and get the most recent
-        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
-        ExistingDocInfos = get_full_doc_infos(Db, Ids),
-
-        {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
-                DocBuckets, ExistingDocInfos, AllOrNothing, [], []),
-
-        % strip out any empty buckets
-        DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
-    false ->
-        PreCommitFailures = [],
-        DocBuckets2 = DocBuckets
-    end,
-
-    if (AllOrNothing) and (PreCommitFailures /= []) ->
-        {aborted,
-         lists:foldl(fun({#doc{id=Id,revs={Pos, RevIds}}, Ref},Acc) ->
-                         case lists:keyfind(Ref,1,PreCommitFailures) of
-                         {Ref, Error} ->
-                             [{{Id,{Pos,RevIds}}, Error} | Acc];
-                         false ->
-                             Acc
-                         end
-                     end,[],Docs3)};
-
-    true ->
-        Options2 = if AllOrNothing -> [merge_conflicts];
-                true -> [] end ++ Options,
-        DocBuckets3 = [[
-                {doc_flush_atts(set_new_att_revpos(
-                        check_dup_atts(Doc)), Db#db.fd), Ref}
-                || {Doc, Ref} <- B] || B <- DocBuckets2],
-        {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
-
-        {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),
-
-        ResultsDict = dict:from_list(IdRevs ++ CommitResults ++ PreCommitFailures),
-        {ok, lists:map(
-            fun({#doc{}, Ref}) ->
-                {ok, Result} = dict:find(Ref, ResultsDict),
-                Result
-            end, Docs2)}
-    end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
-    nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
-    Revs = [Rev || {Rev, _} <- DocPath],
-    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-set_commit_option(Options) ->
-    CommitSettings = {
-        [true || O <- Options, O==full_commit orelse O==delay_commit],
-        config:get("couchdb", "delayed_commits", "false")
-    },
-    case CommitSettings of
-    {[true], _} ->
-        Options; % user requested explicit commit setting, do not change it
-    {_, "true"} ->
-        Options; % delayed commits are enabled, do nothing
-    {_, "false"} ->
-        [full_commit|Options];
-    {_, Else} ->
-        ?LOG_ERROR("[couchdb] delayed_commits setting must be true/false, not ~p",
-            [Else]),
-        [full_commit|Options]
-    end.
-
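-% Gather streamed write results from the db updater. A 'retry' reply means
-% the underlying file changed (e.g. compaction swapped it out) and the
-% caller must reopen the db and resend.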
-collect_results(Pid, MRef, ResultsAcc) ->
-    receive
-    {result, Pid, Result} ->
-        collect_results(Pid, MRef, [Result | ResultsAcc]);
-    {done, Pid} ->
-        {ok, ResultsAcc};
-    {retry, Pid} ->
-        retry;
-    {'DOWN', MRef, _, _, Reason} ->
-        exit(Reason)
-    end.
-
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
-        NonRepDocs, Options0) ->
-    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
-    Options = set_commit_option(Options0),
-    MergeConflicts = lists:member(merge_conflicts, Options),
-    FullCommit = lists:member(full_commit, Options),
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts, FullCommit},
-        case collect_results(Pid, MRef, []) of
-        {ok, Results} -> {ok, Results};
-        retry ->
-            % This can happen if the db file we wrote to was swapped out by
-            % compaction. Retry by reopening the db and writing to the current file
-            {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
-            DocBuckets2 = [
-                [{doc_flush_atts(Doc, Db2#db.fd), Ref} || {Doc, Ref} <- Bucket] ||
-                Bucket <- DocBuckets1
-            ],
-            % We only retry once
-            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
-            close(Db2),
-            Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts, FullCommit},
-            case collect_results(Pid, MRef, []) of
-            {ok, Results} -> {ok, Results};
-            retry -> throw({update_error, compaction_retry})
-            end
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
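-% Pre-serialize each doc body plus attachment metadata into its on-disk
-% summary binary, stashing it in the body field as {summary, Bin, AttsFd}
-% so the updater can write it without re-encoding.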
-prepare_doc_summaries(Db, BucketList) ->
-    [lists:map(
-        fun({#doc{body = Body, atts = Atts} = Doc, Ref}) ->
-            DiskAtts = [{N, T, P, AL, DL, R, M, E} ||
-                #att{name = N, type = T, data = {_, P}, md5 = M, revpos = R,
-                    att_len = AL, disk_len = DL, encoding = E} <- Atts],
-            AttsFd = case Atts of
-            [#att{data = {Fd, _}} | _] ->
-                Fd;
-            [] ->
-                nil
-            end,
-            SummaryChunk = couch_db_updater:make_doc_summary(Db, {Body, DiskAtts}),
-            {Doc#doc{body = {summary, SummaryChunk, AttsFd}}, Ref}
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
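-% Run the db's before_doc_update hook (if any) over every doc, handing it
-% an EJSON body to work with.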
-before_docs_update(#db{before_doc_update = nil}, BucketList) ->
-    BucketList;
-before_docs_update(#db{before_doc_update = Fun} = Db, BucketList) ->
-    [lists:map(
-        fun({Doc, Ref}) ->
-            NewDoc = Fun(couch_doc:with_ejson_body(Doc), Db),
-            {NewDoc, Ref}
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts}=Doc) ->
-    Doc#doc{atts= lists:map(fun(#att{data={_Fd,_Sp}}=Att) ->
-            % already committed to disk, do not set new rev
-            Att;
-        (Att) ->
-            Att#att{revpos=RevPos+1}
-        end, Atts)}.
-
-
-doc_flush_atts(Doc, Fd) ->
-    Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}.
-
-check_md5(_NewSig, <<>>) -> ok;
-check_md5(Sig, Sig) -> ok;
-check_md5(_, _) -> throw(md5_mismatch).
-
-flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd ->
-    % already written to our file, nothing to write
-    Att;
-
-flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5,
-    disk_len=InDiskLen} = Att) ->
-    {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
-            couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
-    check_md5(IdentityMd5, InMd5),
-    Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=InDiskLen};
-
-flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) ->
-    with_stream(Fd, Att, fun(OutputStream) ->
-        couch_stream:write(OutputStream, Data)
-    end);
-
-flush_att(Fd, #att{data=Fun,att_len=undefined}=Att) when is_function(Fun) ->
-    MaxChunkSize = list_to_integer(
-        config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    with_stream(Fd, Att, fun(OutputStream) ->
-        % Fun(MaxChunkSize, WriterFun) must call WriterFun
-        % once for each chunk of the attachment.
-        Fun(MaxChunkSize,
-            % WriterFun({Length, Binary}, State)
-            % WriterFun({0, _Footers}, State)
-            % Called with Length == 0 on the last time.
-            % WriterFun returns NewState.
-            fun({0, Footers}, _) ->
-                F = mochiweb_headers:from_binary(Footers),
-                case mochiweb_headers:get_value("Content-MD5", F) of
-                undefined ->
-                    ok;
-                Md5 ->
-                    {md5, base64:decode(Md5)}
-                end;
-            ({_Length, Chunk}, _) ->
-                couch_stream:write(OutputStream, Chunk)
-            end, ok)
-    end);
-
-flush_att(Fd, #att{data=Fun,att_len=AttLen}=Att) when is_function(Fun) ->
-    with_stream(Fd, Att, fun(OutputStream) ->
-        write_streamed_attachment(OutputStream, Fun, AttLen)
-    end);
-
-flush_att(Fd, #att{data={follows, Parser, Ref}}=Att) when is_pid(Parser) ->
-    ParserRef = erlang:monitor(process, Parser),
-    Fun = fun() ->
-        Parser ! {get_bytes, Ref, self()},
-        receive
-            {started_open_doc_revs, NewRef} ->
-                couch_doc:restart_open_doc_revs(Parser, Ref, NewRef);
-            {bytes, Ref, Bytes} ->
-                Bytes;
-            {'DOWN', ParserRef, _, _, Reason} ->
-                throw({mp_parser_died, Reason})
-        end
-    end,
-    try
-        flush_att(Fd, Att#att{data=Fun})
-    after
-        erlang:demonitor(ParserRef, [flush])
-    end.
-
-
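-% True if the MIME type matches one of the comma-separated (possibly
-% wildcarded) patterns in the [attachments] compressible_types setting.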
-compressible_att_type(MimeType) when is_binary(MimeType) ->
-    compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
-    TypeExpList = re:split(
-        config:get("attachments", "compressible_types", ""),
-        "\\s*,\\s*",
-        [{return, list}]
-    ),
-    lists:any(
-        fun(TypeExp) ->
-            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-                "(?:\\s*;.*?)?\\s*", $$],
-            re:run(MimeType, Regexp, [caseless]) =/= nomatch
-        end,
-        [T || T <- TypeExpList, T /= []]
-    ).
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-%   In other words, the origin server is willing to accept
-%   the possibility that the trailer fields might be silently
-%   discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
-    BufferSize = list_to_integer(
-        config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    {ok, OutputStream} = case (Enc =:= identity) andalso
-        compressible_att_type(Type) of
-    true ->
-        CompLevel = list_to_integer(
-            config:get("attachments", "compression_level", "0")
-        ),
-        couch_stream:open(Fd, [{buffer_size, BufferSize},
-            {encoding, gzip}, {compression_level, CompLevel}]);
-    _ ->
-        couch_stream:open(Fd, [{buffer_size, BufferSize}])
-    end,
-    ReqMd5 = case Fun(OutputStream) of
-        {md5, FooterMd5} ->
-            case InMd5 of
-                md5_in_footer -> FooterMd5;
-                _ -> InMd5
-            end;
-        _ ->
-            InMd5
-    end,
-    {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
-        couch_stream:close(OutputStream),
-    check_md5(IdentityMd5, ReqMd5),
-    {AttLen, DiskLen, NewEnc} = case Enc of
-    identity ->
-        case {Md5, IdentityMd5} of
-        {Same, Same} ->
-            {Len, IdentityLen, identity};
-        _ ->
-            {Len, IdentityLen, gzip}
-        end;
-    gzip ->
-        case {Att#att.att_len, Att#att.disk_len} of
-        {AL, DL} when AL =:= undefined orelse DL =:= undefined ->
-            % Compressed attachment uploaded through the standalone API.
-            {Len, Len, gzip};
-        {AL, DL} ->
-            % This case is used for efficient push-replication, where a
-            % compressed attachment is located in the body of multipart
-            % content-type request.
-            {AL, DL, gzip}
-        end
-    end,
-    Att#att{
-        data={Fd,StreamInfo},
-        att_len=AttLen,
-        disk_len=DiskLen,
-        md5=Md5,
-        encoding=NewEnc
-    }.
-
-
-write_streamed_attachment(_Stream, _F, 0) ->
-    ok;
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
-    Bin = read_next_chunk(F, LenLeft),
-    ok = couch_stream:write(Stream, Bin),
-    write_streamed_attachment(Stream, F, LenLeft - size(Bin)).
-
-read_next_chunk(F, _) when is_function(F, 0) ->
-    F();
-read_next_chunk(F, LenLeft) when is_function(F, 1) ->
-    F(lists:min([LenLeft, 16#2000])).
-
-enum_docs_since_reduce_to_count(Reds) ->
-    couch_btree:final_reduce(
-            fun couch_db_updater:btree_by_seq_reduce/2, Reds).
-
-enum_docs_reduce_to_count(Reds) ->
-    FinalRed = couch_btree:final_reduce(
-            fun couch_db_updater:btree_by_id_reduce/2, Reds),
-    element(1, FinalRed).
-
-changes_since(Db, StartSeq, Fun, Acc) ->
-    changes_since(Db, StartSeq, Fun, [], Acc).
-
-changes_since(Db, StartSeq, Fun, Options, Acc) ->
-    Wrapper = fun(FullDocInfo, _Offset, Acc2) ->
-        DocInfo = case FullDocInfo of
-            #full_doc_info{} ->
-                couch_doc:to_doc_info(FullDocInfo);
-            #doc_info{} ->
-                FullDocInfo
-        end,
-        Fun(DocInfo, Acc2)
-    end,
-    {ok, _LastReduction, AccOut} = couch_btree:fold(Db#db.seq_tree,
-        Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
-    {ok, AccOut}.
-
-count_changes_since(Db, SinceSeq) ->
-    BTree = Db#db.seq_tree,
-    {ok, Changes} =
-    couch_btree:fold_reduce(BTree,
-        fun(_SeqStart, PartialReds, 0) ->
-            {ok, couch_btree:final_reduce(BTree, PartialReds)}
-        end,
-        0, [{start_key, SinceSeq + 1}]),
-    Changes.
-
-enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
-    {ok, LastReduction, AccOut} = couch_btree:fold(
-        Db#db.seq_tree, InFun, Acc,
-            [{start_key, SinceSeq + 1} | Options]),
-    {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
-
-enum_docs(Db, InFun, InAcc, Options) ->
-    FoldFun = skip_deleted(InFun),
-    {ok, LastReduce, OutAcc} = couch_btree:fold(
-        Db#db.id_tree, FoldFun, InAcc, Options),
-    {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
-
-
-%%% Internal functions %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
-    Ids = [Id || {Id, _Revs} <- IdRevs],
-    LookupResults = get_full_doc_infos(Db, Ids),
-    lists:zipwith(
-        fun({Id, Revs}, Lookup) ->
-            case Lookup of
-            {ok, #full_doc_info{rev_tree=RevTree}} ->
-                {FoundRevs, MissingRevs} =
-                case Revs of
-                all ->
-                    {couch_key_tree:get_all_leafs(RevTree), []};
-                _ ->
-                    case lists:member(latest, Options) of
-                    true ->
-                        couch_key_tree:get_key_leafs(RevTree, Revs);
-                    false ->
-                        couch_key_tree:get(RevTree, Revs)
-                    end
-                end,
-                FoundResults =
-                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
-                    case Value of
-                    ?REV_MISSING ->
-                        % we have the rev in our list but know nothing about it
-                        {{not_found, missing}, {Pos, Rev}};
-                    #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
-                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
-                    end
-                end, FoundRevs),
-                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
-                {ok, Results};
-            not_found when Revs == all ->
-                {ok, []};
-            not_found ->
-                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
-            end
-        end,
-        IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
-    case couch_btree:lookup(Db#db.local_tree, [Id]) of
-    [{ok, {_, {Rev, BodyData}}}] ->
-        Doc = #doc{id=Id, revs={0, [?l2b(integer_to_list(Rev))]}, body=BodyData},
-        apply_open_options({ok, Doc}, Options);
-    [not_found] ->
-        {not_found, missing}
-    end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
-    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
-    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
-    apply_open_options(
-       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
-        DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
-    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
-    apply_open_options(
-        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
-    case get_full_doc_info(Db, Id) of
-    {ok, FullDocInfo} ->
-        open_doc_int(Db, FullDocInfo, Options);
-    not_found ->
-        {not_found, missing}
-    end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
-    case lists:member(revs_info, Options) of
-    false -> [];
-    true ->
-        {[{Pos, RevPath}],[]} =
-            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
-        [{revs_info, Pos, lists:map(
-            fun({Rev1, ?REV_MISSING}) ->
-                {Rev1, missing};
-            ({Rev1, Leaf}) ->
-                case Leaf#leaf.deleted of
-                true ->
-                    {Rev1, deleted};
-                false ->
-                    {Rev1, available}
-                end
-            end, RevPath)}]
-    end ++
-    case lists:member(conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
-        [] -> [];
-        ConflictRevs -> [{conflicts, ConflictRevs}]
-        end
-    end ++
-    case lists:member(deleted_conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
-        [] -> [];
-        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
-        end
-    end ++
-    case lists:member(local_seq, Options) of
-    false -> [];
-    true -> [{local_seq, Seq}]
-    end.
-
-read_doc(#db{fd=Fd}, Pos) ->
-    couch_file:pread_term(Fd, Pos).
-
-
-make_doc(#db{fd = Fd} = Db, Id, Deleted, Bp, RevisionPath) ->
-    {BodyData, Atts} =
-    case Bp of
-    nil ->
-        {[], []};
-    _ ->
-        {ok, {BodyData0, Atts00}} = read_doc(Db, Bp),
-        Atts0 = case Atts00 of
-        _ when is_binary(Atts00) ->
-            couch_compress:decompress(Atts00);
-        _ when is_list(Atts00) ->
-            % pre 1.2 format
-            Atts00
-        end,
-        {BodyData0,
-            lists:map(
-                fun({Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=DiskLen,
-                        md5=Md5,
-                        revpos=RevPos,
-                        data={Fd,Sp},
-                        encoding=
-                            case Enc of
-                            true ->
-                                % 0110 UPGRADE CODE
-                                gzip;
-                            false ->
-                                % 0110 UPGRADE CODE
-                                identity;
-                            _ ->
-                                Enc
-                            end
-                    };
-                ({Name,Type,Sp,AttLen,RevPos,Md5}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=AttLen,
-                        md5=Md5,
-                        revpos=RevPos,
-                        data={Fd,Sp}};
-                ({Name,{Type,Sp,AttLen}}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=AttLen,
-                        md5= <<>>,
-                        revpos=0,
-                        data={Fd,Sp}}
-                end, Atts0)}
-    end,
-    Doc = #doc{
-        id = Id,
-        revs = RevisionPath,
-        body = BodyData,
-        atts = Atts,
-        deleted = Deleted
-    },
-    after_doc_read(Db, Doc).
-
-
-after_doc_read(#db{after_doc_read = nil}, Doc) ->
-    Doc;
-after_doc_read(#db{after_doc_read = Fun} = Db, Doc) ->
-    Fun(couch_doc:with_ejson_body(Doc), Db).
-
-
-increment_stat(#db{options = Options}, Stat) ->
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats_collector:increment(Stat)
-    end.
-
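-% Wrap a btree fold fun so that subtrees whose reduction reports zero
-% undeleted documents are skipped outright rather than visited.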
-skip_deleted(FoldFun) ->
-    fun
-        (visit, KV, Reds, Acc) ->
-            FoldFun(KV, Reds, Acc);
-        (traverse, _LK, {Undeleted, _Del, _Size}, Acc) when Undeleted == 0 ->
-            {skip, Acc};
-        (traverse, _, _, Acc) ->
-            {ok, Acc}
-    end.
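
For reference, the security object that set_security/2 and
validate_security_object/1 above accept is an EJSON object whose "admins"
and "members" sections each carry optional "names" and "roles" lists of
binaries. A minimal sketch, with hypothetical user and role names, and
assuming Db is a #db{} handle opened with admin privileges:

    SecObj = {[
        {<<"admins">>, {[
            {<<"names">>, [<<"alice">>]},
            {<<"roles">>, [<<"ops">>]}
        ]}},
        {<<"members">>, {[
            {<<"names">>, [<<"bob">>]},
            {<<"roles">>, []}
        ]}}
    ]},
    ok = couch_db:set_security(Db, SecObj).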

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_db_update_notifier.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_db_update_notifier.erl b/src/couch/src/couch_db_update_notifier.erl
deleted file mode 100644
index 3958917..0000000
--- a/src/couch/src/couch_db_update_notifier.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%
-% This causes an OS process to be spawned, and it is notified every time a
-% database is updated.
-%
-% The notifications are in the form of the database name sent as a line of
-% text to the OS process's stdout.
-%
-
--module(couch_db_update_notifier).
-
--behaviour(gen_event).
-
--export([start_link/1, notify/1]).
--export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link(Exec) ->
-    couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
-
-notify(Event) ->
-    gen_event:notify(couch_db_update, Event).
-
-stop(Pid) ->
-    couch_event_sup:stop(Pid).
-
-init(Exec) when is_list(Exec) -> % an executable path
-    couch_os_process:start_link(Exec, []);
-init(Else) ->
-    {ok, Else}.
-
-terminate(_Reason, Pid) when is_pid(Pid) ->
-    couch_os_process:stop(Pid),
-    ok;
-terminate(_Reason, _State) ->
-    ok.
-
-handle_event(Event, Fun) when is_function(Fun, 1) ->
-    Fun(Event),
-    {ok, Fun};
-handle_event(Event, {Fun, FunAcc}) ->
-    FunAcc2 = Fun(Event, FunAcc),
-    {ok, {Fun, FunAcc2}};
-handle_event({EventType, EventDesc}, Pid) ->
-    Obj = encode_event(EventType, EventDesc),
-    ok = couch_os_process:send(Pid, Obj),
-    {ok, Pid}.
-
-handle_call(_Request, State) ->
-    {reply, ok, State}.
-
-handle_info({'EXIT', Pid, Reason}, Pid) ->
-    ?LOG_ERROR("Update notification process ~p died: ~p", [Pid, Reason]),
-    remove_handler;
-handle_info({'EXIT', _, _}, Pid) ->
-    %% the db_update event manager traps exits and forwards this message to all
-    %% its handlers. Just ignore as it wasn't our os_process that exited.
-    {ok, Pid}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-encode_event(EventType, EventDesc) when is_atom(EventType) ->
-    encode_event(atom_to_list(EventType), EventDesc);
-encode_event(EventType, EventDesc) when is_list(EventType) ->
-    encode_event(?l2b(EventType), EventDesc);
-encode_event(EventType, {DbName, DocId}) ->
-    {[{type, EventType}, {db, DbName}, {id, DocId}]};
-encode_event(EventType, DbName) ->
-    {[{type, EventType}, {db, DbName}]}.
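
For reference, a minimal usage sketch (hypothetical database and doc
names); fun-based handlers receive the raw event term, while OS-process
handlers receive the EJSON produced by encode_event/2 above:

    %% Database-level event, encoded as {[{type,<<"updated">>},{db,<<"mydb">>}]}
    ok = couch_db_update_notifier:notify({updated, <<"mydb">>}),

    %% Event with a doc id, carrying both the db name and the doc id
    ok = couch_db_update_notifier:notify({updated, {<<"mydb">>, <<"doc1">>}}).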


[46/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Make the JavaScript test runner work

This currently removes a bunch of stuff that we normally use during
the test suite so that I can get the test runner going. I fully intend to
undo these changes in the future.


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/0bb58f51
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/0bb58f51
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/0bb58f51

Branch: refs/heads/1843-feature-bigcouch
Commit: 0bb58f51e68cb58d215d53088a0839f595512b7a
Parents: 03ea534
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 22:06:00 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Wed Feb 5 08:34:52 2014 -0600

----------------------------------------------------------------------
 .gitignore              |  2 ++
 configure               |  4 +++
 test/javascript/run.tpl | 68 ++++----------------------------------------
 3 files changed, 11 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/0bb58f51/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 902035d..8ea2016 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ share/server/main.js
 src/couch/priv/couch_js/config.h
 src/couch/priv/couchjs
 src/couch/priv/couchspawnkillable
+
+test/javascript/run

http://git-wip-us.apache.org/repos/asf/couchdb/blob/0bb58f51/configure
----------------------------------------------------------------------
diff --git a/configure b/configure
index 7036d7c..24c7cea 100755
--- a/configure
+++ b/configure
@@ -104,6 +104,10 @@ cat > $rootdir/config.erl << EOF
 {with_curl, $WITH_CURL}.
 EOF
 
+# Write out the JavaScript test command
+sed -e "s|%rootdir%|$rootdir|" < test/javascript/run.tpl > test/javascript/run
+chmod +x test/javascript/run
+
 # finally, a few config files for local development nodes
 for i in 1 2 3; do
 cat > rel/dev$i.config << EOF

http://git-wip-us.apache.org/repos/asf/couchdb/blob/0bb58f51/test/javascript/run.tpl
----------------------------------------------------------------------
diff --git a/test/javascript/run.tpl b/test/javascript/run.tpl
index 75192da..c0fd693 100644
--- a/test/javascript/run.tpl
+++ b/test/javascript/run.tpl
@@ -12,73 +12,26 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-SRC_DIR=%abs_top_srcdir%
-BUILD_DIR=%abs_top_builddir%
+SRC_DIR=%rootdir%
+BUILD_DIR=%rootdir%
 SCRIPT_DIR=$SRC_DIR/share/www/script
 JS_TEST_DIR=$SRC_DIR/test/javascript
+COUCHJS=%rootdir%/src/couch/priv/couchjs
 
-COUCHJS=%abs_top_builddir%/src/couchdb/priv/couchjs
-COUCH_URI_FILE=%localstaterundir%/couch.uri
-
-# make check-js calls us with MAKE=$(MAKE) so BSDish `gmake` invocations
-# will get passed on correctly. If $0 gets run manually, default to
-# `make`
-if [ -z "$MAKE" ]; then
-    MAKE=make
-fi
-
-trap 'abort' EXIT INT
-
-start() {
-	./utils/run -b -r 0 -n \
-		-a $BUILD_DIR/etc/couchdb/default_dev.ini \
-		-a $SRC_DIR/test/random_port.ini \
-		-a $BUILD_DIR/etc/couchdb/local_dev.ini 1>/dev/null
-}
-
-stop() {
-    ./utils/run -d 1>/dev/null
-}
-
-restart() {
-    stop
-    start
-}
-
-abort() {
-    trap - 0
-    stop
-    exit 2
-}
-
-process_response() {
-    while read data
-    do
-        if [ $data = 'restart' ];
-        then
-            if [ -z $COUCHDB_NO_START ]; then
-                restart
-            fi
-        else
-            echo "$data"
-        fi
-    done
-}
 
 run() {
     # start the tests
     /bin/echo -n "$1 ... "
-    $COUCHJS -H -u $COUCH_URI_FILE \
+    $COUCHJS -H \
         $SCRIPT_DIR/json2.js \
         $SCRIPT_DIR/sha1.js \
         $SCRIPT_DIR/oauth.js \
         $SCRIPT_DIR/couch.js \
-        $SCRIPT_DIR/replicator_db_inc.js \
         $SCRIPT_DIR/couch_test_runner.js \
         $JS_TEST_DIR/couch_http.js \
         $JS_TEST_DIR/test_setup.js \
         $1 \
-        $JS_TEST_DIR/cli_runner.js | process_response
+        $JS_TEST_DIR/cli_runner.js
 
     if [ -z $RESULT ]; then
         RESULT=$?
@@ -100,12 +53,6 @@ run_files() {
     done
 }
 
-# start CouchDB
-if [ -z $COUCHDB_NO_START ]; then
-    $MAKE dev
-    start
-fi
-
 echo "Running javascript tests ..."
 
 if [ "$#" -eq 0 ];
@@ -130,9 +77,4 @@ else
     run $TEST_SRC
 fi
 
-if [ -z $COUCHDB_NO_START ]; then
-    stop
-fi
-
-trap - 0
 exit $RESULT


[42/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/snappy


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/350a7efd
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/350a7efd
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/350a7efd

Branch: refs/heads/1843-feature-bigcouch
Commit: 350a7efda65c8526f72f30406abb4f041dc1b2a3
Parents: 834aeb0
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:42:46 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:42:46 2014 -0600

----------------------------------------------------------------------
 src/snappy/c_src/erl_nif_compat.h               |  129 --
 src/snappy/c_src/google-snappy/AUTHORS          |    1 -
 src/snappy/c_src/google-snappy/COPYING          |   28 -
 src/snappy/c_src/google-snappy/config.h.in      |  125 --
 .../c_src/google-snappy/snappy-internal.h       |  150 ---
 .../c_src/google-snappy/snappy-sinksource.cc    |   71 --
 .../c_src/google-snappy/snappy-sinksource.h     |  137 ---
 .../google-snappy/snappy-stubs-internal.cc      |   42 -
 .../c_src/google-snappy/snappy-stubs-internal.h |  571 ---------
 .../google-snappy/snappy-stubs-public.h.in      |   85 --
 src/snappy/c_src/google-snappy/snappy.cc        | 1111 ------------------
 src/snappy/c_src/google-snappy/snappy.h         |  155 ---
 src/snappy/c_src/snappy_nif.cc                  |  265 -----
 src/snappy/src/snappy.app.src                   |   12 -
 src/snappy/src/snappy.erl                       |   56 -
 15 files changed, 2938 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/erl_nif_compat.h
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/erl_nif_compat.h b/src/snappy/c_src/erl_nif_compat.h
deleted file mode 100644
index b8eb9b0..0000000
--- a/src/snappy/c_src/erl_nif_compat.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2010-2011 Basho Technologies, Inc.
- * With some minor modifications by Filipe David Manana
- * <fd...@apache.org>
- *
- * This file is provided to you under the Apache License,
- * Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License.  You may obtain
- * a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
-*/
-
-#ifndef ERL_NIF_COMPAT_H_
-#define ERL_NIF_COMPAT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "erl_nif.h"
-
-
-#if ERL_NIF_MAJOR_VERSION == 0 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R13B03
-#elif ERL_NIF_MAJOR_VERSION == 1 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R13B04
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R14A
-#define OTP_R14B
-#define OTP_R14B01
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R14B02
-#endif
-
-
-#ifdef OTP_R13B03
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_alloc_compat enif_alloc
-#define enif_release_binary_compat enif_release_binary
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_get_data
-#define enif_make_uint_compat enif_make_ulong
-
-#define enif_make_existing_atom_compat(E, N, R, Enc) \
-    enif_make_existing_atom(E, N, R)
-
-#define enif_make_string_compat(E, B, Enc) \
-    enif_make_string(E, B)
-
-#endif /* R13B03 */
-
-
-#ifdef OTP_R13B04
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_realloc_binary_compat enif_realloc_binary
-#define enif_release_binary_compat enif_release_binary
-#define enif_alloc_compat enif_alloc
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_uint_compat enif_make_uint
-
-#define enif_make_existing_atom_compat(E, N, R, Enc) \
-    enif_make_existing_atom(E, N, R)
-
-
-#endif /* R13B04 */
-
-
-/* OTP R14A and future releases */
-#if !defined(OTP_R13B03) && !defined(OTP_R13B04)
-
-#define enif_open_resource_type_compat(E, N, D, F, T) \
-    enif_open_resource_type(E, NULL, N, D, F, T)
-
-#define enif_alloc_resource_compat(E, T, S) \
-    enif_alloc_resource(T, S)
-
-#define enif_release_resource_compat(E, H) \
-    enif_release_resource(H)
-
-#define enif_alloc_binary_compat(E, S, B) \
-    enif_alloc_binary(S, B)
-
-#define enif_realloc_binary_compat(E, S, B) \
-    enif_realloc_binary(S, B)
-
-#define enif_release_binary_compat(E, B) \
-    enif_release_binary(B)
-
-#define enif_alloc_compat(E, S) \
-    enif_alloc(S)
-
-#define enif_free_compat(E, P) \
-    enif_free(P)
-
-#define enif_get_atom_compat(E, T, B, S) \
-    enif_get_atom(E, T, B, S, ERL_NIF_LATIN1)
-
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_existing_atom_compat enif_make_existing_atom
-#define enif_make_uint_compat enif_make_uint
-
-#endif  /* R14 and future releases */
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* ERL_NIF_COMPAT_H_ */

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/AUTHORS
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/AUTHORS b/src/snappy/c_src/google-snappy/AUTHORS
deleted file mode 100644
index 4858b37..0000000
--- a/src/snappy/c_src/google-snappy/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-opensource@google.com

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/COPYING
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/COPYING b/src/snappy/c_src/google-snappy/COPYING
deleted file mode 100644
index 8d6bd9f..0000000
--- a/src/snappy/c_src/google-snappy/COPYING
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2011, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/config.h.in
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/config.h.in b/src/snappy/c_src/google-snappy/config.h.in
deleted file mode 100644
index 28f57c2..0000000
--- a/src/snappy/c_src/google-snappy/config.h.in
+++ /dev/null
@@ -1,125 +0,0 @@
-/* config.h.in.  Generated from configure.ac by autoheader.  */
-
-/* Define if building universal (internal helper macro) */
-#undef AC_APPLE_UNIVERSAL_BUILD
-
-/* Define to 1 if the compiler supports __builtin_ctz and friends. */
-#undef HAVE_BUILTIN_CTZ
-
-/* Define to 1 if the compiler supports __builtin_expect. */
-#undef HAVE_BUILTIN_EXPECT
-
-/* Define to 1 if you have the <byteswap.h> header file. */
-#undef HAVE_BYTESWAP_H
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#undef HAVE_DLFCN_H
-
-/* Use the gflags package for command-line parsing. */
-#undef HAVE_GFLAGS
-
-/* Defined when Google Test is available. */
-#undef HAVE_GTEST
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#undef HAVE_INTTYPES_H
-
-/* Define to 1 if you have the `fastlz' library (-lfastlz). */
-#undef HAVE_LIBFASTLZ
-
-/* Define to 1 if you have the `lzf' library (-llzf). */
-#undef HAVE_LIBLZF
-
-/* Define to 1 if you have the `lzo2' library (-llzo2). */
-#undef HAVE_LIBLZO2
-
-/* Define to 1 if you have the `quicklz' library (-lquicklz). */
-#undef HAVE_LIBQUICKLZ
-
-/* Define to 1 if you have the `z' library (-lz). */
-#undef HAVE_LIBZ
-
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
-/* Define to 1 if you have the <stddef.h> header file. */
-#undef HAVE_STDDEF_H
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#undef HAVE_STDINT_H
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#undef HAVE_STDLIB_H
-
-/* Define to 1 if you have the <strings.h> header file. */
-#undef HAVE_STRINGS_H
-
-/* Define to 1 if you have the <string.h> header file. */
-#undef HAVE_STRING_H
-
-/* Define to 1 if you have the <sys/byteswap.h> header file. */
-#undef HAVE_SYS_BYTESWAP_H
-
-/* Define to 1 if you have the <sys/endian.h> header file. */
-#undef HAVE_SYS_ENDIAN_H
-
-/* Define to 1 if you have the <sys/mman.h> header file. */
-#undef HAVE_SYS_MMAN_H
-
-/* Define to 1 if you have the <sys/resource.h> header file. */
-#undef HAVE_SYS_RESOURCE_H
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#undef HAVE_SYS_STAT_H
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#undef HAVE_SYS_TYPES_H
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#undef HAVE_UNISTD_H
-
-/* Define to 1 if you have the <windows.h> header file. */
-#undef HAVE_WINDOWS_H
-
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
-   */
-#undef LT_OBJDIR
-
-/* Name of package */
-#undef PACKAGE
-
-/* Define to the address where bug reports for this package should be sent. */
-#undef PACKAGE_BUGREPORT
-
-/* Define to the full name of this package. */
-#undef PACKAGE_NAME
-
-/* Define to the full name and version of this package. */
-#undef PACKAGE_STRING
-
-/* Define to the one symbol short name of this package. */
-#undef PACKAGE_TARNAME
-
-/* Define to the home page for this package. */
-#undef PACKAGE_URL
-
-/* Define to the version of this package. */
-#undef PACKAGE_VERSION
-
-/* Define to 1 if you have the ANSI C header files. */
-#undef STDC_HEADERS
-
-/* Version number of package */
-#undef VERSION
-
-/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
-   significant byte first (like Motorola and SPARC, unlike Intel). */
-#if defined AC_APPLE_UNIVERSAL_BUILD
-# if defined __BIG_ENDIAN__
-#  define WORDS_BIGENDIAN 1
-# endif
-#else
-# ifndef WORDS_BIGENDIAN
-#  undef WORDS_BIGENDIAN
-# endif
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-internal.h
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-internal.h b/src/snappy/c_src/google-snappy/snappy-internal.h
deleted file mode 100644
index a32eda5..0000000
--- a/src/snappy/c_src/google-snappy/snappy-internal.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2008 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Internals shared between the Snappy implementation and its unittest.
-
-#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
-
-#include "snappy-stubs-internal.h"
-
-namespace snappy {
-namespace internal {
-
-class WorkingMemory {
- public:
-  WorkingMemory() : large_table_(NULL) { }
-  ~WorkingMemory() { delete[] large_table_; }
-
-  // Allocates and clears a hash table using memory in "*this",
-  // stores the number of buckets in "*table_size" and returns a pointer to
-  // the base of the hash table.
-  uint16* GetHashTable(size_t input_size, int* table_size);
-
- private:
-  uint16 small_table_[1<<10];    // 2KB
-  uint16* large_table_;          // Allocated only when needed
-
-  DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
-};
-
-// Flat array compression that does not emit the "uncompressed length"
-// prefix. Compresses "input" string to the "*op" buffer.
-//
-// REQUIRES: "input_length <= kBlockSize"
-// REQUIRES: "op" points to an array of memory that is at least
-// "MaxCompressedLength(input_length)" in size.
-// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-// REQUIRES: "table_size" is a power of two
-//
-// Returns an "end" pointer into "op" buffer.
-// "end - op" is the compressed size of "input".
-char* CompressFragment(const char* input,
-                       size_t input_length,
-                       char* op,
-                       uint16* table,
-                       const int table_size);
-
-// Return the largest n such that
-//
-//   s1[0,n-1] == s2[0,n-1]
-//   and n <= (s2_limit - s2).
-//
-// Does not read *s2_limit or beyond.
-// Does not read *(s1 + (s2_limit - s2)) or beyond.
-// Requires that s2_limit >= s2.
-//
-// Separate implementation for x86_64, for speed.  Uses the fact that
-// x86_64 is little endian.
-#if defined(ARCH_K8)
-static inline int FindMatchLength(const char* s1,
-                                  const char* s2,
-                                  const char* s2_limit) {
-  DCHECK_GE(s2_limit, s2);
-  int matched = 0;
-
-  // Find out how long the match is. We loop over the data 64 bits at a
-  // time until we find a 64-bit block that doesn't match; then we find
-  // the first non-matching bit and use that to calculate the total
-  // length of the match.
-  while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
-    if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
-      s2 += 8;
-      matched += 8;
-    } else {
-      // On current (mid-2008) Opteron models there is a 3% more
-      // efficient code sequence to find the first non-matching byte.
-      // However, what follows is ~10% better on Intel Core 2 and newer,
-      // and we expect AMD's bsf instruction to improve.
-      uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
-      int matching_bits = Bits::FindLSBSetNonZero64(x);
-      matched += matching_bits >> 3;
-      return matched;
-    }
-  }
-  while (PREDICT_TRUE(s2 < s2_limit)) {
-    if (PREDICT_TRUE(s1[matched] == *s2)) {
-      ++s2;
-      ++matched;
-    } else {
-      return matched;
-    }
-  }
-  return matched;
-}
-#else
-static inline int FindMatchLength(const char* s1,
-                                  const char* s2,
-                                  const char* s2_limit) {
-  // Implementation based on the x86-64 version, above.
-  DCHECK_GE(s2_limit, s2);
-  int matched = 0;
-
-  while (s2 <= s2_limit - 4 &&
-         UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
-    s2 += 4;
-    matched += 4;
-  }
-  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
-    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
-    int matching_bits = Bits::FindLSBSetNonZero(x);
-    matched += matching_bits >> 3;
-  } else {
-    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
-      ++s2;
-      ++matched;
-    }
-  }
-  return matched;
-}
-#endif
-
-}  // end namespace internal
-}  // end namespace snappy
-
-#endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_

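As a quick cross-check on the contract documented above, here is a minimal
byte-at-a-time sketch of FindMatchLength. It mirrors the portable fallback
loop, not the 64-bit fast path, and the name FindMatchLengthRef plus the
test values are invented for illustration:

#include <cassert>

// Returns the largest n such that s1[0,n-1] == s2[0,n-1] and
// n <= (s2_limit - s2), reading one byte at a time.
static int FindMatchLengthRef(const char* s1, const char* s2,
                              const char* s2_limit) {
  int matched = 0;
  while (s2 < s2_limit && s1[matched] == *s2) {
    ++s2;
    ++matched;
  }
  return matched;
}

int main() {
  const char a[] = "abcdef";
  const char b[] = "abcxyz";
  assert(FindMatchLengthRef(a, b, b + 6) == 3);  // "abc" is the common prefix
  return 0;
}

The optimized x86-64 version above returns the same values; it just compares
eight bytes per iteration and uses a bit scan on the first mismatching word.
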
http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-sinksource.cc
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-sinksource.cc b/src/snappy/c_src/google-snappy/snappy-sinksource.cc
deleted file mode 100644
index 5844552..0000000
--- a/src/snappy/c_src/google-snappy/snappy-sinksource.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <string.h>
-
-#include "snappy-sinksource.h"
-
-namespace snappy {
-
-Source::~Source() { }
-
-Sink::~Sink() { }
-
-char* Sink::GetAppendBuffer(size_t length, char* scratch) {
-  return scratch;
-}
-
-ByteArraySource::~ByteArraySource() { }
-
-size_t ByteArraySource::Available() const { return left_; }
-
-const char* ByteArraySource::Peek(size_t* len) {
-  *len = left_;
-  return ptr_;
-}
-
-void ByteArraySource::Skip(size_t n) {
-  left_ -= n;
-  ptr_ += n;
-}
-
-UncheckedByteArraySink::~UncheckedByteArraySink() { }
-
-void UncheckedByteArraySink::Append(const char* data, size_t n) {
-  // Do no copying if the caller filled in the result of GetAppendBuffer()
-  if (data != dest_) {
-    memcpy(dest_, data, n);
-  }
-  dest_ += n;
-}
-
-char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
-  return dest_;
-}
-
-}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-sinksource.h
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-sinksource.h b/src/snappy/c_src/google-snappy/snappy-sinksource.h
deleted file mode 100644
index faabfa1..0000000
--- a/src/snappy/c_src/google-snappy/snappy-sinksource.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-
-#include <stddef.h>
-
-
-namespace snappy {
-
-// A Sink is an interface that consumes a sequence of bytes.
-class Sink {
- public:
-  Sink() { }
-  virtual ~Sink();
-
-  // Append "bytes[0,n-1]" to this.
-  virtual void Append(const char* bytes, size_t n) = 0;
-
-  // Returns a writable buffer of the specified length for appending.
-  // May return a pointer to the caller-owned scratch buffer which
-  // must have at least the indicated length.  The returned buffer is
-  // only valid until the next operation on this Sink.
-  //
-  // After writing at most "length" bytes, call Append() with the
-  // pointer returned from this function and the number of bytes
-  // written.  Many Append() implementations will avoid copying
-  // bytes if this function returned an internal buffer.
-  //
-  // If a non-scratch buffer is returned, the caller may only pass a
-  // prefix of it to Append().  That is, it is not correct to pass an
-  // interior pointer of the returned array to Append().
-  //
-  // The default implementation always returns the scratch buffer.
-  virtual char* GetAppendBuffer(size_t length, char* scratch);
-
-
- private:
-  // No copying
-  Sink(const Sink&);
-  void operator=(const Sink&);
-};
-
-// A Source is an interface that yields a sequence of bytes.
-class Source {
- public:
-  Source() { }
-  virtual ~Source();
-
-  // Return the number of bytes left to read from the source
-  virtual size_t Available() const = 0;
-
-  // Peek at the next flat region of the source.  Does not reposition
-  // the source.  The returned region is empty iff Available()==0.
-  //
-  // Returns a pointer to the beginning of the region and stores its
-  // length in *len.
-  //
-  // The returned region is valid until the next call to Skip() or
-  // until this object is destroyed, whichever occurs first.
-  //
-  // The returned region may be larger than Available() (for example
-  // if this ByteSource is a view on a substring of a larger source).
-  // The caller is responsible for ensuring that it only reads the
-  // Available() bytes.
-  virtual const char* Peek(size_t* len) = 0;
-
-  // Skip the next n bytes.  Invalidates any buffer returned by
-  // a previous call to Peek().
-  // REQUIRES: Available() >= n
-  virtual void Skip(size_t n) = 0;
-
- private:
-  // No copying
-  Source(const Source&);
-  void operator=(const Source&);
-};
-
-// A Source implementation that yields the contents of a flat array
-class ByteArraySource : public Source {
- public:
-  ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
-  virtual ~ByteArraySource();
-  virtual size_t Available() const;
-  virtual const char* Peek(size_t* len);
-  virtual void Skip(size_t n);
- private:
-  const char* ptr_;
-  size_t left_;
-};
-
-// A Sink implementation that writes to a flat array without any bound checks.
-class UncheckedByteArraySink : public Sink {
- public:
-  explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
-  virtual ~UncheckedByteArraySink();
-  virtual void Append(const char* data, size_t n);
-  virtual char* GetAppendBuffer(size_t len, char* scratch);
-
-  // Return the current output pointer so that a caller can see how
-  // many bytes were produced.
-  // Note: this is not a Sink method.
-  char* CurrentDestination() const { return dest_; }
- private:
-  char* dest_;
-};
-
-
-}
-
-#endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_

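The comments above specify a zero-copy handshake between GetAppendBuffer()
and Append(): if the sink returns its own buffer and the caller writes into
it, Append() only has to advance the destination pointer. A small caller,
sketched under the assumption that the snappy-sinksource.h shown above is on
the include path:

#include <cassert>
#include <cstring>

#include "snappy-sinksource.h"

int main() {
  char out[16];
  char scratch[8];
  snappy::UncheckedByteArraySink sink(out);

  // UncheckedByteArraySink hands back its own destination buffer, so the
  // Append() below sees data == dest_ and skips the memcpy().
  char* buf = sink.GetAppendBuffer(5, scratch);
  std::memcpy(buf, "hello", 5);
  sink.Append(buf, 5);

  assert(sink.CurrentDestination() - out == 5);
  return 0;
}
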
http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-stubs-internal.cc
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-stubs-internal.cc b/src/snappy/c_src/google-snappy/snappy-stubs-internal.cc
deleted file mode 100644
index 6ed3343..0000000
--- a/src/snappy/c_src/google-snappy/snappy-stubs-internal.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <algorithm>
-#include <string>
-
-#include "snappy-stubs-internal.h"
-
-namespace snappy {
-
-void Varint::Append32(string* s, uint32 value) {
-  char buf[Varint::kMax32];
-  const char* p = Varint::Encode32(buf, value);
-  s->append(buf, p - buf);
-}
-
-}  // namespace snappy

http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-stubs-internal.h
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-stubs-internal.h b/src/snappy/c_src/google-snappy/snappy-stubs-internal.h
deleted file mode 100644
index 6033cdf..0000000
--- a/src/snappy/c_src/google-snappy/snappy-stubs-internal.h
+++ /dev/null
@@ -1,571 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various stubs for the open-source version of Snappy.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <iostream>
-#include <string>
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#include "snappy-stubs-public.h"
-
-#if defined(__x86_64__)
-
-// Enable 64-bit optimized versions of some routines.
-#define ARCH_K8 1
-
-#endif
-
-// Needed by OS X, among others.
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// Pull in std::min, std::ostream, and the like. This is safe because this
-// header file is never used from any public header files.
-using namespace std;
-
-// The size of an array, if known at compile-time.
-// Will give unexpected results if used on a pointer.
-// We undefine it first, since some compilers already have a definition.
-#ifdef ARRAYSIZE
-#undef ARRAYSIZE
-#endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
-
-// Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
-#else
-#define PREDICT_FALSE(x) x
-#define PREDICT_TRUE(x) x
-#endif
-
-// This is only used for recomputing the tag byte table used during
-// decompression; for simplicity we just remove it from the open-source
-// version (anyone who wants to regenerate it can just do the call
-// themselves within main()).
-#define DEFINE_bool(flag_name, default_value, description) \
-  bool FLAGS_ ## flag_name = default_value
-#define DECLARE_bool(flag_name) \
-  extern bool FLAGS_ ## flag_name
-
-namespace snappy {
-
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
-
-// Logging.
-
-#define LOG(level) LogMessage()
-#define VLOG(level) true ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessage()
-
-class LogMessage {
- public:
-  LogMessage() { }
-  ~LogMessage() {
-    cerr << endl;
-  }
-
-  LogMessage& operator<<(const std::string& msg) {
-    cerr << msg;
-    return *this;
-  }
-  LogMessage& operator<<(int x) {
-    cerr << x;
-    return *this;
-  }
-};
-
-// Assert macros: some versions are active in debug mode only,
-// and others are always active.
-
-#define CRASH_UNLESS(condition) \
-    PREDICT_TRUE(condition) ? (void)0 : \
-    snappy::LogMessageVoidify() & snappy::LogMessageCrash()
-
-class LogMessageCrash : public LogMessage {
- public:
-  LogMessageCrash() { }
-  ~LogMessageCrash() {
-    cerr << endl;
-    abort();
-  }
-};
-
-// This class is used to explicitly ignore values in the conditional
-// logging macros.  This avoids compiler warnings like "value computed
-// is not used" and "statement has no effect".
-
-class LogMessageVoidify {
- public:
-  LogMessageVoidify() { }
-  // This has to be an operator with a precedence lower than << but
-  // higher than ?:
-  void operator&(const LogMessage&) { }
-};
-
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) CRASH_UNLESS(true)
-#define DCHECK_LE(a, b) CRASH_UNLESS(true)
-#define DCHECK_GE(a, b) CRASH_UNLESS(true)
-#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
-#define DCHECK_NE(a, b) CRASH_UNLESS(true)
-#define DCHECK_LT(a, b) CRASH_UNLESS(true)
-#define DCHECK_GT(a, b) CRASH_UNLESS(true)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
-// Potentially unaligned loads and stores.
-
-// x86 and PowerPC can simply do these loads and stores native.
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
-// sub-architectures.
-//
-// This is a mess, but there's not much we can do about it.
-
-#elif defined(__arm__) && \
-      !defined(__ARM_ARCH_5__) && \
-      !defined(__ARM_ARCH_5T__) && \
-      !defined(__ARM_ARCH_5TE__) && \
-      !defined(__ARM_ARCH_5TEJ__) && \
-      !defined(__ARM_ARCH_6__) && \
-      !defined(__ARM_ARCH_6J__) && \
-      !defined(__ARM_ARCH_6K__) && \
-      !defined(__ARM_ARCH_6Z__) && \
-      !defined(__ARM_ARCH_6ZK__) && \
-      !defined(__ARM_ARCH_6T2__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-
-// TODO(user): NEON supports unaligned 64-bit loads and stores.
-// See if that would be more efficient on platforms supporting it,
-// at least for copies.
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-#else
-
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
-
-inline uint16 UNALIGNED_LOAD16(const void *p) {
-  uint16 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-#endif
-
-// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
-// on some platforms, in particular ARM.
-inline void UnalignedCopy64(const void *src, void *dst) {
-  if (sizeof(void *) == 8) {
-    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
-  } else {
-    const char *src_char = reinterpret_cast<const char *>(src);
-    char *dst_char = reinterpret_cast<char *>(dst);
-
-    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
-    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
-  }
-}
-
-// The following guarantees declaration of the byte swap functions.
-#ifdef WORDS_BIGENDIAN
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
-  return (x << 8) | (x >> 8);
-}
-
-inline uint32 bswap_32(uint32 x) {
-  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
-  return (x >> 16) | (x << 16);
-}
-
-inline uint64 bswap_64(uint64 x) {
-  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
-  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
-  return (x >> 32) | (x << 32);
-}
-
-#endif
-
-#endif  // WORDS_BIGENDIAN
-
-// Convert to little-endian storage, opposite of network format.
-// Convert x from host to little endian: x = LittleEndian.FromHost(x);
-// convert x from little endian to host: x = LittleEndian.ToHost(x);
-//
-//  Store values into unaligned memory converting to little endian order:
-//    LittleEndian.Store16(p, x);
-//
-//  Load unaligned values stored in little endian converting to host order:
-//    x = LittleEndian.Load16(p);
-class LittleEndian {
- public:
-  // Conversion functions.
-#ifdef WORDS_BIGENDIAN
-
-  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
-  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
-  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
-  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
-  static bool IsLittleEndian() { return false; }
-
-#else  // !defined(WORDS_BIGENDIAN)
-
-  static uint16 FromHost16(uint16 x) { return x; }
-  static uint16 ToHost16(uint16 x) { return x; }
-
-  static uint32 FromHost32(uint32 x) { return x; }
-  static uint32 ToHost32(uint32 x) { return x; }
-
-  static bool IsLittleEndian() { return true; }
-
-#endif  // !defined(WORDS_BIGENDIAN)
-
-  // Functions to do unaligned loads and stores in little-endian order.
-  static uint16 Load16(const void *p) {
-    return ToHost16(UNALIGNED_LOAD16(p));
-  }
-
-  static void Store16(void *p, uint16 v) {
-    UNALIGNED_STORE16(p, FromHost16(v));
-  }
-
-  static uint32 Load32(const void *p) {
-    return ToHost32(UNALIGNED_LOAD32(p));
-  }
-
-  static void Store32(void *p, uint32 v) {
-    UNALIGNED_STORE32(p, FromHost32(v));
-  }
-};
-
-// Some bit-manipulation functions.
-class Bits {
- public:
-  // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
-  static int Log2Floor(uint32 n);
-
-  // Return the first set least / most significant bit, 0-indexed.  Returns an
-  // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
-  // that it's 0-indexed.
-  static int FindLSBSetNonZero(uint32 n);
-  static int FindLSBSetNonZero64(uint64 n);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Bits);
-};
-
-#ifdef HAVE_BUILTIN_CTZ
-
-inline int Bits::Log2Floor(uint32 n) {
-  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
-}
-
-inline int Bits::FindLSBSetNonZero(uint32 n) {
-  return __builtin_ctz(n);
-}
-
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  return __builtin_ctzll(n);
-}
-
-#else  // Portable versions.
-
-inline int Bits::Log2Floor(uint32 n) {
-  if (n == 0)
-    return -1;
-  int log = 0;
-  uint32 value = n;
-  for (int i = 4; i >= 0; --i) {
-    int shift = (1 << i);
-    uint32 x = value >> shift;
-    if (x != 0) {
-      value = x;
-      log += shift;
-    }
-  }
-  assert(value == 1);
-  return log;
-}
-
-inline int Bits::FindLSBSetNonZero(uint32 n) {
-  int rc = 31;
-  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
-    const uint32 x = n << shift;
-    if (x != 0) {
-      n = x;
-      rc -= shift;
-    }
-    shift >>= 1;
-  }
-  return rc;
-}
-
-// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  const uint32 bottombits = static_cast<uint32>(n);
-  if (bottombits == 0) {
-    // Bottom bits are zero, so scan in top bits
-    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
-  } else {
-    return FindLSBSetNonZero(bottombits);
-  }
-}
-
-#endif  // End portable versions.
-
-// Variable-length integer encoding.
-class Varint {
- public:
-  // Maximum length of varint encoding of uint32.
-  static const int kMax32 = 5;
-
-  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
-  // Never reads a character at or beyond limit.  If a valid/terminated varint32
-  // was found in the range, stores it in *OUTPUT and returns a pointer just
-  // past the last byte of the varint32. Else returns NULL.  On success,
-  // "result <= limit".
-  static const char* Parse32WithLimit(const char* ptr, const char* limit,
-                                      uint32* OUTPUT);
-
-  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
-  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
-  //            byte just past the last encoded byte.
-  static char* Encode32(char* ptr, uint32 v);
-
-  // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(string* s, uint32 value);
-};
-
-inline const char* Varint::Parse32WithLimit(const char* p,
-                                            const char* l,
-                                            uint32* OUTPUT) {
-  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
-  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
-  uint32 b, result;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result = b & 127;          if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
-  if (ptr >= limit) return NULL;
-  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
-  return NULL;       // Value is too long to be a varint32
- done:
-  *OUTPUT = result;
-  return reinterpret_cast<const char*>(ptr);
-}
-
-inline char* Varint::Encode32(char* sptr, uint32 v) {
-  // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
-  static const int B = 128;
-  if (v < (1<<7)) {
-    *(ptr++) = v;
-  } else if (v < (1<<14)) {
-    *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
-  } else {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
-  }
-  return reinterpret_cast<char*>(ptr);
-}
-
-// If you know the internal layout of the std::string in use, you can
-// replace this function with one that resizes the string without
-// filling the new space with zeros (if applicable) --
-// it will be non-portable but faster.
-inline void STLStringResizeUninitialized(string* s, size_t new_size) {
-  s->resize(new_size);
-}
-
-// Return a mutable char* pointing to a string's internal buffer,
-// which may not be null-terminated. Writing through this pointer will
-// modify the string.
-//
-// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
-// next call to a string method that invalidates iterators.
-//
-// As of 2006-04, there is no standard-blessed way of getting a
-// mutable reference to a string's internal buffer. However, issue 530
-// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
-// proposes this as the method. It will officially be part of the standard
-// for C++0x. This should already work on all current implementations.
-inline char* string_as_array(string* str) {
-  return str->empty() ? NULL : &*str->begin();
-}
-
-}  // namespace snappy
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

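For reference, the varint32 format implemented by Encode32() and
Parse32WithLimit() above stores seven payload bits per byte, least
significant group first, with the high bit of each byte acting as a
continuation flag, so a uint32 takes at most five bytes. A standalone
restatement of the round trip (EncodeVarint32 and DecodeVarint32 are
illustrative names, not part of the deleted header):

#include <cassert>
#include <cstdint>

static char* EncodeVarint32(char* p, uint32_t v) {
  unsigned char* ptr = reinterpret_cast<unsigned char*>(p);
  while (v >= 128) {
    *ptr++ = static_cast<unsigned char>(v) | 128;  // set continuation bit
    v >>= 7;
  }
  *ptr++ = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(ptr);
}

static const char* DecodeVarint32(const char* p, const char* limit,
                                  uint32_t* out) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28; shift += 7) {
    if (p >= limit) return nullptr;              // ran out of input
    uint32_t b = static_cast<unsigned char>(*p++);
    if (shift == 28 && b >= 16) return nullptr;  // would overflow 32 bits
    result |= (b & 127) << shift;
    if (b < 128) {                               // continuation bit clear: done
      *out = result;
      return p;
    }
  }
  return nullptr;
}

int main() {
  char buf[5];
  char* end = EncodeVarint32(buf, 300);          // encodes as 0xAC 0x02
  uint32_t v = 0;
  assert(DecodeVarint32(buf, end, &v) == end && v == 300);
  return 0;
}
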
http://git-wip-us.apache.org/repos/asf/couchdb/blob/350a7efd/src/snappy/c_src/google-snappy/snappy-stubs-public.h.in
----------------------------------------------------------------------
diff --git a/src/snappy/c_src/google-snappy/snappy-stubs-public.h.in b/src/snappy/c_src/google-snappy/snappy-stubs-public.h.in
deleted file mode 100644
index f0babcb..0000000
--- a/src/snappy/c_src/google-snappy/snappy-stubs-public.h.in
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-// Author: sesse@google.com (Steinar H. Gunderson)
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Various type stubs for the open-source version of Snappy.
-//
-// This file cannot include config.h, as it is included from snappy.h,
-// which is a public header. Instead, snappy-stubs-public.h is generated
-// from snappy-stubs-public.h.in at configure time.
-
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
-
-#if @ac_cv_have_stdint_h@
-#include <stdint.h>
-#endif
-
-#if @ac_cv_have_stddef_h@
-#include <stddef.h>
-#endif
-
-#define SNAPPY_MAJOR @SNAPPY_MAJOR@
-#define SNAPPY_MINOR @SNAPPY_MINOR@
-#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
-#define SNAPPY_VERSION \
-    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
-
-#include <string>
-
-namespace snappy {
-
-#if @ac_cv_have_stdint_h@
-typedef int8_t int8;
-typedef uint8_t uint8;
-typedef int16_t int16;
-typedef uint16_t uint16;
-typedef int32_t int32;
-typedef uint32_t uint32;
-typedef int64_t int64;
-typedef uint64_t uint64;
-#else
-typedef signed char int8;
-typedef unsigned char uint8;
-typedef short int16;
-typedef unsigned short uint16;
-typedef int int32;
-typedef unsigned int uint32;
-typedef long long int64;
-typedef unsigned long long uint64;
-#endif
-
-typedef std::string string;
-
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);               \
-  void operator=(const TypeName&)
-
-}  // namespace snappy
-
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

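The SNAPPY_MAJOR/MINOR/PATCHLEVEL substitutions above are packed into a
single integer by SNAPPY_VERSION: major in bits 16-23, minor in bits 8-15,
patch in bits 0-7. For a hypothetical 1.0.5 release (the numbers here are
made up for illustration):

#include <cassert>

int main() {
  const int major = 1, minor = 0, patch = 5;
  const int version = (major << 16) | (minor << 8) | patch;
  assert(version == 0x010005);  // same packing as SNAPPY_VERSION
  return 0;
}
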

[14/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/couch


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/ed98610c
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/ed98610c
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/ed98610c

Branch: refs/heads/1843-feature-bigcouch
Commit: ed98610c5f27e5ea7e7528c081e1e7b54330e221
Parents: 4964214
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:39:22 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:39:22 2014 -0600

----------------------------------------------------------------------
 src/couch/include/couch_db.hrl                  |  256 ----
 .../couch_ejson_compare/couch_ejson_compare.c   |  457 ------
 .../priv/couch_ejson_compare/erl_nif_compat.h   |  128 --
 src/couch/priv/couch_js/help.h                  |   82 -
 src/couch/priv/couch_js/http.c                  |  698 ---------
 src/couch/priv/couch_js/http.h                  |   27 -
 src/couch/priv/couch_js/main.c                  |   21 -
 src/couch/priv/couch_js/sm185.c                 |  431 ------
 src/couch/priv/couch_js/utf8.c                  |  291 ----
 src/couch/priv/couch_js/utf8.h                  |   19 -
 src/couch/priv/couch_js/util.c                  |  294 ----
 src/couch/priv/couch_js/util.h                  |   35 -
 src/couch/priv/icu_driver/couch_icu_driver.c    |  189 ---
 .../priv/spawnkillable/couchspawnkillable.sh    |   20 -
 .../priv/spawnkillable/couchspawnkillable_win.c |  145 --
 src/couch/priv/stat_descriptions.cfg            |   50 -
 src/couch/rebar.config.script                   |   70 -
 src/couch/src/couch.app.src                     |   22 -
 src/couch/src/couch.erl                         |   65 -
 src/couch/src/couch_app.erl                     |   31 -
 src/couch/src/couch_auth_cache.erl              |  437 ------
 src/couch/src/couch_btree.erl                   |  731 ---------
 src/couch/src/couch_changes.erl                 |  583 --------
 src/couch/src/couch_compaction_daemon.erl       |  514 -------
 src/couch/src/couch_compress.erl                |   84 --
 src/couch/src/couch_config.erl                  |  251 ----
 src/couch/src/couch_config_writer.erl           |   88 --
 src/couch/src/couch_db.erl                      | 1412 ------------------
 src/couch/src/couch_db_update_notifier.erl      |   82 -
 src/couch/src/couch_db_update_notifier_sup.erl  |   68 -
 src/couch/src/couch_db_updater.erl              | 1264 ----------------
 src/couch/src/couch_doc.erl                     |  784 ----------
 src/couch/src/couch_drv.erl                     |   62 -
 src/couch/src/couch_ejson_compare.erl           |  113 --
 src/couch/src/couch_emsort.erl                  |  318 ----
 src/couch/src/couch_event_sup.erl               |   73 -
 src/couch/src/couch_external_manager.erl        |  115 --
 src/couch/src/couch_external_server.erl         |   84 --
 src/couch/src/couch_file.erl                    |  587 --------
 src/couch/src/couch_httpd.erl                   | 1082 --------------
 src/couch/src/couch_httpd_auth.erl              |  376 -----
 src/couch/src/couch_httpd_cors.erl              |  343 -----
 src/couch/src/couch_httpd_db.erl                | 1210 ---------------
 src/couch/src/couch_httpd_external.erl          |  173 ---
 src/couch/src/couch_httpd_misc_handlers.erl     |  306 ----
 src/couch/src/couch_httpd_oauth.erl             |  387 -----
 src/couch/src/couch_httpd_proxy.erl             |  426 ------
 src/couch/src/couch_httpd_rewrite.erl           |  483 ------
 src/couch/src/couch_httpd_stats_handlers.erl    |   56 -
 src/couch/src/couch_httpd_vhost.erl             |  397 -----
 src/couch/src/couch_js_functions.hrl            |  155 --
 src/couch/src/couch_key_tree.erl                |  422 ------
 src/couch/src/couch_log.erl                     |  263 ----
 src/couch/src/couch_lru.erl                     |   48 -
 src/couch/src/couch_native_process.erl          |  422 ------
 src/couch/src/couch_os_daemons.erl              |  377 -----
 src/couch/src/couch_os_process.erl              |  285 ----
 src/couch/src/couch_passwords.erl               |  119 --
 src/couch/src/couch_primary_sup.erl             |   60 -
 src/couch/src/couch_proc_manager.erl            |  307 ----
 src/couch/src/couch_query_servers.erl           |  479 ------
 src/couch/src/couch_secondary_sup.erl           |   42 -
 src/couch/src/couch_server.erl                  |  510 -------
 src/couch/src/couch_stats_aggregator.erl        |  312 ----
 src/couch/src/couch_stats_collector.erl         |  134 --
 src/couch/src/couch_stream.erl                  |  299 ----
 src/couch/src/couch_sup.erl                     |  159 --
 src/couch/src/couch_task_status.erl             |  151 --
 src/couch/src/couch_users_db.erl                |  110 --
 src/couch/src/couch_util.erl                    |  500 -------
 src/couch/src/couch_uuids.erl                   |  116 --
 src/couch/src/couch_work_queue.erl              |  187 ---
 72 files changed, 21677 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/include/couch_db.hrl
----------------------------------------------------------------------
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
deleted file mode 100644
index ffecae0..0000000
--- a/src/couch/include/couch_db.hrl
+++ /dev/null
@@ -1,256 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(LOCAL_DOC_PREFIX, "_local/").
--define(DESIGN_DOC_PREFIX0, "_design").
--define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
-
--define(MIN_STR, <<"">>).
--define(MAX_STR, <<255>>). % illegal utf string
-
--define(JSON_ENCODE(V), ejson:encode(V)).
--define(JSON_DECODE(V), ejson:decode(V)).
-
--define(b2l(V), binary_to_list(V)).
--define(l2b(V), list_to_binary(V)).
--define(i2b(V), couch_util:integer_to_boolean(V)).
--define(b2i(V), couch_util:boolean_to_integer(V)).
--define(term_to_bin(T), term_to_binary(T, [{minor_version, 1}])).
--define(term_size(T),
-    try
-        erlang:external_size(T)
-    catch _:_ ->
-        byte_size(?term_to_bin(T))
-    end).
-
--define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-
--define(LOG_DEBUG(Format, Args), twig:log(debug, Format, Args)).
--define(LOG_INFO(Format, Args), twig:log(info, Format, Args)).
--define(LOG_WARN(Format, Args), twig:log(warn, Format, Args)).
--define(LOG_ERROR(Format, Args), twig:log(error, Format, Args)).
-
-% Tree::term() is really a tree(), but we don't want to require R13B04 yet
--type branch() :: {Key::term(), Value::term(), Tree::term()}.
--type path() :: {Start::pos_integer(), branch()}.
--type tree() :: [branch()]. % sorted by key
-
--record(rev_info,
-    {
-    rev,
-    seq = 0,
-    deleted = false,
-    body_sp = nil % stream pointer
-    }).
-
--record(doc_info,
-    {
-    id = <<"">>,
-    high_seq = 0,
-    revs = [] % rev_info
-    }).
-
--record(full_doc_info,
-    {id = <<"">>,
-    update_seq = 0,
-    deleted = false,
-    rev_tree = [],
-    leafs_size = 0
-    }).
-
--record(httpd,
-    {mochi_req,
-    peer,
-    method,
-    requested_path_parts,
-    path_parts,
-    db_url_handlers,
-    user_ctx,
-    req_body = undefined,
-    design_url_handlers,
-    auth,
-    default_fun,
-    url_handlers
-    }).
-
-
--record(doc,
-    {
-    id = <<"">>,
-    revs = {0, []},
-
-    % the json body object.
-    body = {[]},
-
-    atts = [], % attachments
-
-    deleted = false,
-
-    % key/value tuple of meta information, provided when using special options:
-    % couch_db:open_doc(Db, Id, Options).
-    meta = []
-    }).
-
-
--record(att,
-    {
-    name,
-    type,
-    att_len,
-    disk_len, % length of the attachment in its identity form
-              % (that is, without a content encoding applied to it)
-              % differs from att_len when encoding /= identity
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity % currently supported values are:
-                      %     identity, gzip
-                      % additional values to support in the future:
-                      %     deflate, compress
-    }).
-
-
--record(user_ctx,
-    {
-    name=null,
-    roles=[],
-    handler
-    }).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% if the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header,
-    {disk_version = ?LATEST_DISK_VERSION,
-     update_seq = 0,
-     unused = 0,
-     id_tree_state = nil,
-     seq_tree_state = nil,
-     local_tree_state = nil,
-     purge_seq = 0,
-     purged_docs = nil,
-     security_ptr = nil,
-     revs_limit = 1000
-    }).
-
--record(db,
-    {main_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microsecs since Jan 1 1970 as a binary string
-    fd,
-    fd_monitor,
-    header = #db_header{},
-    committed_update_seq,
-    id_tree,
-    seq_tree,
-    local_tree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = undefined,
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    options = [],
-    compression,
-    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
-    after_doc_read = nil     % nil | fun(Doc, Db) -> NewDoc
-    }).
-
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
--record(extern_resp_args, {
-    code = 200,
-    stop = false,
-    data = <<>>,
-    ctype = "application/json",
-    headers = [],
-    json = nil
-}).
-
--record(index_header,
-    {seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-    }).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--record(changes_args, {
-    feed = "normal",
-    dir = fwd,
-    since = 0,
-    limit = 1000000000000000,
-    style = main_only,
-    heartbeat,
-    timeout,
-    filter = "",
-    filter_fun,
-    filter_args = [],
-    include_docs = false,
-    conflicts = false,
-    db_open_options = []
-}).
-
--record(btree, {
-    fd,
-    root,
-    extract_kv,
-    assemble_kv,
-    less,
-    reduce = nil,
-    compression = ?DEFAULT_COMPRESSION
-}).
-
--record(proc, {
-    pid,
-    lang,
-    client = nil,
-    ddoc_keys = [],
-    prompt_fun,
-    prompt_many_fun,
-    set_timeout_fun,
-    stop_fun
-}).
-
--record(leaf,  {
-    deleted,
-    ptr,
-    seq,
-    size = nil
-}).
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
deleted file mode 100644
index df68c2a..0000000
--- a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
+++ /dev/null
@@ -1,457 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-#include <stdio.h>
-#include <assert.h>
-#include "erl_nif_compat.h"
-#include "unicode/ucol.h"
-#include "unicode/ucasemap.h"
-
-#define MAX_DEPTH 10
-
-#if (ERL_NIF_MAJOR_VERSION > 2) || \
-    (ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION >= 3)
-/* OTP R15B or higher */
-#define term_is_number(env, t) enif_is_number(env, t)
-#else
-#define term_is_number(env, t)  \
-    (!enif_is_binary(env, t) && \
-     !enif_is_list(env, t) &&   \
-     !enif_is_tuple(env, t))
-#endif
-
-static ERL_NIF_TERM ATOM_TRUE;
-static ERL_NIF_TERM ATOM_FALSE;
-static ERL_NIF_TERM ATOM_NULL;
-
-typedef struct {
-    ErlNifEnv* env;
-    int error;
-    UCollator* coll;
-} ctx_t;
-
-static UCollator** collators = NULL;
-static int collStackTop = 0;
-static int numCollators = 0;
-static ErlNifMutex* collMutex = NULL;
-
-static ERL_NIF_TERM less_json_nif(ErlNifEnv*, int, const ERL_NIF_TERM []);
-static int on_load(ErlNifEnv*, void**, ERL_NIF_TERM);
-static void on_unload(ErlNifEnv*, void*);
-static __inline int less_json(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline int atom_sort_order(ErlNifEnv*, ERL_NIF_TERM);
-static __inline int compare_strings(ctx_t*, ErlNifBinary, ErlNifBinary);
-static __inline int compare_lists(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline int compare_props(int, ctx_t*, ERL_NIF_TERM, ERL_NIF_TERM);
-static __inline void reserve_coll(ctx_t*);
-static __inline void release_coll(ctx_t*);
-
-
-void
-reserve_coll(ctx_t *ctx)
-{
-    if (ctx->coll == NULL) {
-        enif_mutex_lock(collMutex);
-        assert(collStackTop < numCollators);
-        ctx->coll = collators[collStackTop];
-        collStackTop += 1;
-        enif_mutex_unlock(collMutex);
-    }
-}
-
-
-void
-release_coll(ctx_t *ctx)
-{
-    if (ctx->coll != NULL) {
-        enif_mutex_lock(collMutex);
-        collStackTop -= 1;
-        assert(collStackTop >= 0);
-        enif_mutex_unlock(collMutex);
-    }
-}
-
-
-
-ERL_NIF_TERM
-less_json_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ctx_t ctx;
-    int result;
-
-    ctx.env = env;
-    ctx.error = 0;
-    ctx.coll = NULL;
-
-    result = less_json(1, &ctx, argv[0], argv[1]);
-    release_coll(&ctx);
-
-    /*
-     * There are 2 possible failure reasons:
-     *
-     * 1) We got an invalid EJSON operand;
-     * 2) The EJSON structures are too deep - to avoid allocating too
-     *    many C stack frames (because less_json is a recursive function),
-     *    and running out of memory, we throw a badarg exception to Erlang
-     * and do the comparison in Erlang land. In practice, view keys are
-     *    EJSON structures with very little nesting.
-     */
-    return ctx.error ? enif_make_badarg(env) : enif_make_int(env, result);
-}
-
-
-int
-less_json(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
-    int aIsAtom, bIsAtom;
-    int aIsBin, bIsBin;
-    int aIsNumber, bIsNumber;
-    int aIsList, bIsList;
-    int aArity, bArity;
-    const ERL_NIF_TERM *aProps, *bProps;
-
-    /*
-     * Avoid too much recursion. Normally there are no more than a few levels
-     * of recursion, as in practice view keys do not go beyond 1 to 3 levels
-     * of nesting. In case of too much recursion, signal it to the Erlang land
-     * via an exception and do the EJSON comparison in Erlang land.
-     */
-    if (depth > MAX_DEPTH) {
-        ctx->error = 1;
-        return 0;
-    }
-
-    aIsAtom = enif_is_atom(ctx->env, a);
-    bIsAtom = enif_is_atom(ctx->env, b);
-
-    if (aIsAtom) {
-        if (bIsAtom) {
-            int aSortOrd, bSortOrd;
-
-            if ((aSortOrd = atom_sort_order(ctx->env, a)) == -1) {
-                ctx->error = 1;
-                return 0;
-            }
-
-            if ((bSortOrd = atom_sort_order(ctx->env, b)) == -1) {
-                ctx->error = 1;
-                return 0;
-            }
-
-            return aSortOrd - bSortOrd;
-        }
-
-        return -1;
-    }
-
-    if (bIsAtom) {
-        return 1;
-    }
-
-    aIsNumber = term_is_number(ctx->env, a);
-    bIsNumber = term_is_number(ctx->env, b);
-
-    if (aIsNumber) {
-        if (bIsNumber) {
-            return enif_compare_compat(ctx->env, a, b);
-        }
-
-        return -1;
-    }
-
-    if (bIsNumber) {
-        return 1;
-    }
-
-    aIsBin = enif_is_binary(ctx->env, a);
-    bIsBin = enif_is_binary(ctx->env, b);
-
-    if (aIsBin) {
-        if (bIsBin) {
-            ErlNifBinary binA, binB;
-
-            enif_inspect_binary(ctx->env, a, &binA);
-            enif_inspect_binary(ctx->env, b, &binB);
-
-            return compare_strings(ctx, binA, binB);
-        }
-
-        return -1;
-    }
-
-    if (bIsBin) {
-        return 1;
-    }
-
-    aIsList = enif_is_list(ctx->env, a);
-    bIsList = enif_is_list(ctx->env, b);
-
-    if (aIsList) {
-        if (bIsList) {
-            return compare_lists(depth, ctx, a, b);
-        }
-
-        return -1;
-    }
-
-    if (bIsList) {
-        return 1;
-    }
-
-    if (!enif_get_tuple(ctx->env, a, &aArity, &aProps)) {
-        ctx->error = 1;
-        return 0;
-    }
-    if ((aArity != 1) || !enif_is_list(ctx->env, aProps[0])) {
-        ctx->error = 1;
-        return 0;
-    }
-
-    if (!enif_get_tuple(ctx->env, b, &bArity, &bProps)) {
-        ctx->error = 1;
-        return 0;
-    }
-    if ((bArity != 1) || !enif_is_list(ctx->env, bProps[0])) {
-        ctx->error = 1;
-        return 0;
-    }
-
-    return compare_props(depth, ctx, aProps[0], bProps[0]);
-}
-
-
-int
-atom_sort_order(ErlNifEnv* env, ERL_NIF_TERM a)
-{
-    if (enif_compare_compat(env, a, ATOM_NULL) == 0) {
-        return 1;
-    } else if (enif_compare_compat(env, a, ATOM_FALSE) == 0) {
-        return 2;
-    } else if (enif_compare_compat(env, a, ATOM_TRUE) == 0) {
-        return 3;
-    }
-
-    return -1;
-}
-
-
-int
-compare_lists(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
-    ERL_NIF_TERM headA, tailA;
-    ERL_NIF_TERM headB, tailB;
-    int aIsEmpty, bIsEmpty;
-    int result;
-
-    while (1) {
-        aIsEmpty = !enif_get_list_cell(ctx->env, a, &headA, &tailA);
-        bIsEmpty = !enif_get_list_cell(ctx->env, b, &headB, &tailB);
-
-        if (aIsEmpty) {
-            if (bIsEmpty) {
-                return 0;
-            }
-            return -1;
-        }
-
-        if (bIsEmpty) {
-            return 1;
-        }
-
-        result = less_json(depth + 1, ctx, headA, headB);
-
-        if (ctx->error || result != 0) {
-            return result;
-        }
-
-        a = tailA;
-        b = tailB;
-    }
-
-    return result;
-}
-
-
-int
-compare_props(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
-{
-    ERL_NIF_TERM headA, tailA;
-    ERL_NIF_TERM headB, tailB;
-    int aArity, bArity;
-    const ERL_NIF_TERM *aKV, *bKV;
-    ErlNifBinary keyA, keyB;
-    int aIsEmpty, bIsEmpty;
-    int keyCompResult, valueCompResult;
-
-    while (1) {
-        aIsEmpty = !enif_get_list_cell(ctx->env, a, &headA, &tailA);
-        bIsEmpty = !enif_get_list_cell(ctx->env, b, &headB, &tailB);
-
-        if (aIsEmpty) {
-            if (bIsEmpty) {
-                return 0;
-            }
-            return -1;
-        }
-
-        if (bIsEmpty) {
-            return 1;
-        }
-
-        if (!enif_get_tuple(ctx->env, headA, &aArity, &aKV)) {
-            ctx->error = 1;
-            return 0;
-        }
-        if ((aArity != 2) || !enif_inspect_binary(ctx->env, aKV[0], &keyA)) {
-            ctx->error = 1;
-            return 0;
-        }
-
-        if (!enif_get_tuple(ctx->env, headB, &bArity, &bKV)) {
-            ctx->error = 1;
-            return 0;
-        }
-        if ((bArity != 2) || !enif_inspect_binary(ctx->env, bKV[0], &keyB)) {
-            ctx->error = 1;
-            return 0;
-        }
-
-        keyCompResult = compare_strings(ctx, keyA, keyB);
-
-        if (ctx->error || keyCompResult != 0) {
-            return keyCompResult;
-        }
-
-        valueCompResult = less_json(depth + 1, ctx, aKV[1], bKV[1]);
-
-        if (ctx->error || valueCompResult != 0) {
-            return valueCompResult;
-        }
-
-        a = tailA;
-        b = tailB;
-    }
-
-    return 0;
-}
-
-
-int
-compare_strings(ctx_t* ctx, ErlNifBinary a, ErlNifBinary b)
-{
-    UErrorCode status = U_ZERO_ERROR;
-    UCharIterator iterA, iterB;
-    int result;
-
-    uiter_setUTF8(&iterA, (const char *) a.data, (uint32_t) a.size);
-    uiter_setUTF8(&iterB, (const char *) b.data, (uint32_t) b.size);
-
-    reserve_coll(ctx);
-    result = ucol_strcollIter(ctx->coll, &iterA, &iterB, &status);
-
-    if (U_FAILURE(status)) {
-        ctx->error = 1;
-        return 0;
-    }
-
-    /* ucol_strcollIter returns 0, -1 or 1
-     * (see type UCollationResult in unicode/ucol.h) */
-
-    return result;
-}
-
-
-int
-on_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
-{
-    UErrorCode status = U_ZERO_ERROR;
-    int i, j;
-
-    if (!enif_get_int(env, info, &numCollators)) {
-        return 1;
-    }
-
-    if (numCollators < 1) {
-        return 2;
-    }
-
-    collMutex = enif_mutex_create("coll_mutex");
-
-    if (collMutex == NULL) {
-        return 3;
-    }
-
-    collators = enif_alloc(sizeof(UCollator*) * numCollators);
-
-    if (collators == NULL) {
-        enif_mutex_destroy(collMutex);
-        return 4;
-    }
-
-    for (i = 0; i < numCollators; i++) {
-        collators[i] = ucol_open("", &status);
-
-        if (U_FAILURE(status)) {
-            for (j = 0; j < i; j++) {
-                ucol_close(collators[j]);
-            }
-
-            enif_free(collators);
-            enif_mutex_destroy(collMutex);
-
-            return 5;
-        }
-    }
-
-    ATOM_TRUE = enif_make_atom(env, "true");
-    ATOM_FALSE = enif_make_atom(env, "false");
-    ATOM_NULL = enif_make_atom(env, "null");
-
-    return 0;
-}
-
-
-void
-on_unload(ErlNifEnv* env, void* priv_data)
-{
-    if (collators != NULL) {
-        int i;
-
-        for (i = 0; i < numCollators; i++) {
-            ucol_close(collators[i]);
-        }
-
-        enif_free(collators);
-    }
-
-    if (collMutex != NULL) {
-        enif_mutex_destroy(collMutex);
-    }
-}
-
-
-static ErlNifFunc nif_functions[] = {
-    {"less_nif", 2, less_json_nif}
-};
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-ERL_NIF_INIT(couch_ejson_compare, nif_functions, &on_load, NULL, NULL, &on_unload);
-
-#ifdef __cplusplus
-}
-#endif

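A note on the file above: every string comparison in the deleted NIF goes through an ICU collator rather than a byte-wise memcmp, which is what gives view keys their locale-aware sort order. A minimal standalone sketch of the same ucol_strcollIter() pattern (a hypothetical demo program, not part of this commit; link with -licuuc -licui18n):

    #include <stdio.h>
    #include <unicode/ucol.h>
    #include <unicode/uiter.h>

    int main(void)
    {
        UErrorCode status = U_ZERO_ERROR;
        UCollator* coll = ucol_open("", &status);  /* root locale, as in on_load() */
        UCharIterator iterA, iterB;
        int result;

        if (U_FAILURE(status)) return 1;

        /* Length -1 means the input is NUL-terminated. */
        uiter_setUTF8(&iterA, "apple", -1);
        uiter_setUTF8(&iterB, "Banana", -1);

        /* Returns -1, 0 or 1 (UCollationResult), like compare_strings() above. */
        result = ucol_strcollIter(coll, &iterA, &iterB, &status);
        if (U_SUCCESS(status))
            printf("%d\n", result);  /* prints -1: "apple" sorts first */

        ucol_close(coll);
        return 0;
    }

A byte-wise comparison would put "Banana" (0x42...) ahead of "apple" (0x61...); the collator reverses that, matching the order views actually return.
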
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_ejson_compare/erl_nif_compat.h
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_ejson_compare/erl_nif_compat.h b/src/couch/priv/couch_ejson_compare/erl_nif_compat.h
deleted file mode 100644
index 0aa3ae6..0000000
--- a/src/couch/priv/couch_ejson_compare/erl_nif_compat.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* Copyright (c) 2010-2011 Basho Technologies, Inc.
- * With some minor modifications for Apache CouchDB.
- *
- * This file is provided to you under the Apache License,
- * Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License.  You may obtain
- * a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
-*/
-
-#ifndef ERL_NIF_COMPAT_H_
-#define ERL_NIF_COMPAT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "erl_nif.h"
-
-
-#if ERL_NIF_MAJOR_VERSION == 0 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R13B03
-#elif ERL_NIF_MAJOR_VERSION == 1 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R13B04
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R14A
-#define OTP_R14B
-#define OTP_R14B01
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R14B02
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 2
-#define OTP_R14B03
-#endif
-
-
-#ifdef OTP_R13B03
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_alloc_compat enif_alloc
-#define enif_release_binary_compat enif_release_binary
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_get_data
-#define enif_make_uint_compat enif_make_ulong
-
-#define enif_make_string_compat(E, B, Enc) \
-    enif_make_string(E, B)
-
-#define enif_compare_compat enif_compare
-
-#endif /* R13B03 */
-
-
-#ifdef OTP_R13B04
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_realloc_binary_compat enif_realloc_binary
-#define enif_release_binary_compat enif_release_binary
-#define enif_alloc_compat enif_alloc
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_uint_compat enif_make_uint
-#define enif_compare_compat enif_compare
-
-#endif /* R13B04 */
-
-
-/* OTP R14 and future releases */
-#if !defined(OTP_R13B03) && !defined(OTP_R13B04)
-
-#define enif_open_resource_type_compat(E, N, D, F, T) \
-    enif_open_resource_type(E, NULL, N, D, F, T)
-
-#define enif_alloc_resource_compat(E, T, S) \
-    enif_alloc_resource(T, S)
-
-#define enif_release_resource_compat(E, H) \
-    enif_release_resource(H)
-
-#define enif_alloc_binary_compat(E, S, B) \
-    enif_alloc_binary(S, B)
-
-#define enif_realloc_binary_compat(E, S, B) \
-    enif_realloc_binary(S, B)
-
-#define enif_release_binary_compat(E, B) \
-    enif_release_binary(B)
-
-#define enif_alloc_compat(E, S) \
-    enif_alloc(S)
-
-#define enif_free_compat(E, P) \
-    enif_free(P)
-
-#define enif_get_atom_compat(E, T, B, S) \
-    enif_get_atom(E, T, B, S, ERL_NIF_LATIN1)
-
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_uint_compat enif_make_uint
-
-#define enif_compare_compat(E, A, B) \
-    enif_compare(A, B)
-
-#endif  /* R14 and future releases */
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* ERL_NIF_COMPAT_H_ */

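The compat header above exists so a single NIF source builds against everything from R13B03 through R14B03 and later, papering over the enif_* signature changes between those releases. A hedged sketch of its intended use (the module name compat_demo is hypothetical, not part of this commit):

    #include "erl_nif_compat.h"

    static ERL_NIF_TERM
    hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* On R13B03 the _compat macro drops the encoding argument entirely
         * (the token is discarded at preprocessing time, so it need not even
         * be defined there); on R13B04+ it forwards to enif_make_string(). */
        return enif_make_string_compat(env, "hello", ERL_NIF_LATIN1);
    }

    static ErlNifFunc nif_funcs[] = {
        {"hello", 0, hello}
    };

    ERL_NIF_INIT(compat_demo, nif_funcs, NULL, NULL, NULL, NULL)
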
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/help.h
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/help.h b/src/couch/priv/couch_js/help.h
deleted file mode 100644
index b31bb8c..0000000
--- a/src/couch/priv/couch_js/help.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
-    "%s - %s\n"
-    "\n"
-    "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
-        "not use\n"
-    "this file except in compliance with the License. You may obtain a copy of"
-        "the\n"
-    "License at\n"
-    "\n"
-    "  http://www.apache.org/licenses/LICENSE-2.0\n"
-    "\n"
-    "Unless required by applicable law or agreed to in writing, software "
-        "distributed\n"
-    "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
-        "WARRANTIES OR\n"
-    "CONDITIONS OF ANY KIND, either express or implied. See the License "
-        "for the\n"
-    "specific language governing permissions and limitations under the "
-        "License.\n";
-
-static const char USAGE_TEMPLATE[] =
-    "Usage: %s [FILE]\n"
-    "\n"
-    "The %s command runs the %s JavaScript interpreter.\n"
-    "\n"
-    "The exit status is 0 for success or 1 for failure.\n"
-    "\n"
-    "Options:\n"
-    "\n"
-    "  -h          display a short help message and exit\n"
-    "  -V          display version information and exit\n"
-    "  -H          enable %s cURL bindings (only avaiable\n"
-    "              if package was built with cURL available)\n"
-    "  -S SIZE     specify that the interpreter should set the\n"
-    "              stack quota for JS contexts to SIZE bytes\n"
-    "  -u FILE     path to a .uri file containing the address\n"
-    "              (or addresses) of one or more servers\n"
-    "\n"
-    "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename)  \
-    fprintf(                     \
-            stdout,              \
-            VERSION_TEMPLATE,    \
-            basename,            \
-            PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
-    fprintf(                                    \
-            stdout,                             \
-            USAGE_TEMPLATE,                     \
-            basename,                           \
-            basename,                           \
-            PACKAGE_NAME,                       \
-            basename,                           \
-            PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/http.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/http.c b/src/couch/priv/couch_js/http.c
deleted file mode 100644
index c66b5da..0000000
--- a/src/couch/priv/couch_js/http.c
+++ /dev/null
@@ -1,698 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <jsapi.h>
-#include "config.h"
-#include "utf8.h"
-#include "util.h"
-
-// Soft dependency on cURL bindings because they're
-// only used when running the JS tests from the
-// command line which is rare.
-#ifndef HAVE_CURL
-
-void
-http_check_enabled()
-{
-    fprintf(stderr, "HTTP API was disabled at compile time.\n");
-    exit(3);
-}
-
-
-JSBool
-http_ctor(JSContext* cx, JSObject* req)
-{
-    return JS_FALSE;
-}
-
-
-void
-http_dtor(JSContext* cx, JSObject* req)
-{
-    return;
-}
-
-
-JSBool
-http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_send(JSContext* cx, JSObject* req, jsval body)
-{
-    return JS_FALSE;
-}
-
-
-int
-http_status(JSContext* cx, JSObject* req)
-{
-    return -1;
-}
-
-JSBool
-http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
-{
-    return JS_FALSE;
-}
-
-
-#else
-#include <curl/curl.h>
-#include <unistd.h>
-
-
-void
-http_check_enabled()
-{
-    return;
-}
-
-
-// Map some of the string function names to things which exist on Windows
-#ifdef XP_WIN
-#define strcasecmp _strcmpi
-#define strncasecmp _strnicmp
-#define snprintf _snprintf
-#endif
-
-
-typedef struct curl_slist CurlHeaders;
-
-
-typedef struct {
-    int             method;
-    char*           url;
-    CurlHeaders*    req_headers;
-    jsint           last_status;
-} HTTPData;
-
-
-char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", "OPTIONS", NULL};
-
-
-#define GET     0
-#define HEAD    1
-#define POST    2
-#define PUT     3
-#define DELETE  4
-#define COPY    5
-#define OPTIONS 6
-
-
-static JSBool
-go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t blen);
-
-
-static JSString*
-str_from_binary(JSContext* cx, char* data, size_t length);
-
-
-JSBool
-http_ctor(JSContext* cx, JSObject* req)
-{
-    HTTPData* http = NULL;
-    JSBool ret = JS_FALSE;
-
-    http = (HTTPData*) malloc(sizeof(HTTPData));
-    if(!http)
-    {
-        JS_ReportError(cx, "Failed to create CouchHTTP instance.");
-        goto error;
-    }
-
-    http->method = -1;
-    http->url = NULL;
-    http->req_headers = NULL;
-    http->last_status = -1;
-
-    if(!JS_SetPrivate(cx, req, http))
-    {
-        JS_ReportError(cx, "Failed to set private CouchHTTP data.");
-        goto error;
-    }
-
-    ret = JS_TRUE;
-    goto success;
-
-error:
-    if(http) free(http);
-
-success:
-    return ret;
-}
-
-
-void
-http_dtor(JSContext* cx, JSObject* obj)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
-    if(http) { 
-        if(http->url) free(http->url);
-        if(http->req_headers) curl_slist_free_all(http->req_headers);
-        free(http);
-    }
-}
-
-
-JSBool
-http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* method = NULL;
-    int methid;
-    JSBool ret = JS_FALSE;
-
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(JSVAL_IS_VOID(mth)) {
-        JS_ReportError(cx, "You must specify a method.");
-        goto done;
-    }
-
-    method = enc_string(cx, mth, NULL);
-    if(!method) {
-        JS_ReportError(cx, "Failed to encode method.");
-        goto done;
-    }
-    
-    for(methid = 0; METHODS[methid] != NULL; methid++) {
-        if(strcasecmp(METHODS[methid], method) == 0) break;
-    }
-    
-    if(methid > OPTIONS) {
-        JS_ReportError(cx, "Invalid method specified.");
-        goto done;
-    }
-
-    http->method = methid;
-
-    if(JSVAL_IS_VOID(url)) {
-        JS_ReportError(cx, "You must specify a URL.");
-        goto done;
-    }
-
-    if(http->url != NULL) {
-        free(http->url);
-        http->url = NULL;
-    }
-
-    http->url = enc_string(cx, url, NULL);
-    if(http->url == NULL) {
-        JS_ReportError(cx, "Failed to encode URL.");
-        goto done;
-    }
-    
-    if(JSVAL_IS_BOOLEAN(snc) && JSVAL_TO_BOOLEAN(snc)) {
-        JS_ReportError(cx, "Synchronous flag must be false.");
-        goto done;
-    }
-    
-    if(http->req_headers) {
-        curl_slist_free_all(http->req_headers);
-        http->req_headers = NULL;
-    }
-    
-    // Disable Expect: 100-continue
-    http->req_headers = curl_slist_append(http->req_headers, "Expect:");
-
-    ret = JS_TRUE;
-
-done:
-    if(method) free(method);
-    return ret;
-}
-
-
-JSBool
-http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* keystr = NULL;
-    char* valstr = NULL;
-    char* hdrbuf = NULL;
-    size_t hdrlen = -1;
-    JSBool ret = JS_FALSE;
-
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(JSVAL_IS_VOID(name))
-    {
-        JS_ReportError(cx, "You must speciy a header name.");
-        goto done;
-    }
-
-    keystr = enc_string(cx, name, NULL);
-    if(!keystr)
-    {
-        JS_ReportError(cx, "Failed to encode header name.");
-        goto done;
-    }
-    
-    if(JSVAL_IS_VOID(val))
-    {
-        JS_ReportError(cx, "You must specify a header value.");
-        goto done;
-    }
-    
-    valstr = enc_string(cx, val, NULL);
-    if(!valstr)
-    {
-        JS_ReportError(cx, "Failed to encode header value.");
-        goto done;
-    }
-    
-    hdrlen = strlen(keystr) + strlen(valstr) + 3;
-    hdrbuf = (char*) malloc(hdrlen * sizeof(char));
-    if(!hdrbuf) {
-        JS_ReportError(cx, "Failed to allocate header buffer.");
-        goto done;
-    }
-    
-    snprintf(hdrbuf, hdrlen, "%s: %s", keystr, valstr);
-    http->req_headers = curl_slist_append(http->req_headers, hdrbuf);
-
-    ret = JS_TRUE;
-
-done:
-    if(keystr) free(keystr);
-    if(valstr) free(valstr);
-    if(hdrbuf) free(hdrbuf);
-    return ret;
-}
-
-JSBool
-http_send(JSContext* cx, JSObject* req, jsval body)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* bodystr = NULL;
-    size_t bodylen = 0;
-    JSBool ret = JS_FALSE;
-    
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(!JSVAL_IS_VOID(body)) {
-        bodystr = enc_string(cx, body, &bodylen);
-        if(!bodystr) {
-            JS_ReportError(cx, "Failed to encode body.");
-            goto done;
-        }
-    }
-
-    ret = go(cx, req, http, bodystr, bodylen);
-
-done:
-    if(bodystr) free(bodystr);
-    return ret;
-}
-
-int
-http_status(JSContext* cx, JSObject* req)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        return -1;
-    }
-
-    return http->last_status;
-}
-
-JSBool
-http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
-{
-    FILE* uri_fp = NULL;
-    JSString* uri_str;
-
-    // Default is http://localhost:5984/ when no uri file is specified
-    if (!args->uri_file) {
-        uri_str = JS_InternString(cx, "http://localhost:5984/");
-        *uri_val = STRING_TO_JSVAL(uri_str);
-        return JS_TRUE;
-    }
-
-    // Else check to see if the base url is cached in a reserved slot
-    if (JS_GetReservedSlot(cx, req, 0, uri_val) && !JSVAL_IS_VOID(*uri_val)) {
-        return JS_TRUE;
-    }
-
-    // Read the first line of the couch.uri file.
-    if(!((uri_fp = fopen(args->uri_file, "r")) &&
-         (uri_str = couch_readline(cx, uri_fp)))) {
-        JS_ReportError(cx, "Failed to read couch.uri file.");
-        goto error;
-    }
-
-    fclose(uri_fp);
-    *uri_val = STRING_TO_JSVAL(uri_str);
-    JS_SetReservedSlot(cx, req, 0, *uri_val);
-    return JS_TRUE;
-
-error:
-    if(uri_fp) fclose(uri_fp);
-    return JS_FALSE;
-}
-
-
-// Curl Helpers
-
-typedef struct {
-    HTTPData*   http;
-    JSContext*  cx;
-    JSObject*   resp_headers;
-    char*       sendbuf;
-    size_t      sendlen;
-    size_t      sent;
-    int         sent_once;
-    char*       recvbuf;
-    size_t      recvlen;
-    size_t      read;
-} CurlState;
-
-/*
- * I really hate doing this but this doesn't have to be
- * uber awesome, it just has to work.
- */
-CURL*       HTTP_HANDLE = NULL;
-char        ERRBUF[CURL_ERROR_SIZE];
-
-static size_t send_body(void *ptr, size_t size, size_t nmem, void *data);
-static int seek_body(void *ptr, curl_off_t offset, int origin);
-static size_t recv_body(void *ptr, size_t size, size_t nmem, void *data);
-static size_t recv_header(void *ptr, size_t size, size_t nmem, void *data);
-
-static JSBool
-go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen)
-{
-    CurlState state;
-    char* referer;
-    JSString* jsbody;
-    JSBool ret = JS_FALSE;
-    jsval tmp;
-    
-    state.cx = cx;
-    state.http = http;
-    
-    state.sendbuf = body;
-    state.sendlen = bodylen;
-    state.sent = 0;
-    state.sent_once = 0;
-
-    state.resp_headers = NULL;
-    state.recvbuf = NULL;
-    state.recvlen = 0;
-    state.read = 0;
-
-    if(HTTP_HANDLE == NULL) {
-        HTTP_HANDLE = curl_easy_init();
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_READFUNCTION, send_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKFUNCTION,
-                                        (curl_seek_callback) seek_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_HEADERFUNCTION, recv_header);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEFUNCTION, recv_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOPROGRESS, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_ERRORBUFFER, ERRBUF);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_COOKIEFILE, "");
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_USERAGENT,
-                                            "CouchHTTP Client - Relax");
-    }
-    
-    if(!HTTP_HANDLE) {
-        JS_ReportError(cx, "Failed to initialize cURL handle.");
-        goto done;
-    }
-
-    if(!JS_GetReservedSlot(cx, obj, 0, &tmp)) {
-      JS_ReportError(cx, "Failed to readreserved slot.");
-      goto done;
-    }
-
-    if(!(referer = enc_string(cx, tmp, NULL))) {
-      JS_ReportError(cx, "Failed to encode referer.");
-      goto done;
-    }
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_REFERER, referer);
-    free(referer);
-
-    if(http->method < 0 || http->method > OPTIONS) {
-        JS_ReportError(cx, "INTERNAL: Unknown method.");
-        goto done;
-    }
-
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_CUSTOMREQUEST, METHODS[http->method]);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 0);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 1);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 0);
-    
-    if(http->method == HEAD) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
-    } else if(http->method == POST || http->method == PUT) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
-    }
-    
-    if(body && bodylen) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, bodylen);        
-    } else {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, 0);
-    }
-
-    // curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1);
-
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_URL, http->url);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_HTTPHEADER, http->req_headers);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_READDATA, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKDATA, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEHEADER, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEDATA, &state);
-
-    if(curl_easy_perform(HTTP_HANDLE) != 0) {
-        JS_ReportError(cx, "Failed to execute HTTP request: %s", ERRBUF);
-        goto done;
-    }
-    
-    if(!state.resp_headers) {
-        JS_ReportError(cx, "Failed to recieve HTTP headers.");
-        goto done;
-    }
-
-    tmp = OBJECT_TO_JSVAL(state.resp_headers);
-    if(!JS_DefineProperty(
-        cx, obj,
-        "_headers",
-        tmp,
-        NULL, NULL,
-        JSPROP_READONLY
-    )) {
-        JS_ReportError(cx, "INTERNAL: Failed to set response headers.");
-        goto done;
-    }
-    
-    if(state.recvbuf) {
-        state.recvbuf[state.read] = '\0';
-        jsbody = dec_string(cx, state.recvbuf, state.read+1);
-        if(!jsbody) {
-            // If the body doesn't decode as UTF-8, fall back to
-            // widening each raw byte to a jschar.
-            jsbody = str_from_binary(cx, state.recvbuf, state.read);
-            if(!jsbody) {
-                if(!JS_IsExceptionPending(cx)) {
-                    JS_ReportError(cx, "INTERNAL: Failed to decode body.");
-                }
-                goto done;
-            }
-        }
-        tmp = STRING_TO_JSVAL(jsbody);
-    } else {
-        tmp = JS_GetEmptyStringValue(cx);
-    }
-    
-    if(!JS_DefineProperty(
-        cx, obj,
-        "responseText",
-        tmp,
-        NULL, NULL,
-        JSPROP_READONLY
-    )) {
-        JS_ReportError(cx, "INTERNAL: Failed to set responseText.");
-        goto done;
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(state.recvbuf) JS_free(cx, state.recvbuf);
-    return ret;
-}
-
-static size_t
-send_body(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    size_t length = size * nmem;
-    size_t towrite = state->sendlen - state->sent;
-
-    // Assume this is cURL trying to resend a request that
-    // failed.
-    if(towrite == 0 && state->sent_once == 0) {
-        state->sent_once = 1;
-        return 0;
-    } else if(towrite == 0) {
-        state->sent = 0;
-        state->sent_once = 0;
-        towrite = state->sendlen;
-    }
-
-    if(length < towrite) towrite = length;
-
-    memcpy(ptr, state->sendbuf + state->sent, towrite);
-    state->sent += towrite;
-
-    return towrite;
-}
-
-static int
-seek_body(void* ptr, curl_off_t offset, int origin)
-{
-    CurlState* state = (CurlState*) ptr;
-    if(origin != SEEK_SET) return -1;
-
-    state->sent = (size_t) offset;
-    return (int) state->sent;
-}
-
-static size_t
-recv_header(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    char code[4];
-    char* header = (char*) ptr;
-    size_t length = size * nmem;
-    JSString* hdr = NULL;
-    jsuint hdrlen;
-    jsval hdrval;
-    
-    if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0) {
-        if(length < 12) {
-            return CURLE_WRITE_ERROR;
-        }
-
-        memcpy(code, header+9, 3*sizeof(char));
-        code[3] = '\0';
-        state->http->last_status = atoi(code);
-
-        state->resp_headers = JS_NewArrayObject(state->cx, 0, NULL);
-        if(!state->resp_headers) {
-            return CURLE_WRITE_ERROR;
-        }
-
-        return length;
-    }
-
-    // We get a notice at the \r\n\r\n after headers.
-    if(length <= 2) {
-        return length;
-    }
-
-    // Append the new header to our array.
-    hdr = dec_string(state->cx, header, length);
-    if(!hdr) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen)) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    hdrval = STRING_TO_JSVAL(hdr);
-    if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval)) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    return length;
-}
-
-static size_t
-recv_body(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    size_t length = size * nmem;
-    char* tmp = NULL;
-    
-    if(!state->recvbuf) {
-        state->recvlen = 4096;
-        state->read = 0;
-        state->recvbuf = JS_malloc(state->cx, state->recvlen);
-    }
-    
-    if(!state->recvbuf) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    // +1 so we can add '\0' back up in the go function.
-    while(length+1 > state->recvlen - state->read) state->recvlen *= 2;
-    tmp = JS_realloc(state->cx, state->recvbuf, state->recvlen);
-    if(!tmp) return CURLE_WRITE_ERROR;
-    state->recvbuf = tmp;
-   
-    memcpy(state->recvbuf + state->read, ptr, length);
-    state->read += length;
-    return length;
-}
-
-JSString*
-str_from_binary(JSContext* cx, char* data, size_t length)
-{
-    jschar* conv = (jschar*) JS_malloc(cx, length * sizeof(jschar));
-    JSString* ret = NULL;
-    size_t i;
-
-    if(!conv) return NULL;
-
-    for(i = 0; i < length; i++) {
-        conv[i] = (jschar) data[i];
-    }
-
-    ret = JS_NewUCString(cx, conv, length);
-    if(!ret) JS_free(cx, conv);
-
-    return ret;
-}
-
-#endif /* HAVE_CURL */

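The go()/recv_body() pair above is a conventional libcurl accumulate-into-a-growable-buffer setup. A self-contained sketch of the same pattern, reduced to a plain GET (the URL is the usual local CouchDB default and purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <curl/curl.h>

    struct buf { char* data; size_t len; };

    /* Same grow-and-append shape as recv_body() above, but with realloc. */
    static size_t
    on_body(void* ptr, size_t size, size_t nmemb, void* userdata)
    {
        struct buf* b = userdata;
        size_t n = size * nmemb;
        char* tmp = realloc(b->data, b->len + n + 1);

        if (!tmp) return 0;  /* a short count tells libcurl to abort */
        b->data = tmp;
        memcpy(b->data + b->len, ptr, n);
        b->len += n;
        b->data[b->len] = '\0';
        return n;
    }

    int main(void)
    {
        struct buf b = {NULL, 0};
        CURL* h = curl_easy_init();

        if (!h) return 1;
        curl_easy_setopt(h, CURLOPT_URL, "http://localhost:5984/");
        curl_easy_setopt(h, CURLOPT_WRITEFUNCTION, on_body);
        curl_easy_setopt(h, CURLOPT_WRITEDATA, &b);

        if (curl_easy_perform(h) == CURLE_OK && b.data)
            printf("%s\n", b.data);

        curl_easy_cleanup(h);
        free(b.data);
        return 0;
    }
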
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/http.h
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/http.h b/src/couch/priv/couch_js/http.h
deleted file mode 100644
index 63d45bd..0000000
--- a/src/couch/priv/couch_js/http.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCH_JS_HTTP_H
-#define COUCH_JS_HTTP_H
-
-#include "util.h"
-
-void http_check_enabled();
-JSBool http_ctor(JSContext* cx, JSObject* req);
-void http_dtor(JSContext* cx, JSObject* req);
-JSBool http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc);
-JSBool http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val);
-JSBool http_send(JSContext* cx, JSObject* req, jsval body);
-int http_status(JSContext* cx, JSObject* req);
-JSBool http_uri(JSContext* cx, JSObject *req, couch_args* args, jsval* uri);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/main.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/main.c b/src/couch/priv/couch_js/main.c
deleted file mode 100644
index 209bb02..0000000
--- a/src/couch/priv/couch_js/main.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include "config.h"
-
-#if defined(SM185)
-#include "sm185.c"
-#elif defined(SM180)
-#include "sm180.c"
-#else
-#include "sm170.c"
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/sm185.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/sm185.c b/src/couch/priv/couch_js/sm185.c
deleted file mode 100644
index bfee023..0000000
--- a/src/couch/priv/couch_js/sm185.c
+++ /dev/null
@@ -1,431 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <jsapi.h>
-#include "http.h"
-#include "utf8.h"
-#include "util.h"
-
-
-#define SETUP_REQUEST(cx) \
-    JS_SetContextThread(cx); \
-    JS_BeginRequest(cx);
-#define FINISH_REQUEST(cx) \
-    JS_EndRequest(cx); \
-    JS_ClearContextThread(cx);
-
-
-static JSClass global_class = {
-    "GlobalClass",
-    JSCLASS_GLOBAL_FLAGS,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_StrictPropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    JS_FinalizeStub,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-static JSBool
-req_ctor(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSBool ret;
-    JSObject* obj = JS_NewObjectForConstructor(cx, vp);
-    if(!obj) {
-        JS_ReportError(cx, "Failed to create CouchHTTP instance.\n");
-        return JS_FALSE;
-    }
-    ret = http_ctor(cx, obj);
-    JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(obj));
-    return ret;
-}
-
-
-static void 
-req_dtor(JSContext* cx, JSObject* obj)
-{
-    http_dtor(cx, obj);
-}
-
-
-static JSBool
-req_open(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE);
-    } else if(argc == 3) {
-        ret = http_open(cx, obj, argv[0], argv[1], argv[2]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.open");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_set_hdr(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_set_hdr(cx, obj, argv[0], argv[1]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.set_header");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_send(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 1) {
-        ret = http_send(cx, obj, argv[0]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.send");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_status(JSContext* cx, JSObject* obj, jsid pid, jsval* vp)
-{
-    int status = http_status(cx, obj);
-    if(status < 0)
-        return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status));
-    return JS_TRUE;
-}
-
-
-static JSBool
-base_url(JSContext *cx, JSObject* obj, jsid pid, jsval* vp)
-{
-    couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
-    return http_uri(cx, obj, args, &JS_RVAL(cx, vp));
-}
-
-
-static JSBool
-evalcx(JSContext *cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSString* str;
-    JSObject* sandbox;
-    JSObject* global;
-    JSContext* subcx;
-    JSCrossCompartmentCall* call = NULL;
-    const jschar* src;
-    size_t srclen;
-    jsval rval = JSVAL_VOID;
-    JSBool ret = JS_FALSE;
-    char *name = NULL;
-
-    sandbox = NULL;
-    if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
-        return JS_FALSE;
-    }
-
-    subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
-    if(!subcx) {
-        JS_ReportOutOfMemory(cx);
-        return JS_FALSE;
-    }
-
-    SETUP_REQUEST(subcx);
-
-    src = JS_GetStringCharsAndLength(cx, str, &srclen);
-
-    // Re-use the compartment associated with the main context,
-    // rather than creating a new compartment.
-    global = JS_GetGlobalObject(cx);
-    if(global == NULL) goto done;
-    call = JS_EnterCrossCompartmentCall(subcx, global);
-
-    if(!sandbox) {
-        sandbox = JS_NewGlobalObject(subcx, &global_class);
-        if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
-            goto done;
-        }
-    }
-
-    if(argc > 2) {
-        name = enc_string(cx, argv[2], NULL);
-    }
-
-    if(srclen == 0) {
-        JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox));
-    } else {
-        JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, &rval);
-        JS_SET_RVAL(cx, vp, rval);
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(name) JS_free(cx, name);
-    if(call) JS_LeaveCrossCompartmentCall(call);
-    FINISH_REQUEST(subcx);
-    JS_DestroyContext(subcx);
-    return ret;
-}
-
-
-static JSBool
-gc(JSContext* cx, uintN argc, jsval* vp)
-{
-    JS_GC(cx);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-print(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    couch_print(cx, argc, argv);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-quit(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    int exit_code = 0;
-    JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
-    exit(exit_code);
-}
-
-
-static JSBool
-readline(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSString* line;
-
-    /* GC Occasionally */
-    JS_MaybeGC(cx);
-
-    line = couch_readline(cx, stdin);
-    if(line == NULL) return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line));
-    return JS_TRUE;
-}
-
-
-static JSBool
-seal(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSObject *target;
-    JSBool deep = JS_FALSE;
-    JSBool ret;
-
-    if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
-        return JS_FALSE;
-
-    if(!target) {
-        JS_SET_RVAL(cx, vp, JSVAL_VOID);
-        return JS_TRUE;
-    }
-
-    
-    ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-JSClass CouchHTTPClass = {
-    "CouchHTTP",
-    JSCLASS_HAS_PRIVATE
-        | JSCLASS_CONSTRUCT_PROTOTYPE
-        | JSCLASS_HAS_RESERVED_SLOTS(2),
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_StrictPropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    req_dtor,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-JSPropertySpec CouchHTTPProperties[] = {
-    {"status", 0, JSPROP_READONLY, req_status, NULL},
-    {"base_url", 0, JSPROP_READONLY | JSPROP_SHARED, base_url, NULL},
-    {0, 0, 0, 0, 0}
-};
-
-
-JSFunctionSpec CouchHTTPFunctions[] = {
-    JS_FS("_open", req_open, 3, 0),
-    JS_FS("_setRequestHeader", req_set_hdr, 2, 0),
-    JS_FS("_send", req_send, 1, 0),
-    JS_FS_END
-};
-
-
-static JSFunctionSpec global_functions[] = {
-    JS_FS("evalcx", evalcx, 0, 0),
-    JS_FS("gc", gc, 0, 0),
-    JS_FS("print", print, 0, 0),
-    JS_FS("quit", quit, 0, 0),
-    JS_FS("readline", readline, 0, 0),
-    JS_FS("seal", seal, 0, 0),
-    JS_FS_END
-};
-
-
-int
-main(int argc, const char* argv[])
-{
-    JSRuntime* rt = NULL;
-    JSContext* cx = NULL;
-    JSObject* global = NULL;
-    JSCrossCompartmentCall *call = NULL;
-    JSObject* klass = NULL;
-    JSSCRIPT_TYPE script;
-    JSString* scriptsrc;
-    const jschar* schars;
-    size_t slen;
-    jsval sroot;
-    jsval result;
-    int i;
-
-    couch_args* args = couch_parse_args(argc, argv);
-
-    rt = JS_NewRuntime(64L * 1024L * 1024L);
-    if(rt == NULL)
-        return 1;
-
-    cx = JS_NewContext(rt, args->stack_size);
-    if(cx == NULL)
-        return 1;
-
-    JS_SetErrorReporter(cx, couch_error);
-    JS_ToggleOptions(cx, JSOPTION_XML);
-    JS_SetOptions(cx, JSOPTION_METHODJIT);
-#ifdef JSOPTION_TYPE_INFERENCE
-    JS_SetOptions(cx, JSOPTION_TYPE_INFERENCE);
-#endif
-    JS_SetContextPrivate(cx, args);
-    
-    SETUP_REQUEST(cx);
-
-    global = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL);
-    if(global == NULL)
-        return 1;
-
-    call = JS_EnterCrossCompartmentCall(cx, global);
-
-    JS_SetGlobalObject(cx, global);
-    
-    if(!JS_InitStandardClasses(cx, global))
-        return 1;
-
-    if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
-        return 1;
- 
-    if(args->use_http) {
-        http_check_enabled();
-
-        klass = JS_InitClass(
-            cx, global,
-            NULL,
-            &CouchHTTPClass, req_ctor,
-            0,
-            CouchHTTPProperties, CouchHTTPFunctions,
-            NULL, NULL
-        );
-
-        if(!klass)
-        {
-            fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
-            exit(2);
-        }
-    } 
-
-    for(i = 0 ; args->scripts[i] ; i++) {
-        // Convert script source to jschars.
-        scriptsrc = couch_readfile(cx, args->scripts[i]);
-        if(!scriptsrc)
-            return 1;
-
-        schars = JS_GetStringCharsAndLength(cx, scriptsrc, &slen);
-
-        // Root it so GC doesn't collect it.
-        sroot = STRING_TO_JSVAL(scriptsrc);
-        if(JS_AddValueRoot(cx, &sroot) != JS_TRUE) {
-            fprintf(stderr, "Internal root error.\n");
-            return 1;
-        }
-
-        // Compile and run
-        script = JS_CompileUCScript(cx, global, schars, slen,
-                                    args->scripts[i], 1);
-        if(!script) {
-            fprintf(stderr, "Failed to compile script.\n");
-            return 1;
-        }
-
-        if(JS_ExecuteScript(cx, global, script, &result) != JS_TRUE) {
-            fprintf(stderr, "Failed to execute script.\n");
-            return 1;
-        }
-
-        // Warning message if we don't remove it.
-        JS_RemoveValueRoot(cx, &sroot);
-
-        // Give the GC a chance to run.
-        JS_MaybeGC(cx);
-    }
-
-    JS_LeaveCrossCompartmentCall(call);
-    FINISH_REQUEST(cx);
-    JS_DestroyContext(cx);
-    JS_DestroyRuntime(rt);
-    JS_ShutDown();
-
-    return 0;
-}

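For readers unfamiliar with JSAPI 1.8.5: main() above follows the standard runtime -> context -> compartment/global -> standard-classes embedding sequence before compiling each script. A minimal sketch of just that skeleton (a hypothetical standalone program, not part of this commit; the SETUP_REQUEST/FINISH_REQUEST thread bookkeeping is omitted for brevity):

    #include <string.h>
    #include <stdio.h>
    #include <jsapi.h>

    static JSClass demo_global = {
        "global", JSCLASS_GLOBAL_FLAGS,
        JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_StrictPropertyStub,
        JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
        JSCLASS_NO_OPTIONAL_MEMBERS
    };

    int main(void)
    {
        JSRuntime* rt = JS_NewRuntime(8L * 1024L * 1024L);
        JSContext* cx;
        JSObject* global;
        const char* src = "1 + 2";
        jsval rval;

        if (!rt || !(cx = JS_NewContext(rt, 8192)))
            return 1;

        global = JS_NewCompartmentAndGlobalObject(cx, &demo_global, NULL);
        if (!global || !JS_InitStandardClasses(cx, global))
            return 1;

        if (JS_EvaluateScript(cx, global, src, strlen(src), "demo", 1, &rval))
            printf("%d\n", JSVAL_TO_INT(rval));  /* prints 3 */

        JS_DestroyContext(cx);
        JS_DestroyRuntime(rt);
        JS_ShutDown();
        return 0;
    }
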
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/utf8.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/utf8.c b/src/couch/priv/couch_js/utf8.c
deleted file mode 100644
index d606426..0000000
--- a/src/couch/priv/couch_js/utf8.c
+++ /dev/null
@@ -1,291 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <jsapi.h>
-#include "config.h"
-
-static int
-enc_char(uint8 *utf8Buffer, uint32 ucs4Char)
-{
-    int utf8Length = 1;
-
-    if (ucs4Char < 0x80)
-    {
-        *utf8Buffer = (uint8)ucs4Char;
-    }
-    else
-    {
-        int i;
-        uint32 a = ucs4Char >> 11;
-        utf8Length = 2;
-        while(a)
-        {
-            a >>= 5;
-            utf8Length++;
-        }
-        i = utf8Length;
-        while(--i)
-        {
-            utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
-            ucs4Char >>= 6;
-        }
-        *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
-    }
-
-    return utf8Length;
-}
-
-static JSBool
-enc_charbuf(const jschar* src, size_t srclen, char* dst, size_t* dstlenp)
-{
-    size_t i;
-    size_t utf8Len;
-    size_t dstlen = *dstlenp;
-    size_t origDstlen = dstlen;
-    jschar c;
-    jschar c2;
-    uint32 v;
-    uint8 utf8buf[6];
-
-    if(!dst)
-    {
-        dstlen = origDstlen = (size_t) -1;
-    }
-
-    while(srclen)
-    {
-        c = *src++;
-        srclen--;
-
-        if((c >= 0xDC00) && (c <= 0xDFFF)) goto bad_surrogate;
-        
-        if(c < 0xD800 || c > 0xDBFF)
-        {
-            v = c;
-        }
-        else
-        {
-            if(srclen < 1) goto buffer_too_small;
-            c2 = *src++;
-            srclen--;
-            if ((c2 < 0xDC00) || (c2 > 0xDFFF))
-            {
-                c = c2;
-                goto bad_surrogate;
-            }
-            v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
-        }
-        if(v < 0x0080)
-        {
-            /* no encoding necessary - performance hack */
-            if(!dstlen) goto buffer_too_small;
-            if(dst) *dst++ = (char) v;
-            utf8Len = 1;
-        }
-        else
-        {
-            utf8Len = enc_char(utf8buf, v);
-            if(utf8Len > dstlen) goto buffer_too_small;
-            if(dst)
-            {
-                for (i = 0; i < utf8Len; i++)
-                {
-                    *dst++ = (char) utf8buf[i];
-                }
-            }
-        }
-        dstlen -= utf8Len;
-    }
-    
-    *dstlenp = (origDstlen - dstlen);
-    return JS_TRUE;
-
-bad_surrogate:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-
-buffer_too_small:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-}
-
-char*
-enc_string(JSContext* cx, jsval arg, size_t* buflen)
-{
-    JSString* str = NULL;
-    const jschar* src = NULL;
-    char* bytes = NULL;
-    size_t srclen = 0;
-    size_t byteslen = 0;
-    
-    str = JS_ValueToString(cx, arg);
-    if(!str) goto error;
-
-#ifdef HAVE_JS_GET_STRING_CHARS_AND_LENGTH
-    src = JS_GetStringCharsAndLength(cx, str, &srclen);
-#else
-    src = JS_GetStringChars(str);
-    srclen = JS_GetStringLength(str);
-#endif
-
-    if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error;
-    
-    bytes = JS_malloc(cx, byteslen + 1);
-    if(!bytes) goto error;
-    bytes[byteslen] = 0;
-    
-    if(!enc_charbuf(src, srclen, bytes, &byteslen)) goto error;
-
-    if(buflen) *buflen = byteslen;
-    goto success;
-
-error:
-    if(bytes != NULL) JS_free(cx, bytes);
-    bytes = NULL;
-
-success:
-    return bytes;
-}
-
-static uint32
-dec_char(const uint8 *utf8Buffer, int utf8Length)
-{
-    uint32 ucs4Char;
-    uint32 minucs4Char;
-
-    /* from Unicode 3.1, non-shortest form is illegal */
-    static const uint32 minucs4Table[] = {
-        0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000
-    };
-
-    if (utf8Length == 1)
-    {
-        ucs4Char = *utf8Buffer;
-    }
-    else
-    {
-        ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
-        minucs4Char = minucs4Table[utf8Length-2];
-        while(--utf8Length)
-        {
-            ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
-        }
-        if(ucs4Char < minucs4Char || ucs4Char == 0xFFFE || ucs4Char == 0xFFFF)
-        {
-            ucs4Char = 0xFFFD;
-        }
-    }
-
-    return ucs4Char;
-}
-
-static JSBool
-dec_charbuf(const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
-{
-    uint32 v;
-    size_t offset = 0;
-    size_t j;
-    size_t n;
-    size_t dstlen = *dstlenp;
-    size_t origDstlen = dstlen;
-
-    if(!dst) dstlen = origDstlen = (size_t) -1;
-
-    while(srclen)
-    {
-        v = (uint8) *src;
-        n = 1;
-        
-        if(v & 0x80)
-        {
-            while(v & (0x80 >> n))
-            {
-                n++;
-            }
-            
-            if(n > srclen) goto buffer_too_small;
-            if(n == 1 || n > 6) goto bad_character;
-            
-            for(j = 1; j < n; j++)
-            {
-                if((src[j] & 0xC0) != 0x80) goto bad_character;
-            }
-
-            v = dec_char((const uint8 *) src, n);
-            if(v >= 0x10000)
-            {
-                v -= 0x10000;
-                
-                if(v > 0xFFFFF || dstlen < 2)
-                {
-                    *dstlenp = (origDstlen - dstlen);
-                    return JS_FALSE;
-                }
-                
-                if(dstlen < 2) goto buffer_too_small;
-
-                if(dst)
-                {
-                    *dst++ = (jschar)((v >> 10) + 0xD800);
-                    v = (jschar)((v & 0x3FF) + 0xDC00);
-                }
-                dstlen--;
-            }
-        }
-
-        if(!dstlen) goto buffer_too_small;
-        if(dst) *dst++ = (jschar) v;
-
-        dstlen--;
-        offset += n;
-        src += n;
-        srclen -= n;
-    }
-
-    *dstlenp = (origDstlen - dstlen);
-    return JS_TRUE;
-
-bad_character:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-
-buffer_too_small:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-}
-
-JSString*
-dec_string(JSContext* cx, const char* bytes, size_t byteslen)
-{
-    JSString* str = NULL;
-    jschar* chars = NULL;
-    size_t charslen;
-    
-    if(!dec_charbuf(bytes, byteslen, NULL, &charslen)) goto error;
-
-    chars = JS_malloc(cx, (charslen + 1) * sizeof(jschar));
-    if(!chars) return NULL;
-    chars[charslen] = 0;
-
-    if(!dec_charbuf(bytes, byteslen, chars, &charslen)) goto error;
-
-    str = JS_NewUCString(cx, chars, charslen - 1);
-    if(!str) goto error;
-
-    goto success;
-
-error:
-    if(chars != NULL) JS_free(cx, chars);
-    str = NULL;
-
-success:
-    return str;
-}

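enc_charbuf() above folds a UTF-16 surrogate pair into a single code point with ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000 before emitting UTF-8. A worked standalone example of that arithmetic (a hypothetical demo, not part of this commit):

    #include <stdio.h>

    int main(void)
    {
        unsigned c  = 0xD83D;  /* high (leading) surrogate */
        unsigned c2 = 0xDE00;  /* low (trailing) surrogate */
        unsigned v  = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
        unsigned char out[4];

        /* v == 0x1F600; anything >= 0x10000 takes the 4-byte UTF-8 form. */
        out[0] = 0xF0 |  (v >> 18);
        out[1] = 0x80 | ((v >> 12) & 0x3F);
        out[2] = 0x80 | ((v >>  6) & 0x3F);
        out[3] = 0x80 |  (v        & 0x3F);

        /* Prints: U+1F600 -> F0 9F 98 80 */
        printf("U+%04X -> %02X %02X %02X %02X\n",
               v, out[0], out[1], out[2], out[3]);
        return 0;
    }
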
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/utf8.h
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/utf8.h b/src/couch/priv/couch_js/utf8.h
deleted file mode 100644
index c5cb86c..0000000
--- a/src/couch/priv/couch_js/utf8.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCH_JS_UTF_8_H
-#define COUCH_JS_UTF_8_H
-
-char* enc_string(JSContext* cx, jsval arg, size_t* buflen);
-JSString* dec_string(JSContext* cx, const char* buf, size_t buflen);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/util.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/util.c b/src/couch/priv/couch_js/util.c
deleted file mode 100644
index 5c88402..0000000
--- a/src/couch/priv/couch_js/util.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <jsapi.h>
-
-#include "help.h"
-#include "util.h"
-#include "utf8.h"
-
-
-size_t
-slurp_file(const char* file, char** outbuf_p)
-{
-    FILE* fp;
-    char fbuf[16384];
-    char *buf = NULL;
-    char* tmp;
-    size_t nread = 0;
-    size_t buflen = 0;
-
-    if(strcmp(file, "-") == 0) {
-        fp = stdin;
-    } else {
-        fp = fopen(file, "r");
-        if(fp == NULL) {
-            fprintf(stderr, "Failed to read file: %s\n", file);
-            exit(3);
-        }
-    }
-
-    while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
-        if(buf == NULL) {
-            buf = (char*) malloc(nread + 1);
-            if(buf == NULL) {
-                fprintf(stderr, "Out of memory.\n");
-                exit(3);
-            }
-            memcpy(buf, fbuf, nread);
-        } else {
-            tmp = (char*) malloc(buflen + nread + 1);
-            if(tmp == NULL) {
-                fprintf(stderr, "Out of memory.\n");
-                exit(3);
-            }
-            memcpy(tmp, buf, buflen);
-            memcpy(tmp+buflen, fbuf, nread);
-            free(buf);
-            buf = tmp;
-        }
-        buflen += nread;
-        buf[buflen] = '\0';
-    }
-    *outbuf_p = buf;
-    /* Count the trailing NUL in the length; an empty or unreadable
-     * stream yields no buffer, so report that as zero. */
-    return buf != NULL ? buflen + 1 : 0;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
-    couch_args* args;
-    int i = 1;
-
-    args = (couch_args*) malloc(sizeof(couch_args));
-    if(args == NULL)
-        return NULL;
-
-    memset(args, '\0', sizeof(couch_args));
-    args->stack_size = 8L * 1024L;
-
-    while(i < argc) {
-        if(strcmp("-h", argv[i]) == 0) {
-            DISPLAY_USAGE;
-            exit(0);
-        } else if(strcmp("-V", argv[i]) == 0) {
-            DISPLAY_VERSION;
-            exit(0);
-        } else if(strcmp("-H", argv[i]) == 0) {
-            args->use_http = 1;
-        } else if(strcmp("-S", argv[i]) == 0) {
-            args->stack_size = atoi(argv[++i]);
-            if(args->stack_size <= 0) {
-                fprintf(stderr, "Invalid stack size.\n");
-                exit(2);
-            }
-        } else if(strcmp("-u", argv[i]) == 0) {
-            args->uri_file = argv[++i];
-        } else if(strcmp("--", argv[i]) == 0) {
-            i++;
-            break;
-        } else {
-            break;
-        }
-        i++;
-    }
-
-    if(i >= argc) {
-        DISPLAY_USAGE;
-        exit(3);
-    }
-    args->scripts = argv + i;
-
-    return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
-    int n, i, c;
-
-    if(size <= 0) return -1;
-    n = size - 1;
-
-    for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
-        buf[i] = c;
-        if(c == '\n') {
-            i++;
-            break;
-        }
-    }
-
-    buf[i] = '\0';
-    return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
-    JSString* str;
-    char* bytes = NULL;
-    char* tmp = NULL;
-    size_t used = 0;
-    size_t byteslen = 256;
-    size_t readlen = 0;
-
-    bytes = JS_malloc(cx, byteslen);
-    if(bytes == NULL) return NULL;
-    
-    while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
-        used += readlen;
-        
-        if(bytes[used-1] == '\n') {
-            bytes[used-1] = '\0';
-            break;
-        }
-        
-        // Double our buffer and read more.
-        byteslen *= 2;
-        tmp = JS_realloc(cx, bytes, byteslen);
-        if(!tmp) {
-            JS_free(cx, bytes);
-            return NULL;
-        }
-        
-        bytes = tmp;
-    }
-
-    // Treat empty strings specially
-    if(used == 0) {
-        JS_free(cx, bytes);
-        return JSVAL_TO_STRING(JS_GetEmptyStringValue(cx));
-    }
-
-    // Shrink the buffer to the actual data size.
-    tmp = JS_realloc(cx, bytes, used);
-    if(!tmp) {
-        JS_free(cx, bytes);
-        return NULL;
-    }
-    bytes = tmp;
-    byteslen = used;
-
-    str = dec_string(cx, bytes, byteslen);
-    JS_free(cx, bytes);
-    return str;
-}
-
-
-JSString*
-couch_readfile(JSContext* cx, const char* filename)
-{
-    JSString *string;
-    size_t byteslen;
-    char *bytes;
-
-    if((byteslen = slurp_file(filename, &bytes))) {
-        string = dec_string(cx, bytes, byteslen);
-
-        free(bytes);
-        return string;
-    }
-    return NULL;    
-}
-
-
-void
-couch_print(JSContext* cx, uintN argc, jsval* argv)
-{
-    char *bytes = NULL;
-    FILE *stream = stdout;
-
-    if (argc) {
-        if (argc > 1 && argv[1] == JSVAL_TRUE) {
-          stream = stderr;
-        }
-        bytes = enc_string(cx, argv[0], NULL);
-        if(!bytes) return;
-        fprintf(stream, "%s", bytes);
-        JS_free(cx, bytes);
-    }
-
-    fputc('\n', stream);
-    fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, const char* mesg, JSErrorReport* report)
-{
-    jsval v, replace;
-    char* bytes;
-    JSObject* regexp, *stack;
-    jsval re_args[2];
-
-    if(!report || !JSREPORT_IS_WARNING(report->flags))
-    {
-        fprintf(stderr, "%s\n", mesg);
-
-        // Print a stack trace, if available.
-        if (report != NULL && JSREPORT_IS_EXCEPTION(report->flags) &&
-            JS_GetPendingException(cx, &v))
-        {
-            // Clear the exception before any JS method calls, or the result
-            // is infinite, recursive error report generation.
-            JS_ClearPendingException(cx);
-
-            // Use JS regexp to indent the stack trace.
-            // If the regexp can't be created, don't JS_ReportError since it is
-            // probably not productive to wind up here again.
-#ifdef SM185
-            if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
-               (regexp = JS_NewRegExpObjectNoStatics(
-                   cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#else
-            if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
-               (regexp = JS_NewRegExpObject(
-                   cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#endif
-            {
-                // Set up the arguments to ``String.replace()``
-                re_args[0] = OBJECT_TO_JSVAL(regexp);
-                re_args[1] = STRING_TO_JSVAL(JS_InternString(cx, "\t"));
-
-                // Perform the replacement
-                if(JS_ValueToObject(cx, v, &stack) &&
-                   JS_GetProperty(cx, stack, "replace", &replace) &&
-                   JS_CallFunctionValue(cx, stack, replace, 2, re_args, &v))
-                {
-                    // Print the result
-                    bytes = enc_string(cx, v, NULL);
-                    if(bytes != NULL) {
-                        fprintf(stderr, "Stacktrace:\n%s", bytes);
-                        JS_free(cx, bytes);
-                    }
-                }
-            }
-        }
-    }
-}
-
-
-JSBool
-couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs)
-{
-    JSFunctionSpec* f;
-    for(f = funcs; f->name != NULL; f++) {
-        if(!JS_DefineFunction(cx, obj, f->name, f->call, f->nargs, f->flags)) {
-            fprintf(stderr, "Failed to create function: %s\n", f->name);
-            return JS_FALSE;
-        }
-    }
-    return JS_TRUE;
-}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/couch_js/util.h
----------------------------------------------------------------------
diff --git a/src/couch/priv/couch_js/util.h b/src/couch/priv/couch_js/util.h
deleted file mode 100644
index 65a2a06..0000000
--- a/src/couch/priv/couch_js/util.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
-    int          use_http;
-    int          stack_size;
-    const char** scripts;
-    const char*  uri_file;
-    JSString*    uri;
-} couch_args;
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-JSString* couch_readfile(JSContext* cx, const char* filename);
-void couch_print(JSContext* cx, uintN argc, jsval* argv);
-void couch_error(JSContext* cx, const char* mesg, JSErrorReport* report);
-JSBool couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs);
-
-
-#endif // Included util.h

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/icu_driver/couch_icu_driver.c
----------------------------------------------------------------------
diff --git a/src/couch/priv/icu_driver/couch_icu_driver.c b/src/couch/priv/icu_driver/couch_icu_driver.c
deleted file mode 100644
index a59e8cb..0000000
--- a/src/couch/priv/icu_driver/couch_icu_driver.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-this file except in compliance with the License. You may obtain a copy of the
-License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed
-under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
-
-*/
-
-/* This file is the C port driver for Erlang. It provides a low-overhead
- * means of calling into C code; however, coding errors in this module can
- * crash the entire Erlang server.
- */
-
-#ifdef DARWIN
-#define U_HIDE_DRAFT_API 1
-#define U_DISABLE_RENAMING 1
-#endif
-
-#include "erl_driver.h"
-#include "unicode/ucol.h"
-#include "unicode/ucasemap.h"
-#ifndef WIN32
-#include <string.h> /* for memcpy */
-#endif
-
-#if ERL_DRV_EXTENDED_MAJOR_VERSION < 2
-typedef int COUCH_SSIZET;
-#else
-typedef ErlDrvSSizeT COUCH_SSIZET;
-#endif
-
-typedef struct {
-    ErlDrvPort port;
-    UCollator* collNoCase;
-    UCollator* coll;
-} couch_drv_data;
-
-static void couch_drv_stop(ErlDrvData data)
-{
-    couch_drv_data* pData = (couch_drv_data*)data;
-    if (pData->coll) {
-        ucol_close(pData->coll);
-    }
-    if (pData->collNoCase) {
-        ucol_close(pData->collNoCase);
-    }
-    driver_free((void*)pData);
-}
-
-static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
-{
-    UErrorCode status = U_ZERO_ERROR;
-    couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
-
-    if (pData == NULL)
-        return ERL_DRV_ERROR_GENERAL;
-
-    pData->port = port;
-
-    pData->coll = ucol_open("", &status);
-    if (U_FAILURE(status)) {
-        couch_drv_stop((ErlDrvData)pData);
-        return ERL_DRV_ERROR_GENERAL;
-    }
-
-    pData->collNoCase = ucol_open("", &status);
-    if (U_FAILURE(status)) {
-        couch_drv_stop((ErlDrvData)pData);
-        return ERL_DRV_ERROR_GENERAL;
-    }
-
-    ucol_setAttribute(pData->collNoCase, UCOL_STRENGTH, UCOL_PRIMARY, &status);
-    if (U_FAILURE(status)) {
-        couch_drv_stop((ErlDrvData)pData);
-        return ERL_DRV_ERROR_GENERAL;
-    }
-
-    return (ErlDrvData)pData;
-}
-
-COUCH_SSIZET
-return_control_result(void* pLocalResult, int localLen,
-            char **ppRetBuf, COUCH_SSIZET returnLen)
-{
-    if (*ppRetBuf == NULL || localLen > returnLen) {
-        *ppRetBuf = (char*)driver_alloc_binary(localLen);
-        if(*ppRetBuf == NULL) {
-            return -1;
-        }
-    }
-    memcpy(*ppRetBuf, pLocalResult, localLen);
-    return localLen;
-}
-
-static COUCH_SSIZET
-couch_drv_control(ErlDrvData drv_data, unsigned int command,
-        char *pBuf, COUCH_SSIZET bufLen,
-        char **rbuf, COUCH_SSIZET rlen)
-{
-
-    couch_drv_data* pData = (couch_drv_data*)drv_data;
-    switch(command) {
-    case 0: /* COLLATE */
-    case 1: /* COLLATE_NO_CASE: */
-        {
-        UErrorCode status = U_ZERO_ERROR;
-        int collResult;
-        char response;
-        UCharIterator iterA;
-        UCharIterator iterB;
-        int32_t length;
-
-        /* Two strings are in the buffer, consecutively.
-         * Each string begins with a 32-bit integer byte length; the actual
-         * string bytes follow.
-         */
-
-        /* first 32bits are the length */
-        memcpy(&length, pBuf, sizeof(length));
-        pBuf += sizeof(length);
-
-        /* point the iterator at it. */
-        uiter_setUTF8(&iterA, pBuf, length);
-
-        pBuf += length; /* now on to string b */
-
-        /* first 32bits are the length */
-        memcpy(&length, pBuf, sizeof(length));
-        pBuf += sizeof(length);
-
-        /* point the iterator at it. */
-        uiter_setUTF8(&iterB, pBuf, length);
-
-        if (command == 0) /* COLLATE */
-          collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
-        else              /* COLLATE_NO_CASE */
-          collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
-
-        if (collResult < 0)
-          response = 0; /*lt*/
-        else if (collResult > 0)
-          response = 2; /*gt*/
-        else
-          response = 1; /*eq*/
-
-        return return_control_result(&response, sizeof(response), rbuf, rlen);
-        }
-
-    default:
-        return -1;
-    }
-}
-
-ErlDrvEntry couch_driver_entry = {
-        NULL,               /* F_PTR init, N/A */
-        couch_drv_start,    /* L_PTR start, called when port is opened */
-        couch_drv_stop,     /* F_PTR stop, called when port is closed */
-        NULL,               /* F_PTR output, called when erlang has sent */
-        NULL,               /* F_PTR ready_input, called when input descriptor ready */
-        NULL,               /* F_PTR ready_output, called when output descriptor ready */
-        "couch_icu_driver", /* char *driver_name, the argument to open_port */
-        NULL,               /* F_PTR finish, called when unloaded */
-        NULL,               /* Not used */
-        couch_drv_control,  /* F_PTR control, port_command callback */
-        NULL,               /* F_PTR timeout, reserved */
-        NULL,               /* F_PTR outputv, reserved */
-        NULL,               /* F_PTR ready_async */
-        NULL,               /* F_PTR flush */
-        NULL,               /* F_PTR call */
-        NULL,               /* F_PTR event */
-        ERL_DRV_EXTENDED_MARKER,
-        ERL_DRV_EXTENDED_MAJOR_VERSION,
-        ERL_DRV_EXTENDED_MINOR_VERSION,
-        ERL_DRV_FLAG_USE_PORT_LOCKING,
-        NULL,               /* Reserved -- Used by emulator internally */
-        NULL,               /* F_PTR process_exit */
-};
-
-DRIVER_INIT(couch_icu_driver) /* must match name in driver_entry */
-{
-        return &couch_driver_entry;
-}
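
For reference, couch_drv_control above expects its request buffer to carry two length-prefixed strings. A minimal sketch of an Erlang caller, assuming the driver library is already loaded via erl_ddll (the collate/2 helper is hypothetical, not the shipped caller):

    %% Sketch: compare two UTF-8 binaries through the ICU driver.
    %% Command 0 = COLLATE, 1 = COLLATE_NO_CASE, as in couch_drv_control.
    collate(BinA, BinB) ->
        Port = open_port({spawn, "couch_icu_driver"}, []),
        %% Each string is preceded by its byte length as a native 32-bit
        %% integer, matching the memcpy(&length, ...) reads in the driver.
        Payload = <<(byte_size(BinA)):32/native, BinA/binary,
                    (byte_size(BinB)):32/native, BinB/binary>>,
        %% port_control returns a one-byte list by default:
        %% 0 = lt, 1 = eq, 2 = gt, per the driver's response byte.
        [R] = erlang:port_control(Port, 0, Payload),
        port_close(Port),
        R - 1.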

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/priv/spawnkillable/couchspawnkillable.sh
----------------------------------------------------------------------
diff --git a/src/couch/priv/spawnkillable/couchspawnkillable.sh b/src/couch/priv/spawnkillable/couchspawnkillable.sh
deleted file mode 100644
index f8d042e..0000000
--- a/src/couch/priv/spawnkillable/couchspawnkillable.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#! /bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# The purpose of this script is to echo an OS-specific command before launching
-# the actual process. This provides a way for Erlang to hard-kill its external
-# processes.
-
-echo "kill -9 $$"
-exec "$@"
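
The contract, then, is that the first line the wrapper prints is a shell command that hard-kills the spawned process; all later output comes from the exec'd program. A hedged sketch of the Erlang side (spawn_killable/2 is hypothetical):

    %% Sketch: start a program through the wrapper and keep the kill
    %% command it prints on its first output line.
    spawn_killable(WrapperPath, Cmd) ->
        Port = open_port({spawn, WrapperPath ++ " " ++ Cmd},
                         [{line, 1024}, exit_status]),
        receive
            {Port, {data, {eol, KillCmd}}} ->
                {ok, Port, KillCmd}     %% e.g. "kill -9 12345"
        end.
    %% Later, os:cmd(KillCmd) hard-kills the external process.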


[40/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/rexi


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/834aeb0d
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/834aeb0d
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/834aeb0d

Branch: refs/heads/1843-feature-bigcouch
Commit: 834aeb0d785f7f700670222f71ba7d71c5c3ecf5
Parents: 21118e2
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:42:38 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:42:38 2014 -0600

----------------------------------------------------------------------
 src/rexi/README.md                |  23 ----
 src/rexi/include/rexi.hrl         |  20 ----
 src/rexi/src/rexi.app.src         |  36 -------
 src/rexi/src/rexi.erl             | 188 ---------------------------------
 src/rexi/src/rexi_app.erl         |  23 ----
 src/rexi/src/rexi_gov_manager.erl | 156 ---------------------------
 src/rexi/src/rexi_governor.erl    |  70 ------------
 src/rexi/src/rexi_monitor.erl     |  64 -----------
 src/rexi/src/rexi_server.erl      | 188 ---------------------------------
 src/rexi/src/rexi_sup.erl         |  28 -----
 src/rexi/src/rexi_utils.erl       |  68 ------------
 11 files changed, 864 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/README.md
----------------------------------------------------------------------
diff --git a/src/rexi/README.md b/src/rexi/README.md
deleted file mode 100644
index b2eeaea..0000000
--- a/src/rexi/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-Rexi is a tailor-made RPC server application for sending [CouchDB][1] operations to nodes in a cluster.  It is used in [BigCouch][2] as the remote procedure vehicle to get [fabric][6] functions to execute on remote cluster nodes.
-
-Rexi better fits the needs of the BigCouch distributed data store by dropping some unneeded overhead found in rex, the RPC server that ships with Erlang/OTP.  Rexi is optimized for the case where you need to spawn a batch of remote processes.  Cast messages are sent from the origin to the remote rexi server, and local processes are spawned from there, which is vastly more efficient than spawning remote processes from the origin.  You still get monitoring of the remote processes, but the request-handling process doesn't get stuck trying to connect to an overloaded or dead node: 'rexi_DOWN' messages will arrive at the client eventually.  This mix of low latency and reliable failure detection vastly improves the performance of BigCouch.
-
-Rexi is used in conjunction with 'Fabric', which is also an application within BigCouch, but it can be used on a stand-alone basis.
-
-### Getting Started
-Rexi requires R13B03 or higher and can be built with [rebar][7], which comes bundled in the repository.
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/BigCouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
-[6]: http://github.com/cloudant/fabric
-[7]: http://github.com/basho/rebar
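
A rough sketch of the calling pattern the README describes, using the rexi API removed later in this commit (some_mod:some_fun and the 5000 ms timeout are hypothetical):

    %% Sketch: run some_mod:some_fun(Args) on Node, wait for one reply.
    Ref = rexi:cast(Node, {some_mod, some_fun, [Args]}),
    receive
        {Ref, Result} ->
            Result;
        {Ref, {rexi_EXIT, Reason}} ->
            %% the remote worker exited abnormally
            {error, Reason}
    after 5000 ->
        timeout
    end.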

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/include/rexi.hrl
----------------------------------------------------------------------
diff --git a/src/rexi/include/rexi.hrl b/src/rexi/include/rexi.hrl
deleted file mode 100644
index a2d86b2..0000000
--- a/src/rexi/include/rexi.hrl
+++ /dev/null
@@ -1,20 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--record(error, {
-    timestamp,
-    reason,
-    mfa,
-    nonce,
-    stack
-}).
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi.app.src
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi.app.src b/src/rexi/src/rexi.app.src
deleted file mode 100644
index 0691edd..0000000
--- a/src/rexi/src/rexi.app.src
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-{application, rexi, [
-    {description, "Lightweight RPC server"},
-    {vsn, git},
-    {modules, [
-        rexi,
-        rexi_app,
-        rexi_gov_manager,
-        rexi_governor,
-        rexi_monitor,
-        rexi_server,
-        rexi_sup,
-        rexi_utils
-    ]},
-    {registered, [
-        rexi_sup,
-        rexi_server
-    ]},
-    {applications, [
-        kernel,
-        stdlib,
-        config
-    ]},
-    {mod, {rexi_app,[]}}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi.erl b/src/rexi/src/rexi.erl
deleted file mode 100644
index 8e53dba..0000000
--- a/src/rexi/src/rexi.erl
+++ /dev/null
@@ -1,188 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi).
--export([start/0, stop/0, restart/0]).
--export([cast/2, cast/3, cast/4, kill/2]).
--export([reply/1, sync_reply/1, sync_reply/2]).
--export([async_server_call/2, async_server_call/3]).
--export([get_errors/0, get_last_error/0, set_error_limit/1]).
--export([stream/1, stream/2, stream/3, stream_ack/1, stream_ack/2]).
-
--include_lib("rexi/include/rexi.hrl").
-
--define(SERVER, rexi_server).
-
-start() ->
-    application:start(rexi).
-
-stop() ->
-    application:stop(rexi).
-
-restart() ->
-    stop(), start().
-
--spec get_errors() -> {ok, [#error{}]}.
-get_errors() ->
-    gen_server:call(?SERVER, get_errors).
-
--spec get_last_error() -> {ok, #error{}} | {error, empty}.
-get_last_error() ->
-    gen_server:call(?SERVER, get_last_error).
-
--spec set_error_limit(pos_integer()) -> ok.
-set_error_limit(N) when is_integer(N), N > 0 ->
-    gen_server:call(?SERVER, {set_error_limit, N}).
-
-%% @equiv cast(Node, self(), MFA)
--spec cast(node(), {atom(), atom(), list()}) -> reference().
-cast(Node, MFA) ->
-    cast(Node, self(), MFA).
-
-%% @doc Executes apply(M, F, A) on Node.
-%% You might want to use this instead of rpc:cast/4 for two reasons.  First,
-%% the Caller pid and the returned reference are inserted into the remote
-%% process' dictionary as `rexi_from', so it has a way to communicate with you.
-%% Second, the remote process is monitored. If it exits with a Reason other
-%% than normal, Caller will receive a message of the form
-%% `{Ref, {rexi_EXIT, Reason}}' where Ref is the returned reference.
--spec cast(node(), pid(), {atom(), atom(), list()}) -> reference().
-cast(Node, Caller, MFA) ->
-    Ref = make_ref(),
-    Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
-    rexi_utils:send({?SERVER, Node}, Msg),
-    Ref.
-
-%% @doc Executes apply(M, F, A) on Node.
-%% This version accepts a sync option: when present, the message is sent
-%% with erlang:send/2 directly in the calling process instead of deferring
-%% to a spawned process when erlang:send/2 would block. If the sync option
-%% is omitted, this call is identical to cast/3.
--spec cast(node(), pid(), {atom(), atom(), list()}, [atom()]) -> reference().
-cast(Node, Caller, MFA, Options) ->
-    case lists:member(sync, Options) of
-        true ->
-            Ref = make_ref(),
-            Msg = cast_msg({doit, {Caller, Ref}, get(nonce), MFA}),
-            erlang:send({?SERVER, Node}, Msg),
-            Ref;
-        false ->
-            cast(Node, Caller, MFA)
-    end.
-
-%% @doc Sends an async kill signal to the remote process associated with Ref.
-%% No rexi_EXIT message will be sent.
--spec kill(node(), reference()) -> ok.
-kill(Node, Ref) ->
-    rexi_utils:send({?SERVER, Node}, cast_msg({kill, Ref})),
-    ok.
-
-%% @equiv async_server_call(Server, self(), Request)
--spec async_server_call(pid() | {atom(),node()}, any()) -> reference().
-async_server_call(Server, Request) ->
-    async_server_call(Server, self(), Request).
-
-%% @doc Sends a properly formatted gen_server:call Request to the Server and
-%% returns the reference which the Server will include in its reply.  The
-%% function acts more like cast() than call() in that the server process
-%% is not monitored.  Clients who want to know if the server is alive should
-%% monitor it themselves before calling this function.
--spec async_server_call(pid() | {atom(),node()}, pid(), any()) -> reference().
-async_server_call(Server, Caller, Request) ->
-    Ref = make_ref(),
-    rexi_utils:send(Server, {'$gen_call', {Caller,Ref}, Request}),
-    Ref.
-
-%% @doc convenience function to reply to the original rexi Caller.
--spec reply(any()) -> any().
-reply(Reply) ->
-    {Caller, Ref} = get(rexi_from),
-    erlang:send(Caller, {Ref,Reply}).
-
-%% @equiv sync_reply(Reply, 300000)
-sync_reply(Reply) ->
-    sync_reply(Reply, 300000).
-
-%% @doc convenience function to reply to caller and wait for response.  Message
-%% is of the form {OriginalRef, {self(),reference()}, Reply}, which enables the
-%% original caller to respond back.
--spec sync_reply(any(), pos_integer() | infinity) -> any().
-sync_reply(Reply, Timeout) ->
-    {Caller, Ref} = get(rexi_from),
-    Tag = make_ref(),
-    erlang:send(Caller, {Ref, {self(),Tag}, Reply}),
-    receive {Tag, Response} ->
-        Response
-    after Timeout ->
-        timeout
-    end.
-
-%% @equiv stream(Msg, 10, 300000)
-stream(Msg) ->
-    stream(Msg, 10, 300000).
-
-%% @equiv stream(Msg, Limit, 300000)
-stream(Msg, Limit) ->
-    stream(Msg, Limit, 300000).
-
-%% @doc convenience function to stream messages to caller while blocking when
-%% a specific number of messages are outstanding. Message is of the form
-%% {OriginalRef, self(), Reply}, which enables the original caller to ack.
--spec stream(any(), integer(), pos_integer() | infinity) -> any().
-stream(Msg, Limit, Timeout) ->
-    try maybe_wait(Limit, Timeout) of
-        {ok, Count} ->
-            put(rexi_unacked, Count+1),
-            {Caller, Ref} = get(rexi_from),
-            erlang:send(Caller, {Ref, self(), Msg}),
-            ok
-    catch throw:timeout ->
-        timeout
-    end.
-
-%% @equiv stream_ack(Client, 1)
-stream_ack(Client) ->
-    erlang:send(Client, {rexi_ack, 1}).
-
-%% @doc Ack streamed messages
-stream_ack(Client, N) ->
-    erlang:send(Client, {rexi_ack, N}).
-
-%% internal functions %%
-
-cast_msg(Msg) -> {'$gen_cast', Msg}.
-
-maybe_wait(Limit, Timeout) ->
-    case get(rexi_unacked) of
-        undefined ->
-            {ok, 0};
-        Count when Count >= Limit ->
-            wait_for_ack(Count, Timeout);
-        Count ->
-            drain_acks(Count)
-    end.
-
-wait_for_ack(Count, Timeout) ->
-    receive
-        {rexi_ack, N} -> drain_acks(Count-N)
-    after Timeout ->
-        throw(timeout)
-    end.
-
-drain_acks(Count) when Count < 0 ->
-    erlang:error(mismatched_rexi_ack);
-drain_acks(Count) ->
-    receive
-        {rexi_ack, N} -> drain_acks(Count-N)
-    after 0 ->
-        {ok, Count}
-    end.
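
To make the stream/ack flow control above concrete, a minimal sketch of both ends (worker assumed started via rexi:cast; send_rows/1 and collect/2 are hypothetical names):

    %% Worker side: emit rows, blocking once the default 10 are unacked.
    send_rows(Rows) ->
        lists:foreach(fun(Row) -> ok = rexi:stream({row, Row}) end, Rows),
        rexi:reply(complete).

    %% Coordinator side: ack each row so the worker keeps flowing.
    %% stream/3 sends {Ref, WorkerPid, Msg}; reply/1 sends {Ref, Msg}.
    collect(Ref, Acc) ->
        receive
            {Ref, Worker, {row, Row}} ->
                rexi:stream_ack(Worker),
                collect(Ref, [Row | Acc]);
            {Ref, complete} ->
                lists:reverse(Acc)
        end.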

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_app.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_app.erl b/src/rexi/src/rexi_app.erl
deleted file mode 100644
index c791d83..0000000
--- a/src/rexi/src/rexi_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_app).
--behaviour(application).
--export([start/2, stop/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-start(_Type, StartArgs) ->
-    rexi_sup:start_link(StartArgs).
-
-stop(_State) ->
-    ok.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_gov_manager.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_gov_manager.erl b/src/rexi/src/rexi_gov_manager.erl
deleted file mode 100644
index 4fb69e3..0000000
--- a/src/rexi/src/rexi_gov_manager.erl
+++ /dev/null
@@ -1,156 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(rexi_gov_manager).
-
--behaviour(gen_server).
--behaviour(config_listener).
-
-% API
--export([start_link/0, send/2]).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-         terminate/2, code_change/3]).
--export([handle_config_change/5]).
-
--record(state, {node_timers = ets:new(timers, [set]),
-                nodeout_timeout = 2000,
-                pid_spawn_max = 10000}).
-
-
-% API
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-send(Dest, Msg) ->
-    case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
-    ok -> ok;
-    _ ->
-        % treat nosuspend and noconnect the same
-        {ok, Governor} = get_governor(get_node(Dest)),
-        gen_server:cast(Governor, {spawn_and_track, Dest, Msg})
-    end.
-
-get_node({_, Node}) when is_atom(Node) ->
-    Node;
-get_node(Pid) when is_pid(Pid) ->
-    node(Pid).
-
-get_governor(Node) ->
-    case ets:lookup(govs, Node) of
-    [{Node, Gov}] ->
-        {ok, Gov};
-    [] ->
-        gen_server:call(?MODULE, {get_governor, Node})
-    end.
-
-% gen_server callbacks
-
-init([]) ->
-    ets:new(govs, [named_table, set, {read_concurrency, true}]),
-    net_kernel:monitor_nodes(true),
-    NodeOutTimeout = config:get("rexi","nodeout_timeout","500"),
-    PidSpawnMax = config:get("rexi","pid_spawn_max", "10000"),
-    State = #state{
-        nodeout_timeout = list_to_integer(NodeOutTimeout),
-        pid_spawn_max = list_to_integer(PidSpawnMax)
-    },
-    config:listen_for_changes(?MODULE, State),
-    {ok, State}.
-
-handle_config_change("rexi", "nodeout_timeout", Value, _, State) ->
-    IntValue = list_to_integer(Value),
-    %% Setting the timeout is cheap, no need to check if it actually changed
-    gen_server:call(?MODULE, {set_timeout, IntValue}),
-    {ok, State#state{nodeout_timeout = IntValue}};
-handle_config_change("rexi", "pid_spawn_max", Value, _, State) ->
-    IntValue = list_to_integer(Value),
-    %% Setting the spawn max is cheap, no need to check if it actually changed
-    gen_server:call(?MODULE, {set_spawn_max, IntValue}),
-    {ok, State#state{pid_spawn_max = IntValue}};
-handle_config_change(_, _, _, _, State) ->
-    {ok, State}.
-
-handle_call({set_timeout, TO}, _, #state{nodeout_timeout = Old} = State) ->
-    {reply, Old, State#state{nodeout_timeout = TO}};
-handle_call({set_spawn_max, Max}, _, #state{pid_spawn_max = Old} = State) ->
-    {reply, Old, State#state{pid_spawn_max = Max}};
-handle_call({get_governor, Node}, _From,
-            #state{pid_spawn_max = PidSpawnMax} = State) ->
-    case ets:lookup(govs, Node) of
-    [] ->
-        {ok, Gov} = gen_server:start_link(rexi_governor, [PidSpawnMax], []),
-        ets:insert(govs, {Node, Gov});
-    [{Node, Gov}] ->
-        Gov
-    end,
-    {reply, {ok, Gov}, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({nodeup, Node}, #state{node_timers = Timers,
-                                   pid_spawn_max = PidSpawnMax} = State) ->
-    case ets:lookup(Timers, Node) of
-    [{Node, TRef}] ->
-        erlang:cancel_timer(TRef),
-        ets:delete(Timers, Node);
-    _ ->
-        ok
-    end,
-    case ets:lookup(govs, Node) of
-    [{Node, _}] ->
-        ok;
-    [] ->
-        {ok, Gov} = gen_server:start_link(rexi_governor, [PidSpawnMax], []),
-        ets:insert(govs, {Node, Gov})
-    end,
-    {noreply, State};
-
-handle_info({nodedown, Node}, #state{node_timers = Timers,
-                                     nodeout_timeout = NodeTimeout} = State) ->
-    case ets:lookup(Timers, Node) of
-    [] ->
-        TRef = erlang:send_after(NodeTimeout, self(), {nodeout, Node}),
-        ets:insert(Timers, {Node, TRef}),
-        {noreply, State};
-    _ ->
-        {noreply, State}
-    end;
-
-handle_info({nodeout, Node}, #state{node_timers = Timers} = State) ->
-    % check for race with node up
-    case ets:member(Timers, Node) of
-    true ->
-        ets:delete(Timers, Node),
-        case ets:lookup(govs, Node) of
-        [] ->
-            ok;
-        [{Node, Governor}] ->
-            gen_server:cast(Governor, nodeout)
-        end;
-    false ->
-        ok
-    end,
-    {noreply, State};
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-% Internal functions
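
Both knobs read in init/1 above are ordinary config entries; a sketch of the corresponding ini stanza, using the defaults the code falls back to:

    [rexi]
    nodeout_timeout = 500
    pid_spawn_max = 10000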

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_governor.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_governor.erl b/src/rexi/src/rexi_governor.erl
deleted file mode 100644
index ad62150..0000000
--- a/src/rexi/src/rexi_governor.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(rexi_governor).
-
--behaviour(gen_server).
-
-%  gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-         terminate/2, code_change/3]).
-
--record(state, {pids = ets:new(pids, [set]),
-                spawn_max = 10000,
-                spawn_cnt = 0,
-                drop_cnt = 0}).
-
-init([PidSpawnMax]) ->
-    {ok, #state{spawn_max = PidSpawnMax}}.
-
-handle_call(_Request, _From, State) ->
-    Reply = ok,
-    {reply, Reply, State}.
-
-handle_cast({spawn_and_track, Dest, Msg},
-            #state{pids = Pids,
-                   spawn_max = SpawnMax,
-                   spawn_cnt = SC,
-                   drop_cnt = DC} = State) ->
-    {NewSC, NewDC} =
-    case ets:info(Pids, size) < SpawnMax of
-    true ->
-        {Pid, Ref} = spawn_monitor(erlang, send, [Dest, Msg]),
-        ets:insert(Pids, {Pid, Ref}),
-        {SC + 1, DC};
-    false ->
-        % drop message on floor
-        {SC, DC + 1}
-    end,
-    {noreply, State#state{spawn_cnt = NewSC, drop_cnt = NewDC}};
-
-handle_cast(nodeout, #state{pids = Pids} = State) ->
-    % kill all the pids
-    ets:foldl(fun({P, _Ref}, Acc) ->
-                  exit(P, kill),
-                  Acc
-              end, [], Pids),
-    ets:delete_all_objects(Pids),
-    {noreply, State}.
-
-handle_info({'DOWN', _, process, Pid, normal},
-            #state{pids = Pids} = State) ->
-    ets:delete(Pids, Pid),
-    {noreply, State};
-
-handle_info({'DOWN', _, process, _Pid, killed}, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_monitor.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_monitor.erl b/src/rexi/src/rexi_monitor.erl
deleted file mode 100644
index 7be3f0a..0000000
--- a/src/rexi/src/rexi_monitor.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_monitor).
--export([start/1, stop/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% @doc spawn_links a process which monitors the supplied list of items and
-%% returns the process ID.  If a monitored process exits, the caller will
-%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
--spec start([pid() | atom() | {atom(),node()}]) -> pid().
-start(Procs) ->
-    Parent = self(),
-    Nodes = [node() | nodes()],
-    {Mon, Skip} = lists:partition(fun(P) -> should_monitor(P, Nodes) end,
-        Procs),
-    spawn_link(fun() ->
-        [notify_parent(Parent, P, noconnect) || P <- Skip],
-        [erlang:monitor(process, P) || P <- Mon],
-        wait_monitors(Parent)
-    end).
-
-%% @doc Cleanly shut down the monitoring process and flush all rexi_DOWN
-%% messages from our mailbox.
--spec stop(pid()) -> ok.
-stop(MonitoringPid) ->
-    MonitoringPid ! {self(), shutdown},
-    flush_down_messages().
-
-%% internal functions %%
-
-notify_parent(Parent, Pid, Reason) ->
-    erlang:send(Parent, {rexi_DOWN, self(), Pid, Reason}).
-
-should_monitor(Pid, Nodes) when is_pid(Pid) ->
-    lists:member(node(Pid), Nodes);
-should_monitor({_, Node}, Nodes) ->
-    lists:member(Node, Nodes).
-
-wait_monitors(Parent) ->
-    receive
-    {'DOWN', _, process, Pid, Reason} ->
-        notify_parent(Parent, Pid, Reason),
-        wait_monitors(Parent);
-    {Parent, shutdown} ->
-        ok
-    end.
-
-flush_down_messages() ->
-    receive {rexi_DOWN, _, _, _} ->
-        flush_down_messages()
-    after 0 ->
-        ok
-    end.
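
Typical usage brackets a request, per the start/1 and stop/1 docs above (a sketch; WorkerPids is hypothetical):

    %% Sketch: watch a set of workers for the duration of one request.
    MonPid = rexi_monitor:start(WorkerPids),
    %% ... handle {Ref, Msg} results here, interleaved with
    %% {rexi_DOWN, MonPid, DeadPid, Reason} for any worker that dies ...
    rexi_monitor:stop(MonPid).   %% also flushes pending rexi_DOWN messages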

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_server.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
deleted file mode 100644
index c3fc508..0000000
--- a/src/rexi/src/rexi_server.erl
+++ /dev/null
@@ -1,188 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-    code_change/3]).
-
--export([start_link/0, init_p/2, init_p/3]).
-
--include_lib("rexi/include/rexi.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--record(job, {
-    client::reference(),
-    worker::reference(),
-    client_pid::pid(),
-    worker_pid::pid()
-}).
-
--record(st, {
-    workers = ets:new(workers, [private, {keypos, #job.worker}]),
-    clients = ets:new(clients, [private, {keypos, #job.client}]),
-    errors = queue:new(),
-    error_limit = 20,
-    error_count = 0
-}).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
-    {ok, #st{}}.
-
-handle_call(get_errors, _From, #st{errors = Errors} = St) ->
-    {reply, {ok, lists:reverse(queue:to_list(Errors))}, St};
-
-handle_call(get_last_error, _From, #st{errors = Errors} = St) ->
-    try
-        {reply, {ok, queue:get_r(Errors)}, St}
-    catch error:empty ->
-        {reply, {error, empty}, St}
-    end;
-
-handle_call({set_error_limit, N}, _From, #st{error_count=Len, errors=Q} = St) ->
-    if N < Len ->
-        {NewQ, _} = queue:split(N, Q);
-    true ->
-        NewQ = Q
-    end,
-    NewLen = queue:len(NewQ),
-    {reply, ok, St#st{error_limit=N, error_count=NewLen, errors=NewQ}};
-
-handle_call(_Request, _From, St) ->
-    {reply, ignored, St}.
-
-
-handle_cast({doit, From, MFA}, St) ->
-    handle_cast({doit, From, undefined, MFA}, St);
-
-handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
-    {LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA, Nonce]),
-    Job = #job{
-        client = ClientRef,
-        worker = Ref,
-        client_pid = ClientPid,
-        worker_pid = LocalPid
-    },
-    {noreply, add_job(Job, State)};
-
-
-handle_cast({kill, FromRef}, #st{clients = Clients} = St) ->
-    case find_worker(FromRef, Clients) of
-    #job{worker = KeyRef, worker_pid = Pid} = Job ->
-        erlang:demonitor(KeyRef),
-        exit(Pid, kill),
-        {noreply, remove_job(Job, St)};
-    false ->
-        {noreply, St}
-    end;
-
-handle_cast(_, St) ->
-    twig:log(notice, "rexi_server ignored_cast"),
-    {noreply, St}.
-
-handle_info({'DOWN', Ref, process, _, normal}, #st{workers=Workers} = St) ->
-    case find_worker(Ref, Workers) of
-    #job{} = Job ->
-        {noreply, remove_job(Job, St)};
-    false ->
-        {noreply, St}
-    end;
-
-handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers=Workers} = St) ->
-    case find_worker(Ref, Workers) of
-    #job{worker_pid=Pid, worker=Ref, client_pid=CPid, client=CRef} = Job ->
-        case Error of #error{reason = {_Class, Reason}, stack = Stack} ->
-            notify_caller({CPid, CRef}, {Reason, Stack}),
-            St1 = save_error(Error, St),
-            {noreply, remove_job(Job, St1)};
-        _ ->
-            notify_caller({CPid, CRef}, Error),
-            {noreply, remove_job(Job, St)}
-        end;
-    false ->
-        {noreply, St}
-    end;
-
-handle_info(_Info, St) ->
-    {noreply, St}.
-
-terminate(_Reason, St) ->
-    ets:foldl(fun(#job{worker_pid=Pid},_) -> exit(Pid,kill) end, nil,
-        St#st.workers),
-    ok.
-
-code_change(_OldVsn, {st, Workers}, _Extra) ->
-    {ok, #st{workers = Workers}};
-
-code_change(_OldVsn, {st, Workers0, Errors, Limit, Count}, _Extra) ->
-    Jobs = [#job{worker_pid=A, worker=B, client_pid=C, client=D}
-        || {A, B, {C, D}} <- ets:tab2list(Workers0)],
-    ets:delete(Workers0),
-    State = #st{errors = Errors, error_limit = Limit, error_count = Count},
-    ets:insert(State#st.workers, Jobs),
-    ets:insert(State#st.clients, Jobs),
-    {ok, State};
-
-code_change(_OldVsn, St, _Extra) ->
-    {ok, St}.
-
-init_p(From, MFA) ->
-    init_p(From, MFA, undefined).
-
-%% @doc initializes a process started by rexi_server.
--spec init_p({pid(), reference()}, {atom(), atom(), list()},
-    string() | undefined) -> any().
-init_p(From, {M,F,A}, Nonce) ->
-    put(rexi_from, From),
-    put(initial_call, {M,F,length(A)}),
-    put(nonce, Nonce),
-    try apply(M, F, A) catch exit:normal -> ok; Class:Reason ->
-        Stack = clean_stack(),
-        twig:log(error, "rexi_server ~p:~p ~100p", [Class, Reason, Stack]),
-        exit(#error{
-            timestamp = now(),
-            reason = {Class, Reason},
-            mfa = {M,F,A},
-            nonce = Nonce,
-            stack = Stack
-        })
-    end.
-
-%% internal
-
-save_error(E, #st{errors=Q, error_limit=L, error_count=C} = St) when C >= L ->
-    St#st{errors = queue:in(E, queue:drop(Q))};
-save_error(E, #st{errors=Q, error_count=C} = St) ->
-    St#st{errors = queue:in(E, Q), error_count = C+1}.
-
-clean_stack() ->
-    lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
-        erlang:get_stacktrace()).
-
-add_job(Job, #st{workers = Workers, clients = Clients} = State) ->
-    ets:insert(Workers, Job),
-    ets:insert(Clients, Job),
-    State.
-
-remove_job(Job, #st{workers = Workers, clients = Clients} = State) ->
-    ets:delete_object(Workers, Job),
-    ets:delete_object(Clients, Job),
-    State.
-
-find_worker(Ref, Tab) ->
-    case ets:lookup(Tab, Ref) of [] -> false; [Worker] -> Worker end.
-
-notify_caller({Caller, Ref}, Reason) ->
-    rexi_utils:send(Caller, {Ref, {rexi_EXIT, Reason}}).

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_sup.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_sup.erl b/src/rexi/src/rexi_sup.erl
deleted file mode 100644
index a8aa800..0000000
--- a/src/rexi/src/rexi_sup.erl
+++ /dev/null
@@ -1,28 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% Helper macro for declaring children of supervisor
--define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
-
-start_link(Args) ->
-    supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
-    {ok, {{one_for_one, 3, 10}, [?CHILD(rexi_gov_manager, worker), ?CHILD(rexi_server, worker)]}}.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/834aeb0d/src/rexi/src/rexi_utils.erl
----------------------------------------------------------------------
diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl
deleted file mode 100644
index 1b11576..0000000
--- a/src/rexi/src/rexi_utils.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_utils).
-
--export([send/2, recv/6]).
-
-%% @doc send a message as quickly as possible
-send(Dest, Msg) ->
-    rexi_gov_manager:send(Dest, Msg).
-
-%% @doc set up the receive loop with an overall timeout
--spec recv([any()], integer(), function(), any(), timeout(), timeout()) ->
-    {ok, any()} | {timeout, any()} | {error, atom()} | {error, atom(), any()}.
-recv(Refs, Keypos, Fun, Acc0, infinity, PerMsgTO) ->
-    process_mailbox(Refs, Keypos, Fun, Acc0, nil, PerMsgTO);
-recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
-    TimeoutRef = erlang:make_ref(),
-    TRef = erlang:send_after(GlobalTimeout, self(), {timeout, TimeoutRef}),
-    try
-        process_mailbox(Refs, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO)
-    after
-        erlang:cancel_timer(TRef)
-    end.
-
-process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
-    case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
-    {ok, Acc} ->
-        process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
-    {stop, Acc} ->
-        {ok, Acc};
-    Error ->
-        Error
-    end.
-
-process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
-    receive
-    {timeout, TimeoutRef} ->
-        {timeout, Acc0};
-    {Ref, Msg} ->
-        case lists:keyfind(Ref, Keypos, RefList) of
-        false ->
-            % this was some non-matching message which we will ignore
-            {ok, Acc0};
-        Worker ->
-            Fun(Msg, Worker, Acc0)
-        end;
-    {Ref, From, Msg} ->
-        case lists:keyfind(Ref, Keypos, RefList) of
-        false ->
-            {ok, Acc0};
-        Worker ->
-            Fun(Msg, {Worker, From}, Acc0)
-        end;
-    {rexi_DOWN, _, _, _} = Msg ->
-        Fun(Msg, nil, Acc0)
-    after PerMsgTO ->
-        {timeout, Acc0}
-    end.
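
A sketch of driving recv/6: the callback returns {ok, Acc} to keep looping or {stop, Acc} to finish. The worker tuples and the 5-second budget here are hypothetical:

    %% Sketch: collect one reply per worker ref, 5s overall budget.
    Workers = [{Ref1, node1}, {Ref2, node2}],   %% refs at keypos 1
    Fun = fun(Msg, Worker, Acc) ->
        Acc1 = [{Worker, Msg} | Acc],
        case length(Acc1) =:= length(Workers) of
            true  -> {stop, Acc1};   %% all replies in, stop the loop
            false -> {ok, Acc1}
        end
    end,
    case rexi_utils:recv(Workers, 1, Fun, [], 5000, infinity) of
        {ok, Replies}      -> Replies;
        {timeout, Partial} -> {error, {timeout, Partial}}
    end.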


[34/49] Remove src/mochiweb

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_socket_server.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_socket_server.erl b/src/mochiweb/src/mochiweb_socket_server.erl
deleted file mode 100644
index ff0d8f3..0000000
--- a/src/mochiweb/src/mochiweb_socket_server.erl
+++ /dev/null
@@ -1,364 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc MochiWeb socket server.
-
--module(mochiweb_socket_server).
--author('bob@mochimedia.com').
--behaviour(gen_server).
-
--include("internal.hrl").
-
--export([start/1, stop/1]).
--export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
-         handle_info/2]).
--export([get/2, set/3]).
-
--record(mochiweb_socket_server,
-        {port,
-         loop,
-         name=undefined,
-         %% NOTE: This is currently ignored.
-         max=2048,
-         ip=any,
-         listen=null,
-         nodelay=false,
-         backlog=128,
-         active_sockets=0,
-         acceptor_pool_size=16,
-         ssl=false,
-         ssl_opts=[{ssl_imp, new}],
-         acceptor_pool=sets:new(),
-         profile_fun=undefined}).
-
--define(is_old_state(State), not is_record(State, mochiweb_socket_server)).
-
-start(State=#mochiweb_socket_server{}) ->
-    start_server(State);
-start(Options) ->
-    start(parse_options(Options)).
-
-get(Name, Property) ->
-    gen_server:call(Name, {get, Property}).
-
-set(Name, profile_fun, Fun) ->
-    gen_server:cast(Name, {set, profile_fun, Fun});
-set(Name, Property, _Value) ->
-    error_logger:info_msg("?MODULE:set for ~p with ~p not implemented~n",
-                          [Name, Property]).
-
-stop(Name) when is_atom(Name) ->
-    gen_server:cast(Name, stop);
-stop(Pid) when is_pid(Pid) ->
-    gen_server:cast(Pid, stop);
-stop({local, Name}) ->
-    stop(Name);
-stop({global, Name}) ->
-    stop(Name);
-stop(Options) ->
-    State = parse_options(Options),
-    stop(State#mochiweb_socket_server.name).
-
-%% Internal API
-
-parse_options(Options) ->
-    parse_options(Options, #mochiweb_socket_server{}).
-
-parse_options([], State) ->
-    State;
-parse_options([{name, L} | Rest], State) when is_list(L) ->
-    Name = {local, list_to_atom(L)},
-    parse_options(Rest, State#mochiweb_socket_server{name=Name});
-parse_options([{name, A} | Rest], State) when A =:= undefined ->
-    parse_options(Rest, State#mochiweb_socket_server{name=A});
-parse_options([{name, A} | Rest], State) when is_atom(A) ->
-    Name = {local, A},
-    parse_options(Rest, State#mochiweb_socket_server{name=Name});
-parse_options([{name, Name} | Rest], State) ->
-    parse_options(Rest, State#mochiweb_socket_server{name=Name});
-parse_options([{port, L} | Rest], State) when is_list(L) ->
-    Port = list_to_integer(L),
-    parse_options(Rest, State#mochiweb_socket_server{port=Port});
-parse_options([{port, Port} | Rest], State) ->
-    parse_options(Rest, State#mochiweb_socket_server{port=Port});
-parse_options([{ip, Ip} | Rest], State) ->
-    ParsedIp = case Ip of
-                   any ->
-                       any;
-                   Ip when is_tuple(Ip) ->
-                       Ip;
-                   Ip when is_list(Ip) ->
-                       {ok, IpTuple} = inet_parse:address(Ip),
-                       IpTuple
-               end,
-    parse_options(Rest, State#mochiweb_socket_server{ip=ParsedIp});
-parse_options([{loop, Loop} | Rest], State) ->
-    parse_options(Rest, State#mochiweb_socket_server{loop=Loop});
-parse_options([{backlog, Backlog} | Rest], State) ->
-    parse_options(Rest, State#mochiweb_socket_server{backlog=Backlog});
-parse_options([{nodelay, NoDelay} | Rest], State) ->
-    parse_options(Rest, State#mochiweb_socket_server{nodelay=NoDelay});
-parse_options([{acceptor_pool_size, Max} | Rest], State) ->
-    MaxInt = ensure_int(Max),
-    parse_options(Rest,
-                  State#mochiweb_socket_server{acceptor_pool_size=MaxInt});
-parse_options([{max, Max} | Rest], State) ->
-    error_logger:info_report([{warning, "TODO: max is currently unsupported"},
-                              {max, Max}]),
-    MaxInt = ensure_int(Max),
-    parse_options(Rest, State#mochiweb_socket_server{max=MaxInt});
-parse_options([{ssl, Ssl} | Rest], State) when is_boolean(Ssl) ->
-    parse_options(Rest, State#mochiweb_socket_server{ssl=Ssl});
-parse_options([{ssl_opts, SslOpts} | Rest], State) when is_list(SslOpts) ->
-    SslOpts1 = [{ssl_imp, new} | proplists:delete(ssl_imp, SslOpts)],
-    parse_options(Rest, State#mochiweb_socket_server{ssl_opts=SslOpts1});
-parse_options([{profile_fun, ProfileFun} | Rest], State) when is_function(ProfileFun) ->
-    parse_options(Rest, State#mochiweb_socket_server{profile_fun=ProfileFun}).
-
-
-start_server(State=#mochiweb_socket_server{ssl=Ssl, name=Name}) ->
-    case Ssl of
-        true ->
-            application:start(crypto),
-            application:start(public_key),
-            application:start(ssl);
-        false ->
-            void
-    end,
-    case Name of
-        undefined ->
-            gen_server:start_link(?MODULE, State, []);
-        _ ->
-            gen_server:start_link(Name, ?MODULE, State, [])
-    end.
-
-ensure_int(N) when is_integer(N) ->
-    N;
-ensure_int(S) when is_list(S) ->
-    list_to_integer(S).
-
-ipv6_supported() ->
-    case (catch inet:getaddr("localhost", inet6)) of
-        {ok, _Addr} ->
-            true;
-        {error, _} ->
-            false
-    end.
-
-init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog, nodelay=NoDelay}) ->
-    process_flag(trap_exit, true),
-    BaseOpts = [binary,
-                {reuseaddr, true},
-                {packet, 0},
-                {backlog, Backlog},
-                {recbuf, ?RECBUF_SIZE},
-                {active, false},
-                {nodelay, NoDelay}],
-    Opts = case Ip of
-        any ->
-            case ipv6_supported() of % IPv4, and IPv6 if supported
-                true -> [inet, inet6 | BaseOpts];
-                _ -> BaseOpts
-            end;
-        {_, _, _, _} -> % IPv4
-            [inet, {ip, Ip} | BaseOpts];
-        {_, _, _, _, _, _, _, _} -> % IPv6
-            [inet6, {ip, Ip} | BaseOpts]
-    end,
-    case listen(Port, Opts, State) of
-        {stop, eacces} ->
-            case Port < 1024 of
-                true ->
-                    case catch fdsrv:start() of
-                        {ok, _} ->
-                            case fdsrv:bind_socket(tcp, Port) of
-                                {ok, Fd} ->
-                                    listen(Port, [{fd, Fd} | Opts], State);
-                                _ ->
-                                    {stop, fdsrv_bind_failed}
-                            end;
-                        _ ->
-                            {stop, fdsrv_start_failed}
-                    end;
-                false ->
-                    {stop, eacces}
-            end;
-        Other ->
-            Other
-    end.
-
-new_acceptor_pool(Listen,
-                  State=#mochiweb_socket_server{acceptor_pool=Pool,
-                                                acceptor_pool_size=Size,
-                                                loop=Loop}) ->
-    F = fun (_, S) ->
-                Pid = mochiweb_acceptor:start_link(self(), Listen, Loop),
-                sets:add_element(Pid, S)
-        end,
-    Pool1 = lists:foldl(F, Pool, lists:seq(1, Size)),
-    State#mochiweb_socket_server{acceptor_pool=Pool1}.
-
-listen(Port, Opts, State=#mochiweb_socket_server{ssl=Ssl, ssl_opts=SslOpts}) ->
-    case mochiweb_socket:listen(Ssl, Port, Opts, SslOpts) of
-        {ok, Listen} ->
-            {ok, ListenPort} = mochiweb_socket:port(Listen),
-            {ok, new_acceptor_pool(
-                   Listen,
-                   State#mochiweb_socket_server{listen=Listen,
-                                                port=ListenPort})};
-        {error, Reason} ->
-            {stop, Reason}
-    end.
-
-do_get(port, #mochiweb_socket_server{port=Port}) ->
-    Port;
-do_get(active_sockets, #mochiweb_socket_server{active_sockets=ActiveSockets}) ->
-    ActiveSockets.
-
-
-state_to_proplist(#mochiweb_socket_server{name=Name,
-                                          port=Port,
-                                          active_sockets=ActiveSockets}) ->
-    [{name, Name}, {port, Port}, {active_sockets, ActiveSockets}].
-
-upgrade_state(State = #mochiweb_socket_server{}) ->
-    State;
-upgrade_state({mochiweb_socket_server, Port, Loop, Name,
-             Max, IP, Listen, NoDelay, Backlog, ActiveSockets,
-             AcceptorPoolSize, SSL, SSL_opts,
-             AcceptorPool}) ->
-    #mochiweb_socket_server{port=Port, loop=Loop, name=Name, max=Max, ip=IP,
-                            listen=Listen, nodelay=NoDelay, backlog=Backlog,
-                            active_sockets=ActiveSockets,
-                            acceptor_pool_size=AcceptorPoolSize,
-                            ssl=SSL,
-                            ssl_opts=SSL_opts,
-                            acceptor_pool=AcceptorPool}.
-
-handle_call(Req, From, State) when ?is_old_state(State) ->
-    handle_call(Req, From, upgrade_state(State));
-handle_call({get, Property}, _From, State) ->
-    Res = do_get(Property, State),
-    {reply, Res, State};
-handle_call(_Message, _From, State) ->
-    Res = error,
-    {reply, Res, State}.
-
-
-handle_cast(Req, State) when ?is_old_state(State) ->
-    handle_cast(Req, upgrade_state(State));
-handle_cast({accepted, Pid, Timing},
-            State=#mochiweb_socket_server{active_sockets=ActiveSockets}) ->
-    State1 = State#mochiweb_socket_server{active_sockets=1 + ActiveSockets},
-    case State#mochiweb_socket_server.profile_fun of
-        undefined ->
-            undefined;
-        F when is_function(F) ->
-            catch F([{timing, Timing} | state_to_proplist(State1)])
-    end,
-    {noreply, recycle_acceptor(Pid, State1)};
-handle_cast({set, profile_fun, ProfileFun}, State) ->
-    State1 = case ProfileFun of
-                 ProfileFun when is_function(ProfileFun); ProfileFun =:= undefined ->
-                     State#mochiweb_socket_server{profile_fun=ProfileFun};
-                 _ ->
-                     State
-             end,
-    {noreply, State1};
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-
-terminate(Reason, State) when ?is_old_state(State) ->
-    terminate(Reason, upgrade_state(State));
-terminate(_Reason, #mochiweb_socket_server{listen=Listen, port=Port}) ->
-    mochiweb_socket:close(Listen),
-    case Port < 1024 of
-        true ->
-            catch fdsrv:stop(),
-            ok;
-        false ->
-            ok
-    end.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-recycle_acceptor(Pid, State=#mochiweb_socket_server{
-                        acceptor_pool=Pool,
-                        listen=Listen,
-                        loop=Loop,
-                        active_sockets=ActiveSockets}) ->
-    case sets:is_element(Pid, Pool) of
-        true ->
-            Acceptor = mochiweb_acceptor:start_link(self(), Listen, Loop),
-            Pool1 = sets:add_element(Acceptor, sets:del_element(Pid, Pool)),
-            State#mochiweb_socket_server{acceptor_pool=Pool1};
-        false ->
-            State#mochiweb_socket_server{active_sockets=ActiveSockets - 1}
-    end.
-
-handle_info(Msg, State) when ?is_old_state(State) ->
-    handle_info(Msg, upgrade_state(State));
-handle_info({'EXIT', Pid, normal}, State) ->
-    {noreply, recycle_acceptor(Pid, State)};
-handle_info({'EXIT', Pid, Reason},
-            State=#mochiweb_socket_server{acceptor_pool=Pool}) ->
-    case sets:is_element(Pid, Pool) of
-        true ->
-            %% If there was an unexpected error accepting, log and sleep.
-            error_logger:error_report({?MODULE, ?LINE,
-                                       {acceptor_error, Reason}}),
-            timer:sleep(100);
-        false ->
-            ok
-    end,
-    {noreply, recycle_acceptor(Pid, State)};
-
-% This is what release_handler needs to get a list of modules, since our
-% supervisor's modules list is set to 'dynamic';
-% see sasl-2.1.9.2/src/release_handler_1.erl, get_dynamic_mods/1.
-handle_info({From, Tag, get_modules}, State = #mochiweb_socket_server{name={local,Mod}}) ->
-    From ! {element(2,Tag), [Mod]},
-    {noreply, State};
-
-% If for some reason we can't get the module name, send empty list to avoid release_handler timeout:
-handle_info({From, Tag, get_modules}, State) ->
-    error_logger:info_msg("mochiweb_socket_server replying to dynamic modules request as '[]'~n",[]),
-    From ! {element(2,Tag), []},
-    {noreply, State};
-
-handle_info(Info, State) ->
-    error_logger:info_report([{'INFO', Info}, {'State', State}]),
-    {noreply, State}.
-
-
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-upgrade_state_test() ->
-    OldState = {mochiweb_socket_server,
-                port, loop, name,
-                max, ip, listen,
-                nodelay, backlog,
-                active_sockets,
-                acceptor_pool_size,
-                ssl, ssl_opts, acceptor_pool},
-    State = upgrade_state(OldState),
-    CmpState = #mochiweb_socket_server{port=port, loop=loop,
-                                       name=name, max=max, ip=ip,
-                                       listen=listen, nodelay=nodelay,
-                                       backlog=backlog,
-                                       active_sockets=active_sockets,
-                                       acceptor_pool_size=acceptor_pool_size,
-                                       ssl=ssl, ssl_opts=ssl_opts,
-                                       acceptor_pool=acceptor_pool,
-                                       profile_fun=undefined},
-    ?assertEqual(CmpState, State).
-
--endif.
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_sup.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_sup.erl b/src/mochiweb/src/mochiweb_sup.erl
deleted file mode 100644
index af7df9b..0000000
--- a/src/mochiweb/src/mochiweb_sup.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Supervisor for the mochiweb application.
-
--module(mochiweb_sup).
--author('bob@mochimedia.com').
-
--behaviour(supervisor).
-
-%% External exports
--export([start_link/0, upgrade/0]).
-
-%% supervisor callbacks
--export([init/1]).
-
-%% @spec start_link() -> ServerRet
-%% @doc API for starting the supervisor.
-start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-%% @spec upgrade() -> ok
-%% @doc Add processes if necessary.
-upgrade() ->
-    {ok, {_, Specs}} = init([]),
-    [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
-    ok.
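-
-%% Note: with the current empty child list upgrade/0 is effectively a no-op;
-%% it exists so a release upgrade can start children added by a newer init/1.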
-
-%% @spec init([]) -> SupervisorTree
-%% @doc supervisor callback; returns the (currently empty) supervisor tree.
-init([]) ->
-    Processes = [],
-    {ok, {{one_for_one, 10, 10}, Processes}}.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/mochiweb_util.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/mochiweb_util.erl b/src/mochiweb/src/mochiweb_util.erl
deleted file mode 100644
index 6b88818..0000000
--- a/src/mochiweb/src/mochiweb_util.erl
+++ /dev/null
@@ -1,980 +0,0 @@
-%% @author Bob Ippolito <bo...@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Utilities for parsing and quoting.
-
--module(mochiweb_util).
--author('bob@mochimedia.com').
--export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
--export([path_split/1]).
--export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
--export([guess_mime/1, parse_header/1]).
--export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1]).
--export([record_to_proplist/2, record_to_proplist/3]).
--export([safe_relative_path/1, partition/2]).
--export([parse_qvalues/1, pick_accepted_encodings/3]).
--export([make_io/1]).
-
--define(PERCENT, 37).  % $\%
--define(FULLSTOP, 46). % $\.
--define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
-                    (C >= $a andalso C =< $f) orelse
-                    (C >= $A andalso C =< $F))).
--define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
-                     (C >= $A andalso C =< $Z) orelse
-                     (C >= $0 andalso C =< $9) orelse
-                     (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
-                      C =:= $_))).
-
-hexdigit(C) when C < 10 -> $0 + C;
-hexdigit(C) when C < 16 -> $A + (C - 10).
-
-unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
-unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
-unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.
-
-%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
-%% @doc Inspired by Python 2.5's str.partition:
-%%      partition("foo/bar", "/") = {"foo", "/", "bar"},
-%%      partition("foo", "/") = {"foo", "", ""}.
-partition(String, Sep) ->
-    case partition(String, Sep, []) of
-        undefined ->
-            {String, "", ""};
-        Result ->
-            Result
-    end.
-
-partition("", _Sep, _Acc) ->
-    undefined;
-partition(S, Sep, Acc) ->
-    case partition2(S, Sep) of
-        undefined ->
-            [C | Rest] = S,
-            partition(Rest, Sep, [C | Acc]);
-        Rest ->
-            {lists:reverse(Acc), Sep, Rest}
-    end.
-
-partition2(Rest, "") ->
-    Rest;
-partition2([C | R1], [C | R2]) ->
-    partition2(R1, R2);
-partition2(_S, _Sep) ->
-    undefined.
-
-
-
-%% @spec safe_relative_path(string()) -> string() | undefined
-%% @doc Return the reduced version of a relative path, or undefined if it
-%%      is not safe. Safe relative paths can be joined with an absolute path
-%%      and will result in a subdirectory of the absolute path. Safe paths
-%%      never contain a backslash character.
-safe_relative_path("/" ++ _) ->
-    undefined;
-safe_relative_path(P) ->
-    case string:chr(P, $\\) of
-        0 ->
-           safe_relative_path(P, []);
-        _ ->
-           undefined
-    end.
-
-safe_relative_path("", Acc) ->
-    case Acc of
-        [] ->
-            "";
-        _ ->
-            string:join(lists:reverse(Acc), "/")
-    end;
-safe_relative_path(P, Acc) ->
-    case partition(P, "/") of
-        {"", "/", _} ->
-            %% /foo or foo//bar
-            undefined;
-        {"..", _, _} when Acc =:= [] ->
-            undefined;
-        {"..", _, Rest} ->
-            safe_relative_path(Rest, tl(Acc));
-        {Part, "/", ""} ->
-            safe_relative_path("", ["", Part | Acc]);
-        {Part, _, Rest} ->
-            safe_relative_path(Rest, [Part | Acc])
-    end.
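-
-%% Examples: safe_relative_path("foo/../bar") = "bar",
-%%           safe_relative_path("../foo") = undefined.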
-
-%% @spec shell_quote(string()) -> string()
-%% @doc Quote a string according to UNIX shell quoting rules; returns a string
-%%      surrounded by double quotes.
-shell_quote(L) ->
-    shell_quote(L, [$\"]).
-
-%% @spec cmd_port([string()], Options) -> port()
-%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
-cmd_port(Argv, Options) ->
-    open_port({spawn, cmd_string(Argv)}, Options).
-
-%% @spec cmd([string()]) -> string()
-%% @doc os:cmd(cmd_string(Argv)).
-cmd(Argv) ->
-    os:cmd(cmd_string(Argv)).
-
-%% @spec cmd_string([string()]) -> string()
-%% @doc Create a shell quoted command string from a list of arguments.
-cmd_string(Argv) ->
-    string:join([shell_quote(X) || X <- Argv], " ").
-
-%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
-%% @doc Accumulate the output and exit status from the given application,
-%%      which will be spawned with cmd_port/2.
-cmd_status(Argv) ->
-    Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
-                           use_stdio, binary]),
-    try cmd_loop(Port, [])
-    after catch port_close(Port)
-    end.
-
-%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
-%% @doc Accumulate the output and exit status from a port.
-cmd_loop(Port, Acc) ->
-    receive
-        {Port, {exit_status, Status}} ->
-            {Status, iolist_to_binary(lists:reverse(Acc))};
-        {Port, {data, Data}} ->
-            cmd_loop(Port, [Data | Acc])
-    end.
-
-%% @spec join([iolist()], iolist()) -> iolist()
-%% @doc Join a list of strings or binaries together with the given separator
-%%      (a string, char, or binary). The output is flattened, but may be an
-%%      iolist() instead of a string() if any of the inputs are binary().
-join([], _Separator) ->
-    [];
-join([S], _Separator) ->
-    lists:flatten(S);
-join(Strings, Separator) ->
-    lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).
-
-revjoin([], _Separator, Acc) ->
-    Acc;
-revjoin([S | Rest], Separator, []) ->
-    revjoin(Rest, Separator, [S]);
-revjoin([S | Rest], Separator, Acc) ->
-    revjoin(Rest, Separator, [S, Separator | Acc]).
-
-%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
-%% @doc URL safe encoding of the given term.
-quote_plus(Atom) when is_atom(Atom) ->
-    quote_plus(atom_to_list(Atom));
-quote_plus(Int) when is_integer(Int) ->
-    quote_plus(integer_to_list(Int));
-quote_plus(Binary) when is_binary(Binary) ->
-    quote_plus(binary_to_list(Binary));
-quote_plus(Float) when is_float(Float) ->
-    quote_plus(mochinum:digits(Float));
-quote_plus(String) ->
-    quote_plus(String, []).
-
-quote_plus([], Acc) ->
-    lists:reverse(Acc);
-quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
-    quote_plus(Rest, [C | Acc]);
-quote_plus([$\s | Rest], Acc) ->
-    quote_plus(Rest, [$+ | Acc]);
-quote_plus([C | Rest], Acc) ->
-    <<Hi:4, Lo:4>> = <<C>>,
-    quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
-
-%% @spec urlencode([{Key, Value}]) -> string()
-%% @doc URL encode the property list.
-urlencode(Props) ->
-    Pairs = lists:foldr(
-              fun ({K, V}, Acc) ->
-                      [quote_plus(K) ++ "=" ++ quote_plus(V) | Acc]
-              end, [], Props),
-    string:join(Pairs, "&").
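-
-%% Example: urlencode([{"foo", "bar baz"}, {n, 1}]) = "foo=bar+baz&n=1".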
-
-%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
-%% @doc Parse a query string or application/x-www-form-urlencoded.
-parse_qs(Binary) when is_binary(Binary) ->
-    parse_qs(binary_to_list(Binary));
-parse_qs(String) ->
-    parse_qs(String, []).
-
-parse_qs([], Acc) ->
-    lists:reverse(Acc);
-parse_qs(String, Acc) ->
-    {Key, Rest} = parse_qs_key(String),
-    {Value, Rest1} = parse_qs_value(Rest),
-    parse_qs(Rest1, [{Key, Value} | Acc]).
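-
-%% Example: parse_qs("a=1&b=two+words") = [{"a", "1"}, {"b", "two words"}].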
-
-parse_qs_key(String) ->
-    parse_qs_key(String, []).
-
-parse_qs_key([], Acc) ->
-    {qs_revdecode(Acc), ""};
-parse_qs_key([$= | Rest], Acc) ->
-    {qs_revdecode(Acc), Rest};
-parse_qs_key(Rest=[$; | _], Acc) ->
-    {qs_revdecode(Acc), Rest};
-parse_qs_key(Rest=[$& | _], Acc) ->
-    {qs_revdecode(Acc), Rest};
-parse_qs_key([C | Rest], Acc) ->
-    parse_qs_key(Rest, [C | Acc]).
-
-parse_qs_value(String) ->
-    parse_qs_value(String, []).
-
-parse_qs_value([], Acc) ->
-    {qs_revdecode(Acc), ""};
-parse_qs_value([$; | Rest], Acc) ->
-    {qs_revdecode(Acc), Rest};
-parse_qs_value([$& | Rest], Acc) ->
-    {qs_revdecode(Acc), Rest};
-parse_qs_value([C | Rest], Acc) ->
-    parse_qs_value(Rest, [C | Acc]).
-
-%% @spec unquote(string() | binary()) -> string()
-%% @doc Unquote a URL encoded string.
-unquote(Binary) when is_binary(Binary) ->
-    unquote(binary_to_list(Binary));
-unquote(String) ->
-    qs_revdecode(lists:reverse(String)).
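-
-%% Example: unquote("foo%20bar+baz") = "foo bar baz".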
-
-qs_revdecode(S) ->
-    qs_revdecode(S, []).
-
-qs_revdecode([], Acc) ->
-    Acc;
-qs_revdecode([$+ | Rest], Acc) ->
-    qs_revdecode(Rest, [$\s | Acc]);
-qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
-    qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
-qs_revdecode([C | Rest], Acc) ->
-    qs_revdecode(Rest, [C | Acc]).
-
-%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
-%% @doc Return a 5-tuple; does not expand % escapes. Only supports HTTP-style
-%%      URLs.
-urlsplit(Url) ->
-    {Scheme, Url1} = urlsplit_scheme(Url),
-    {Netloc, Url2} = urlsplit_netloc(Url1),
-    {Path, Query, Fragment} = urlsplit_path(Url2),
-    {Scheme, Netloc, Path, Query, Fragment}.
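-
-%% Example:
-%%      urlsplit("http://example.com/p?q=1#frag") =
-%%          {"http", "example.com", "/p", "q=1", "frag"}.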
-
-urlsplit_scheme(Url) ->
-    case urlsplit_scheme(Url, []) of
-        no_scheme ->
-            {"", Url};
-        Res ->
-            Res
-    end.
-
-urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
-                                       (C >= $A andalso C =< $Z) orelse
-                                       (C >= $0 andalso C =< $9) orelse
-                                       C =:= $+ orelse C =:= $- orelse
-                                       C =:= $.) ->
-    urlsplit_scheme(Rest, [C | Acc]);
-urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
-    {string:to_lower(lists:reverse(Acc)), Rest};
-urlsplit_scheme(_Rest, _Acc) ->
-    no_scheme.
-
-urlsplit_netloc("//" ++ Rest) ->
-    urlsplit_netloc(Rest, []);
-urlsplit_netloc(Path) ->
-    {"", Path}.
-
-urlsplit_netloc("", Acc) ->
-    {lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
-    {lists:reverse(Acc), Rest};
-urlsplit_netloc([C | Rest], Acc) ->
-    urlsplit_netloc(Rest, [C | Acc]).
-
-
-%% @spec path_split(string()) -> {Part, Rest}
-%% @doc Split a path starting from the left, as in URL traversal.
-%%      path_split("foo/bar") = {"foo", "bar"},
-%%      path_split("/foo/bar") = {"", "foo/bar"}.
-path_split(S) ->
-    path_split(S, []).
-
-path_split("", Acc) ->
-    {lists:reverse(Acc), ""};
-path_split("/" ++ Rest, Acc) ->
-    {lists:reverse(Acc), Rest};
-path_split([C | Rest], Acc) ->
-    path_split(Rest, [C | Acc]).
-
-
-%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
-%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
-urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
-    lists:flatten([case Scheme of "" -> "";  _ -> [Scheme, "://"] end,
-                   Netloc,
-                   urlunsplit_path({Path, Query, Fragment})]).
-
-%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
-%% @doc Assemble a URL path from the 3-tuple.
-urlunsplit_path({Path, Query, Fragment}) ->
-    lists:flatten([Path,
-                   case Query of "" -> ""; _ -> [$? | Query] end,
-                   case Fragment of "" -> ""; _ -> [$# | Fragment] end]).
-
-%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
-%% @doc Return a 3-tuple; does not expand % escapes. Only supports HTTP-style
-%%      paths.
-urlsplit_path(Path) ->
-    urlsplit_path(Path, []).
-
-urlsplit_path("", Acc) ->
-    {lists:reverse(Acc), "", ""};
-urlsplit_path("?" ++ Rest, Acc) ->
-    {Query, Fragment} = urlsplit_query(Rest),
-    {lists:reverse(Acc), Query, Fragment};
-urlsplit_path("#" ++ Rest, Acc) ->
-    {lists:reverse(Acc), "", Rest};
-urlsplit_path([C | Rest], Acc) ->
-    urlsplit_path(Rest, [C | Acc]).
-
-urlsplit_query(Query) ->
-    urlsplit_query(Query, []).
-
-urlsplit_query("", Acc) ->
-    {lists:reverse(Acc), ""};
-urlsplit_query("#" ++ Rest, Acc) ->
-    {lists:reverse(Acc), Rest};
-urlsplit_query([C | Rest], Acc) ->
-    urlsplit_query(Rest, [C | Acc]).
-
-%% @spec guess_mime(string()) -> string()
-%% @doc  Guess the mime type of a file by the extension of its filename.
-guess_mime(File) ->
-    case mochiweb_mime:from_extension(filename:extension(File)) of
-        undefined ->
-            "text/plain";
-        Mime ->
-            Mime
-    end.
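-
-%% Examples: guess_mime("x.html") = "text/html",
-%%           guess_mime("README") = "text/plain" (no known extension).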
-
-%% @spec parse_header(string()) -> {Type, [{K, V}]}
-%% @doc  Parse a Content-Type like header, return the main Content-Type
-%%       and a property list of options.
-parse_header(String) ->
-    %% TODO: This is exactly as broken as Python's cgi module.
-    %%       Should parse properly like mochiweb_cookies.
-    [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
-    F = fun (S, Acc) ->
-                case lists:splitwith(fun (C) -> C =/= $= end, S) of
-                    {"", _} ->
-                        %% Skip anything with no name
-                        Acc;
-                    {_, ""} ->
-                        %% Skip anything with no value
-                        Acc;
-                    {Name, [$\= | Value]} ->
-                        [{string:to_lower(string:strip(Name)),
-                          unquote_header(string:strip(Value))} | Acc]
-                end
-        end,
-    {string:to_lower(Type),
-     lists:foldr(F, [], Parts)}.
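-
-%% Example:
-%%      parse_header("text/html; charset=UTF-8") =
-%%          {"text/html", [{"charset", "UTF-8"}]}.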
-
-unquote_header("\"" ++ Rest) ->
-    unquote_header(Rest, []);
-unquote_header(S) ->
-    S.
-
-unquote_header("", Acc) ->
-    lists:reverse(Acc);
-unquote_header("\"", Acc) ->
-    lists:reverse(Acc);
-unquote_header([$\\, C | Rest], Acc) ->
-    unquote_header(Rest, [C | Acc]);
-unquote_header([C | Rest], Acc) ->
-    unquote_header(Rest, [C | Acc]).
-
-%% @spec record_to_proplist(Record, Fields) -> proplist()
-%% @doc Calls record_to_proplist/3 with a default TypeKey of '__record'.
-record_to_proplist(Record, Fields) ->
-    record_to_proplist(Record, Fields, '__record').
-
-%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
-%% @doc Return a proplist of the given Record with each field in the
-%%      Fields list set as a key with the corresponding value in the Record.
-%%      TypeKey is the key that is used to store the record type.
-%%      Fields should be obtained by calling record_info(fields, record_type),
-%%      where record_type is the record type of Record.
-record_to_proplist(Record, Fields, TypeKey)
-  when tuple_size(Record) - 1 =:= length(Fields) ->
-    lists:zip([TypeKey | Fields], tuple_to_list(Record)).
-
-
-shell_quote([], Acc) ->
-    lists:reverse([$\" | Acc]);
-shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
-                                  C =:= $\\ orelse C =:= $\$ ->
-    shell_quote(Rest, [C, $\\ | Acc]);
-shell_quote([C | Rest], Acc) ->
-    shell_quote(Rest, [C | Acc]).
-
-%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
-%% @type qvalue() = {media_type() | encoding() , float()}.
-%% @type media_type() = string().
-%% @type encoding() = string().
-%%
-%% @doc Parses a list (given as a string) of elements with Q values associated
-%%      to them. Elements are separated by commas and each element is separated
-%%      from its Q value by a semicolon. Q values are optional; when missing,
-%%      the Q value of an element defaults to 1.0. A Q value is always in the
-%%      range [0.0, 1.0]. A Q value list is used for example as the value of the
-%%      HTTP "Accept" and "Accept-Encoding" headers.
-%%
-%%      Q values are described in section 3.9 of RFC 2616 (HTTP 1.1).
-%%
-%%      Example:
-%%
-%%      parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
-%%          [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
-%%
-parse_qvalues(QValuesStr) ->
-    try
-        lists:map(
-            fun(Pair) ->
-                [Type | Params] = string:tokens(Pair, ";"),
-                NormParams = normalize_media_params(Params),
-                {Q, NonQParams} = extract_q(NormParams),
-                {string:join([string:strip(Type) | NonQParams], ";"), Q}
-            end,
-            string:tokens(string:to_lower(QValuesStr), ",")
-        )
-    catch
-        _Type:_Error ->
-            invalid_qvalue_string
-    end.
-
-normalize_media_params(Params) ->
-    {ok, Re} = re:compile("\\s"),
-    normalize_media_params(Re, Params, []).
-
-normalize_media_params(_Re, [], Acc) ->
-    lists:reverse(Acc);
-normalize_media_params(Re, [Param | Rest], Acc) ->
-    NormParam = re:replace(Param, Re, "", [global, {return, list}]),
-    normalize_media_params(Re, Rest, [NormParam | Acc]).
-
-extract_q(NormParams) ->
-    {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
-    {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
-    extract_q(KVRe, QRe, NormParams, []).
-
-extract_q(_KVRe, _QRe, [], Acc) ->
-    {1.0, lists:reverse(Acc)};
-extract_q(KVRe, QRe, [Param | Rest], Acc) ->
-    case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
-        {match, [Name, Value]} ->
-            case Name of
-            "q" ->
-                {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
-                QVal = case Q of
-                    "0" ->
-                        0.0;
-                    "1" ->
-                        1.0;
-                    Else ->
-                        list_to_float(Else)
-                end,
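-                %% An out-of-range Q value deliberately has no true branch:
-                %% the resulting case_clause error is caught in parse_qvalues/1.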
-                case QVal < 0.0 orelse QVal > 1.0 of
-                false ->
-                    {QVal, lists:reverse(Acc) ++ Rest}
-                end;
-            _ ->
-                extract_q(KVRe, QRe, Rest, [Param | Acc])
-            end
-    end.
-
-%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
-%%    [encoding()]
-%%
-%% @doc Determines which encodings specified in the given Q values list are
-%%      valid according to a list of supported encodings and a default encoding.
-%%
-%%      The returned list of encodings is sorted in descending order of the
-%%      Q values of the given list. The last element of this list is the given
-%%      default encoding unless this encoding is explicitly or implicitly
-%%      marked with a Q value of 0.0 in the given Q values list.
-%%      Note: encodings with the same Q value are kept in the same order as
-%%            found in the input Q values list.
-%%
-%%      This encoding picking process is described in section 14.3 of
-%%      RFC 2616 (HTTP 1.1).
-%%
-%%      Example:
-%%
-%%      pick_accepted_encodings(
-%%          [{"gzip", 0.5}, {"deflate", 1.0}],
-%%          ["gzip", "identity"],
-%%          "identity"
-%%      ) ->
-%%          ["gzip", "identity"]
-%%
-pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
-    SortedQList = lists:reverse(
-        lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
-    ),
-    {Accepted, Refused} = lists:foldr(
-        fun({E, Q}, {A, R}) ->
-            case Q > 0.0 of
-                true ->
-                    {[E | A], R};
-                false ->
-                    {A, [E | R]}
-            end
-        end,
-        {[], []},
-        SortedQList
-    ),
-    Refused1 = lists:foldr(
-        fun(Enc, Acc) ->
-            case Enc of
-                "*" ->
-                    lists:subtract(SupportedEncs, Accepted) ++ Acc;
-                _ ->
-                    [Enc | Acc]
-            end
-        end,
-        [],
-        Refused
-    ),
-    Accepted1 = lists:foldr(
-        fun(Enc, Acc) ->
-            case Enc of
-                "*" ->
-                    lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
-                _ ->
-                    [Enc | Acc]
-            end
-        end,
-        [],
-        Accepted
-    ),
-    Accepted2 = case lists:member(DefaultEnc, Accepted1) of
-        true ->
-            Accepted1;
-        false ->
-            Accepted1 ++ [DefaultEnc]
-    end,
-    [E || E <- Accepted2, lists:member(E, SupportedEncs),
-        not lists:member(E, Refused1)].
-
-make_io(Atom) when is_atom(Atom) ->
-    atom_to_list(Atom);
-make_io(Integer) when is_integer(Integer) ->
-    integer_to_list(Integer);
-make_io(Io) when is_list(Io); is_binary(Io) ->
-    Io.
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
-
-make_io_test() ->
-    ?assertEqual(
-       <<"atom">>,
-       iolist_to_binary(make_io(atom))),
-    ?assertEqual(
-       <<"20">>,
-       iolist_to_binary(make_io(20))),
-    ?assertEqual(
-       <<"list">>,
-       iolist_to_binary(make_io("list"))),
-    ?assertEqual(
-       <<"binary">>,
-       iolist_to_binary(make_io(<<"binary">>))),
-    ok.
-
--record(test_record, {field1=f1, field2=f2}).
-record_to_proplist_test() ->
-    ?assertEqual(
-       [{'__record', test_record},
-        {field1, f1},
-        {field2, f2}],
-       record_to_proplist(#test_record{}, record_info(fields, test_record))),
-    ?assertEqual(
-       [{'typekey', test_record},
-        {field1, f1},
-        {field2, f2}],
-       record_to_proplist(#test_record{},
-                          record_info(fields, test_record),
-                          typekey)),
-    ok.
-
-shell_quote_test() ->
-    ?assertEqual(
-       "\"foo \\$bar\\\"\\`' baz\"",
-       shell_quote("foo $bar\"`' baz")),
-    ok.
-
-cmd_port_test_spool(Port, Acc) ->
-    receive
-        {Port, eof} ->
-            Acc;
-        {Port, {data, {eol, Data}}} ->
-            cmd_port_test_spool(Port, ["\n", Data | Acc]);
-        {Port, Unknown} ->
-            throw({unknown, Unknown})
-    after 1000 ->
-            throw(timeout)
-    end.
-
-cmd_port_test() ->
-    Port = cmd_port(["echo", "$bling$ `word`!"],
-                    [eof, stream, {line, 4096}]),
-    Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
-          after catch port_close(Port)
-          end,
-    self() ! {Port, wtf},
-    try cmd_port_test_spool(Port, [])
-    catch throw:{unknown, wtf} -> ok
-    end,
-    try cmd_port_test_spool(Port, [])
-    catch throw:timeout -> ok
-    end,
-    ?assertEqual(
-       "$bling$ `word`!\n",
-       Res).
-
-cmd_test() ->
-    ?assertEqual(
-       "$bling$ `word`!\n",
-       cmd(["echo", "$bling$ `word`!"])),
-    ok.
-
-cmd_string_test() ->
-    ?assertEqual(
-       "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
-       cmd_string(["echo", "$bling$ `word`!"])),
-    ok.
-
-cmd_status_test() ->
-    ?assertEqual(
-       {0, <<"$bling$ `word`!\n">>},
-       cmd_status(["echo", "$bling$ `word`!"])),
-    ok.
-
-
-parse_header_test() ->
-    ?assertEqual(
-       {"multipart/form-data", [{"boundary", "AaB03x"}]},
-       parse_header("multipart/form-data; boundary=AaB03x")),
-    %% This tests (currently) intentionally broken behavior
-    ?assertEqual(
-       {"multipart/form-data",
-        [{"b", ""},
-         {"cgi", "is"},
-         {"broken", "true\"e"}]},
-       parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
-    ok.
-
-guess_mime_test() ->
-    "text/plain" = guess_mime(""),
-    "text/plain" = guess_mime(".text"),
-    "application/zip" = guess_mime(".zip"),
-    "application/zip" = guess_mime("x.zip"),
-    "text/html" = guess_mime("x.html"),
-    "application/xhtml+xml" = guess_mime("x.xhtml"),
-    ok.
-
-path_split_test() ->
-    {"", "foo/bar"} = path_split("/foo/bar"),
-    {"foo", "bar"} = path_split("foo/bar"),
-    {"bar", ""} = path_split("bar"),
-    ok.
-
-urlsplit_test() ->
-    {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
-    {"http", "host:port", "/foo", "", "bar?baz"} =
-        urlsplit("http://host:port/foo#bar?baz"),
-    {"http", "host", "", "", ""} = urlsplit("http://host"),
-    {"", "", "/wiki/Category:Fruit", "", ""} =
-        urlsplit("/wiki/Category:Fruit"),
-    ok.
-
-urlsplit_path_test() ->
-    {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
-    {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
-    {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
-    {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
-    {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
-    {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
-    ok.
-
-urlunsplit_test() ->
-    "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
-    "http://host:port/foo#bar?baz" =
-        urlunsplit({"http", "host:port", "/foo", "", "bar?baz"}),
-    ok.
-
-urlunsplit_path_test() ->
-    "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
-    "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
-    "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
-    "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
-    "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
-    "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
-    ok.
-
-join_test() ->
-    ?assertEqual("foo,bar,baz",
-                  join(["foo", "bar", "baz"], $,)),
-    ?assertEqual("foo,bar,baz",
-                  join(["foo", "bar", "baz"], ",")),
-    ?assertEqual("foo bar",
-                  join([["foo", " bar"]], ",")),
-    ?assertEqual("foo bar,baz",
-                  join([["foo", " bar"], "baz"], ",")),
-    ?assertEqual("foo",
-                  join(["foo"], ",")),
-    ?assertEqual("foobarbaz",
-                  join(["foo", "bar", "baz"], "")),
-    ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
-                 join(["foo", "bar", "baz"], <<>>)),
-    ?assertEqual("foobar" ++ [<<"baz">>],
-                 join(["foo", "bar", <<"baz">>], "")),
-    ?assertEqual("",
-                 join([], "any")),
-    ok.
-
-quote_plus_test() ->
-    "foo" = quote_plus(foo),
-    "1" = quote_plus(1),
-    "1.1" = quote_plus(1.1),
-    "foo" = quote_plus("foo"),
-    "foo+bar" = quote_plus("foo bar"),
-    "foo%0A" = quote_plus("foo\n"),
-    "foo%0A" = quote_plus("foo\n"),
-    "foo%3B%26%3D" = quote_plus("foo;&="),
-    "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
-    ok.
-
-unquote_test() ->
-    ?assertEqual("foo bar",
-                 unquote("foo+bar")),
-    ?assertEqual("foo bar",
-                 unquote("foo%20bar")),
-    ?assertEqual("foo\r\n",
-                 unquote("foo%0D%0A")),
-    ?assertEqual("foo\r\n",
-                 unquote(<<"foo%0D%0A">>)),
-    ok.
-
-urlencode_test() ->
-    "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
-                                                 {"baz", "wibble \r\n"},
-                                                 {z, 1}]),
-    ok.
-
-parse_qs_test() ->
-    ?assertEqual(
-       [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
-       parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
-    ?assertEqual(
-       [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
-       parse_qs("=bar&baz=wibble+%0D%0a&z=")),
-    ?assertEqual(
-       [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
-       parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
-    ?assertEqual(
-       [],
-       parse_qs("")),
-    ?assertEqual(
-       [{"foo", ""}, {"bar", ""}, {"baz", ""}],
-       parse_qs("foo;bar&baz")),
-    ok.
-
-partition_test() ->
-    {"foo", "", ""} = partition("foo", "/"),
-    {"foo", "/", "bar"} = partition("foo/bar", "/"),
-    {"foo", "/", ""} = partition("foo/", "/"),
-    {"", "/", "bar"} = partition("/bar", "/"),
-    {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
-    ok.
-
-safe_relative_path_test() ->
-    "foo" = safe_relative_path("foo"),
-    "foo/" = safe_relative_path("foo/"),
-    "foo" = safe_relative_path("foo/bar/.."),
-    "bar" = safe_relative_path("foo/../bar"),
-    "bar/" = safe_relative_path("foo/../bar/"),
-    "" = safe_relative_path("foo/.."),
-    "" = safe_relative_path("foo/../"),
-    undefined = safe_relative_path("/foo"),
-    undefined = safe_relative_path("../foo"),
-    undefined = safe_relative_path("foo/../.."),
-    undefined = safe_relative_path("foo//"),
-    undefined = safe_relative_path("foo\\bar"),
-    ok.
-
-parse_qvalues_test() ->
-    [] = parse_qvalues(""),
-    [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
-    [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
-    [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
-    [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
-    [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
-    [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "gzip,deflate,identity;q=0.0"
-    ),
-    [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "deflate,gzip,identity;q=0.0"
-    ),
-    [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
-        parse_qvalues("gzip,deflate,gzip,identity;q=0"),
-    [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "gzip, deflate , identity; q=0.0"
-    ),
-    [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "gzip; q=1, deflate;q=1.0, identity;q=0.0"
-    ),
-    [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "gzip; q=0.5, deflate;q=1.0, identity;q=0"
-    ),
-    [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
-        "gzip; q=0.5, deflate , identity;q=0.0"
-    ),
-    [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
-        "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
-    ),
-    [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
-        "gzip; q=0.5,deflate,identity"
-    ),
-    [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
-        parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
-    [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
-        parse_qvalues("text/html;level=1, text/plain;q=0.5"),
-    [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
-        parse_qvalues("text/html;level=1;q=0.3, text/plain"),
-    [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
-        parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
-    [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
-        parse_qvalues("text/html;q=0.3;level=1, text/plain"),
-    invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
-    invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
-    invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
-    invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
-    invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
-    invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
-    ok.
-
-pick_accepted_encodings_test() ->
-    ["identity"] = pick_accepted_encodings(
-        [],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 1.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["identity"] = pick_accepted_encodings(
-        [{"gzip", 0.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"deflate", 1.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 0.5}, {"deflate", 1.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["identity"] = pick_accepted_encodings(
-        [{"gzip", 0.0}, {"deflate", 0.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["gzip"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
-        ["gzip", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate", "identity"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"deflate", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["deflate", "gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 0.2}, {"deflate", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
-        [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    [] = pick_accepted_encodings(
-        [{"*", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate", "identity"] = pick_accepted_encodings(
-        [{"*", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate", "identity"] = pick_accepted_encodings(
-        [{"*", 0.6}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"*", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "deflate"] = pick_accepted_encodings(
-        [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["deflate", "gzip"] = pick_accepted_encodings(
-        [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "identity"] = pick_accepted_encodings(
-        [{"deflate", 0.0}, {"*", 1.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ["gzip", "identity"] = pick_accepted_encodings(
-        [{"*", 1.0}, {"deflate", 0.0}],
-        ["gzip", "deflate", "identity"],
-        "identity"
-    ),
-    ok.
-
--endif.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/acf8eaff/src/mochiweb/src/reloader.erl
----------------------------------------------------------------------
diff --git a/src/mochiweb/src/reloader.erl b/src/mochiweb/src/reloader.erl
deleted file mode 100644
index c0f5de8..0000000
--- a/src/mochiweb/src/reloader.erl
+++ /dev/null
@@ -1,161 +0,0 @@
-%% @copyright 2007 Mochi Media, Inc.
-%% @author Matthew Dempsky <ma...@mochimedia.com>
-%%
-%% @doc Erlang module for automatically reloading modified modules
-%% during development.
-
--module(reloader).
--author("Matthew Dempsky <ma...@mochimedia.com>").
-
--include_lib("kernel/include/file.hrl").
-
--behaviour(gen_server).
--export([start/0, start_link/0]).
--export([stop/0]).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
--export([all_changed/0]).
--export([is_changed/1]).
--export([reload_modules/1]).
--record(state, {last, tref}).
-
-%% External API
-
-%% @spec start() -> ServerRet
-%% @doc Start the reloader.
-start() ->
-    gen_server:start({local, ?MODULE}, ?MODULE, [], []).
-
-%% @spec start_link() -> ServerRet
-%% @doc Start the reloader.
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @spec stop() -> ok
-%% @doc Stop the reloader.
-stop() ->
-    gen_server:call(?MODULE, stop).
-
-%% gen_server callbacks
-
-%% @spec init([]) -> {ok, State}
-%% @doc gen_server init; sets the initial state and starts the poll timer.
-init([]) ->
-    {ok, TRef} = timer:send_interval(timer:seconds(1), doit),
-    {ok, #state{last = stamp(), tref = TRef}}.
-
-%% @spec handle_call(Args, From, State) -> tuple()
-%% @doc gen_server callback.
-handle_call(stop, _From, State) ->
-    {stop, shutdown, stopped, State};
-handle_call(_Req, _From, State) ->
-    {reply, {error, badrequest}, State}.
-
-%% @spec handle_cast(Cast, State) -> tuple()
-%% @doc gen_server callback.
-handle_cast(_Req, State) ->
-    {noreply, State}.
-
-%% @spec handle_info(Info, State) -> tuple()
-%% @doc gen_server callback.
-handle_info(doit, State) ->
-    Now = stamp(),
-    doit(State#state.last, Now),
-    {noreply, State#state{last = Now}};
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-%% @spec terminate(Reason, State) -> ok
-%% @doc gen_server termination callback.
-terminate(_Reason, State) ->
-    {ok, cancel} = timer:cancel(State#state.tref),
-    ok.
-
-
-%% @spec code_change(_OldVsn, State, _Extra) -> State
-%% @doc gen_server code_change callback (trivial).
-code_change(_Vsn, State, _Extra) ->
-    {ok, State}.
-
-%% @spec reload_modules([atom()]) -> [{module, atom()} | {error, term()}]
-%% @doc Apply code:purge/1 and code:load_file/1 to the given list of modules
-%%      in order, returning the results of code:load_file/1.
-reload_modules(Modules) ->
-    [begin code:purge(M), code:load_file(M) end || M <- Modules].
-
-%% @spec all_changed() -> [atom()]
-%% @doc Return a list of beam modules that have changed.
-all_changed() ->
-    [M || {M, Fn} <- code:all_loaded(), is_list(Fn), is_changed(M)].
-
-%% @spec is_changed(atom()) -> boolean()
-%% @doc Returns true if the loaded module is a beam with a vsn attribute
-%%      that does not match the on-disk beam file; returns false otherwise.
-is_changed(M) ->
-    try
-        module_vsn(M:module_info()) =/= module_vsn(code:get_object_code(M))
-    catch _:_ ->
-            false
-    end.
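-
-%% Example: reload_modules(all_changed()) purges and reloads every module
-%% whose on-disk beam no longer matches the loaded code.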
-
-%% Internal API
-
-module_vsn({M, Beam, _Fn}) ->
-    {ok, {M, Vsn}} = beam_lib:version(Beam),
-    Vsn;
-module_vsn(L) when is_list(L) ->
-    {_, Attrs} = lists:keyfind(attributes, 1, L),
-    {_, Vsn} = lists:keyfind(vsn, 1, Attrs),
-    Vsn.
-
-doit(From, To) ->
-    [case file:read_file_info(Filename) of
-         {ok, #file_info{mtime = Mtime}} when Mtime >= From, Mtime < To ->
-             reload(Module);
-         {ok, _} ->
-             unmodified;
-         {error, enoent} ->
-             %% The Erlang compiler deletes existing .beam files if
-             %% recompiling fails.  Maybe it's worth spitting out a
-             %% warning here, but I'd want to limit it to just once.
-             gone;
-         {error, Reason} ->
-             io:format("Error reading ~s's file info: ~p~n",
-                       [Filename, Reason]),
-             error
-     end || {Module, Filename} <- code:all_loaded(), is_list(Filename)].
-
-reload(Module) ->
-    io:format("Reloading ~p ...", [Module]),
-    code:purge(Module),
-    case code:load_file(Module) of
-        {module, Module} ->
-            io:format(" ok.~n"),
-            case erlang:function_exported(Module, test, 0) of
-                true ->
-                    io:format(" - Calling ~p:test() ...", [Module]),
-                    case catch Module:test() of
-                        ok ->
-                            io:format(" ok.~n"),
-                            reload;
-                        Reason ->
-                            io:format(" fail: ~p.~n", [Reason]),
-                            reload_but_test_failed
-                    end;
-                false ->
-                    reload
-            end;
-        {error, Reason} ->
-            io:format(" fail: ~p.~n", [Reason]),
-            error
-    end.
-
-
-stamp() ->
-    erlang:localtime().
-
-%%
-%% Tests
-%%
--include_lib("eunit/include/eunit.hrl").
--ifdef(TEST).
--endif.


[05/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_util.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
deleted file mode 100644
index d09211a..0000000
--- a/src/couch/src/couch_util.erl
+++ /dev/null
@@ -1,500 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util).
-
--export([priv_dir/0, normpath/1]).
--export([should_flush/0, should_flush/1, to_existing_atom/1]).
--export([rand32/0, implode/2, collate/2, collate/3]).
--export([abs_pathname/1,abs_pathname/2, trim/1]).
--export([encodeBase64Url/1, decodeBase64Url/1]).
--export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
--export([get_nested_json_value/2, json_user_ctx/1]).
--export([proplist_apply_field/2, json_apply_field/2]).
--export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
--export([verify/2,simple_call/2,shutdown_sync/1]).
--export([get_value/2, get_value/3]).
--export([md5/1, md5_init/0, md5_update/2, md5_final/1]).
--export([reorder_results/2]).
--export([url_strip_password/1]).
--export([encode_doc_id/1]).
--export([with_db/2]).
--export([rfc1123_date/0, rfc1123_date/1]).
--export([integer_to_boolean/1, boolean_to_integer/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
-% arbitrarily chosen amount of memory to use before flushing to disk
--define(FLUSH_MAX_MEM, 10000000).
-
-priv_dir() ->
-    case code:priv_dir(couch) of
-        {error, bad_name} ->
-            % small hack, in dev mode "app" is couchdb. Fixing requires
-            % renaming src/couchdb to src/couch. Not really worth the hassle.
-            % -Damien
-            code:priv_dir(couchdb);
-        Dir -> Dir
-    end.
-
-% Normalize a pathname by removing .. and . components.
-normpath(Path) ->
-    normparts(filename:split(Path), []).
-
-normparts([], Acc) ->
-    filename:join(lists:reverse(Acc));
-normparts([".." | RestParts], [_Drop | RestAcc]) ->
-    normparts(RestParts, RestAcc);
-normparts(["." | RestParts], Acc) ->
-    normparts(RestParts, Acc);
-normparts([Part | RestParts], Acc) ->
-    normparts(RestParts, [Part | Acc]).
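-
-% Example: normpath("a/b/../c/./d") = "a/c/d".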
-
-% works like list_to_existing_atom, except can be list or binary and it
-% gives you the original value instead of an error if no existing atom.
-to_existing_atom(V) when is_list(V) ->
-    try list_to_existing_atom(V) catch _:_ -> V end;
-to_existing_atom(V) when is_binary(V) ->
-    try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
-to_existing_atom(V) when is_atom(V) ->
-    V.
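-
-% Example: to_existing_atom("ok") = ok, while a string that names no
-% existing atom is returned unchanged.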
-
-shutdown_sync(Pid) when not is_pid(Pid)->
-    ok;
-shutdown_sync(Pid) ->
-    MRef = erlang:monitor(process, Pid),
-    try
-        catch unlink(Pid),
-        catch exit(Pid, shutdown),
-        receive
-        {'DOWN', MRef, _, _, _} ->
-            ok
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-simple_call(Pid, Message) ->
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {self(), Message},
-        receive
-        {Pid, Result} ->
-            Result;
-        {'DOWN', MRef, _, _, Reason} ->
-            exit(Reason)
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-validate_utf8(Data) when is_list(Data) ->
-    validate_utf8(?l2b(Data));
-validate_utf8(Bin) when is_binary(Bin) ->
-    validate_utf8_fast(Bin, 0).
-
-validate_utf8_fast(B, O) ->
-    case B of
-        <<_:O/binary>> ->
-            true;
-        <<_:O/binary, C1, _/binary>> when
-                C1 < 128 ->
-            validate_utf8_fast(B, 1 + O);
-        <<_:O/binary, C1, C2, _/binary>> when
-                C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            validate_utf8_fast(B, 2 + O);
-        <<_:O/binary, C1, C2, C3, _/binary>> when
-                C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            validate_utf8_fast(B, 3 + O);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when
-                C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            validate_utf8_fast(B, 4 + O);
-        _ ->
-            false
-    end.
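-
-% Examples: validate_utf8(<<224,160,128>>) = true (U+0800, a valid 3-byte
-% sequence), validate_utf8(<<192,128>>) = false (overlong encoding).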
-
-to_hex([]) ->
-    [];
-to_hex(Bin) when is_binary(Bin) ->
-    to_hex(binary_to_list(Bin));
-to_hex([H|T]) ->
-    [to_digit(H div 16), to_digit(H rem 16) | to_hex(T)].
-
-to_digit(N) when N < 10 -> $0 + N;
-to_digit(N)             -> $a + N-10.
-
-
-parse_term(Bin) when is_binary(Bin) ->
-    parse_term(binary_to_list(Bin));
-parse_term(List) ->
-    {ok, Tokens, _} = erl_scan:string(List ++ "."),
-    erl_parse:parse_term(Tokens).
-
-get_value(Key, List) ->
-    get_value(Key, List, undefined).
-
-get_value(Key, List, Default) ->
-    case lists:keysearch(Key, 1, List) of
-    {value, {Key,Value}} ->
-        Value;
-    false ->
-        Default
-    end.
-
-get_nested_json_value({Props}, [Key|Keys]) ->
-    case couch_util:get_value(Key, Props, nil) of
-    nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
-    Value -> get_nested_json_value(Value, Keys)
-    end;
-get_nested_json_value(Value, []) ->
-    Value;
-get_nested_json_value(_NotJSONObj, _) ->
-    throw({not_found, json_mismatch}).
-
-proplist_apply_field(H, L) ->
-    {R} = json_apply_field(H, {L}),
-    R.
-
-json_apply_field(H, {L}) ->
-    json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
-    json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
-    json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
-    {[{Key, NewValue}|Acc]}.
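-
-% Example: json_apply_field({<<"a">>, 1}, {[{<<"a">>, 0}, {<<"b">>, 2}]}) =
-%          {[{<<"a">>, 1}, {<<"b">>, 2}]}.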
-
-json_user_ctx(#db{name=ShardName, user_ctx=Ctx}) ->
-    {[{<<"db">>, mem3:dbname(ShardName)},
-            {<<"name">>,Ctx#user_ctx.name},
-            {<<"roles">>,Ctx#user_ctx.roles}]}.
-
-
-% returns a random 32-bit unsigned integer
-rand32() ->
-    crypto:rand_uniform(0, 16#100000000).
-
-% Given a relative pathname such as "../foo/bar/", gives back the fully
-% qualified absolute pathname.
-abs_pathname(" " ++ Filename) ->
-    % strip leading whitespace
-    abs_pathname(Filename);
-abs_pathname([$/ |_]=Filename) ->
-    Filename;
-abs_pathname(Filename) ->
-    {ok, Cwd} = file:get_cwd(),
-    {Filename2, Args} = separate_cmd_args(Filename, ""),
-    abs_pathname(Filename2, Cwd) ++ Args.
-
-abs_pathname(Filename, Dir) ->
-    Name = filename:absname(Filename, Dir ++ "/"),
-    OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
-    % If the filename is a dir (last char is a slash), put back the end slash
-    case string:right(Filename,1) of
-    "/" ->
-        OutFilename ++ "/";
-    "\\" ->
-        OutFilename ++ "/";
-    _Else->
-        OutFilename
-    end.
-
-% If this is an executable with arguments, separate out the arguments:
-% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
-separate_cmd_args("", CmdAcc) ->
-    {lists:reverse(CmdAcc), ""};
-separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
-    separate_cmd_args(Rest, " \\" ++ CmdAcc);
-separate_cmd_args(" " ++ Rest, CmdAcc) ->
-    {lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char|Rest], CmdAcc) ->
-    separate_cmd_args(Rest, [Char | CmdAcc]).
-
-% Is a character whitespace?
-is_whitespace($\s) -> true;
-is_whitespace($\t) -> true;
-is_whitespace($\n) -> true;
-is_whitespace($\r) -> true;
-is_whitespace(_Else) -> false.
-
-
-% removes leading and trailing whitespace from a string
-trim(String) ->
-    String2 = lists:dropwhile(fun is_whitespace/1, String),
-    lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
-
-% Takes a hierarchical list of dirs and removes the dots ".", the double
-% dots ".." and their corresponding parent dirs.
-fix_path_list([], Acc) ->
-    lists:reverse(Acc);
-fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
-    fix_path_list(Rest, RestAcc);
-fix_path_list(["."|Rest], Acc) ->
-    fix_path_list(Rest, Acc);
-fix_path_list([Dir | Rest], Acc) ->
-    fix_path_list(Rest, [Dir | Acc]).
-
-
-implode(List, Sep) ->
-    implode(List, Sep, []).
-
-implode([], _Sep, Acc) ->
-    lists:flatten(lists:reverse(Acc));
-implode([H], Sep, Acc) ->
-    implode([], Sep, [H|Acc]);
-implode([H|T], Sep, Acc) ->
-    implode(T, Sep, [Sep,H|Acc]).
-
-
-drv_port() ->
-    case get(couch_drv_port) of
-    undefined ->
-        Port = open_port({spawn, "couch_icu_driver"}, []),
-        put(couch_drv_port, Port),
-        Port;
-    Port ->
-        Port
-    end.
-
-collate(A, B) ->
-    collate(A, B, []).
-
-collate(A, B, Options) when is_binary(A), is_binary(B) ->
-    Operation =
-    case lists:member(nocase, Options) of
-        true -> 1; % Case insensitive
-        false -> 0 % Case sensitive
-    end,
-    SizeA = byte_size(A),
-    SizeB = byte_size(B),
-    Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
-    [Result] = erlang:port_control(drv_port(), Operation, Bin),
-    % Result is 0 for lt, 1 for eq and 2 for gt. Subtract 1 to return the
-    % expected typical -1, 0, 1
-    Result - 1.
-
-should_flush() ->
-    should_flush(?FLUSH_MAX_MEM).
-
-should_flush(MemThreshHold) ->
-    {memory, ProcMem} = process_info(self(), memory),
-    BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
-        0, element(2,process_info(self(), binary))),
-    if ProcMem+BinMem > 2*MemThreshHold ->
-        garbage_collect(),
-        {memory, ProcMem2} = process_info(self(), memory),
-        BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
-            0, element(2,process_info(self(), binary))),
-        ProcMem2+BinMem2 > MemThreshHold;
-    true -> false end.
-
-encodeBase64Url(Url) ->
-    Url1 = re:replace(base64:encode(Url), ["=+", $$], ""),
-    Url2 = re:replace(Url1, "/", "_", [global]),
-    re:replace(Url2, "\\+", "-", [global, {return, binary}]).
-
-decodeBase64Url(Url64) ->
-    Url1 = re:replace(Url64, "-", "+", [global]),
-    Url2 = re:replace(Url1, "_", "/", [global]),
-    Padding = lists:duplicate((4 - iolist_size(Url2) rem 4) rem 4, $=),
-    base64:decode(iolist_to_binary([Url2, Padding])).
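-
-% Example round trip: encodeBase64Url(<<"hello">>) = <<"aGVsbG8">> (the
-% trailing "=" padding is stripped), and decodeBase64Url(<<"aGVsbG8">>)
-% restores <<"hello">>.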
-
-dict_find(Key, Dict, DefaultValue) ->
-    case dict:find(Key, Dict) of
-    {ok, Value} ->
-        Value;
-    error ->
-        DefaultValue
-    end.
-
-to_binary(V) when is_binary(V) ->
-    V;
-to_binary(V) when is_list(V) ->
-    try
-        list_to_binary(V)
-    catch
-        _:_ ->
-            list_to_binary(io_lib:format("~p", [V]))
-    end;
-to_binary(V) when is_atom(V) ->
-    list_to_binary(atom_to_list(V));
-to_binary(V) ->
-    list_to_binary(io_lib:format("~p", [V])).
-
-to_integer(V) when is_integer(V) ->
-    V;
-to_integer(V) when is_list(V) ->
-    erlang:list_to_integer(V);
-to_integer(V) when is_binary(V) ->
-    erlang:list_to_integer(binary_to_list(V)).
-
-to_list(V) when is_list(V) ->
-    V;
-to_list(V) when is_binary(V) ->
-    binary_to_list(V);
-to_list(V) when is_atom(V) ->
-    atom_to_list(V);
-to_list(V) ->
-    lists:flatten(io_lib:format("~p", [V])).
-
-url_encode(Bin) when is_binary(Bin) ->
-    url_encode(binary_to_list(Bin));
-url_encode([H|T]) ->
-    if
-    H >= $a, $z >= H ->
-        [H|url_encode(T)];
-    H >= $A, $Z >= H ->
-        [H|url_encode(T)];
-    H >= $0, $9 >= H ->
-        [H|url_encode(T)];
-    H == $_; H == $.; H == $-; H == $: ->
-        [H|url_encode(T)];
-    true ->
-        case lists:flatten(io_lib:format("~.16.0B", [H])) of
-        [X, Y] ->
-            [$%, X, Y | url_encode(T)];
-        [X] ->
-            [$%, $0, X | url_encode(T)]
-        end
-    end;
-url_encode([]) ->
-    [].
-
-verify([X|RestX], [Y|RestY], Result) ->
-    verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
-    Result == 0.
-
-verify(<<X/binary>>, <<Y/binary>>) ->
-    verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
-    case length(X) == length(Y) of
-        true ->
-            verify(X, Y, 0);
-        false ->
-            false
-    end;
-verify(_X, _Y) -> false.
-
--spec md5(Data::(iolist() | binary())) -> Digest::binary().
-md5(Data) ->
-    try crypto:md5(Data) catch error:_ -> erlang:md5(Data) end.
-
--spec md5_init() -> Context::binary().
-md5_init() ->
-    try crypto:md5_init() catch error:_ -> erlang:md5_init() end.
-
--spec md5_update(Context::binary(), Data::(iolist() | binary())) ->
-    NewContext::binary().
-md5_update(Ctx, D) ->
-    try crypto:md5_update(Ctx,D) catch error:_ -> erlang:md5_update(Ctx,D) end.
-
--spec md5_final(Context::binary()) -> Digest::binary().
-md5_final(Ctx) ->
-    try crypto:md5_final(Ctx) catch error:_ -> erlang:md5_final(Ctx) end.
-
-% linear search is faster for small lists; length/1 alone costs ~0.5 ms on a 100k list
-reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
-    [couch_util:get_value(Key, SortedResults) || Key <- Keys];
-reorder_results(Keys, SortedResults) ->
-    KeyDict = dict:from_list(SortedResults),
-    [dict:fetch(Key, KeyDict) || Key <- Keys].
-
-url_strip_password(Url) ->
-    re:replace(Url,
-        "http(s)?://([^:]+):[^@]+@(.*)$",
-        "http\\1://\\2:*****@\\3",
-        [{return, list}]).
-
-encode_doc_id(#doc{id = Id}) ->
-    encode_doc_id(Id);
-encode_doc_id(Id) when is_list(Id) ->
-    encode_doc_id(?l2b(Id));
-encode_doc_id(<<"_design/", Rest/binary>>) ->
-    "_design/" ++ url_encode(Rest);
-encode_doc_id(<<"_local/", Rest/binary>>) ->
-    "_local/" ++ url_encode(Rest);
-encode_doc_id(Id) ->
-    url_encode(Id).
-
-
-with_db(Db, Fun) when is_record(Db, db) ->
-    Fun(Db);
-with_db(DbName, Fun) ->
-    case couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}]) of
-        {ok, Db} ->
-            try
-                Fun(Db)
-            after
-                catch couch_db:close(Db)
-            end;
-        Else ->
-            throw(Else)
-    end.
-
-rfc1123_date() ->
-    {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
-    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
-    lists:flatten(
-      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
-            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
-
-rfc1123_date(undefined) ->
-    undefined;
-rfc1123_date(UniversalTime) ->
-    {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime,
-    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
-    lists:flatten(
-      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
-            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
-
-%% day
-
-day(1) -> "Mon";
-day(2) -> "Tue";
-day(3) -> "Wed";
-day(4) -> "Thu";
-day(5) -> "Fri";
-day(6) -> "Sat";
-day(7) -> "Sun".
-
-%% month
-
-month(1) -> "Jan";
-month(2) -> "Feb";
-month(3) -> "Mar";
-month(4) -> "Apr";
-month(5) -> "May";
-month(6) -> "Jun";
-month(7) -> "Jul";
-month(8) -> "Aug";
-month(9) -> "Sep";
-month(10) -> "Oct";
-month(11) -> "Nov";
-month(12) -> "Dec".
-
-integer_to_boolean(1) ->
-    true;
-integer_to_boolean(0) ->
-    false.
-
-boolean_to_integer(true) ->
-    1;
-boolean_to_integer(false) ->
-    0.
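
The encodeBase64Url/decodeBase64Url pair above implements the RFC 4648
"base64url" variant by hand: encoding strips the "=" padding and maps
"/" to "_" and "+" to "-"; decoding reverses the mapping and restores
the padding. A minimal round-trip sketch using only the stdlib (the
name b64url_encode is illustrative, not part of the tree):

    %% plain base64, then made URL-safe the same way as above
    b64url_encode(Bin) ->
        B1 = re:replace(base64:encode(Bin), "=+$", ""),
        B2 = re:replace(B1, "/", "_", [global]),
        re:replace(B2, "\\+", "-", [global, {return, binary}]).

    %% b64url_encode(<<255,239>>) gives <<"_-8">> (base64 "/+8="), and
    %% decodeBase64Url(<<"_-8">>) yields <<255,239>> again.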

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_uuids.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
deleted file mode 100644
index 3065938..0000000
--- a/src/couch/src/couch_uuids.erl
+++ /dev/null
@@ -1,116 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_uuids).
--include_lib("couch/include/couch_db.hrl").
-
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start/0, stop/0]).
--export([new/0, random/0, utc_random/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
-start() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-new() ->
-    gen_server:call(?MODULE, create).
-
-random() ->
-    list_to_binary(couch_util:to_hex(crypto:rand_bytes(16))).
-
-utc_random() ->
-    utc_suffix(couch_util:to_hex(crypto:rand_bytes(9))).
-
-utc_suffix(Suffix) ->
-    Now = {_, _, Micro} = now(),
-    Nowish = calendar:now_to_universal_time(Now),
-    Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
-    Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
-    Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]),
-    list_to_binary(Prefix ++ Suffix).
-
-init([]) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {ok, state()}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(create, _From, random) ->
-    {reply, random(), random};
-handle_call(create, _From, utc_random) ->
-    {reply, utc_random(), utc_random};
-handle_call(create, _From, {utc_id, UtcIdSuffix}) ->
-    {reply, utc_suffix(UtcIdSuffix), {utc_id, UtcIdSuffix}};
-handle_call(create, _From, {sequential, Pref, Seq}) ->
-    Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
-    case Seq >= 16#fff000 of
-        true ->
-            {reply, Result, {sequential, new_prefix(), inc()}};
-        _ ->
-            {reply, Result, {sequential, Pref, Seq + inc()}}
-    end.
-
-handle_cast(change, _State) ->
-    {noreply, state()};
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_config_change("uuids", _, _, _, _) ->
-    {ok, gen_server:cast(?MODULE, change)};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-new_prefix() ->
-    couch_util:to_hex((crypto:rand_bytes(13))).
-
-inc() ->
-    crypto:rand_uniform(1, 16#ffe).
-
-state() ->
-    AlgoStr = config:get("uuids", "algorithm", "random"),
-    case couch_util:to_existing_atom(AlgoStr) of
-        random ->
-            random;
-        utc_random ->
-            utc_random;
-        utc_id ->
-            UtcIdSuffix = config:get("uuids", "utc_id_suffix", ""),
-            {utc_id, UtcIdSuffix};
-        sequential ->
-            {sequential, new_prefix(), inc()};
-        Unknown ->
-            throw({unknown_uuid_algorithm, Unknown})
-    end.
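
state/0 above ties the UUID shape directly to the [uuids] config
section: "random" gives 32 random hex characters, "utc_random" a
14-hex-digit microseconds-since-epoch prefix plus 18 random hex
characters (so ids sort roughly by creation time), "utc_id" the same
time prefix plus the configured utc_id_suffix, and "sequential" a
26-character random prefix with a 6-hex-digit counter that rolls the
prefix once it nears 16#fff000. A sketch of switching algorithms on a
running node (the config_listener callback above turns the change into
a state() rebuild):

    ok = config:set("uuids", "algorithm", "sequential"),
    Id1 = couch_uuids:new(),
    Id2 = couch_uuids:new(),
    %% barring the rare prefix rollover, Id1 and Id2 share their first
    %% 26 characters and Id2 sorts after Id1
    true = Id2 > Id1.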

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_work_queue.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl
deleted file mode 100644
index ea871e2..0000000
--- a/src/couch/src/couch_work_queue.erl
+++ /dev/null
@@ -1,187 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_work_queue).
--behaviour(gen_server).
-
--include_lib("couch/include/couch_db.hrl").
-
-% public API
--export([new/1, queue/2, dequeue/1, dequeue/2, close/1, item_count/1, size/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-
--record(q, {
-    queue = queue:new(),
-    blocked = [],
-    max_size,
-    max_items,
-    items = 0,
-    size = 0,
-    work_waiters = [],
-    close_on_dequeue = false,
-    multi_workers = false
-}).
-
-
-new(Options) ->
-    gen_server:start_link(couch_work_queue, Options, []).
-
-
-queue(Wq, Item) when is_binary(Item) ->
-    gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
-queue(Wq, Item) ->
-    gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
-
-
-dequeue(Wq) ->
-    dequeue(Wq, all).
-
-    
-dequeue(Wq, MaxItems) ->
-    try
-        gen_server:call(Wq, {dequeue, MaxItems}, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-item_count(Wq) ->
-    try
-        gen_server:call(Wq, item_count, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-size(Wq) ->
-    try
-        gen_server:call(Wq, size, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-close(Wq) ->
-    gen_server:cast(Wq, close).
-    
-
-init(Options) ->
-    Q = #q{
-        max_size = couch_util:get_value(max_size, Options, nil),
-        max_items = couch_util:get_value(max_items, Options, nil),
-        multi_workers = couch_util:get_value(multi_workers, Options, false)
-    },
-    {ok, Q, hibernate}.
-
-
-terminate(_Reason, #q{work_waiters=Workers}) ->
-    lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
-    
-handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
-    Q = Q0#q{size = Q0#q.size + Size,
-                items = Q0#q.items + 1,
-                queue = queue:in({Item, Size}, Q0#q.queue)},
-    case (Q#q.size >= Q#q.max_size) orelse
-            (Q#q.items >= Q#q.max_items) of
-    true ->
-        {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
-    false ->
-        {reply, ok, Q, hibernate}
-    end;
-
-handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
-    gen_server:reply(W, {ok, [Item]}),
-    {reply, ok, Q#q{work_waiters = Rest}, hibernate};
-
-handle_call({dequeue, Max}, From, Q) ->
-    #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
-    case {Workers, Multi} of
-    {[_ | _], false} ->
-        exit("Only one caller allowed to wait for this work at a time");
-    {[_ | _], true} ->
-        {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
-    _ ->
-        case Count of
-        0 ->
-            {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
-        C when C > 0 ->
-            deliver_queue_items(Max, Q)
-        end
-    end;
-
-handle_call(item_count, _From, Q) ->
-    {reply, Q#q.items, Q};
-
-handle_call(size, _From, Q) ->
-    {reply, Q#q.size, Q}.
-
-
-deliver_queue_items(Max, Q) ->
-    #q{
-        queue = Queue,
-        items = Count,
-        size = Size,
-        close_on_dequeue = Close,
-        blocked = Blocked
-    } = Q,
-    case (Max =:= all) orelse (Max >= Count) of
-    false ->
-        {Items, Size2, Queue2, Blocked2} = dequeue_items(
-            Max, Size, Queue, Blocked, []),
-        Q2 = Q#q{
-            items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
-        },
-        {reply, {ok, Items}, Q2};
-    true ->
-        lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
-        Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
-        Items = [Item || {Item, _} <- queue:to_list(Queue)],
-        case Close of
-        false ->
-            {reply, {ok, Items}, Q2};
-        true ->
-            {stop, normal, {ok, Items}, Q2}
-        end
-    end.
-
-
-dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
-    {lists:reverse(DequeuedAcc), Size, Queue, Blocked};
-
-dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
-    {{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
-    case Blocked of
-    [] ->
-        Blocked2 = Blocked;
-    [From | Blocked2] ->
-        gen_server:reply(From, ok)
-    end,
-    dequeue_items(
-        NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
-    
-
-handle_cast(close, #q{items = 0} = Q) ->
-    {stop, normal, Q};
-
-handle_cast(close, Q) ->
-    {noreply, Q#q{close_on_dequeue = true}}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info(X, Q) ->
-    {stop, X, Q}.
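
Taken together this is a bounded buffer: queue/2 blocks producers once
max_items or max_size is exceeded, dequeue/1,2 parks a consumer while
the queue is empty, and close/1 lets queued items drain before the
process stops, after which both operations return 'closed'. A minimal
single-producer, single-consumer sketch (consume/1 and demo/0 are
illustrative helpers, not part of the tree):

    consume(Q) ->
        case couch_work_queue:dequeue(Q) of
            closed ->
                done;
            {ok, Items} ->
                io:format("dequeued ~b item(s)~n", [length(Items)]),
                consume(Q)
        end.

    demo() ->
        {ok, Q} = couch_work_queue:new([{max_items, 100}]),
        spawn(fun() -> consume(Q) end),
        ok = couch_work_queue:queue(Q, {doc, 1}),
        ok = couch_work_queue:queue(Q, {doc, 2}),
        couch_work_queue:close(Q).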


[06/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_query_servers.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
deleted file mode 100644
index 4fef028..0000000
--- a/src/couch/src/couch_query_servers.erl
+++ /dev/null
@@ -1,479 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_query_servers).
-
--export([try_compile/4]).
--export([start_doc_map/3, map_docs/2, map_docs_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3,validate_doc_update/5]).
--export([filter_docs/5]).
--export([filter_view/3]).
-
--export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
-
-% For 210-os-proc-pool.t
--export([get_os_process/1, ret_os_process/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(SUMERROR, <<"The _sum function requires that map values be numbers, "
-    "arrays of numbers, or objects, not '~p'. Objects cannot be mixed with other "
-    "data structures. Objects can be arbitrarily nested, provided that the values "
-    "for all fields are themselves numbers, arrays of numbers, or objects.">>).
-
--define(STATERROR, <<"The _stats function requires that map values be numbers "
-    "or arrays of numbers, not '~p'">>).
-
-% https://gist.github.com/df10284c76d85f988c3f
--define(SUMREGEX, {re_pattern,3,0,<<69,82,67,80,194,0,0,0,8,0,0,0,5,0,0,0,3,0,
-2,0,0,0,125,2,48,0,9,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,118,97,108,
-117,101,115,0,93,0,130,65,9,27,102,27,117,27,110,27,99,27,116,27,105,27,111,27,
-110,102,94,0,9,0,1,66,9,58,11,84,0,9,65,9,27,40,65,9,58,11,65,9,27,44,56,9,94,
-0,7,0,2,58,11,84,0,7,102,94,0,15,0,3,65,9,27,44,65,9,58,11,56,9,84,0,15,65,9,
-27,41,65,9,27,123,65,9,27,114,27,101,27,116,27,117,27,114,27,110,66,9,27,115,
-27,117,27,109,65,9,27,40,56,9,80,0,2,65,9,27,41,56,9,34,59,65,9,27,125,56,9,84,
-0,130,0,0,0,0>>}).
-
-% https://gist.github.com/cbd73238b671325f5a6f
--define(COUNTREGEX, {re_pattern,8,0,<<69,82,67,80,30,2,0,0,8,0,0,0,5,0,0,0,8,0,
-4,0,0,0,125,2,48,0,11,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,114,101,
-114,101,100,117,99,101,0,0,2,118,97,108,117,101,115,0,101,0,93,1,206,65,9,27,
-102,27,117,27,110,27,99,27,116,27,105,27,111,27,110,102,94,0,9,0,1,66,9,58,11,
-84,0,9,65,9,27,40,65,9,58,11,65,9,27,44,56,9,94,0,7,0,2,58,11,84,0,7,102,94,0,
-23,0,3,65,9,27,44,56,9,94,0,7,0,4,58,11,84,0,7,56,9,84,0,23,65,9,27,41,65,9,27,
-123,56,9,94,0,136,0,5,94,0,128,0,6,27,105,27,102,65,9,27,40,56,9,80,0,4,65,9,
-27,41,56,9,34,123,65,9,27,114,27,101,27,116,27,117,27,114,27,110,66,9,27,115,
-27,117,27,109,65,9,27,40,56,9,80,0,2,65,9,27,41,56,9,34,59,56,9,34,125,65,9,27,
-101,27,108,27,115,27,101,56,9,34,123,65,9,27,114,27,101,27,116,27,117,27,114,
-27,110,58,9,80,0,2,65,9,27,46,65,9,27,108,27,101,27,110,27,103,27,116,27,104,
-56,9,34,59,56,9,34,125,84,0,128,83,0,138,94,0,132,0,7,27,105,27,102,65,9,27,40,
-65,9,27,33,56,9,80,0,4,65,9,27,41,56,9,34,123,65,9,27,114,27,101,27,116,27,117,
-27,114,27,110,58,9,80,0,2,65,9,27,46,65,9,27,108,27,101,27,110,27,103,27,116,
-27,104,56,9,34,59,56,9,34,125,65,9,27,101,27,108,27,115,27,101,56,9,34,123,65,
-9,27,114,27,101,27,116,27,117,27,114,27,110,66,9,27,115,27,117,27,109,65,9,27,
-40,56,9,80,0,2,65,9,27,41,56,9,34,59,56,9,34,125,84,0,132,83,0,84,94,0,78,0,8,
-27,114,27,101,27,116,27,117,27,114,27,110,58,9,80,0,4,65,9,27,63,65,9,27,115,
-27,117,27,109,65,9,27,40,56,9,80,0,2,65,9,27,41,65,9,27,58,56,9,80,0,2,65,9,27,
-46,65,9,27,108,27,101,27,110,27,103,27,116,27,104,56,9,34,59,84,0,78,84,1,102,
-65,9,27,125,56,9,84,1,206,0,0,0,0,0,0,0>>}).
-
-
-try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
-    try
-        proc_prompt(Proc, [<<"add_fun">>, FunctionSource]),
-        ok
-    catch {compilation_error, E} ->
-        Fmt = "Compilation of the ~s function in the '~s' view failed: ~s",
-        Msg = io_lib:format(Fmt, [FunctionType, FunctionName, E]),
-        throw({compilation_error, Msg})
-    end.
-
-start_doc_map(Lang, Functions, Lib) ->
-    Proc = get_os_process(Lang),
-    case Lib of
-    {[]} -> ok;
-    Lib ->
-        true = proc_prompt(Proc, [<<"add_lib">>, Lib])
-    end,
-    lists:foreach(fun(FunctionSource) ->
-        true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
-    end, Functions),
-    {ok, Proc}.
-
-map_docs(Proc, Docs) ->
-    % send the documents
-    Results = lists:map(
-        fun(Doc) ->
-            Json = couch_doc:to_json_obj(Doc, []),
-
-            FunsResults = proc_prompt(Proc, [<<"map_doc">>, Json]),
-            % the results are a json array of function map yields like this:
-            % [FunResults1, FunResults2 ...]
-            % where each FunResults is a json array of key value pairs:
-            % [[Key1, Value1], [Key2, Value2]]
-            % Convert the key, value pairs to tuples like
-            % [{Key1, Value1}, {Key2, Value2}]
-            lists:map(
-                fun(FunRs) ->
-                    [list_to_tuple(FunResult) || FunResult <- FunRs]
-                end,
-            FunsResults)
-        end,
-        Docs),
-    {ok, Results}.
-
-map_docs_raw(Proc, DocList) ->
-    {Mod, Fun} = Proc#proc.prompt_many_fun,
-    CommandList = lists:map(
-        fun(Doc) ->
-            EJson = couch_doc:to_json_obj(Doc, []),
-            [<<"map_doc">>, EJson]
-        end,
-        DocList),
-    Mod:Fun(Proc#proc.pid, CommandList).
-
-stop_doc_map(nil) ->
-    ok;
-stop_doc_map(Proc) ->
-    ok = ret_os_process(Proc).
-
-group_reductions_results([]) ->
-    [];
-group_reductions_results(List) ->
-    {Heads, Tails} = lists:foldl(
-        fun([H|T], {HAcc,TAcc}) ->
-            {[H|HAcc], [T|TAcc]}
-        end, {[], []}, List),
-    case Tails of
-    [[]|_] -> % no tails left
-        [Heads];
-    _ ->
-        [Heads | group_reductions_results(Tails)]
-    end.
-
-rereduce(_Lang, [], _ReducedValues) ->
-    {ok, []};
-rereduce(Lang, RedSrcs, ReducedValues) ->
-    Grouped = group_reductions_results(ReducedValues),
-    Results = lists:zipwith(
-        fun
-        (<<"_", _/binary>> = FunSrc, Values) ->
-            {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
-            Result;
-        (FunSrc, Values) ->
-            os_rereduce(Lang, [FunSrc], Values)
-        end, replace_builtin_equivalents(RedSrcs), Grouped),
-    {ok, Results}.
-
-reduce(_Lang, [], _KVs) ->
-    {ok, []};
-reduce(Lang, RedSrcs0, KVs) ->
-    RedSrcs = replace_builtin_equivalents(RedSrcs0),
-    {OsRedSrcs, BuiltinReds} = lists:partition(fun
-        (<<"_", _/binary>>) -> false;
-        (_OsFun) -> true
-    end, RedSrcs),
-    {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
-    {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
-replace_builtin_equivalents([<<"_", _/binary>> = R | Rest]) ->
-    [R | replace_builtin_equivalents(Rest)];
-replace_builtin_equivalents([OsFun | Rest]) ->
-    case re:run(OsFun, ?SUMREGEX) of nomatch ->
-        case re:run(OsFun, ?COUNTREGEX) of nomatch ->
-            [OsFun | replace_builtin_equivalents(Rest)];
-        {match, _} ->
-            [<<"_count">> | replace_builtin_equivalents(Rest)]
-        end;
-    {match, _} ->
-        [<<"_sum">> | replace_builtin_equivalents(Rest)]
-    end;
-replace_builtin_equivalents([]) ->
-    [].
-
-recombine_reduce_results([], [], [], Acc) ->
-    {ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
-recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
-
-os_reduce(_Lang, [], _KVs) ->
-    {ok, []};
-os_reduce(Lang, OsRedSrcs, KVs) ->
-    Proc = get_os_process(Lang),
-    OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
-        [true, Reductions] -> Reductions
-    after
-        ok = ret_os_process(Proc)
-    end,
-    {ok, OsResults}.
-
-os_rereduce(Lang, OsRedSrcs, KVs) ->
-    Proc = get_os_process(Lang),
-    try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
-        [true, [Reduction]] -> Reduction
-    after
-        ok = ret_os_process(Proc)
-    end.
-
-
-builtin_reduce(_Re, [], _KVs, Acc) ->
-    {ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Sum = builtin_sum_rows(KVs),
-    builtin_reduce(Re, BuiltinReds, KVs, [Sum|Acc]);
-builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Count = length(KVs),
-    builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Count = builtin_sum_rows(KVs),
-    builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Stats = builtin_stats(Re, KVs),
-    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
-
-builtin_sum_rows(KVs) ->
-    lists:foldl(fun([_Key, Value], Acc) -> sum_values(Value, Acc) end, 0, KVs).
-
-sum_values({Props}, 0) ->
-    {Props};
-sum_values({Props}, {AccProps}) ->
-    {sum_objects(lists:sort(Props), lists:sort(AccProps))};
-sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
-    Acc + Value;
-sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
-    sum_arrays(Acc, Value);
-sum_values(Value, Acc) when is_number(Value), is_list(Acc) ->
-    sum_arrays(Acc, [Value]);
-sum_values(Value, Acc) when is_list(Value), is_number(Acc) ->
-    sum_arrays([Acc], Value);
-sum_values(Else, _Acc) ->
-    throw_sum_error(Else).
-
-sum_objects([{K1, V1} | Rest1], [{K1, V2} | Rest2]) ->
-    [{K1, sum_values(V1, V2)} | sum_objects(Rest1, Rest2)];
-sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 < K2 ->
-    [{K1, V1} | sum_objects(Rest1, [{K2, V2} | Rest2])];
-sum_objects([{K1, V1} | Rest1], [{K2, V2} | Rest2]) when K1 > K2 ->
-    [{K2, V2} | sum_objects([{K1, V1} | Rest1], Rest2)];
-sum_objects([], Rest) ->
-    Rest;
-sum_objects(Rest, []) ->
-    Rest.
-
-sum_arrays([], []) ->
-    [];
-sum_arrays([_|_]=Xs, []) ->
-    Xs;
-sum_arrays([], [_|_]=Ys) ->
-    Ys;
-sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
-    [X+Y | sum_arrays(Xs,Ys)];
-sum_arrays(Else, _) ->
-    throw_sum_error(Else).
-
-builtin_stats(_, []) ->
-    {[{sum,0}, {count,0}, {min,0}, {max,0}, {sumsqr,0}]};
-builtin_stats(_, [[_,First]|Rest]) ->
-    Unpacked = lists:foldl(fun([_Key, Value], Acc) -> stat_values(Value, Acc) end,
-                           build_initial_accumulator(First), Rest),
-    pack_stats(Unpacked).
-
-stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
-    lists:zipwith(fun stat_values/2, Value, Acc);
-stat_values({PreRed}, Acc) when is_list(PreRed) ->
-    stat_values(unpack_stats({PreRed}), Acc);
-stat_values(Value, Acc) when is_number(Value) ->
-    stat_values({Value, 1, Value, Value, Value*Value}, Acc);
-stat_values(Value, Acc) when is_number(Acc) ->
-    stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc});
-stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
-    {Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
-    {Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
-    {
-      Sum0 + Sum1,
-      Cnt0 + Cnt1,
-      erlang:min(Min0, Min1),
-      erlang:max(Max0, Max1),
-      Sqr0 + Sqr1
-    };
-stat_values(Else, _Acc) ->
-    throw_stat_error(Else).
-
-build_initial_accumulator(L) when is_list(L) ->
-    [build_initial_accumulator(X) || X <- L];
-build_initial_accumulator(X) when is_number(X) ->
-    {X, 1, X, X, X*X};
-build_initial_accumulator({Props}) ->
-    unpack_stats({Props});
-build_initial_accumulator(Else) ->
-    Msg = io_lib:format("non-numeric _stats input: ~w", [Else]),
-    throw({invalid_value, iolist_to_binary(Msg)}).
-
-unpack_stats({PreRed}) when is_list(PreRed) ->
-    {
-      get_number(<<"sum">>, PreRed),
-      get_number(<<"count">>, PreRed),
-      get_number(<<"min">>, PreRed),
-      get_number(<<"max">>, PreRed),
-      get_number(<<"sumsqr">>, PreRed)
-    }.
-
-pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
-    {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
-pack_stats(Stats) when is_list(Stats) ->
-    lists:map(fun pack_stats/1, Stats).
-
-get_number(Key, Props) ->
-    case couch_util:get_value(Key, Props) of
-    X when is_number(X) ->
-        X;
-    undefined when is_binary(Key) ->
-        get_number(binary_to_atom(Key, latin1), Props);
-    undefined ->
-        Msg = io_lib:format("user _stats input missing required field ~s (~p)",
-            [Key, Props]),
-        throw({invalid_value, iolist_to_binary(Msg)});
-    Else ->
-        Msg = io_lib:format("non-numeric _stats input received for ~s: ~w",
-            [Key, Else]),
-        throw({invalid_value, iolist_to_binary(Msg)})
-    end.
-
-% use the function stored in ddoc.validate_doc_update to test an update.
-validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
-    JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
-    JsonDiskDoc = json_doc(DiskDoc),
-    case ddoc_prompt(DDoc, [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]) of
-        1 ->
-            ok;
-        {[{<<"forbidden">>, Message}]} ->
-            throw({forbidden, Message});
-        {[{<<"unauthorized">>, Message}]} ->
-            throw({unauthorized, Message});
-        Message when is_binary(Message) ->
-            throw({unknown_error, Message})
-    end.
-
-json_doc(nil) -> null;
-json_doc(Doc) ->
-    couch_doc:to_json_obj(Doc, [revs]).
-
-filter_view(DDoc, VName, Docs) ->
-    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
-    [true, Passes] = ddoc_prompt(DDoc, [<<"views">>, VName, <<"map">>], [JsonDocs]),
-    {ok, Passes}.
-
-filter_docs(Req, Db, DDoc, FName, Docs) ->
-    JsonReq = case Req of
-    {json_req, JsonObj} ->
-        JsonObj;
-    #httpd{} = HttpReq ->
-        couch_httpd_external:json_req_obj(HttpReq, Db)
-    end,
-    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
-    [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
-        [JsonDocs, JsonReq]),
-    {ok, Passes}.
-
-ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
-    proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
-
-ddoc_prompt(DDoc, FunPath, Args) ->
-    with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
-        proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
-    end).
-
-with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
-    Rev = couch_doc:rev_to_str({Start, DiskRev}),
-    DDocKey = {DDocId, Rev},
-    Proc = get_ddoc_process(DDoc, DDocKey),
-    try Fun({Proc, DDocId})
-    after
-        ok = ret_os_process(Proc)
-    end.
-
-proc_prompt(Proc, Args) ->
-     case proc_prompt_raw(Proc, Args) of
-     {json, Json} ->
-         ?JSON_DECODE(Json);
-     EJson ->
-         EJson
-     end.
-
-proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
-    apply(Mod, Func, [Proc#proc.pid, Args]).
-
-raw_to_ejson({json, Json}) ->
-    ?JSON_DECODE(Json);
-raw_to_ejson(EJson) ->
-    EJson.
-
-proc_stop(Proc) ->
-    {Mod, Func} = Proc#proc.stop_fun,
-    apply(Mod, Func, [Proc#proc.pid]).
-
-proc_set_timeout(Proc, Timeout) ->
-    {Mod, Func} = Proc#proc.set_timeout_fun,
-    apply(Mod, Func, [Proc#proc.pid, Timeout]).
-
-get_ddoc_process(#doc{} = DDoc, DDocKey) ->
-    % TODO: remove this case statement
-    case gen_server:call(couch_proc_manager, {get_proc, DDoc, DDocKey}, infinity) of
-    {ok, Proc, {QueryConfig}} ->
-        % process knows the ddoc
-        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
-        true ->
-            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
-            Proc;
-        _ ->
-            catch proc_stop(Proc),
-            get_ddoc_process(DDoc, DDocKey)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-get_os_process(Lang) ->
-    case gen_server:call(couch_proc_manager, {get_proc, Lang}, infinity) of
-    {ok, Proc, {QueryConfig}} ->
-        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
-        true ->
-            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
-            Proc;
-        _ ->
-            catch proc_stop(Proc),
-            get_os_process(Lang)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-ret_os_process(Proc) ->
-    true = gen_server:call(couch_proc_manager, {ret_proc, Proc}, infinity),
-    catch unlink(Proc#proc.pid),
-    ok.
-
-throw_sum_error(Else) ->
-    throw({invalid_value, iolist_to_binary(io_lib:format(?SUMERROR, [Else]))}).
-
-throw_stat_error(Else) ->
-    throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-sum_values_test() ->
-    ?assertEqual(3, sum_values(1, 2)),
-    ?assertEqual([2,4,6], sum_values(1, [1,4,6])),
-    ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])),
-    X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}},
-            {<<"g">>,1}]},
-    Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}},
-            {<<"f">>,1}, {<<"g">>,1}]},
-    Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}},
-            {<<"f">>,1}, {<<"g">>,2}]},
-    ?assertEqual(Z, sum_values(X, Y)),
-    ?assertEqual(Z, sum_values(Y, X)).
-
-stat_values_test() ->
-    ?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
-    ?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
-    ?assertEqual([{9, 2, 2, 7, 53},
-                  {14, 2, 3, 11, 130},
-                  {18, 2, 5, 13, 194}
-                 ], stat_values([2,3,5], [7,11,13])).
-
--endif.
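
A detail that is easy to miss: replace_builtin_equivalents/1 runs every
OS-language reduce source against the two precompiled patterns above
and rewrites hand-written JavaScript reducers that merely sum or count
their values into the builtin <<"_sum">>/<<"_count">>, so those views
are reduced in Erlang instead of round-tripping through the external
query server. A sketch of the intended rewrite (the exact source forms
matched are the ones in the linked gists):

    %% a plain summing reducer becomes the builtin _sum ...
    [<<"_sum">>] = replace_builtin_equivalents(
        [<<"function(keys, values) { return sum(values); }">>]),
    %% ... while anything the patterns don't recognize passes through
    [<<"function(k, v) { return 42; }">>] = replace_builtin_equivalents(
        [<<"function(k, v) { return 42; }">>]).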

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_secondary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
deleted file mode 100644
index d0ed0c2..0000000
--- a/src/couch/src/couch_secondary_sup.erl
+++ /dev/null
@@ -1,42 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_secondary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
-    supervisor:start_link({local,couch_secondary_services}, ?MODULE, []).
-
-init([]) ->
-    SecondarySupervisors = [
-        {couch_db_update_notifier_sup,
-            {couch_db_update_notifier_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_db_update_notifier_sup]}
-    ],
-    Children = SecondarySupervisors ++ [
-        begin
-            {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
-
-            {list_to_atom(Name),
-                {Module, Fun, Args},
-                permanent,
-                brutal_kill,
-                worker,
-                [Module]}
-        end
-        || {Name, SpecStr}
-        <- config:get("daemons"), SpecStr /= ""],
-    {ok, {{one_for_one, 50, 3600}, Children}}.
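
The list comprehension above is what turns plain [daemons] config
entries into supervised workers: each value string is parsed with
couch_util:parse_term/1 into an {Module, Function, Args} triple, and
the config key becomes the child id. A sketch of the transformation for
one illustrative entry, e.g. an ini line of the form
compaction_daemon={couch_compaction_daemon, start_link, []}:

    {ok, {Mod, Fun, Args}} =
        couch_util:parse_term("{couch_compaction_daemon, start_link, []}"),
    ChildSpec = {list_to_atom("compaction_daemon"), {Mod, Fun, Args},
                 permanent, brutal_kill, worker, [Mod]}.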

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_server.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
deleted file mode 100644
index e4de69e..0000000
--- a/src/couch/src/couch_server.erl
+++ /dev/null
@@ -1,510 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([open/2,create/2,delete/2,get_version/0,get_uuid/0]).
--export([all_databases/0, all_databases/2]).
--export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
--export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
--export([close_lru/0]).
-
-% config_listener api
--export([handle_config_change/5]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(server,{
-    root_dir = [],
-    dbname_regexp,
-    max_dbs_open=100,
-    dbs_open=0,
-    start_time="",
-    lru = couch_lru:new()
-    }).
-
-dev_start() ->
-    couch:stop(),
-    up_to_date = make:all([load, debug_info]),
-    couch:start().
-
-get_version() ->
-    Apps = application:loaded_applications(),
-    case lists:keysearch(couch, 1, Apps) of
-    {value, {_, _, Vsn}} ->
-        Vsn;
-    false ->
-        "0.0.0"
-    end.
-
-get_uuid() ->
-    case config:get("couchdb", "uuid", nil) of
-        nil ->
-            UUID = couch_uuids:random(),
-            config:set("couchdb", "uuid", ?b2l(UUID)),
-            UUID;
-        UUID -> ?l2b(UUID)
-    end.
-
-get_stats() ->
-    {ok, #server{start_time=Time,dbs_open=Open}} =
-            gen_server:call(couch_server, get_server),
-    [{start_time, ?l2b(Time)}, {dbs_open, Open}].
-
-sup_start_link() ->
-    gen_server:start_link({local, couch_server}, couch_server, [], []).
-
-
-open(DbName, Options0) ->
-    Options = maybe_add_sys_db_callbacks(DbName, Options0),
-    Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-    case ets:lookup(couch_dbs, DbName) of
-    [#db{fd=Fd, fd_monitor=Lock} = Db] when Lock =/= locked ->
-        update_lru(DbName, Options),
-        {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
-    _ ->
-        Timeout = couch_util:get_value(timeout, Options, infinity),
-        case gen_server:call(couch_server, {open, DbName, Options}, Timeout) of
-        {ok, #db{fd=Fd} = Db} ->
-            update_lru(DbName, Options),
-            {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
-        Error ->
-            Error
-        end
-    end.
-
-update_lru(DbName, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> gen_server:cast(couch_server, {update_lru, DbName});
-        true -> ok
-    end.
-
-close_lru() ->
-    gen_server:call(couch_server, close_lru).
-
-create(DbName, Options0) ->
-    Options = maybe_add_sys_db_callbacks(DbName, Options0),
-    case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
-    {ok, #db{fd=Fd} = Db} ->
-        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-        {ok, Db#db{user_ctx=Ctx, fd_monitor=erlang:monitor(process,Fd)}};
-    Error ->
-        Error
-    end.
-
-delete(DbName, Options) ->
-    gen_server:call(couch_server, {delete, DbName, Options}, infinity).
-
-maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
-    maybe_add_sys_db_callbacks(?b2l(DbName), Options);
-maybe_add_sys_db_callbacks(DbName, Options) ->
-    case config:get("replicator", "db", "_replicator") of
-    DbName ->
-        [
-            {before_doc_update, fun couch_replicator_manager:before_doc_update/2},
-            {after_doc_read, fun couch_replicator_manager:after_doc_read/2},
-            sys_db | Options
-        ];
-    _ ->
-        case config:get("couch_httpd_auth", "authentication_db", "_users") of
-        DbName ->
-        [
-            {before_doc_update, fun couch_users_db:before_doc_update/2},
-            {after_doc_read, fun couch_users_db:after_doc_read/2},
-            sys_db | Options
-        ];
-        _ ->
-            case config:get("mem3", "shard_db", "dbs") of
-            DbName ->
-                [sys_db | Options];
-            _ ->
-                Options
-            end
-        end
-    end.
-
-check_dbname(#server{dbname_regexp=RegExp}, DbName) ->
-    case re:run(DbName, RegExp, [{capture, none}]) of
-    nomatch ->
-        case DbName of
-            "_users" -> ok;
-            "_replicator" -> ok;
-            _Else ->
-                {error, illegal_database_name, DbName}
-            end;
-    match ->
-        ok
-    end.
-
-is_admin(User, ClearPwd) ->
-    case config:get("admins", User) of
-    "-hashed-" ++ HashedPwdAndSalt ->
-        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
-        couch_util:to_hex(crypto:sha(ClearPwd ++ Salt)) == HashedPwd;
-    _Else ->
-        false
-    end.
-
-has_admins() ->
-    config:get("admins") /= [].
-
-get_full_filename(Server, DbName) ->
-    filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
-
-hash_admin_passwords() ->
-    hash_admin_passwords(true).
-
-hash_admin_passwords(Persist) ->
-    lists:foreach(
-        fun({User, ClearPassword}) ->
-            HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
-            config:set("admins", User, ?b2l(HashedPassword), Persist)
-        end, couch_passwords:get_unhashed_admins()).
-
-init([]) ->
-    % read config and register for configuration changes
-
-    % just stop if one of the config settings change. couch_server_sup
-    % will restart us and then we will pick up the new settings.
-
-    RootDir = config:get("couchdb", "database_dir", "."),
-    MaxDbsOpen = list_to_integer(
-            config:get("couchdb", "max_dbs_open")),
-    ok = config:listen_for_changes(?MODULE, nil),
-    ok = couch_file:init_delete_dir(RootDir),
-    hash_admin_passwords(),
-    {ok, RegExp} = re:compile(
-        "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
-        "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
-    ),
-    ets:new(couch_dbs, [set, protected, named_table, {keypos, #db.name}]),
-    process_flag(trap_exit, true),
-    {ok, #server{root_dir=RootDir,
-                dbname_regexp=RegExp,
-                max_dbs_open=MaxDbsOpen,
-                start_time=couch_util:rfc1123_date()}}.
-
-terminate(_Reason, _Srv) ->
-    ets:foldl(fun(#db{main_pid=Pid}, _) -> couch_util:shutdown_sync(Pid) end,
-        nil, couch_dbs),
-    ok.
-
-handle_config_change("couchdb", "database_dir", _, _, _) ->
-    exit(whereis(couch_server), config_change),
-    remove_handler;
-handle_config_change("couchdb", "max_dbs_open", Max, _, _) ->
-    {ok, gen_server:call(couch_server,{set_max_dbs_open,list_to_integer(Max)})};
-handle_config_change("admins", _, _, Persist, _) ->
-    % spawn here so couch event manager doesn't deadlock
-    {ok, spawn(fun() -> hash_admin_passwords(Persist) end)};
-handle_config_change("httpd", "authentication_handlers", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "bind_address", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "port", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "max_connections", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd", "default_handler", _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd_global_handlers", _, _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change("httpd_db_handlers", _, _, _, _) ->
-    {ok, couch_httpd:stop()};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-all_databases() ->
-    {ok, DbList} = all_databases(
-        fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
-    {ok, lists:usort(DbList)}.
-
-all_databases(Fun, Acc0) ->
-    {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
-    NormRoot = couch_util:normpath(Root),
-    FinalAcc = try
-    filelib:fold_files(Root,
-        "^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex
-        "(\\.[0-9]{10,})?"         % optional shard timestamp
-        "\\.couch$",               % filename extension
-        true,
-            fun(Filename, AccIn) ->
-                NormFilename = couch_util:normpath(Filename),
-                case NormFilename -- NormRoot of
-                [$/ | RelativeFilename] -> ok;
-                RelativeFilename -> ok
-                end,
-                case Fun(?l2b(filename:rootname(RelativeFilename, ".couch")), AccIn) of
-                {ok, NewAcc} -> NewAcc;
-                {stop, NewAcc} -> throw({stop, Fun, NewAcc})
-                end
-            end, Acc0)
-    catch throw:{stop, Fun, Acc1} ->
-         Acc1
-    end,
-    {ok, FinalAcc}.
-
-
-make_room(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> maybe_close_lru_db(Server);
-        true -> {ok, Server}
-    end.
-
-maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
-        when NumOpen < MaxOpen ->
-    {ok, Server};
-maybe_close_lru_db(#server{lru=Lru}=Server) ->
-    try
-        {ok, db_closed(Server#server{lru = couch_lru:close(Lru)}, [])}
-    catch error:all_dbs_active ->
-        {error, all_dbs_active}
-    end.
-
-open_async(Server, From, DbName, Filepath, Options) ->
-    Parent = self(),
-    put({async_open, DbName}, now()),
-    Opener = spawn_link(fun() ->
-        Res = couch_db:start_link(DbName, Filepath, Options),
-        case {Res, lists:member(create, Options)} of
-            {{ok, _Db}, true} ->
-                couch_db_update_notifier:notify({created, DbName});
-            _ ->
-                ok
-        end,
-        gen_server:call(Parent, {open_result, DbName, Res}, infinity),
-        unlink(Parent)
-    end),
-    ReqType = case lists:member(create, Options) of
-        true -> create;
-        false -> open
-    end,
-    % icky hack of field values - compactor_pid used to store clients
-    % and fd used for opening request info
-    true = ets:insert(couch_dbs, #db{
-        name = DbName,
-        fd = ReqType,
-        main_pid = Opener,
-        compactor_pid = [From],
-        fd_monitor = locked,
-        options = Options
-    }),
-    db_opened(Server, Options).
-
-handle_call(close_lru, _From, #server{lru=Lru} = Server) ->
-    try
-        {reply, ok, db_closed(Server#server{lru = couch_lru:close(Lru)}, [])}
-    catch error:all_dbs_active ->
-        {reply, {error, all_dbs_active}, Server}
-    end;
-handle_call(open_dbs_count, _From, Server) ->
-    {reply, Server#server.dbs_open, Server};
-handle_call({set_max_dbs_open, Max}, _From, Server) ->
-    {reply, ok, Server#server{max_dbs_open=Max}};
-handle_call(get_server, _From, Server) ->
-    {reply, {ok, Server}, Server};
-handle_call({open_result, DbName, {ok, Db}}, _From, Server) ->
-    link(Db#db.main_pid),
-    case erase({async_open, DbName}) of undefined -> ok; T0 ->
-        ?LOG_INFO("needed ~p ms to open new ~s", [timer:now_diff(now(),T0)/1000,
-            DbName])
-    end,
-    % icky hack of field values - compactor_pid used to store clients
-    % and fd used to possibly store a creation request
-    [#db{fd=ReqType, compactor_pid=Froms}] = ets:lookup(couch_dbs, DbName),
-    [gen_server:reply(From, {ok, Db}) || From <- Froms],
-    % Cancel the creation request if it exists.
-    case ReqType of
-        {create, DbName, _Filepath, _Options, CrFrom} ->
-            gen_server:reply(CrFrom, file_exists);
-        _ ->
-            ok
-    end,
-    true = ets:insert(couch_dbs, Db),
-    Lru = case couch_db:is_system_db(Db) of
-        false ->
-            Stat = {couchdb, open_databases},
-            couch_stats_collector:track_process_count(Db#db.main_pid, Stat),
-            couch_lru:insert(DbName, Server#server.lru);
-        true ->
-            Server#server.lru
-    end,
-    {reply, ok, Server#server{lru = Lru}};
-handle_call({open_result, DbName, {error, eexist}}, From, Server) ->
-    handle_call({open_result, DbName, file_exists}, From, Server);
-handle_call({open_result, DbName, Error}, _From, Server) ->
-    % icky hack of field values - compactor_pid used to store clients
-    [#db{fd=ReqType, compactor_pid=Froms}=Db] = ets:lookup(couch_dbs, DbName),
-    [gen_server:reply(From, Error) || From <- Froms],
-    ?LOG_INFO("open_result error ~p for ~s", [Error, DbName]),
-    true = ets:delete(couch_dbs, DbName),
-    NewServer = case ReqType of
-        {create, DbName, Filepath, Options, CrFrom} ->
-            open_async(Server, CrFrom, DbName, Filepath, Options);
-        _ ->
-            Server
-    end,
-    {reply, ok, db_closed(NewServer, Db#db.options)};
-handle_call({open, DbName, Options}, From, Server) ->
-    case ets:lookup(couch_dbs, DbName) of
-    [] ->
-        DbNameList = binary_to_list(DbName),
-        case check_dbname(Server, DbNameList) of
-        ok ->
-            case make_room(Server, Options) of
-            {ok, Server2} ->
-                Filepath = get_full_filename(Server, DbNameList),
-                {noreply, open_async(Server2, From, DbName, Filepath, Options)};
-            CloseError ->
-                {reply, CloseError, Server}
-            end;
-        Error ->
-            {reply, Error, Server}
-        end;
-    [#db{compactor_pid = Froms} = Db] when is_list(Froms) ->
-        % icky hack of field values - compactor_pid used to store clients
-        true = ets:insert(couch_dbs, Db#db{compactor_pid = [From|Froms]}),
-        if length(Froms) =< 10 -> ok; true ->
-            Fmt = "~b clients waiting to open db ~s",
-            ?LOG_INFO(Fmt, [length(Froms), DbName])
-        end,
-        {noreply, Server};
-    [#db{} = Db] ->
-        {reply, {ok, Db}, Server}
-    end;
-handle_call({create, DbName, Options}, From, Server) ->
-    DbNameList = binary_to_list(DbName),
-    Filepath = get_full_filename(Server, DbNameList),
-    case check_dbname(Server, DbNameList) of
-    ok ->
-        case ets:lookup(couch_dbs, DbName) of
-        [] ->
-            case make_room(Server, Options) of
-            {ok, Server2} ->
-                {noreply, open_async(Server2, From, DbName, Filepath,
-                        [create | Options])};
-            CloseError ->
-                {reply, CloseError, Server}
-            end;
-        [#db{fd=open}=Db] ->
-            % We're trying to create a database while someone is in
-            % the middle of trying to open it. We allow one creator
-            % to wait while we figure out if it'll succeed.
-            % icky hack of field values - fd used to store create request
-            CrOptions = [create | Options],
-            NewDb = Db#db{fd={create, DbName, Filepath, CrOptions, From}},
-            true = ets:insert(couch_dbs, NewDb),
-            {noreply, Server};
-        [_AlreadyRunningDb] ->
-            {reply, file_exists, Server}
-        end;
-    Error ->
-        {reply, Error, Server}
-    end;
-handle_call({delete, DbName, Options}, _From, Server) ->
-    DbNameList = binary_to_list(DbName),
-    case check_dbname(Server, DbNameList) of
-    ok ->
-        FullFilepath = get_full_filename(Server, DbNameList),
-        Server2 =
-        case ets:lookup(couch_dbs, DbName) of
-        [] -> Server;
-        [#db{main_pid=Pid, compactor_pid=Froms} = Db] when is_list(Froms) ->
-            % icky hack of field values - compactor_pid used to store clients
-            true = ets:delete(couch_dbs, DbName),
-            exit(Pid, kill),
-            [gen_server:reply(F, not_found) || F <- Froms],
-            db_closed(Server, Db#db.options);
-        [#db{main_pid=Pid} = Db] ->
-            true = ets:delete(couch_dbs, DbName),
-            exit(Pid, kill),
-            db_closed(Server, Db#db.options)
-        end,
-
-        %% Delete any leftover compaction files. If we don't do this, a
-        %% subsequent request for this DB will try to open them and use
-        %% them for recovery.
-        lists:foreach(fun(Ext) ->
-            couch_file:delete(Server#server.root_dir, FullFilepath ++ Ext)
-        end, [".compact", ".compact.data", ".compact.meta"]),
-        couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),
-
-        Async = not lists:member(sync, Options),
-
-        case couch_file:delete(Server#server.root_dir, FullFilepath, Async) of
-        ok ->
-            couch_db_update_notifier:notify({deleted, DbName}),
-            {reply, ok, Server2};
-        {error, enoent} ->
-            {reply, not_found, Server2};
-        Else ->
-            {reply, Else, Server2}
-        end;
-    Error ->
-        {reply, Error, Server}
-    end;
-handle_call({db_updated, #db{name = DbName} = Db}, _From, Server) ->
-    true = ets:insert(couch_dbs, Db),
-    Lru = case couch_db:is_system_db(Db) of
-        false -> couch_lru:update(DbName, Server#server.lru);
-        true -> Server#server.lru
-    end,
-    {reply, ok, Server#server{lru = Lru}}.
-
-handle_cast({update_lru, DbName}, #server{lru = Lru} = Server) ->
-    {noreply, Server#server{lru = couch_lru:update(DbName, Lru)}};
-handle_cast(Msg, Server) ->
-    {stop, {unknown_cast_message, Msg}, Server}.
-
-code_change(_, State, _) ->
-    {ok, State}.
-
-handle_info({'EXIT', _Pid, config_change}, Server) ->
-    {stop, config_change, Server};
-handle_info({'EXIT', Pid, Reason}, Server) ->
-    case ets:match_object(couch_dbs, #db{main_pid=Pid, _='_'}) of
-    [#db{name = DbName, compactor_pid=Froms} = Db] ->
-        if Reason /= snappy_nif_not_loaded -> ok; true ->
-            Msg = io_lib:format("To open the database `~s`, Apache CouchDB "
-                "must be built with Erlang OTP R13B04 or higher.", [DbName]),
-            ?LOG_ERROR(Msg, [])
-        end,
-        ?LOG_INFO("db ~s died with reason ~p", [DbName, Reason]),
-        % icky hack of field values - compactor_pid used to store clients
-        if is_list(Froms) ->
-            [gen_server:reply(From, Reason) || From <- Froms];
-        true ->
-            ok
-        end,
-        true = ets:delete(couch_dbs, DbName),
-        {noreply, db_closed(Server, Db#db.options)};
-    [] ->
-        {noreply, Server}
-    end;
-handle_info(Info, Server) ->
-    {stop, {unknown_message, Info}, Server}.
-
-db_opened(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> Server#server{dbs_open=Server#server.dbs_open + 1};
-        true -> Server
-    end.
-
-db_closed(Server, Options) ->
-    case lists:member(sys_db, Options) of
-        false -> Server#server{dbs_open=Server#server.dbs_open - 1};
-        true -> Server
-    end.
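
The regex compiled in init/1 is the gatekeeper for database names: a
name must start with a lowercase letter, may then contain lowercase
letters, digits and _$()+-/ characters, and may end with a shard
timestamp of ten or more digits; "_users" and "_replicator" are
special-cased in check_dbname/2 rather than matched. A standalone check
of the same pattern:

    {ok, RegExp} = re:compile(
        "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*(\\.[0-9]{10,})?$"),
    match   = re:run("my_db/with-slash", RegExp, [{capture, none}]),
    match   = re:run("mydb.1388534400", RegExp, [{capture, none}]),
    %% rejected by the regex, allowed by the explicit whitelist instead
    nomatch = re:run("_users", RegExp, [{capture, none}]).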

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_stats_aggregator.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_stats_aggregator.erl b/src/couch/src/couch_stats_aggregator.erl
deleted file mode 100644
index 416c9a0..0000000
--- a/src/couch/src/couch_stats_aggregator.erl
+++ /dev/null
@@ -1,312 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_aggregator).
--behaviour(gen_server).
--behaviour(config_listener).
-
--export([start/0, start/1, stop/0]).
--export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5]).
-
-
--record(aggregate, {
-    description = <<"">>,
-    seconds = 0,
-    count = 0,
-    current = null,
-    sum = null,
-    mean = null,
-    variance = null,
-    stddev = null,
-    min = null,
-    max = null,
-    samples = []
-}).
-
-
-start() ->
-    PrivDir = couch_util:priv_dir(),
-    start(filename:join(PrivDir, "stat_descriptions.cfg")).
-    
-start(FileName) ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [FileName], []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-all() ->
-    ?MODULE:all(0).
-all(Time) when is_binary(Time) ->
-    ?MODULE:all(list_to_integer(binary_to_list(Time)));
-all(Time) when is_atom(Time) ->
-    ?MODULE:all(list_to_integer(atom_to_list(Time)));
-all(Time) when is_integer(Time) ->
-    Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}),
-    Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs),
-    case Stats of
-        [] ->
-            {[]};
-        _ ->
-            Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) ->
-                CurrKeys = case proplists:lookup(Mod, Acc) of
-                    none -> [];
-                    {Mod, {Keys}} -> Keys
-                end,
-                NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]},
-                [{Mod, NewMod} | proplists:delete(Mod, Acc)]
-            end, [], Stats),
-            {Ret}
-    end.
-
-get(Key) ->
-    ?MODULE:get(Key, 0).
-get(Key, Time) when is_binary(Time) ->
-    ?MODULE:get(Key, list_to_integer(binary_to_list(Time)));
-get(Key, Time) when is_atom(Time) ->
-    ?MODULE:get(Key, list_to_integer(atom_to_list(Time)));
-get(Key, Time) when is_integer(Time) ->
-    case ets:lookup(?MODULE, {make_key(Key), Time}) of
-        [] -> #aggregate{seconds=Time};
-        [{_, Agg}] -> Agg
-    end.
-
-get_json(Key) ->
-    get_json(Key, 0).
-get_json(Key, Time) ->
-    to_json_term(?MODULE:get(Key, Time)).
-
-collect_sample() ->
-    gen_server:call(?MODULE, collect_sample, infinity).
-
-
-init(StatDescsFileName) ->
-    % Create an aggregate entry for each {description, rate} pair.
-    ets:new(?MODULE, [named_table, set, protected]),
-    SampleStr = config:get("stats", "samples", "[0]"),
-    {ok, Samples} = couch_util:parse_term(SampleStr),
-    {ok, Descs} = file:consult(StatDescsFileName),
-    lists:foreach(fun({Sect, Key, Value}) ->
-        lists:foreach(fun(Secs) ->
-            Agg = #aggregate{
-                description=list_to_binary(Value),
-                seconds=Secs
-            },
-            ets:insert(?MODULE, {{{Sect, Key}, Secs}, Agg})
-        end, Samples)
-    end, Descs),
-    
-    ok = config:listen_for_changes(?MODULE, nil),
-    
-    Rate = list_to_integer(config:get("stats", "rate", "1000")),
-    % TODO: Add timer_start to kernel start options.
-    {ok, TRef} = timer:apply_after(Rate, ?MODULE, collect_sample, []),
-    {ok, {TRef, Rate}}.
-    
-terminate(_Reason, {TRef, _Rate}) ->
-    timer:cancel(TRef),
-    ok.
-
-handle_call(collect_sample, _, {OldTRef, SampleInterval}) ->
-    timer:cancel(OldTRef),
-    {ok, TRef} = timer:apply_after(SampleInterval, ?MODULE, collect_sample, []),
-    % Gather new stats values to add.
-    Incs = lists:map(fun({Key, Value}) ->
-        {Key, {incremental, Value}}
-    end, couch_stats_collector:all(incremental)),
-    Abs = lists:map(fun({Key, Values}) ->
-        couch_stats_collector:clear(Key),
-        Values2 = case Values of
-            X when is_list(X) -> X;
-            Else -> [Else]
-        end,
-        {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) ->
-            {Count+1, Curr + (Val - Curr) / (Count+1)}
-        end, {0, 0}, Values2),
-        {Key, {absolute, Mean}}
-    end, couch_stats_collector:all(absolute)),
-    
-    Values = Incs ++ Abs,
-    Now = erlang:now(),
-    lists:foreach(fun({{Key, Rate}, Agg}) ->
-        NewAgg = case proplists:lookup(Key, Values) of
-            none ->
-                rem_values(Now, Agg);
-            {Key, {Type, Value}} ->
-                NewValue = new_value(Type, Value, Agg#aggregate.current),
-                Agg2 = add_value(Now, NewValue, Agg),
-                rem_values(Now, Agg2)
-        end,
-        ets:insert(?MODULE, {{Key, Rate}, NewAgg})
-    end, ets:tab2list(?MODULE)),
-    {reply, ok, {TRef, SampleInterval}}.
-
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info({gen_event_EXIT, {config_listener, ?MODULE}, _Reason}, State) ->
-    erlang:send_after(5000, self(), restart_config_listener),
-    {noreply, State};
-handle_info(restart_config_listener, State) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-    {noreply, State};
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-code_change(_OldVersion, State, _Extra) ->
-    {ok, State}.
-
-
-handle_config_change("stats", _, _, _, _) ->
-    exit(whereis(?MODULE), config_change),
-    remove_handler;
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-new_value(incremental, Value, null) ->
-    Value;
-new_value(incremental, Value, Current) ->
-    Value - Current;
-new_value(absolute, Value, _Current) ->
-    Value.
-
-add_value(Time, Value, #aggregate{count=Count, seconds=Secs}=Agg) when Count < 1 ->
-    Samples = case Secs of
-        0 -> [];
-        _ -> [{Time, Value}]
-    end,
-    Agg#aggregate{
-        count=1,
-        current=Value,
-        sum=Value,
-        mean=Value,
-        variance=0.0,
-        stddev=null,
-        min=Value,
-        max=Value,
-        samples=Samples
-    };
-add_value(Time, Value, Agg) ->
-    #aggregate{
-        count=Count,
-        current=Current,
-        sum=Sum,
-        mean=Mean,
-        variance=Variance,
-        samples=Samples
-    } = Agg,
-    
-    NewCount = Count + 1,
-    NewMean = Mean + (Value - Mean) / NewCount,
-    NewVariance = Variance + (Value - Mean) * (Value - NewMean),
-    StdDev = case NewCount > 1 of
-        false -> null;
-        _ -> math:sqrt(NewVariance / (NewCount - 1))
-    end,
-    Agg2 = Agg#aggregate{
-        count=NewCount,
-        current=Current + Value,
-        sum=Sum + Value,
-        mean=NewMean,
-        variance=NewVariance,
-        stddev=StdDev,
-        min=lists:min([Agg#aggregate.min, Value]),
-        max=lists:max([Agg#aggregate.max, Value])
-    },
-    case Agg2#aggregate.seconds of
-        0 -> Agg2;
-        _ -> Agg2#aggregate{samples=[{Time, Value} | Samples]}
-    end.
-
-rem_values(Time, Agg) ->
-    Seconds = Agg#aggregate.seconds,
-    Samples = Agg#aggregate.samples,
-    Pred = fun({When, _Value}) ->
-        timer:now_diff(Time, When) =< (Seconds * 1000000)
-    end,
-    {Keep, Remove} = lists:splitwith(Pred, Samples),
-    Agg2 = lists:foldl(fun({_, Value}, Acc) ->
-        rem_value(Value, Acc)
-    end, Agg, Remove),
-    Agg2#aggregate{samples=Keep}.
-
-rem_value(_Value, #aggregate{count=Count, seconds=Secs}) when Count =< 1 ->
-    #aggregate{seconds=Secs};
-rem_value(Value, Agg) ->
-    #aggregate{
-        count=Count,
-        sum=Sum,
-        mean=Mean,
-        variance=Variance
-    } = Agg,
-
-    OldMean = (Mean * Count - Value) / (Count - 1),
-    OldVariance = Variance - (Value - OldMean) * (Value - Mean),
-    OldCount = Count - 1,
-    StdDev = case OldCount > 1 of
-        false -> null;
-        _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
-    end,
-    Agg#aggregate{
-        count=OldCount,
-        sum=Sum-Value,
-        mean=clamp_value(OldMean),
-        variance=clamp_value(OldVariance),
-        stddev=StdDev
-    }.
-
-to_json_term(Agg) ->
-    {Min, Max} = case Agg#aggregate.seconds > 0 of
-        false ->
-            {Agg#aggregate.min, Agg#aggregate.max};
-        _ ->
-            case length(Agg#aggregate.samples) > 0 of
-                true ->
-                    Extract = fun({_Time, Value}) -> Value end,
-                    Samples = lists:map(Extract, Agg#aggregate.samples),
-                    {lists:min(Samples), lists:max(Samples)};
-                _ ->
-                    {null, null}
-            end
-    end,
-    {[
-        {description, Agg#aggregate.description},
-        {current, round_value(Agg#aggregate.sum)},
-        {sum, round_value(Agg#aggregate.sum)},
-        {mean, round_value(Agg#aggregate.mean)},
-        {stddev, round_value(Agg#aggregate.stddev)},
-        {min, Min},
-        {max, Max}
-    ]}.
-
-make_key({Mod, Val}) when is_integer(Val) ->
-    {Mod, list_to_atom(integer_to_list(Val))};
-make_key(Key) ->
-    Key.
-
-round_value(Val) when not is_number(Val) ->
-    Val;
-round_value(Val) when Val == 0 ->
-    Val;
-round_value(Val) ->
-    erlang:round(Val * 1000.0) / 1000.0.
-
-clamp_value(Val) when Val > 0.00000000000001 ->
-    Val;
-clamp_value(_) ->
-    0.0.
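
The running mean/variance update in add_value/3 and its inverse in
rem_value/2 above are the standard online (Welford) recurrences. For
reference, a minimal standalone sketch of the forward update
(hypothetical module, not part of the tree):

    -module(welford).
    -export([new/0, add/2, stats/1]).

    % State: {Count, Mean, M2}, where M2 accumulates squared deltas.
    new() -> {0, 0.0, 0.0}.

    add(X, {N, Mean, M2}) ->
        N1 = N + 1,
        Mean1 = Mean + (X - Mean) / N1,
        {N1, Mean1, M2 + (X - Mean) * (X - Mean1)}.

    % Sample variance is only defined once N > 1, mirroring the
    % stddev = null case above.
    stats({N, Mean, M2}) when N > 1 -> {Mean, M2 / (N - 1)};
    stats({_, Mean, _}) -> {Mean, undefined}.

For example, lists:foldl(fun welford:add/2, welford:new(), Samples)
folds a list of samples into the same {Count, Mean, M2} triple that
repeated add_value/3 calls accumulate in the record fields above.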

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_stats_collector.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_stats_collector.erl b/src/couch/src/couch_stats_collector.erl
deleted file mode 100644
index 99814de..0000000
--- a/src/couch/src/couch_stats_collector.erl
+++ /dev/null
@@ -1,134 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% todo
-% - remove existence check on increment(), decrement() and record(). Have
-%   modules initialize counters on startup.
-
--module(couch_stats_collector).
-
--behaviour(gen_server).
-
--export([start/0, stop/0]).
--export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
--export([track_process_count/1, track_process_count/2]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--define(HIT_TABLE, stats_hit_table).
--define(ABS_TABLE, stats_abs_table).
-
-start() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
-    gen_server:call(?MODULE, stop).
-
-all() ->
-    ets:tab2list(?HIT_TABLE) ++ abs_to_list().
-
-all(Type) ->
-    case Type of
-        incremental -> ets:tab2list(?HIT_TABLE);
-        absolute -> abs_to_list()
-    end.
-
-get(Key) ->
-    case ets:lookup(?HIT_TABLE, Key) of
-        [] ->
-            case ets:lookup(?ABS_TABLE, Key) of
-                [] ->
-                    nil;
-                AbsVals ->
-                    lists:map(fun({_, Value}) -> Value end, AbsVals)
-            end;
-        [{_, Counter}] ->
-            Counter
-    end.
-
-increment(Key) ->
-    Key2 = make_key(Key),
-    case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
-        {'EXIT', {badarg, _}} ->
-            catch ets:insert(?HIT_TABLE, {Key2, 1}),
-            ok;
-        _ ->
-            ok
-    end.
-
-decrement(Key) ->
-    Key2 = make_key(Key),
-    case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
-        {'EXIT', {badarg, _}} ->
-            catch ets:insert(?HIT_TABLE, {Key2, -1}),
-            ok;
-        _ -> ok
-    end.
-
-record(Key, Value) ->
-    catch ets:insert(?ABS_TABLE, {make_key(Key), Value}).
-
-clear(Key) ->
-    catch ets:delete(?ABS_TABLE, make_key(Key)).
-
-track_process_count(Stat) ->
-    track_process_count(self(), Stat).
-
-track_process_count(Pid, Stat) ->
-    ok = couch_stats_collector:increment(Stat),
-    gen_server:cast(?MODULE, {track_process_count, Pid, Stat}).
-
-
-init(_) ->
-    ets:new(?HIT_TABLE, [named_table, set, public]),
-    ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
-    {ok, dict:new()}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(stop, _, State) ->
-    {stop, normal, stopped, State}.
-
-handle_cast({track_process_count, Pid, Stat}, State) ->
-    Ref = erlang:monitor(process, Pid),
-    {noreply, dict:store(Ref, Stat, State)}.
-
-handle_info({'DOWN', Ref, _, _, _}, State) ->
-    Stat = dict:fetch(Ref, State),
-    ok = couch_stats_collector:decrement(Stat),
-    {noreply, dict:erase(Ref, State)}.
-
-code_change(_OldVersion, State, _Extra) when is_list(State) ->
-    {ok, dict:from_list(State)};
-code_change(_OldVersion, State, _Extra) ->
-    {ok, State}.
-
-
-make_key({Module, Key}) when is_integer(Key) ->
-    {Module, list_to_atom(integer_to_list(Key))};
-make_key(Key) ->
-    Key.
-
-abs_to_list() ->
-    SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
-    lists:foldl(fun({Key, Val}, Acc) ->
-        case Acc of
-            [] ->
-                [{Key, [Val]}];
-            [{Key, Prev} | Rest] ->
-                [{Key, [Val | Prev]} | Rest];
-            Others ->
-                [{Key, [Val]} | Others]
-        end
-    end, [], SortedKVs).
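
Since all of the collector's state lives in two public ETS tables, the
API above is callable from any process once the server has started. A
hedged usage sketch (the stat keys are illustrative):

    % Incremental stats are plain counters in the hit table:
    ok = couch_stats_collector:increment({httpd, requests}),
    ok = couch_stats_collector:decrement({httpd, open_connections}),

    % Absolute stats accumulate one sample per record/2 call in the
    % duplicate_bag table until clear/1 wipes the key:
    couch_stats_collector:record({couchdb, request_time}, 42),

    % all/1 selects by type; the aggregator above consumes both lists.
    Incs = couch_stats_collector:all(incremental),
    Abs  = couch_stats_collector:all(absolute).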

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_stream.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
deleted file mode 100644
index 6e7213b..0000000
--- a/src/couch/src/couch_stream.erl
+++ /dev/null
@@ -1,299 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream).
--behaviour(gen_server).
-
-% public API
--export([open/1, open/2, close/1]).
--export([foldl/4, foldl/5, foldl_decode/6, range_foldl/6]).
--export([copy_to_new_stream/3, write/2]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_cast/2, handle_call/3, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(DEFAULT_BUFFER_SIZE, 4096).
-
--record(stream,
-    {fd = 0,
-    written_pointers=[],
-    buffer_list = [],
-    buffer_len = 0,
-    max_buffer,
-    written_len = 0,
-    md5,
-    % md5 of the content without any transformation applied (e.g. compression)
-    % needed for the attachment upload integrity check (ticket 558)
-    identity_md5,
-    identity_len = 0,
-    encoding_fun,
-    end_encoding_fun
-    }).
-
-
-%%% Interface functions %%%
-
-open(Fd) ->
-    open(Fd, []).
-
-open(Fd, Options) ->
-    gen_server:start_link(couch_stream, {Fd, Options}, []).
-
-close(Pid) ->
-    gen_server:call(Pid, close, infinity).
-
-copy_to_new_stream(Fd, PosList, DestFd) ->
-    {ok, Dest} = open(DestFd),
-    foldl(Fd, PosList,
-        fun(Bin, _) ->
-            ok = write(Dest, Bin)
-        end, ok),
-    close(Dest).
-
-foldl(_Fd, [], _Fun, Acc) ->
-    Acc;
-foldl(Fd, [Pos|Rest], Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
-
-foldl(Fd, PosList, <<>>, Fun, Acc) ->
-    foldl(Fd, PosList, Fun, Acc);
-foldl(Fd, PosList, Md5, Fun, Acc) ->
-    foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).
-
-foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
-    {DecDataFun, DecEndFun} = case Enc of
-    gzip ->
-        ungzip_init();
-    identity ->
-        identity_enc_dec_funs()
-    end,
-    Result = foldl_decode(
-        DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
-    ),
-    DecEndFun(),
-    Result.
-
-foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
-    Md5 = couch_util:md5_final(Md5Acc),
-    Acc;
-foldl(Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) -> % 0110 UPGRADE CODE
-    foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
-    Fun(Bin, Acc);
-foldl(Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
-    foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
-
-range_foldl(Fd, PosList, From, To, Fun, Acc) ->
-    range_foldl(Fd, PosList, From, To, 0, Fun, Acc).
-
-range_foldl(_Fd, _PosList, _From, To, Off, _Fun, Acc) when Off >= To ->
-    Acc;
-range_foldl(Fd, [Pos|Rest], From, To, Off, Fun, Acc) when is_integer(Pos) -> % old-style attachment
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    range_foldl(Fd, [{Pos, iolist_size(Bin)}] ++ Rest, From, To, Off, Fun, Acc);
-range_foldl(Fd, [{_Pos, Size}|Rest], From, To, Off, Fun, Acc) when From > Off + Size ->
-    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Acc);
-range_foldl(Fd, [{Pos, Size}|Rest], From, To, Off, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    Bin1 = if
-        From =< Off andalso To >= Off + Size -> Bin; %% the whole block is covered
-        true ->
-            PrefixLen = clip(From - Off, 0, Size),
-            PostfixLen = clip(Off + Size - To, 0, Size),
-            MatchLen = Size - PrefixLen - PostfixLen,
-            <<_Prefix:PrefixLen/binary,Match:MatchLen/binary,_Postfix:PostfixLen/binary>> = iolist_to_binary(Bin),
-            Match
-    end,
-    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Fun(Bin1, Acc)).
-
-clip(Value, Lo, Hi) ->
-    if
-        Value < Lo -> Lo;
-        Value > Hi -> Hi;
-        true -> Value
-    end.
-
-foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
-    Md5 = couch_util:md5_final(Md5Acc),
-    Acc;
-foldl_decode(DecFun, Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) ->
-    foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
-    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
-    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)),
-    Bin = DecFun(EncBin),
-    Fun(Bin, Acc);
-foldl_decode(DecFun, Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
-    foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
-    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
-    Bin = DecFun(EncBin),
-    Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin),
-    foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
-
-gzip_init(Options) ->
-    case couch_util:get_value(compression_level, Options, 0) of
-    Lvl when Lvl >= 1 andalso Lvl =< 9 ->
-        Z = zlib:open(),
-        % 15 = ?MAX_WBITS (defined in the zlib module)
-        % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
-        ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
-        {
-            fun(Data) ->
-                zlib:deflate(Z, Data)
-            end,
-            fun() ->
-                Last = zlib:deflate(Z, [], finish),
-                ok = zlib:deflateEnd(Z),
-                ok = zlib:close(Z),
-                Last
-            end
-        };
-    _ ->
-        identity_enc_dec_funs()
-    end.
-
-ungzip_init() ->
-    Z = zlib:open(),
-    zlib:inflateInit(Z, 16 + 15),
-    {
-        fun(Data) ->
-            zlib:inflate(Z, Data)
-        end,
-        fun() ->
-            ok = zlib:inflateEnd(Z),
-            ok = zlib:close(Z)
-        end
-    }.
-
-identity_enc_dec_funs() ->
-    {
-        fun(Data) -> Data end,
-        fun() -> [] end
-    }.
-
-write(_Pid, <<>>) ->
-    ok;
-write(Pid, Bin) ->
-    gen_server:call(Pid, {write, Bin}, infinity).
-
-
-init({Fd, Options}) ->
-    {EncodingFun, EndEncodingFun} =
-    case couch_util:get_value(encoding, Options, identity) of
-    identity ->
-        identity_enc_dec_funs();
-    gzip ->
-        gzip_init(Options)
-    end,
-    {ok, #stream{
-            fd=Fd,
-            md5=couch_util:md5_init(),
-            identity_md5=couch_util:md5_init(),
-            encoding_fun=EncodingFun,
-            end_encoding_fun=EndEncodingFun,
-            max_buffer=couch_util:get_value(
-                buffer_size, Options, ?DEFAULT_BUFFER_SIZE)
-        }
-    }.
-
-terminate(_Reason, _Stream) ->
-    ok.
-
-handle_call({write, Bin}, _From, Stream) ->
-    BinSize = iolist_size(Bin),
-    #stream{
-        fd = Fd,
-        written_len = WrittenLen,
-        written_pointers = Written,
-        buffer_len = BufferLen,
-        buffer_list = Buffer,
-        max_buffer = Max,
-        md5 = Md5,
-        identity_md5 = IdenMd5,
-        identity_len = IdenLen,
-        encoding_fun = EncodingFun} = Stream,
-    if BinSize + BufferLen > Max ->
-        WriteBin = lists:reverse(Buffer, [Bin]),
-        IdenMd5_2 = couch_util:md5_update(IdenMd5, WriteBin),
-        case EncodingFun(WriteBin) of
-        [] ->
-            % case where the encoder did some internal buffering
-            % (zlib does it for example)
-            WrittenLen2 = WrittenLen,
-            Md5_2 = Md5,
-            Written2 = Written;
-        WriteBin2 ->
-            {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
-            WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
-            Md5_2 = couch_util:md5_update(Md5, WriteBin2),
-            Written2 = [{Pos, iolist_size(WriteBin2)}|Written]
-        end,
-
-        {reply, ok, Stream#stream{
-                        written_len=WrittenLen2,
-                        written_pointers=Written2,
-                        buffer_list=[],
-                        buffer_len=0,
-                        md5=Md5_2,
-                        identity_md5=IdenMd5_2,
-                        identity_len=IdenLen + BinSize}};
-    true ->
-        {reply, ok, Stream#stream{
-                        buffer_list=[Bin|Buffer],
-                        buffer_len=BufferLen + BinSize,
-                        identity_len=IdenLen + BinSize}}
-    end;
-handle_call(close, _From, Stream) ->
-    #stream{
-        fd = Fd,
-        written_len = WrittenLen,
-        written_pointers = Written,
-        buffer_list = Buffer,
-        md5 = Md5,
-        identity_md5 = IdenMd5,
-        identity_len = IdenLen,
-        encoding_fun = EncodingFun,
-        end_encoding_fun = EndEncodingFun} = Stream,
-
-    WriteBin = lists:reverse(Buffer),
-    IdenMd5Final = couch_util:md5_final(couch_util:md5_update(IdenMd5, WriteBin)),
-    WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
-    Md5Final = couch_util:md5_final(couch_util:md5_update(Md5, WriteBin2)),
-    Result = case WriteBin2 of
-    [] ->
-        {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
-    _ ->
-        {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
-        StreamInfo = lists:reverse(Written, [{Pos, iolist_size(WriteBin2)}]),
-        StreamLen = WrittenLen + iolist_size(WriteBin2),
-        {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final}
-    end,
-    {stop, normal, Result, Stream}.
-
-handle_cast(_Msg, State) ->
-    {noreply,State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
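
Tying the pieces together, a writer drives a stream process through
open/2, write/2 and close/1; close flushes the final buffer and hands
back everything needed to reference the data later. A sketch (file
name and option values illustrative):

    {ok, Fd} = couch_file:open("demo.couch", [create, overwrite]),
    {ok, Stream} = couch_stream:open(Fd, [{encoding, gzip},
                                          {compression_level, 6}]),
    ok = couch_stream:write(Stream, <<"first chunk">>),
    ok = couch_stream:write(Stream, <<"second chunk">>),
    % Matches the close/1 result tuple built in handle_call(close, ...):
    {Ptrs, StreamLen, IdentityLen, Md5, IdentityMd5} =
        couch_stream:close(Stream).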

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
deleted file mode 100644
index 3508d4f..0000000
--- a/src/couch/src/couch_sup.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_sup).
--behaviour(supervisor).
--behaviour(config_listener).
-
-
--export([
-    start_link/0,
-    init/1,
-    handle_config_change/5
-]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
-
-start_link() ->
-    write_pidfile(),
-    notify_starting(),
-
-    case supervisor:start_link({local, ?MODULE}, ?MODULE, []) of
-        {ok, _} = Resp ->
-            notify_started(),
-            notify_uris(),
-            write_uris(),
-            ok = config:listen_for_changes(?MODULE, nil),
-            Resp;
-        Else ->
-            notify_error(Else),
-            Else
-    end.
-
-
-init(_Args) ->
-    twig:log(info, "Starting ~s", [?MODULE]),
-    {ok, {{one_for_one,10, 60}, [
-        {
-            couch_primary_services,
-            {couch_primary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_primary_sup]
-        },
-        {
-            couch_secondary_services,
-            {couch_secondary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_secondary_sup]
-        }
-    ]}}.
-
-
-handle_config_change("daemons", _, _, _, _) ->
-    exit(whereis(couch_server_sup), shutdown),
-    remove_handler;
-handle_config_change("couchdb", "util_driver_dir", _, _, _) ->
-    [Pid] = [P || {collation_driver, P, _, _}
-        <- supervisor:which_children(couch_primary_services)],
-    Pid ! reload_driver,
-    {ok, nil};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
-
-
-notify_starting() ->
-    io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
-        couch_server:get_version(),
-        config:get("log", "level", "info")
-    ]).
-
-
-notify_started() ->
-    io:format("Apache CouchDB has started. Time to relax.~n").
-
-
-notify_error(Error) ->
-    io:format("Error starting Apache CouchDB:~n~n    ~p~n~n", [Error]).
-
-
-notify_uris() ->
-    lists:foreach(fun(Uri) ->
-        ?LOG_INFO("Apache CouchDB has started on ~s", [Uri])
-    end, get_uris()).
-
-
-write_pidfile() ->
-    case init:get_argument(pidfile) of
-        {ok, [PidFile]} ->
-            write_file(PidFile, os:getpid());
-        _ ->
-            ok
-    end.
-
-
-write_uris() ->
-    case config:get("couchdb", "uri_file", null) of
-        null ->
-            ok;
-        UriFile ->
-            Lines = [io_lib:format("~s~n", [Uri]) || Uri <- get_uris()],
-            write_file(UriFile, Lines)
-    end.
-
-
-get_uris() ->
-    Ip = config:get("httpd", "bind_address"),
-    lists:flatmap(fun(Uri) ->
-        case get_uri(Uri, Ip) of
-            undefined -> [];
-            Else -> [Else]
-        end
-    end, [couch_httpd, https]).
-
-
-get_uri(Name, Ip) ->
-    case get_port(Name) of
-        undefined ->
-            undefined;
-        Port ->
-            io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
-    end.
-
-
-get_scheme(couch_httpd) -> "http";
-get_scheme(https) -> "https".
-
-
-get_port(Name) ->
-    try
-        mochiweb_socket_server:get(Name, port)
-    catch
-        exit:{noproc, _} ->
-            undefined
-    end.
-
-
-write_file(FileName, Contents) ->
-    case file:write_file(FileName, Contents) of
-        ok ->
-            ok;
-        {error, Reason} ->
-            Args = [FileName, file:format_error(Reason)],
-            io:format(standard_error, "Failed to write ~s :: ~s", Args),
-            throw({error, Reason})
-    end.
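
The {one_for_one, 10, 60} restart specification above means a crashed
child is restarted individually, and the supervisor itself gives up if
more than ten restarts occur within any sixty-second window. The same
policy in a minimal standalone supervisor (child module hypothetical):

    init([]) ->
        {ok, {{one_for_one, 10, 60}, [
            {my_worker,
             {my_worker, start_link, []},
             permanent, 5000, worker, [my_worker]}
        ]}}.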

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_task_status.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_task_status.erl b/src/couch/src/couch_task_status.erl
deleted file mode 100644
index ea9821f..0000000
--- a/src/couch/src/couch_task_status.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status).
--behaviour(gen_server).
-
-% This module is used to track the status of long-running tasks.
-% Long-running tasks register themselves via a call to add_task/1, and then
-% update their status properties via update/1. The status of a task is a
-% list of properties. Each property is a tuple whose first element is
-% either an atom or a binary and whose second element is an EJSON value.
-% When a task updates its status, it can override some or all of its
-% properties. The properties {started_on, UnixTimestamp},
-% {updated_on, UnixTimestamp} and {pid, ErlangPid} are automatically added
-% by this module. When a tracked task dies, its status is automatically
-% removed from memory. To get the list of tasks, call the all/0 function.
-
--export([start_link/0, stop/0]).
--export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
--export([is_task_added/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-
-all() ->
-    gen_server:call(?MODULE, all).
-
-
-add_task(Props) ->
-    put(task_status_update, {{0, 0, 0}, 0}),
-    Ts = timestamp(),
-    TaskProps = lists:ukeysort(
-        1, [{started_on, Ts}, {updated_on, Ts} | Props]),
-    put(task_status_props, TaskProps),
-    gen_server:call(?MODULE, {add_task, TaskProps}).
-
-
-is_task_added() ->
-    is_list(erlang:get(task_status_props)).
-
-
-set_update_frequency(Msecs) ->
-    put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
-
-
-update(Props) ->
-    MergeProps = lists:ukeysort(1, Props),
-    TaskProps = lists:ukeymerge(1, MergeProps, erlang:get(task_status_props)),
-    put(task_status_props, TaskProps),
-    maybe_persist(TaskProps).
-
-
-get(Props) when is_list(Props) ->
-    TaskProps = erlang:get(task_status_props),
-    [couch_util:get_value(P, TaskProps) || P <- Props];
-get(Prop) ->
-    TaskProps = erlang:get(task_status_props),
-    couch_util:get_value(Prop, TaskProps).
-
-
-maybe_persist(TaskProps0) ->
-    {LastUpdateTime, Frequency} = erlang:get(task_status_update),
-    case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of
-    true ->
-        put(task_status_update, {Now, Frequency}),
-        TaskProps = ?set(TaskProps0, updated_on, timestamp(Now)),
-        gen_server:cast(?MODULE, {update_status, self(), TaskProps});
-    false ->
-        ok
-    end.
-
-
-init([]) ->
-    % read configuration settings and register for configuration changes
-    ets:new(?MODULE, [ordered_set, protected, named_table]),
-    {ok, nil}.
-
-
-terminate(_Reason,_State) ->
-    ok.
-
-
-handle_call({add_task, TaskProps}, {From, _}, Server) ->
-    case ets:lookup(?MODULE, From) of
-    [] ->
-        true = ets:insert(?MODULE, {From, TaskProps}),
-        erlang:monitor(process, From),
-        {reply, ok, Server};
-    [_] ->
-        {reply, {add_task_error, already_registered}, Server}
-    end;
-handle_call(all, _, Server) ->
-    All = [
-        [{pid, ?l2b(pid_to_list(Pid))} | TaskProps]
-        ||
-        {Pid, TaskProps} <- ets:tab2list(?MODULE)
-    ],
-    {reply, All, Server}.
-
-
-handle_cast({update_status, Pid, NewProps}, Server) ->
-    case ets:lookup(?MODULE, Pid) of
-    [{Pid, _CurProps}] ->
-        ?LOG_DEBUG("New task status for ~p: ~p", [Pid, NewProps]),
-        true = ets:insert(?MODULE, {Pid, NewProps});
-    _ ->
-        % Task finished/died in the meantime and we must have received
-        % the monitor message before this call - ignore.
-        ok
-    end,
-    {noreply, Server};
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
-    %% should we also call erlang:demonitor(_MonitorRef) here?
-    ets:delete(?MODULE, Pid),
-    {noreply, Server}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-timestamp() ->
-    timestamp(now()).
-
-timestamp({Mega, Secs, _}) ->
-    Mega * 1000000 + Secs.
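
From a long-running process, the API above is used roughly as follows
(the property names are illustrative): add_task/1 stores the initial
properties, update/1 merges new ones, and set_update_frequency/1
throttles how often updates are persisted to the ETS table:

    couch_task_status:add_task([
        {type, database_compaction},
        {database, <<"mydb">>},
        {progress, 0}
    ]),
    couch_task_status:set_update_frequency(500),
    couch_task_status:update([{progress, 50}]),

    % From any other process, e.g. to serve _active_tasks:
    Tasks = couch_task_status:all().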

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_users_db.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
deleted file mode 100644
index 76acfee..0000000
--- a/src/couch/src/couch_users_db.erl
+++ /dev/null
@@ -1,110 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_users_db).
-
--export([before_doc_update/2, after_doc_read/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--define(NAME, <<"name">>).
--define(PASSWORD, <<"password">>).
--define(DERIVED_KEY, <<"derived_key">>).
--define(PASSWORD_SCHEME, <<"password_scheme">>).
--define(PBKDF2, <<"pbkdf2">>).
--define(ITERATIONS, <<"iterations">>).
--define(SALT, <<"salt">>).
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-% If the request's userCtx identifies an admin
-%   -> save_doc (see below)
-%
-% If the request's userCtx.name is null:
-%   -> save_doc
-%   // this is an anonymous user registering a new document
-%   // in case a user doc with the same id already exists, the anonymous
-%   // user will get a regular doc update conflict.
-% If the request's userCtx.name doesn't match the doc's name
-%   -> 404 // Not Found
-% Else
-%   -> save_doc
-before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
-    DocName = get_doc_name(Doc),
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        save_doc(Doc);
-    _ when Name =:= DocName orelse Name =:= null ->
-        save_doc(Doc);
-    _ ->
-        throw(not_found)
-    end.
-
-% If newDoc.password == null || newDoc.password == undefined:
-%   -> noop
-% Else: // calculate the password hash server side
-%    newDoc.password_scheme = "pbkdf2", newDoc.iterations = iterations
-%    newDoc.derived_key = pbkdf2(newDoc.password, salt, iterations)
-%    newDoc.salt = salt
-%    delete newDoc.password
-save_doc(#doc{body={Body}} = Doc) ->
-    case couch_util:get_value(?PASSWORD, Body) of
-    null -> % server admins don't have a user-db password entry
-        Doc;
-    undefined ->
-        Doc;
-    ClearPassword ->
-        Iterations = list_to_integer(config:get("couch_httpd_auth", "iterations", "1000")),
-        Salt = couch_uuids:random(),
-        DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
-        Body0 = [{?PASSWORD_SCHEME, ?PBKDF2}, {?ITERATIONS, Iterations}|Body],
-        Body1 = ?replace(Body0, ?DERIVED_KEY, DerivedKey),
-        Body2 = ?replace(Body1, ?SALT, Salt),
-        Body3 = proplists:delete(?PASSWORD, Body2),
-        Doc#doc{body={Body3}}
-    end.
-
-% If the doc is a design doc
-%   If the request's userCtx identifies an admin
-%     -> return doc
-%   Else
-%     -> 403 // Forbidden
-% If the request's userCtx identifies an admin
-%   -> return doc
-% If the request's userCtx.name doesn't match the doc's name
-%   -> 404 // Not Found
-% Else
-%   -> return doc
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        Doc;
-    _ ->
-        throw({forbidden,
-        <<"Only administrators can view design docs in the users database.">>})
-    end;
-after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
-    DocName = get_doc_name(Doc),
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        Doc;
-    _ when Name =:= DocName ->
-        Doc;
-    _ ->
-        throw(not_found)
-    end.
-
-get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
-    Name;
-get_doc_name(_) ->
-    undefined.
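
The net effect of save_doc/1 above on a user document body is that the
clear-text password field is replaced by PBKDF2 material. An
illustrative before/after (key material shortened):

    %% Before:
    {[{<<"name">>, <<"alice">>},
      {<<"password">>, <<"secret">>}]}

    %% After save_doc/1:
    {[{<<"password_scheme">>, <<"pbkdf2">>},
      {<<"iterations">>, 1000},
      {<<"name">>, <<"alice">>},
      {<<"derived_key">>, <<"a8f3...">>},
      {<<"salt">>, <<"1d9c...">>}]}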


[25/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/ejson


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/191a9b41
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/191a9b41
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/191a9b41

Branch: refs/heads/1843-feature-bigcouch
Commit: 191a9b41f2f67d16472e0ace293bf1dc0fe4cece
Parents: 572ee3c
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:40:42 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:40:42 2014 -0600

----------------------------------------------------------------------
 src/ejson/c_src/decode.c              | 308 -----------
 src/ejson/c_src/ejson.c               |  30 -
 src/ejson/c_src/encode.c              | 200 -------
 src/ejson/c_src/erl_nif_compat.h      | 120 ----
 src/ejson/c_src/yajl/yajl.c           | 159 ------
 src/ejson/c_src/yajl/yajl_alloc.c     |  65 ---
 src/ejson/c_src/yajl/yajl_alloc.h     |  50 --
 src/ejson/c_src/yajl/yajl_buf.c       | 119 ----
 src/ejson/c_src/yajl/yajl_buf.h       |  73 ---
 src/ejson/c_src/yajl/yajl_bytestack.h |  85 ---
 src/ejson/c_src/yajl/yajl_common.h    |  85 ---
 src/ejson/c_src/yajl/yajl_encode.c    | 188 -------
 src/ejson/c_src/yajl/yajl_encode.h    |  50 --
 src/ejson/c_src/yajl/yajl_gen.c       | 322 -----------
 src/ejson/c_src/yajl/yajl_gen.h       | 159 ------
 src/ejson/c_src/yajl/yajl_lex.c       | 737 -------------------------
 src/ejson/c_src/yajl/yajl_lex.h       | 133 -----
 src/ejson/c_src/yajl/yajl_parse.h     | 193 -------
 src/ejson/c_src/yajl/yajl_parser.c    | 470 ----------------
 src/ejson/c_src/yajl/yajl_parser.h    |  95 ----
 src/ejson/src/ejson.app.src           |   9 -
 src/ejson/src/ejson.erl               | 168 ------
 src/ejson/src/mochijson2.erl          | 849 -----------------------------
 src/ejson/src/mochinum.erl            | 354 ------------
 24 files changed, 5021 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/decode.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/decode.c b/src/ejson/c_src/decode.c
deleted file mode 100644
index 68f1317..0000000
--- a/src/ejson/c_src/decode.c
+++ /dev/null
@@ -1,308 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "erl_nif.h"
-#include "erl_nif_compat.h"
-#include "yajl/yajl_parse.h"
-#include "yajl/yajl_parser.h"
-#include "yajl/yajl_lex.h"
-
-typedef struct {
-    ERL_NIF_TERM head;
-    ErlNifEnv* env;
-} decode_ctx;
-
-#define ENV(ctxarg) (((decode_ctx*)ctxarg)->env)
-
-#define CONTINUE 1
-#define CANCEL 0
-
-
-static ERL_NIF_TERM
-make_error(yajl_handle handle, ErlNifEnv* env)
-{
-    char* yajlError = (char*) yajl_get_error(handle, 0, NULL, 0);
-    ERL_NIF_TERM errMsg;
-
-    if(yajlError != NULL)
-    {
-        errMsg = enif_make_string(env, yajlError, ERL_NIF_LATIN1);
-        yajl_free_error(handle, (unsigned char*) yajlError);
-    }
-    else
-    {
-        errMsg = enif_make_string(env, "unknown parse error", ERL_NIF_LATIN1);
-    }
-
-    return enif_make_tuple(env, 2,
-        enif_make_atom(env, "error"),
-        enif_make_tuple(env, 2,
-            enif_make_uint(env, handle->bytesConsumed),
-            errMsg
-        )
-    );
-}
-
-
-static void
-add_to_head(void* vctx, ERL_NIF_TERM newhead)
-{
-    decode_ctx* ctx = (decode_ctx*)vctx;
-    ctx->head = enif_make_list_cell(ctx->env, newhead, ctx->head);
-}
-
-static int
-decode_null(void* ctx)
-{
-    add_to_head(ctx, enif_make_atom(ENV(ctx), "null"));
-    return CONTINUE;
-}
-
-static int
-decode_boolean(void* ctx, int val)
-{
-    add_to_head(ctx, enif_make_atom(ENV(ctx), val ? "true" : "false"));
-    return CONTINUE;
-}
-
-static int
-decode_number(void * ctx, const char * numberVal, unsigned int numberLen)
-{
-    // scan in the input to see if it's a float or int
-
-    int numberType = 0; // 0 means integer, 1 means float
-    unsigned int i;
-    ErlNifBinary bin;
-    int missingDot = 1;
-    unsigned int expPos;
-
-    for(i=0; i<numberLen; i++) {
-        switch (numberVal[i]) {
-        case '.':
-            missingDot = 0;
-            numberType = 1; // it's a float
-            goto loopend;
-        case 'E':
-        case 'e':
-            expPos = i;
-            numberType = 1; // it's a float
-            goto loopend;
-        }
-    }
-loopend:
-    if ((numberType == 1) && missingDot)
-    {
-        if(!enif_alloc_binary_compat(ENV(ctx), numberLen + 2, &bin))
-        {
-            return CANCEL;
-        }
-        memcpy(bin.data, numberVal, expPos);
-        bin.data[expPos] = '.';
-        bin.data[expPos + 1] = '0';
-        memcpy(bin.data + expPos + 2, numberVal + expPos, numberLen - expPos);
-    }
-    else
-    {
-        if(!enif_alloc_binary_compat(ENV(ctx), numberLen, &bin))
-        {
-            return CANCEL;
-        }
-        memcpy(bin.data, numberVal, numberLen);
-    }
-    add_to_head(ctx, enif_make_tuple(ENV(ctx), 2,
-                        enif_make_int(ENV(ctx), numberType),
-                        enif_make_binary(ENV(ctx), &bin)));
-    return CONTINUE;
-}
-
-
-
-static int
-decode_string(void* ctx, const unsigned char* data, unsigned int size)
-{
-    ErlNifBinary bin;
-    if(!enif_alloc_binary_compat(ENV(ctx), size, &bin))
-    {
-        return CANCEL;
-    }
-    memcpy(bin.data, data, size);
-    add_to_head(ctx, enif_make_binary(ENV(ctx), &bin));
-    return CONTINUE;
-}
-
-static int
-decode_start_array(void* ctx)
-{
-    add_to_head(ctx, enif_make_int(ENV(ctx), 0));
-    return CONTINUE;
-}
-
-
-static int
-decode_end_array(void* ctx)
-{
-    add_to_head(ctx, enif_make_int(ENV(ctx), 1));
-    return CONTINUE;
-}
-
-
-static int
-decode_start_map(void* ctx)
-{
-    add_to_head(ctx, enif_make_int(ENV(ctx), 2));
-    return CONTINUE;
-}
-
-
-static int
-decode_end_map(void* ctx)
-{
-    add_to_head(ctx, enif_make_int(ENV(ctx), 3));
-    return CONTINUE;
-}
-
-
-static int
-decode_map_key(void* ctx, const unsigned char* data, unsigned int size)
-{
-    ErlNifBinary bin;
-    if(!enif_alloc_binary_compat(ENV(ctx), size, &bin))
-    {
-       return CANCEL;
-    }
-    memcpy(bin.data, data, size);
-    add_to_head(ctx, enif_make_tuple(ENV(ctx), 2,
-                        enif_make_int(ENV(ctx), 3),
-                        enif_make_binary(ENV(ctx), &bin)));
-    return CONTINUE;
-}
-
-static yajl_callbacks
-decoder_callbacks = {
-    decode_null,
-    decode_boolean,
-    NULL,
-    NULL,
-    decode_number,
-    decode_string,
-    decode_start_map,
-    decode_map_key,
-    decode_end_map,
-    decode_start_array,
-    decode_end_array
-};
-
-static int
-check_rest(unsigned char* data, unsigned int size, unsigned int used)
-{
-    unsigned int i = 0;
-    for(i = used; i < size; i++)
-    {
-        switch(data[i])
-        {
-            case ' ':
-            case '\t':
-            case '\r':
-            case '\n':
-                continue;
-            default:
-                return CANCEL;
-        }
-    }
-
-    return CONTINUE;
-}
-
-ERL_NIF_TERM
-reverse_tokens(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    decode_ctx ctx;
-    yajl_parser_config conf = {0, 1}; // No comments, check utf8
-    yajl_handle handle = yajl_alloc(&decoder_callbacks, &conf, NULL, &ctx);
-    yajl_status status;
-    unsigned int used;
-    ErlNifBinary bin;
-    ERL_NIF_TERM ret;
-
-    ctx.env = env;
-    ctx.head = enif_make_list_from_array(env, NULL, 0);
-
-    if(!enif_inspect_iolist_as_binary(env, argv[0], &bin))
-    {
-        ret = enif_make_badarg(env);
-        goto done;
-    }
-
-    status = yajl_parse(handle, bin.data, bin.size);
-    used = handle->bytesConsumed;
-
-    // Parsing something like "2.0" (without quotes) will
-    // cause a spurious insufficient_data result. We add the
-    // extra size check so that "2008-20-10" doesn't pass.
-    if(status == yajl_status_insufficient_data && used == bin.size)
-    {
-        status = yajl_parse_complete(handle);
-    }
-
-    if(status == yajl_status_ok && used != bin.size)
-    {
-        if(check_rest(bin.data, bin.size, used) == CANCEL)
-        {
-            ret = enif_make_tuple(env, 2,
-                enif_make_atom(env, "error"),
-                enif_make_atom(env, "garbage_after_value")
-            );
-            goto done;
-        }
-    }
-
-    switch(status)
-    {
-        case yajl_status_ok:
-            ret = enif_make_tuple(env, 2, enif_make_atom(env, "ok"), ctx.head);
-            goto done;
-
-        case yajl_status_error:
-            ret = make_error(handle, env);
-            goto done;
-
-        case yajl_status_insufficient_data:
-            ret = enif_make_tuple(env, 2,
-                enif_make_atom(env, "error"),
-                enif_make_atom(env, "insufficient_data")
-            );
-            goto done;
-
-        case yajl_status_client_canceled:
-        /* the only time we do this is when we can't allocate a binary. */
-            ret = enif_make_tuple(env, 2,
-                enif_make_atom(env, "error"),
-                enif_make_atom(env, "insufficient_memory")
-            );
-            goto done;
-
-        default:
-            ret = enif_make_tuple(env, 2,
-                enif_make_atom(env, "error"),
-                enif_make_atom(env, "unknown")
-            );
-            goto done;
-    }
-
-done:
-    if(handle != NULL) yajl_free(handle);
-    return ret;
-}
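
Note the shape of the output: every callback conses onto the list head,
so reverse_tokens/1 hands Erlang the document's events in reverse
order, which the caller can rebuild with a single forward pass. The
token vocabulary implied by the callbacks above, as a classifier sketch:

    %% Classify one element of the reversed event list (sketch):
    token_kind(0) -> array_start;
    token_kind(1) -> array_end;
    token_kind(2) -> map_start;
    token_kind(3) -> map_end;
    token_kind({3, Key}) when is_binary(Key) -> {map_key, Key};
    token_kind({0, Bin}) when is_binary(Bin) -> {int_literal, Bin};
    token_kind({1, Bin}) when is_binary(Bin) -> {float_literal, Bin};
    token_kind(Bin) when is_binary(Bin) -> {string, Bin};
    token_kind(A) when is_atom(A) -> {literal, A}. % null | true | false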

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/ejson.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/ejson.c b/src/ejson/c_src/ejson.c
deleted file mode 100644
index 390f762..0000000
--- a/src/ejson/c_src/ejson.c
+++ /dev/null
@@ -1,30 +0,0 @@
-#include "erl_nif.h"
-
-ERL_NIF_TERM final_encode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
-ERL_NIF_TERM reverse_tokens(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
-
-int
-on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-int
-on_reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-int
-on_upgrade(ErlNifEnv* env, void** priv_data, void** old_data, ERL_NIF_TERM info)
-{
-    return 0;
-}
-
-static ErlNifFunc nif_funcs[] =
-{
-    {"final_encode", 1, final_encode},
-    {"reverse_tokens", 1, reverse_tokens}
-};
-
-ERL_NIF_INIT(ejson, nif_funcs, &on_load, &on_reload, &on_upgrade, NULL);
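
ERL_NIF_INIT registers these functions for the Erlang module named
ejson, so the Erlang side needs matching stubs that are swapped out
when the shared object loads. Approximately (load path illustrative;
the real stubs lived in src/ejson/src/ejson.erl):

    -module(ejson).
    -export([final_encode/1, reverse_tokens/1]).
    -on_load(init/0).

    init() ->
        erlang:load_nif("./priv/ejson", 0).

    % Replaced by the C implementations once the NIF is loaded.
    final_encode(_) -> erlang:nif_error(nif_not_loaded).
    reverse_tokens(_) -> erlang:nif_error(nif_not_loaded).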

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/encode.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/encode.c b/src/ejson/c_src/encode.c
deleted file mode 100644
index 1dbd1df..0000000
--- a/src/ejson/c_src/encode.c
+++ /dev/null
@@ -1,200 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdio.h>
-#include <string.h>
-#include <math.h>
-
-#include "erl_nif.h"
-#include "erl_nif_compat.h"
-#include "yajl/yajl_encode.h"
-
-#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
-#include <float.h>
-#define isnan _isnan
-#define isinf !_finite
-#define snprintf _snprintf
-#endif
-
-#define SUCCESS 0
-#define NOMEM 1
-#define BADARG 2
-
-
-typedef struct {
-    ErlNifEnv* env;
-    ErlNifBinary bin;
-    size_t fill_offset;
-    int error;
-} encode_ctx;
-
-
-static int
-ensure_buffer(void* vctx, unsigned int len) {
-    encode_ctx* ctx = (encode_ctx*)vctx;
-    if ((ctx->bin.size - ctx->fill_offset) < len) {
-        if(!enif_realloc_binary_compat(ctx->env, &(ctx->bin), (ctx->bin.size * 2) + len)) {
-            return NOMEM;
-        }
-    }
-    return SUCCESS;
-}
-
-static void
-fill_buffer(void* vctx, const char* str, unsigned int len)
-{
-    encode_ctx* ctx = (encode_ctx*)vctx;
-
-    if (ctx->error || (ctx->error = ensure_buffer(vctx, len))) {
-        return;
-    }
-    memcpy(ctx->bin.data + ctx->fill_offset, str, len);
-    ctx->fill_offset += len;
-}
-
-/* Json encode the string binary into the ctx.bin,
-  with surrounding quotes and all */
-static int
-encode_string(void* vctx, ERL_NIF_TERM binary)
-{
-    encode_ctx* ctx = (encode_ctx*)vctx;
-    ErlNifBinary bin;
-
-    if(!enif_inspect_binary(ctx->env, binary, &bin)) {
-        return NOMEM;
-    }
-    fill_buffer(ctx, "\"", 1);
-    if (ctx->error) {
-        return ctx->error;
-    }
-    yajl_string_encode2(fill_buffer, ctx, bin.data, bin.size);
-    fill_buffer(ctx, "\"", 1);
-
-    return ctx->error;
-}
-
-static ERL_NIF_TERM
-no_mem_error(ErlNifEnv* env)
-{
-    return enif_make_tuple(env, 2,
-            enif_make_atom(env, "error"),
-            enif_make_atom(env, "insufficient_memory"));
-}
-
-ERL_NIF_TERM
-final_encode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-    ERL_NIF_TERM head = argv[0];
-    ERL_NIF_TERM term;
-    double number;
-    encode_ctx ctx;
-    char* start;
-    size_t len;
-    size_t i;
-
-    ctx.env = env;
-    ctx.fill_offset = 0;
-    ctx.error = 0;
-
-    if (!enif_alloc_binary_compat(env, 100, &ctx.bin)) {
-            return no_mem_error(env);
-    }
-
-    while(enif_get_list_cell(env, head, &term, &head)) {
-        ErlNifBinary termbin;
-        const ERL_NIF_TERM* array;
-        int arity;
-        int code;
-
-        // We scan the list, looking for things to write into the binary, or
-        // encode and then write into the binary. We encode values that are
-        // tuples tagged with a type and a value: {Type, Value}, where Type
-        // is an integer and Value is what is to be encoded.
-
-        if (enif_get_tuple(env, term, &arity, &array)) {
-            // It's a tuple to encode and copy
-            if (arity != 2 || !enif_get_int(env, array[0], &code)) {
-                // not arity 2 or the first element isn't an int
-                ctx.error = BADARG;
-                goto done;
-            }
-            if (code == 0) {
-                // {0, String}
-                if (encode_string(&ctx, array[1]) != SUCCESS) {
-                    goto done;
-                }
-            }
-            else {
-                // {1, Double}
-                if(!enif_get_double(env, array[1], &number)) {
-                    ctx.error = BADARG;
-                    goto done;
-                }
-                // We can't encode these.
-                if (isnan(number) || isinf(number)) {
-                    ctx.error = BADARG;
-                    goto done;
-                }
-                if ((ctx.error = ensure_buffer(&ctx, 32)) != SUCCESS) {
-                    goto done;
-                }
-                // write the string into the buffer
-                start = (char*) (ctx.bin.data + ctx.fill_offset);
-                snprintf(start, 32, "%0.20g", number);
-                len = strlen(start);
-                for(i = 0; i < len; i++) {
-                    if(start[i] == '.' || start[i] == 'e' || start[i] == 'E') {
-                        break;
-                    }
-                }
-                if(i == len) {
-                    if(i > 29) {
-                        ctx.error = BADARG;
-                        goto done;
-                    }
-                    start[len++] = '.';
-                    start[len++] = '0';
-                }
-                // increment the length
-                ctx.fill_offset += len;
-            }
-        } else if (enif_inspect_binary(env, term, &termbin)) {
-            // this is a regular binary, copy the contents into the buffer
-            fill_buffer(&ctx, (char*)termbin.data, termbin.size);
-            if (ctx.error) {
-                goto done;
-            }
-        }
-        else {
-            // not a binary and not a tuple: invalid input
-            ctx.error = BADARG;
-            goto done;
-        }
-    }
-done:
-    if (ctx.error == NOMEM) {
-        enif_release_binary_compat(env, &ctx.bin);
-        return no_mem_error(env);
-    } else if (ctx.error == BADARG) {
-        enif_release_binary_compat(env, &ctx.bin);
-        return enif_make_badarg(env);
-    }
-
-    // Resize the binary to our exact final size
-    if(!enif_realloc_binary_compat(env, &(ctx.bin), ctx.fill_offset)) {
-        enif_release_binary_compat(env, &ctx.bin);
-        return no_mem_error(env);
-    }
-    // make the binary term which transfers ownership
-    return enif_make_binary(env, &ctx.bin);
-}
-
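
Note the input contract of final_encode/1: it walks a flat list where
plain binaries are copied through verbatim (pre-rendered punctuation
and literals) and only tagged tuples are encoded, {0, Bin} being a
string to escape and quote and {1, Float} a double. The tag codes
differ from the decoder's. A hedged call sketch:

    Json = ejson:final_encode([<<"{">>,
                               {0, <<"pi">>}, <<":">>, {1, 3.14},
                               <<"}">>]),
    % Json is a binary such as <<"{\"pi\":3.14...}">>; floats are
    % rendered with %0.20g and always carry a '.' or an exponent.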

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/erl_nif_compat.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/erl_nif_compat.h b/src/ejson/c_src/erl_nif_compat.h
deleted file mode 100644
index 548ea7a..0000000
--- a/src/ejson/c_src/erl_nif_compat.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* Copyright (c) 2010-2011 Basho Technologies, Inc.
- * With some minor modifications for Apache CouchDB.
- *
- * This file is provided to you under the Apache License,
- * Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License.  You may obtain
- * a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
-*/
-
-#ifndef ERL_NIF_COMPAT_H_
-#define ERL_NIF_COMPAT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#include "erl_nif.h"
-
-
-#if ERL_NIF_MAJOR_VERSION == 0 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R13B03
-#elif ERL_NIF_MAJOR_VERSION == 1 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R13B04
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 0
-#define OTP_R14A
-#define OTP_R14B
-#define OTP_R14B01
-#elif ERL_NIF_MAJOR_VERSION == 2 && ERL_NIF_MINOR_VERSION == 1
-#define OTP_R14B02
-#endif
-
-
-#ifdef OTP_R13B03
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_alloc_compat enif_alloc
-#define enif_release_binary_compat enif_release_binary
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_get_data
-#define enif_make_uint_compat enif_make_ulong
-
-#define enif_make_string_compat(E, B, Enc) \
-    enif_make_string(E, B)
-
-#endif /* R13B03 */
-
-
-#ifdef OTP_R13B04
-
-#define enif_open_resource_type_compat enif_open_resource_type
-#define enif_alloc_resource_compat enif_alloc_resource
-#define enif_release_resource_compat enif_release_resource
-#define enif_alloc_binary_compat enif_alloc_binary
-#define enif_realloc_binary_compat enif_realloc_binary
-#define enif_release_binary_compat enif_release_binary
-#define enif_alloc_compat enif_alloc
-#define enif_free_compat enif_free
-#define enif_get_atom_compat enif_get_atom
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_uint_compat enif_make_uint
-
-#endif /* R13B04 */
-
-
-/* OTP R14 and future releases */
-#if !defined(OTP_R13B03) && !defined(OTP_R13B04)
-
-#define enif_open_resource_type_compat(E, N, D, F, T) \
-    enif_open_resource_type(E, NULL, N, D, F, T)
-
-#define enif_alloc_resource_compat(E, T, S) \
-    enif_alloc_resource(T, S)
-
-#define enif_release_resource_compat(E, H) \
-    enif_release_resource(H)
-
-#define enif_alloc_binary_compat(E, S, B) \
-    enif_alloc_binary(S, B)
-
-#define enif_realloc_binary_compat(E, S, B) \
-    enif_realloc_binary(S, B)
-
-#define enif_release_binary_compat(E, B) \
-    enif_release_binary(B)
-
-#define enif_alloc_compat(E, S) \
-    enif_alloc(S)
-
-#define enif_free_compat(E, P) \
-    enif_free(P)
-
-#define enif_get_atom_compat(E, T, B, S) \
-    enif_get_atom(E, T, B, S, ERL_NIF_LATIN1)
-
-#define enif_priv_data_compat enif_priv_data
-#define enif_make_string_compat enif_make_string
-#define enif_make_uint_compat enif_make_uint
-
-#endif  /* R14 and future releases */
-
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* ERL_NIF_COMPAT_H_ */

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl.c b/src/ejson/c_src/yajl/yajl.c
deleted file mode 100644
index 39d8b9f..0000000
--- a/src/ejson/c_src/yajl/yajl.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_parse.h"
-#include "yajl_lex.h"
-#include "yajl_parser.h"
-#include "yajl_alloc.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-
-const char *
-yajl_status_to_string(yajl_status stat)
-{
-    const char * statStr = "unknown";
-    switch (stat) {
-        case yajl_status_ok:
-            statStr = "ok, no error";
-            break;
-        case yajl_status_client_canceled:
-            statStr = "client canceled parse";
-            break;
-        case yajl_status_insufficient_data:
-            statStr = "eof was met before the parse could complete";
-            break;
-        case yajl_status_error:
-            statStr = "parse error";
-            break;
-    }
-    return statStr;
-}
-
-yajl_handle
-yajl_alloc(const yajl_callbacks * callbacks,
-           const yajl_parser_config * config,
-           const yajl_alloc_funcs * afs,
-           void * ctx)
-{
-    unsigned int allowComments = 0;
-    unsigned int validateUTF8 = 0;
-    yajl_handle hand = NULL;
-    yajl_alloc_funcs afsBuffer;
-    
-    /* first order of business is to set up memory allocation routines */
-    if (afs != NULL) {
-        if (afs->malloc == NULL || afs->realloc == NULL || afs->free == NULL)
-        {
-            return NULL;
-        }
-    } else {
-        yajl_set_default_alloc_funcs(&afsBuffer);
-        afs = &afsBuffer;
-    }
-
-    hand = (yajl_handle) YA_MALLOC(afs, sizeof(struct yajl_handle_t));
-
-    /* copy in pointers to allocation routines */
-    memcpy((void *) &(hand->alloc), (void *) afs, sizeof(yajl_alloc_funcs));
-
-    if (config != NULL) {
-        allowComments = config->allowComments;
-        validateUTF8 = config->checkUTF8;
-    }
-
-    hand->callbacks = callbacks;
-    hand->ctx = ctx;
-    hand->lexer = yajl_lex_alloc(&(hand->alloc), allowComments, validateUTF8);
-    hand->bytesConsumed = 0;
-    hand->decodeBuf = yajl_buf_alloc(&(hand->alloc));
-    yajl_bs_init(hand->stateStack, &(hand->alloc));
-
-    yajl_bs_push(hand->stateStack, yajl_state_start);    
-
-    return hand;
-}
-
-void
-yajl_free(yajl_handle handle)
-{
-    yajl_bs_free(handle->stateStack);
-    yajl_buf_free(handle->decodeBuf);
-    yajl_lex_free(handle->lexer);
-    YA_FREE(&(handle->alloc), handle);
-}
-
-yajl_status
-yajl_parse(yajl_handle hand, const unsigned char * jsonText,
-           unsigned int jsonTextLen)
-{
-    yajl_status status;
-    status = yajl_do_parse(hand, jsonText, jsonTextLen);
-    return status;
-}
-
-yajl_status
-yajl_parse_complete(yajl_handle hand)
-{
-    /* The particular case we want to handle is a trailing number.
-     * Further input consisting of digits could cause our interpretation
-     * of the number to change (buffered "1" but "2" comes in).
-     * A very simple approach to this is to inject whitespace to terminate
-     * any number in the lex buffer.
-     */
-    return yajl_parse(hand, (const unsigned char *)" ", 1);
-}
-
-unsigned char *
-yajl_get_error(yajl_handle hand, int verbose,
-               const unsigned char * jsonText, unsigned int jsonTextLen)
-{
-    return yajl_render_error_string(hand, jsonText, jsonTextLen, verbose);
-}
-
-unsigned int
-yajl_get_bytes_consumed(yajl_handle hand)
-{
-    if (!hand) return 0;
-    else return hand->bytesConsumed;
-}
-
-
-void
-yajl_free_error(yajl_handle hand, unsigned char * str)
-{
-    /* use memory allocation functions if set */
-    YA_FREE(&(hand->alloc), str);
-}
-
-/* XXX: add utility routines to parse from file */
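
For reference, the parse lifecycle of the API deleted above is alloc,
parse, parse_complete, free. A minimal sketch (not part of the original
tree; callbacks are NULL purely to keep it short, a real client supplies
a yajl_callbacks table):

    #include <stdio.h>
    #include "yajl_parse.h"

    int main(void)
    {
        const unsigned char json[] = "{\"answer\": 42}";
        /* NULL callbacks/config/afs: defaults throughout */
        yajl_handle hand = yajl_alloc(NULL, NULL, NULL, NULL);
        yajl_status stat = yajl_parse(hand, json, sizeof(json) - 1);

        /* flush a possible trailing number, as described above */
        if (stat == yajl_status_ok)
            stat = yajl_parse_complete(hand);

        if (stat != yajl_status_ok) {
            unsigned char *err = yajl_get_error(hand, 1, json,
                                                sizeof(json) - 1);
            fprintf(stderr, "%s", (const char *) err);
            yajl_free_error(hand, err);
        }

        yajl_free(hand);
        return 0;
    }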

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_alloc.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_alloc.c b/src/ejson/c_src/yajl/yajl_alloc.c
deleted file mode 100644
index ccfb7c3..0000000
--- a/src/ejson/c_src/yajl/yajl_alloc.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-/**
- * \file yajl_alloc.c
- * default memory allocation routines for yajl which use malloc/realloc and
- * free
- */
-
-#include "yajl_alloc.h"
-#include <stdlib.h>
-
-static void * yajl_internal_malloc(void *ctx, unsigned int sz)
-{
-    return malloc(sz);
-}
-
-static void * yajl_internal_realloc(void *ctx, void * previous,
-                                    unsigned int sz)
-{
-    return realloc(previous, sz);
-}
-
-static void yajl_internal_free(void *ctx, void * ptr)
-{
-    free(ptr);
-}
-
-void yajl_set_default_alloc_funcs(yajl_alloc_funcs * yaf)
-{
-    yaf->malloc = yajl_internal_malloc;
-    yaf->free = yajl_internal_free;
-    yaf->realloc = yajl_internal_realloc;
-    yaf->ctx = NULL;
-}
-

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_alloc.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_alloc.h b/src/ejson/c_src/yajl/yajl_alloc.h
deleted file mode 100644
index cc1e5cf..0000000
--- a/src/ejson/c_src/yajl/yajl_alloc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-/**
- * \file yajl_alloc.h
- * default memory allocation routines for yajl which use malloc/realloc and
- * free
- */
-
-#ifndef __YAJL_ALLOC_H__
-#define __YAJL_ALLOC_H__
-
-#include "yajl_common.h"
-
-#define YA_MALLOC(afs, sz) (afs)->malloc((afs)->ctx, (sz))
-#define YA_FREE(afs, ptr) (afs)->free((afs)->ctx, (ptr))
-#define YA_REALLOC(afs, ptr, sz) (afs)->realloc((afs)->ctx, (ptr), (sz))
-
-void yajl_set_default_alloc_funcs(yajl_alloc_funcs * yaf);
-
-#endif
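
For reference, the yajl_alloc_funcs contract above lets a client thread
state through the ctx pointer. A hypothetical counting allocator,
sketched under the assumption that it is passed as the afs argument of
yajl_alloc or yajl_gen_alloc:

    #include <stdlib.h>
    #include "yajl_common.h"

    /* count every allocation made through YA_MALLOC */
    static void * counting_malloc(void *ctx, unsigned int sz)
    {
        ++*(unsigned int *) ctx;
        return malloc(sz);
    }

    static void * counting_realloc(void *ctx, void *prev, unsigned int sz)
    {
        (void) ctx;
        return realloc(prev, sz);
    }

    static void counting_free(void *ctx, void *ptr)
    {
        (void) ctx;
        free(ptr);
    }

    /* field order per yajl_common.h: malloc, realloc, free, ctx */
    static unsigned int allocations = 0;
    static yajl_alloc_funcs afs = {
        counting_malloc, counting_realloc, counting_free, &allocations
    };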

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_buf.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_buf.c b/src/ejson/c_src/yajl/yajl_buf.c
deleted file mode 100644
index 04e608a..0000000
--- a/src/ejson/c_src/yajl/yajl_buf.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_buf.h"
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define YAJL_BUF_INIT_SIZE 2048
-
-struct yajl_buf_t {
-    unsigned int len;
-    unsigned int used;
-    unsigned char * data;
-    yajl_alloc_funcs * alloc;
-};
-
-static
-void yajl_buf_ensure_available(yajl_buf buf, unsigned int want)
-{
-    unsigned int need;
-    
-    assert(buf != NULL);
-
-    /* first call */
-    if (buf->data == NULL) {
-        buf->len = YAJL_BUF_INIT_SIZE;
-        buf->data = (unsigned char *) YA_MALLOC(buf->alloc, buf->len);
-        buf->data[0] = 0;
-    }
-
-    need = buf->len;
-
-    while (want >= (need - buf->used)) need <<= 1;
-
-    if (need != buf->len) {
-        buf->data = (unsigned char *) YA_REALLOC(buf->alloc, buf->data, need);
-        buf->len = need;
-    }
-}
-
-yajl_buf yajl_buf_alloc(yajl_alloc_funcs * alloc)
-{
-    yajl_buf b = YA_MALLOC(alloc, sizeof(struct yajl_buf_t));
-    memset((void *) b, 0, sizeof(struct yajl_buf_t));
-    b->alloc = alloc;
-    return b;
-}
-
-void yajl_buf_free(yajl_buf buf)
-{
-    assert(buf != NULL);
-    if (buf->data) YA_FREE(buf->alloc, buf->data);
-    YA_FREE(buf->alloc, buf);
-}
-
-void yajl_buf_append(yajl_buf buf, const void * data, unsigned int len)
-{
-    yajl_buf_ensure_available(buf, len);
-    if (len > 0) {
-        assert(data != NULL);
-        memcpy(buf->data + buf->used, data, len);
-        buf->used += len;
-        buf->data[buf->used] = 0;
-    }
-}
-
-void yajl_buf_clear(yajl_buf buf)
-{
-    buf->used = 0;
-    if (buf->data) buf->data[buf->used] = 0;
-}
-
-const unsigned char * yajl_buf_data(yajl_buf buf)
-{
-    return buf->data;
-}
-
-unsigned int yajl_buf_len(yajl_buf buf)
-{
-    return buf->used;
-}
-
-void
-yajl_buf_truncate(yajl_buf buf, unsigned int len)
-{
-    assert(len <= buf->used);
-    buf->used = len;
-}
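
The growth policy in yajl_buf_ensure_available above doubles the
capacity until the free space strictly exceeds the request, which is
what reserves room for the trailing NUL byte. A standalone restatement
(names are illustrative, not from the tree):

    /* returns the new capacity for a buffer of capacity `len` with
     * `used` bytes occupied, asked to hold `want` more bytes */
    static unsigned int grow(unsigned int len, unsigned int used,
                             unsigned int want)
    {
        unsigned int need = len;
        while (want >= (need - used))
            need <<= 1;
        return need;
    }

For example, a fresh 2048-byte buffer asked for 2048 more bytes grows to
4096, leaving spare room for the terminator.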

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_buf.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_buf.h b/src/ejson/c_src/yajl/yajl_buf.h
deleted file mode 100644
index a6dcbe9..0000000
--- a/src/ejson/c_src/yajl/yajl_buf.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#ifndef __YAJL_BUF_H__
-#define __YAJL_BUF_H__
-
-#include "yajl_common.h"
-#include "yajl_alloc.h"
-
-/*
- * Implementation/performance notes.  If this were moved to a header
- * only implementation using #define's where possible we might be 
- * able to squeeze a little performance out of the guy by killing function
- * call overhead.  YMMV.
- */
-
-/**
- * yajl_buf is a buffer with exponential growth.  the buffer ensures that
- * its contents are always null padded.
- */
-typedef struct yajl_buf_t * yajl_buf;
-
-/* allocate a new buffer */
-yajl_buf yajl_buf_alloc(yajl_alloc_funcs * alloc);
-
-/* free the buffer */
-void yajl_buf_free(yajl_buf buf);
-
-/* append a number of bytes to the buffer */
-void yajl_buf_append(yajl_buf buf, const void * data, unsigned int len);
-
-/* empty the buffer */
-void yajl_buf_clear(yajl_buf buf);
-
-/* get a pointer to the beginning of the buffer */
-const unsigned char * yajl_buf_data(yajl_buf buf);
-
-/* get the length of the buffer */
-unsigned int yajl_buf_len(yajl_buf buf);
-
-/* truncate the buffer */
-void yajl_buf_truncate(yajl_buf buf, unsigned int len);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_bytestack.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_bytestack.h b/src/ejson/c_src/yajl/yajl_bytestack.h
deleted file mode 100644
index 3b49d17..0000000
--- a/src/ejson/c_src/yajl/yajl_bytestack.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-/*
- * A header only implementation of a simple stack of bytes, used in YAJL
- * to maintain parse state.
- */
-
-#ifndef __YAJL_BYTESTACK_H__
-#define __YAJL_BYTESTACK_H__
-
-#include "yajl_common.h"
-
-#define YAJL_BS_INC 128
-
-typedef struct yajl_bytestack_t
-{
-    unsigned char * stack;
-    unsigned int size;
-    unsigned int used;
-    yajl_alloc_funcs * yaf;
-} yajl_bytestack;
-
-/* initialize a bytestack */
-#define yajl_bs_init(obs, _yaf) {               \
-        (obs).stack = NULL;                     \
-        (obs).size = 0;                         \
-        (obs).used = 0;                         \
-        (obs).yaf = (_yaf);                     \
-    }                                           \
-
-
-/* free a bytestack */
-#define yajl_bs_free(obs)                 \
-    if ((obs).stack) (obs).yaf->free((obs).yaf->ctx, (obs).stack);   
-
-#define yajl_bs_current(obs)               \
-    (assert((obs).used > 0), (obs).stack[(obs).used - 1])
-
-#define yajl_bs_push(obs, byte) {                       \
-    if (((obs).size - (obs).used) == 0) {               \
-        (obs).size += YAJL_BS_INC;                      \
-        (obs).stack = (obs).yaf->realloc((obs).yaf->ctx,\
-                                         (void *) (obs).stack, (obs).size);\
-    }                                                   \
-    (obs).stack[((obs).used)++] = (byte);               \
-}
-    
-/* removes the top item of the stack, returns nothing */
-#define yajl_bs_pop(obs) { ((obs).used)--; }
-
-#define yajl_bs_set(obs, byte)                          \
-    (obs).stack[((obs).used) - 1] = (byte);             
-    
-
-#endif
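
For reference, the macros above take the stack by value rather than by
pointer. A minimal usage sketch, assuming the default allocation
routines from yajl_alloc.h (assert.h is needed because yajl_bs_current
asserts):

    #include <assert.h>
    #include "yajl_alloc.h"
    #include "yajl_bytestack.h"

    static void bytestack_demo(void)
    {
        yajl_alloc_funcs afs;
        yajl_bytestack bs;

        yajl_set_default_alloc_funcs(&afs);
        yajl_bs_init(bs, &afs);

        yajl_bs_push(bs, 1);
        yajl_bs_push(bs, 2);
        assert(yajl_bs_current(bs) == 2);

        yajl_bs_set(bs, 3);   /* overwrite the top byte */
        yajl_bs_pop(bs);      /* drop it again */
        assert(yajl_bs_current(bs) == 1);

        yajl_bs_free(bs);
    }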

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_common.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_common.h b/src/ejson/c_src/yajl/yajl_common.h
deleted file mode 100644
index a227deb..0000000
--- a/src/ejson/c_src/yajl/yajl_common.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#ifndef __YAJL_COMMON_H__
-#define __YAJL_COMMON_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif    
-
-#define YAJL_MAX_DEPTH 128
-
-/* msft dll export gunk.  To build a DLL on windows, you
- * must define WIN32, YAJL_SHARED, and YAJL_BUILD.  To use a shared
- * DLL, you must define YAJL_SHARED and WIN32 */
-#if defined(WIN32) && defined(YAJL_SHARED)
-#  ifdef YAJL_BUILD
-#    define YAJL_API __declspec(dllexport)
-#  else
-#    define YAJL_API __declspec(dllimport)
-#  endif
-#else
-#  define YAJL_API
-#endif 
-
-/** pointer to a malloc function, supporting client overriding memory
- *  allocation routines */
-typedef void * (*yajl_malloc_func)(void *ctx, unsigned int sz);
-
-/** pointer to a free function, supporting client overriding memory
- *  allocation routines */
-typedef void (*yajl_free_func)(void *ctx, void * ptr);
-
-/** pointer to a realloc function which can resize an allocation. */
-typedef void * (*yajl_realloc_func)(void *ctx, void * ptr, unsigned int sz);
-
-/** A structure which can be passed to yajl_*_alloc routines to allow the
- *  client to specify memory allocation functions to be used. */
-typedef struct
-{
-    /** pointer to a function that can allocate uninitialized memory */
-    yajl_malloc_func malloc;
-    /** pointer to a function that can resize memory allocations */
-    yajl_realloc_func realloc;
-    /** pointer to a function that can free memory allocated using
-     *  reallocFunction or mallocFunction */
-    yajl_free_func free;
-    /** a context pointer that will be passed to above allocation routines */
-    void * ctx;
-} yajl_alloc_funcs;
-
-#ifdef __cplusplus
-}
-#endif    
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_encode.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_encode.c b/src/ejson/c_src/yajl/yajl_encode.c
deleted file mode 100644
index ad5b1c5..0000000
--- a/src/ejson/c_src/yajl/yajl_encode.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_encode.h"
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-
-static void CharToHex(unsigned char c, char * hexBuf)
-{
-    const char * hexchar = "0123456789ABCDEF";
-    hexBuf[0] = hexchar[c >> 4];
-    hexBuf[1] = hexchar[c & 0x0F];
-}
-
-void
-yajl_string_encode(yajl_buf buf, const unsigned char * str,
-                   unsigned int len)
-{
-    yajl_string_encode2((const yajl_print_t) &yajl_buf_append, buf, str, len);
-}
-
-void
-yajl_string_encode2(const yajl_print_t print,
-                    void * ctx,
-                    const unsigned char * str,
-                    unsigned int len)
-{
-    unsigned int beg = 0;
-    unsigned int end = 0;    
-    char hexBuf[7];
-    hexBuf[0] = '\\'; hexBuf[1] = 'u'; hexBuf[2] = '0'; hexBuf[3] = '0';
-    hexBuf[6] = 0;
-
-    while (end < len) {
-        const char * escaped = NULL;
-        switch (str[end]) {
-            case '\r': escaped = "\\r"; break;
-            case '\n': escaped = "\\n"; break;
-            case '\\': escaped = "\\\\"; break;
-            /* case '/': escaped = "\\/"; break; */
-            case '"': escaped = "\\\""; break;
-            case '\f': escaped = "\\f"; break;
-            case '\b': escaped = "\\b"; break;
-            case '\t': escaped = "\\t"; break;
-            default:
-                if ((unsigned char) str[end] < 32) {
-                    CharToHex(str[end], hexBuf + 4);
-                    escaped = hexBuf;
-                }
-                break;
-        }
-        if (escaped != NULL) {
-            print(ctx, (const char *) (str + beg), end - beg);
-            print(ctx, escaped, strlen(escaped));
-            beg = ++end;
-        } else {
-            ++end;
-        }
-    }
-    print(ctx, (const char *) (str + beg), end - beg);
-}
-
-static void hexToDigit(unsigned int * val, const unsigned char * hex)
-{
-    unsigned int i;
-    for (i=0;i<4;i++) {
-        unsigned char c = hex[i];
-        if (c >= 'A') c = (c & ~0x20) - 7;
-        c -= '0';
-        assert(!(c & 0xF0));
-        *val = (*val << 4) | c;
-    }
-}
-
-static void Utf32toUtf8(unsigned int codepoint, char * utf8Buf) 
-{
-    if (codepoint < 0x80) {
-        utf8Buf[0] = (char) codepoint;
-        utf8Buf[1] = 0;
-    } else if (codepoint < 0x0800) {
-        utf8Buf[0] = (char) ((codepoint >> 6) | 0xC0);
-        utf8Buf[1] = (char) ((codepoint & 0x3F) | 0x80);
-        utf8Buf[2] = 0;
-    } else if (codepoint < 0x10000) {
-        utf8Buf[0] = (char) ((codepoint >> 12) | 0xE0);
-        utf8Buf[1] = (char) (((codepoint >> 6) & 0x3F) | 0x80);
-        utf8Buf[2] = (char) ((codepoint & 0x3F) | 0x80);
-        utf8Buf[3] = 0;
-    } else if (codepoint < 0x200000) {
-        utf8Buf[0] =(char)((codepoint >> 18) | 0xF0);
-        utf8Buf[1] =(char)(((codepoint >> 12) & 0x3F) | 0x80);
-        utf8Buf[2] =(char)(((codepoint >> 6) & 0x3F) | 0x80);
-        utf8Buf[3] =(char)((codepoint & 0x3F) | 0x80);
-        utf8Buf[4] = 0;
-    } else {
-        utf8Buf[0] = '?';
-        utf8Buf[1] = 0;
-    }
-}
-
-void yajl_string_decode(yajl_buf buf, const unsigned char * str,
-                        unsigned int len)
-{
-    unsigned int beg = 0;
-    unsigned int end = 0;    
-
-    while (end < len) {
-        if (str[end] == '\\') {
-            char utf8Buf[5];
-            const char * unescaped = "?";
-            yajl_buf_append(buf, str + beg, end - beg);
-            switch (str[++end]) {
-                case 'r': unescaped = "\r"; break;
-                case 'n': unescaped = "\n"; break;
-                case '\\': unescaped = "\\"; break;
-                case '/': unescaped = "/"; break;
-                case '"': unescaped = "\""; break;
-                case 'f': unescaped = "\f"; break;
-                case 'b': unescaped = "\b"; break;
-                case 't': unescaped = "\t"; break;
-                case 'u': {
-                    unsigned int codepoint = 0;
-                    hexToDigit(&codepoint, str + ++end);
-                    end+=3;
-                    /* check if this is a surrogate */
-                    if ((codepoint & 0xFC00) == 0xD800) {
-                        end++;
-                        if (str[end] == '\\' && str[end + 1] == 'u') {
-                            unsigned int surrogate = 0;
-                            hexToDigit(&surrogate, str + end + 2);
-                            codepoint =
-                                (((codepoint & 0x3F) << 10) | 
-                                 ((((codepoint >> 6) & 0xF) + 1) << 16) | 
-                                 (surrogate & 0x3FF));
-                            end += 5;
-                        } else {
-                            unescaped = "?";
-                            break;
-                        }
-                    }
-                    
-                    Utf32toUtf8(codepoint, utf8Buf);
-                    unescaped = utf8Buf;
-                    break;
-                }
-                default:
-                    assert("this should never happen" == NULL);
-            }
-            yajl_buf_append(buf, unescaped, strlen(unescaped));
-            beg = ++end;
-        } else {
-            end++;
-        }
-    }
-    yajl_buf_append(buf, str + beg, end - beg);
-}
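
The surrogate recombination in yajl_string_decode above is an algebraic
rearrangement of the standard UTF-16 formula
cp = 0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00). A brute-force
check of the equivalence over the whole surrogate range (illustrative
only):

    #include <assert.h>

    static void check_surrogate_math(void)
    {
        unsigned int hi, lo;
        for (hi = 0xD800; hi <= 0xDBFF; hi++) {
            for (lo = 0xDC00; lo <= 0xDFFF; lo++) {
                unsigned int standard =
                    0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00);
                /* the form used in yajl_string_decode */
                unsigned int rearranged =
                    ((hi & 0x3F) << 10) |
                    ((((hi >> 6) & 0xF) + 1) << 16) |
                    (lo & 0x3FF);
                assert(standard == rearranged);
            }
        }
    }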

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_encode.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_encode.h b/src/ejson/c_src/yajl/yajl_encode.h
deleted file mode 100644
index 3e3b092..0000000
--- a/src/ejson/c_src/yajl/yajl_encode.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#ifndef __YAJL_ENCODE_H__
-#define __YAJL_ENCODE_H__
-
-#include "yajl_buf.h"
-#include "yajl_gen.h"
-
-void yajl_string_encode2(const yajl_print_t printer,
-                         void * ctx,
-                         const unsigned char * str,
-                         unsigned int length);
-
-void yajl_string_encode(yajl_buf buf, const unsigned char * str,
-                        unsigned int length);
-
-void yajl_string_decode(yajl_buf buf, const unsigned char * str,
-                        unsigned int length);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_gen.c
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_gen.c b/src/ejson/c_src/yajl/yajl_gen.c
deleted file mode 100644
index 6cfda0a..0000000
--- a/src/ejson/c_src/yajl/yajl_gen.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-#include "yajl_gen.h"
-#include "yajl_buf.h"
-#include "yajl_encode.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <math.h>
-
-typedef enum {
-    yajl_gen_start,
-    yajl_gen_map_start,
-    yajl_gen_map_key,
-    yajl_gen_map_val,
-    yajl_gen_array_start,
-    yajl_gen_in_array,
-    yajl_gen_complete,
-    yajl_gen_error
-} yajl_gen_state;
-
-struct yajl_gen_t 
-{
-    unsigned int depth;
-    unsigned int pretty;
-    const char * indentString;
-    yajl_gen_state state[YAJL_MAX_DEPTH];
-    yajl_print_t print;
-    void * ctx; /* yajl_buf */
-    /* memory allocation routines */
-    yajl_alloc_funcs alloc;
-};
-
-yajl_gen
-yajl_gen_alloc(const yajl_gen_config * config,
-               const yajl_alloc_funcs * afs)
-{
-    return yajl_gen_alloc2(NULL, config, afs, NULL);
-}
-
-yajl_gen
-yajl_gen_alloc2(const yajl_print_t callback,
-                const yajl_gen_config * config,
-                const yajl_alloc_funcs * afs,
-                void * ctx)
-{
-    yajl_gen g = NULL;
-    yajl_alloc_funcs afsBuffer;
-
-    /* first order of business is to set up memory allocation routines */
-    if (afs != NULL) {
-        if (afs->malloc == NULL || afs->realloc == NULL || afs->free == NULL)
-        {
-            return NULL;
-        }
-    } else {
-        yajl_set_default_alloc_funcs(&afsBuffer);
-        afs = &afsBuffer;
-    }
-
-    g = (yajl_gen) YA_MALLOC(afs, sizeof(struct yajl_gen_t));
-    memset((void *) g, 0, sizeof(struct yajl_gen_t));
-    /* copy in pointers to allocation routines */
-    memcpy((void *) &(g->alloc), (void *) afs, sizeof(yajl_alloc_funcs));
-
-    if (config) {
-        g->pretty = config->beautify;
-        g->indentString = config->indentString ? config->indentString : "  ";
-    }
-
-    if (callback) {
-        g->print = callback;
-        g->ctx = ctx;
-    } else {
-        g->print = (yajl_print_t)&yajl_buf_append;
-        g->ctx = yajl_buf_alloc(&(g->alloc));
-    }
-
-    return g;
-}
-
-void
-yajl_gen_free(yajl_gen g)
-{
-    if (g->print == (yajl_print_t)&yajl_buf_append) yajl_buf_free((yajl_buf)g->ctx);
-    YA_FREE(&(g->alloc), g);
-}
-
-#define INSERT_SEP \
-    if (g->state[g->depth] == yajl_gen_map_key ||               \
-        g->state[g->depth] == yajl_gen_in_array) {              \
-        g->print(g->ctx, ",", 1);                               \
-        if (g->pretty) g->print(g->ctx, "\n", 1);               \
-    } else if (g->state[g->depth] == yajl_gen_map_val) {        \
-        g->print(g->ctx, ":", 1);                               \
-        if (g->pretty) g->print(g->ctx, " ", 1);                \
-   } 
-
-#define INSERT_WHITESPACE                                               \
-    if (g->pretty) {                                                    \
-        if (g->state[g->depth] != yajl_gen_map_val) {                   \
-            unsigned int _i;                                            \
-            for (_i=0;_i<g->depth;_i++)                                 \
-                g->print(g->ctx, g->indentString,                       \
-                         strlen(g->indentString));                      \
-        }                                                               \
-    }
-
-#define ENSURE_NOT_KEY \
-    if (g->state[g->depth] == yajl_gen_map_key) {   \
-        return yajl_gen_keys_must_be_strings;       \
-    }                                               \
-
-/* check that we're not complete or in an error state, i.e. that we're
- * in a valid state to be generating */
-#define ENSURE_VALID_STATE \
-    if (g->state[g->depth] == yajl_gen_error) {   \
-        return yajl_gen_in_error_state;\
-    } else if (g->state[g->depth] == yajl_gen_complete) {   \
-        return yajl_gen_generation_complete;                \
-    }
-
-#define INCREMENT_DEPTH \
-    if (++(g->depth) >= YAJL_MAX_DEPTH) return yajl_max_depth_exceeded;
-
-#define APPENDED_ATOM \
-    switch (g->state[g->depth]) {                   \
-        case yajl_gen_start:                        \
-            g->state[g->depth] = yajl_gen_complete; \
-            break;                                  \
-        case yajl_gen_map_start:                    \
-        case yajl_gen_map_key:                      \
-            g->state[g->depth] = yajl_gen_map_val;  \
-            break;                                  \
-        case yajl_gen_array_start:                  \
-            g->state[g->depth] = yajl_gen_in_array; \
-            break;                                  \
-        case yajl_gen_map_val:                      \
-            g->state[g->depth] = yajl_gen_map_key;  \
-            break;                                  \
-        default:                                    \
-            break;                                  \
-    }                                               \
-
-#define FINAL_NEWLINE                                        \
-    if (g->pretty && g->state[g->depth] == yajl_gen_complete) \
-        g->print(g->ctx, "\n", 1);        
-    
-yajl_gen_status
-yajl_gen_integer(yajl_gen g, long int number)
-{
-    char i[32];
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    sprintf(i, "%ld", number);
-    g->print(g->ctx, i, strlen(i));
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
-#include <float.h>
-#define isnan _isnan
-#define isinf !_finite
-#endif
-
-yajl_gen_status
-yajl_gen_double(yajl_gen g, double number)
-{
-    char i[32];
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; 
-    if (isnan(number) || isinf(number)) return yajl_gen_invalid_number;
-    INSERT_SEP; INSERT_WHITESPACE;
-    sprintf(i, "%g", number);
-    g->print(g->ctx, i, strlen(i));
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_number(yajl_gen g, const char * s, unsigned int l)
-{
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    g->print(g->ctx, s, l);
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_string(yajl_gen g, const unsigned char * str,
-                unsigned int len)
-{
-    ENSURE_VALID_STATE; INSERT_SEP; INSERT_WHITESPACE;
-    g->print(g->ctx, "\"", 1);
-    yajl_string_encode2(g->print, g->ctx, str, len);
-    g->print(g->ctx, "\"", 1);
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_null(yajl_gen g)
-{
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    g->print(g->ctx, "null", strlen("null"));
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_bool(yajl_gen g, int boolean)
-{
-    const char * val = boolean ? "true" : "false";
-
-	ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    g->print(g->ctx, val, strlen(val));
-    APPENDED_ATOM;
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_map_open(yajl_gen g)
-{
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    INCREMENT_DEPTH; 
-    
-    g->state[g->depth] = yajl_gen_map_start;
-    g->print(g->ctx, "{", 1);
-    if (g->pretty) g->print(g->ctx, "\n", 1);
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_map_close(yajl_gen g)
-{
-    ENSURE_VALID_STATE; 
-    (g->depth)--;
-    if (g->pretty) g->print(g->ctx, "\n", 1);
-    APPENDED_ATOM;
-    INSERT_WHITESPACE;
-    g->print(g->ctx, "}", 1);
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_array_open(yajl_gen g)
-{
-    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
-    INCREMENT_DEPTH; 
-    g->state[g->depth] = yajl_gen_array_start;
-    g->print(g->ctx, "[", 1);
-    if (g->pretty) g->print(g->ctx, "\n", 1);
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_array_close(yajl_gen g)
-{
-    ENSURE_VALID_STATE;
-    if (g->pretty) g->print(g->ctx, "\n", 1);
-    (g->depth)--;
-    APPENDED_ATOM;
-    INSERT_WHITESPACE;
-    g->print(g->ctx, "]", 1);
-    FINAL_NEWLINE;
-    return yajl_gen_status_ok;
-}
-
-yajl_gen_status
-yajl_gen_get_buf(yajl_gen g, const unsigned char ** buf,
-                 unsigned int * len)
-{
-    if (g->print != (yajl_print_t)&yajl_buf_append) return yajl_gen_no_buf;
-    *buf = yajl_buf_data((yajl_buf)g->ctx);
-    *len = yajl_buf_len((yajl_buf)g->ctx);
-    return yajl_gen_status_ok;
-}
-
-void
-yajl_gen_clear(yajl_gen g)
-{
-    if (g->print == (yajl_print_t)&yajl_buf_append) yajl_buf_clear((yajl_buf)g->ctx);
-}
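
The APPENDED_ATOM macro above drives a small per-depth state machine:
after one complete value is emitted, the generator moves to the state
that decides which separator comes next. Restated as a standalone
function for readability (a sketch, not part of the tree):

    typedef enum {
        gen_start, gen_map_start, gen_map_key, gen_map_val,
        gen_array_start, gen_in_array, gen_complete, gen_error
    } gen_state;

    static gen_state after_atom(gen_state s)
    {
        switch (s) {
            case gen_start:       return gen_complete;  /* lone scalar */
            case gen_map_start:
            case gen_map_key:     return gen_map_val;   /* key emitted */
            case gen_map_val:     return gen_map_key;   /* value emitted */
            case gen_array_start: return gen_in_array;  /* first element */
            default:              return s;             /* e.g. stay in array */
        }
    }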

http://git-wip-us.apache.org/repos/asf/couchdb/blob/191a9b41/src/ejson/c_src/yajl/yajl_gen.h
----------------------------------------------------------------------
diff --git a/src/ejson/c_src/yajl/yajl_gen.h b/src/ejson/c_src/yajl/yajl_gen.h
deleted file mode 100644
index 97c2042..0000000
--- a/src/ejson/c_src/yajl/yajl_gen.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2010, Lloyd Hilaiel.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- * 
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- * 
- *  3. Neither the name of Lloyd Hilaiel nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ 
-
-/**
- * \file yajl_gen.h
- * Interface to YAJL's JSON generation facilities.
- */
-
-#include "yajl_common.h"
-
-#ifndef __YAJL_GEN_H__
-#define __YAJL_GEN_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif    
-    /** generator status codes */
-    typedef enum {
-        /** no error */
-        yajl_gen_status_ok = 0,
-        /** at a point where a map key is generated, a function other than
-         *  yajl_gen_string was called */
-        yajl_gen_keys_must_be_strings,
-        /** YAJL's maximum generation depth was exceeded.  see
-         *  YAJL_MAX_DEPTH */
-        yajl_max_depth_exceeded,
-        /** A generator function (yajl_gen_XXX) was called while in an error
-         *  state */
-        yajl_gen_in_error_state,
-        /** A complete JSON document has been generated */
-        yajl_gen_generation_complete,                
-        /** yajl_gen_double was passed an invalid floating point value
-         *  (infinity or NaN). */
-        yajl_gen_invalid_number,
-        /** A print callback was passed in, so there is no internal
-         * buffer to get from */
-        yajl_gen_no_buf
-    } yajl_gen_status;
-
-    /** an opaque handle to a generator */
-    typedef struct yajl_gen_t * yajl_gen;
-
-    /** a callback used for "printing" the results. */
-    typedef void (*yajl_print_t)(void * ctx,
-                                 const char * str,
-                                 unsigned int len);
-
-    /** configuration structure for the generator */
-    typedef struct {
-        /** generate indented (beautiful) output */
-        unsigned int beautify;
-        /** an opportunity to define an indent string, such as \\t or
-         *  some number of spaces.  the default is two spaces '  '.  This
-         *  member is only relevant when beautify is true */
-        const char * indentString;
-    } yajl_gen_config;
-
-    /** allocate a generator handle
-     *  \param config a pointer to a structure containing parameters which
-     *                configure the behavior of the json generator
-     *  \param allocFuncs an optional pointer to a structure which allows
-     *                    the client to override the memory allocation
-     *                    used by yajl.  May be NULL, in which case
-     *                    malloc/free/realloc will be used.
-     *
-     *  \returns an allocated handle on success, NULL on failure (bad params)
-     */
-    YAJL_API yajl_gen yajl_gen_alloc(const yajl_gen_config * config,
-                                     const yajl_alloc_funcs * allocFuncs);
-
-    /** allocate a generator handle that will print to the specified
-     *  callback rather than storing the results in an internal buffer.
-     *  \param callback   a pointer to a printer function.  May be NULL
-     *                    in which case the results will be stored in an
-     *                    internal buffer.
-     *  \param config     a pointer to a structure containing parameters
-     *                    which configure the behavior of the json
-     *                    generator.
-     *  \param allocFuncs an optional pointer to a structure which allows
-     *                    the client to override the memory allocation
-     *                    used by yajl.  May be NULL, in which case
-     *                    malloc/free/realloc will be used.
-     *  \param ctx        a context pointer that will be passed to the
-     *                    printer callback.
-     *
-     *  \returns an allocated handle on success, NULL on failure (bad params)
-     */
-    YAJL_API yajl_gen yajl_gen_alloc2(const yajl_print_t callback,
-                                      const yajl_gen_config * config,
-                                      const yajl_alloc_funcs * allocFuncs,
-                                      void * ctx);
-
-    /** free a generator handle */    
-    YAJL_API void yajl_gen_free(yajl_gen handle);
-
-    YAJL_API yajl_gen_status yajl_gen_integer(yajl_gen hand, long int number);
-    /** generate a floating point number.  number may not be infinity or
-     *  NaN, as these have no representation in JSON.  In these cases the
-     *  generator will return 'yajl_gen_invalid_number' */
-    YAJL_API yajl_gen_status yajl_gen_double(yajl_gen hand, double number);
-    YAJL_API yajl_gen_status yajl_gen_number(yajl_gen hand,
-                                             const char * num,
-                                             unsigned int len);
-    YAJL_API yajl_gen_status yajl_gen_string(yajl_gen hand,
-                                             const unsigned char * str,
-                                             unsigned int len);
-    YAJL_API yajl_gen_status yajl_gen_null(yajl_gen hand);
-    YAJL_API yajl_gen_status yajl_gen_bool(yajl_gen hand, int boolean);    
-    YAJL_API yajl_gen_status yajl_gen_map_open(yajl_gen hand);
-    YAJL_API yajl_gen_status yajl_gen_map_close(yajl_gen hand);
-    YAJL_API yajl_gen_status yajl_gen_array_open(yajl_gen hand);
-    YAJL_API yajl_gen_status yajl_gen_array_close(yajl_gen hand);
-
-    /** access the null terminated generator buffer.  If incrementally
-     *  outputting JSON, one should call yajl_gen_clear to clear the
-     *  buffer.  This allows stream generation. */
-    YAJL_API yajl_gen_status yajl_gen_get_buf(yajl_gen hand,
-                                              const unsigned char ** buf,
-                                              unsigned int * len);
-
-    /** clear yajl's output buffer, but maintain all internal generation
-     *  state.  This function will not "reset" the generator state, and is
-     *  intended to enable incremental JSON outputting. */
-    YAJL_API void yajl_gen_clear(yajl_gen hand);
-
-#ifdef __cplusplus
-}
-#endif    
-
-#endif
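
For reference, generating {"answer": 42} with the API deleted above
takes a handful of calls. A minimal sketch; error checking is elided,
though every call returns a yajl_gen_status:

    #include <stdio.h>
    #include "yajl_gen.h"

    int main(void)
    {
        /* field order per the header: beautify, indentString */
        yajl_gen_config conf = { 1, NULL };
        yajl_gen g = yajl_gen_alloc(&conf, NULL);
        const unsigned char *buf;
        unsigned int len;

        yajl_gen_map_open(g);
        yajl_gen_string(g, (const unsigned char *) "answer", 6);
        yajl_gen_integer(g, 42);
        yajl_gen_map_close(g);

        /* only valid because no print callback was installed */
        yajl_gen_get_buf(g, &buf, &len);
        fwrite(buf, 1, len, stdout);

        yajl_gen_free(g);
        return 0;
    }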


[11/49] Remove src/couch

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_db_update_notifier_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_db_update_notifier_sup.erl b/src/couch/src/couch_db_update_notifier_sup.erl
deleted file mode 100644
index 9eb943a..0000000
--- a/src/couch/src/couch_db_update_notifier_sup.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%
-% This causes an OS process to be spawned, which is notified every time a
-% database is updated.
-%
-% The notifications are in the form of the database name sent as a line of
-% text to the OS process's stdout.
-%
-
--module(couch_db_update_notifier_sup).
-
--behaviour(supervisor).
--behaviour(config_listener).
-
--export([start_link/0, init/1]).
-
-% config_listener api
--export([handle_config_change/5]).
-
-
-start_link() ->
-    supervisor:start_link({local, couch_db_update_notifier_sup},
-        couch_db_update_notifier_sup, []).
-
-init([]) ->
-    ok = config:listen_for_changes(?MODULE, nil),
-
-    UpdateNotifierExes = config:get("update_notification"),
-
-    {ok,
-        {{one_for_one, 10, 3600},
-            lists:map(fun({Name, UpdateNotifierExe}) ->
-                {Name,
-                {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
-                    permanent,
-                    1000,
-                    supervisor,
-                    [couch_db_update_notifier]}
-                end, UpdateNotifierExes)}}.
-
-%% @doc when update_notification configuration changes, terminate the process
-%%      for that notifier and start a new one with the updated config
-handle_config_change("update_notification", Id, Exe, _, _) ->
-    ChildSpec = {
-        Id,
-        {couch_db_update_notifier, start_link, [Exe]},
-        permanent,
-        1000,
-        supervisor,
-        [couch_db_update_notifier]
-    },
-    supervisor:terminate_child(couch_db_update_notifier_sup, Id),
-    supervisor:delete_child(couch_db_update_notifier_sup, Id),
-    supervisor:start_child(couch_db_update_notifier_sup, ChildSpec),
-    {ok, nil};
-handle_config_change(_, _, _, _, _) ->
-    {ok, nil}.
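
For reference, a notifier registered under the [update_notification]
config section is just an external program consuming the line-delimited
notifications described above. A hypothetical consumer, assuming it
receives the lines on its standard input:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[1024];
        while (fgets(line, sizeof(line), stdin) != NULL) {
            line[strcspn(line, "\r\n")] = '\0';  /* strip the newline */
            fprintf(stderr, "database updated: %s\n", line);
        }
        return 0;
    }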

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_db_updater.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
deleted file mode 100644
index 649826a..0000000
--- a/src/couch/src/couch_db_updater.erl
+++ /dev/null
@@ -1,1264 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
-
--export([btree_by_id_split/1, btree_by_id_join/2, btree_by_id_reduce/2]).
--export([btree_by_seq_split/1, btree_by_seq_join/2, btree_by_seq_reduce/2]).
--export([make_doc_summary/2]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(comp_header, {
-    db_header,
-    meta_state
-}).
-
--record(merge_st, {
-    id_tree,
-    seq_tree,
-    curr,
-    rem_seqs,
-    infos
-}).
-
-init({DbName, Filepath, Fd, Options}) ->
-    case lists:member(create, Options) of
-    true ->
-        % create a new header and write it to the file
-        Header =  #db_header{},
-        ok = couch_file:write_header(Fd, Header),
-        % delete any old compaction files that might be hanging around
-        RootDir = config:get("couchdb", "database_dir", "."),
-        couch_file:delete(RootDir, Filepath ++ ".compact"),
-        couch_file:delete(RootDir, Filepath ++ ".compact.data"),
-        couch_file:delete(RootDir, Filepath ++ ".compact.meta");
-    false ->
-        case couch_file:read_header(Fd) of
-        {ok, Header} ->
-            ok;
-        no_valid_header ->
-            % create a new header and write it to the file
-            Header =  #db_header{},
-            ok = couch_file:write_header(Fd, Header),
-            % delete any old compaction files that might be hanging around
-            file:delete(Filepath ++ ".compact"),
-            file:delete(Filepath ++ ".compact.data"),
-            file:delete(Filepath ++ ".compact.meta")
-        end
-    end,
-    Db = init_db(DbName, Filepath, Fd, Header, Options),
-    % we don't load validation funs here because the fabric query is liable to
-    % race conditions.  Instead see couch_db:validate_doc_update, which loads
-    % them lazily
-    {ok, Db#db{main_pid = self()}}.
-
-
-terminate(_Reason, Db) ->
-    % If the reason we died is because our fd disappeared
-    % then we don't need to try closing it again.
-    case Db#db.fd of
-        Pid when is_pid(Pid) ->
-            ok = couch_file:close(Db#db.fd);
-        _ ->
-            ok
-    end,
-    couch_util:shutdown_sync(Db#db.compactor_pid),
-    couch_util:shutdown_sync(Db#db.fd),
-    ok.
-
-handle_call(get_db, _From, Db) ->
-    {reply, {ok, Db}, Db};
-handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
-    {reply, ok, Db}; % no data waiting, return ok immediately
-handle_call(full_commit, _From,  Db) ->
-    {reply, ok, commit_data(Db)};
-handle_call({full_commit, RequiredSeq}, _From, Db)
-        when RequiredSeq =< Db#db.committed_update_seq ->
-    {reply, ok, Db};
-handle_call({full_commit, _}, _, Db) ->
-    {reply, ok, commit_data(Db)}; % commit the data and return ok
-handle_call(start_compact, _From, Db) ->
-    {noreply, NewDb} = handle_cast(start_compact, Db),
-    {reply, {ok, NewDb#db.compactor_pid}, NewDb};
-handle_call(compactor_pid, _From, #db{compactor_pid = Pid} = Db) ->
-    {reply, Pid, Db};
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
-    {reply, ok, Db};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
-    unlink(Pid),
-    exit(Pid, kill),
-    RootDir = config:get("couchdb", "database_dir", "."),
-    ok = couch_file:delete(RootDir, Db#db.filepath ++ ".compact"),
-    Db2 = Db#db{compactor_pid = nil},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2};
-handle_call(increment_update_seq, _From, Db) ->
-    Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    couch_db_update_notifier:notify({updated, Db#db.name}),
-    {reply, {ok, Db2#db.update_seq}, Db2};
-
-handle_call({set_security, NewSec}, _From, #db{compression = Comp} = Db) ->
-    {ok, Ptr, _} = couch_file:append_term(
-        Db#db.fd, NewSec, [{compression, Comp}]),
-    Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
-            update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2};
-
-handle_call({set_revs_limit, Limit}, _From, Db) ->
-    Db2 = commit_data(Db#db{revs_limit=Limit,
-            update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2};
-
-handle_call({purge_docs, _IdRevs}, _From,
-        #db{compactor_pid=Pid}=Db) when Pid /= nil ->
-    {reply, {error, purge_during_compaction}, Db};
-handle_call({purge_docs, IdRevs}, _From, Db) ->
-    #db{
-        fd = Fd,
-        id_tree = DocInfoByIdBTree,
-        seq_tree = DocInfoBySeqBTree,
-        update_seq = LastSeq,
-        header = Header = #db_header{purge_seq=PurgeSeq},
-        compression = Comp
-        } = Db,
-    DocLookups = couch_btree:lookup(DocInfoByIdBTree,
-            [Id || {Id, _Revs} <- IdRevs]),
-
-    NewDocInfos = lists:zipwith(
-        fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
-            case couch_key_tree:remove_leafs(Tree, Revs) of
-            {_, []=_RemovedRevs} -> % no change
-                nil;
-            {NewTree, RemovedRevs} ->
-                {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs}
-            end;
-        (_, not_found) ->
-            nil
-        end,
-        IdRevs, DocLookups),
-
-    SeqsToRemove = [Seq
-            || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
-
-    FullDocInfoToUpdate = [FullInfo
-            || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
-            <- NewDocInfos, Tree /= []],
-
-    IdRevsPurged = [{Id, Revs}
-            || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
-
-    {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
-        fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
-            Tree2 = couch_key_tree:map_leafs(
-                fun(_RevId, Leaf) ->
-                    Leaf#leaf{seq=SeqAcc+1}
-                end, Tree),
-            {FullInfo#full_doc_info{rev_tree=Tree2}, SeqAcc + 1}
-        end, LastSeq, FullDocInfoToUpdate),
-
-    IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
-            <- NewDocInfos],
-
-    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
-            DocInfoToUpdate, SeqsToRemove),
-    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
-            FullDocInfoToUpdate, IdsToRemove),
-    {ok, Pointer, _} = couch_file:append_term(
-            Fd, IdRevsPurged, [{compression, Comp}]),
-
-    Db2 = commit_data(
-        Db#db{
-            id_tree = DocInfoByIdBTree2,
-            seq_tree = DocInfoBySeqBTree2,
-            update_seq = NewSeq + 1,
-            header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
-
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    couch_db_update_notifier:notify({updated, Db#db.name}),
-    {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2}.
-
-
-handle_cast({load_validation_funs, ValidationFuns}, Db) ->
-    Db2 = Db#db{validate_doc_funs = ValidationFuns},
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {noreply, Db2};
-handle_cast(start_compact, Db) ->
-    case Db#db.compactor_pid of
-    nil ->
-        ?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]),
-        Pid = spawn_link(fun() -> start_copy_compact(Db) end),
-        Db2 = Db#db{compactor_pid=Pid},
-        ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-        {noreply, Db2};
-    _ ->
-        % compact currently running, this is a no-op
-        {noreply, Db}
-    end;
-handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
-    {ok, NewFd} = couch_file:open(CompactFilepath),
-    {ok, NewHeader} = couch_file:read_header(NewFd),
-    #db{update_seq=NewSeq} = NewDb =
-        init_db(Db#db.name, Filepath, NewFd, NewHeader, Db#db.options),
-    unlink(NewFd),
-    case Db#db.update_seq == NewSeq of
-    true ->
-        % suck up all the local docs into memory and write them to the new db
-        {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_tree,
-                fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
-        {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_tree, LocalDocs),
-
-        NewDb2 = commit_data(NewDb#db{
-            local_tree = NewLocalBtree,
-            main_pid = self(),
-            filepath = Filepath,
-            instance_start_time = Db#db.instance_start_time,
-            revs_limit = Db#db.revs_limit
-        }),
-
-        ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
-                [Filepath, CompactFilepath]),
-        ok = file:rename(CompactFilepath, Filepath ++ ".compact"),
-        RootDir = config:get("couchdb", "database_dir", "."),
-        couch_file:delete(RootDir, Filepath),
-        ok = file:rename(Filepath ++ ".compact", Filepath),
-        % Delete the old meta compaction file after promoting
-        % the compaction file.
-        couch_file:delete(RootDir, Filepath ++ ".compact.meta"),
-        close_db(Db),
-        NewDb3 = refresh_validate_doc_funs(NewDb2),
-        ok = gen_server:call(couch_server, {db_updated, NewDb3}, infinity),
-        couch_db_update_notifier:notify({compacted, NewDb3#db.name}),
-        ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
-        {noreply, NewDb3#db{compactor_pid=nil}};
-    false ->
-        ?LOG_INFO("Compaction file still behind main file "
-            "(update seq=~p. compact update seq=~p). Retrying.",
-            [Db#db.update_seq, NewSeq]),
-        close_db(NewDb),
-        Pid = spawn_link(fun() -> start_copy_compact(Db) end),
-        Db2 = Db#db{compactor_pid=Pid},
-        ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-        {noreply, Db2}
-    end;
-
-handle_cast(Msg, #db{name = Name} = Db) ->
-    ?LOG_ERROR("Database `~s` updater received unexpected cast: ~p", [Name, Msg]),
-    {stop, Msg, Db}.
-
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
-        FullCommit}, Db) ->
-    GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
-    if NonRepDocs == [] ->
-        {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
-                [Client], MergeConflicts, FullCommit);
-    true ->
-        GroupedDocs3 = GroupedDocs2,
-        FullCommit2 = FullCommit,
-        Clients = [Client]
-    end,
-    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
-    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
-                FullCommit2) of
-    {ok, Db2, UpdatedDDocIds} ->
-        ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-        if Db2#db.update_seq /= Db#db.update_seq ->
-            couch_db_update_notifier:notify({updated, Db2#db.name});
-        true -> ok
-        end,
-        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
-        lists:foreach(fun(DDocId) ->
-            couch_db_update_notifier:notify({ddoc_updated, {Db#db.name, DDocId}})
-        end, UpdatedDDocIds),
-        {noreply, Db2, hibernate}
-    catch
-        throw: retry ->
-            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
-            {noreply, Db, hibernate}
-    end;
-handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
-    %no outstanding delayed commits, ignore
-    {noreply, Db};
-handle_info(delayed_commit, Db) ->
-    case commit_data(Db) of
-        Db ->
-            {noreply, Db};
-        Db2 ->
-            ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-            {noreply, Db2}
-    end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
-    {noreply, Db};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
-    {stop, Reason, Db};
-handle_info({'DOWN', Ref, _, _, Reason}, #db{fd_monitor=Ref, name=Name} = Db) ->
-    ?LOG_ERROR("DB ~s shutting down - Fd ~p", [Name, Reason]),
-    {stop, normal, Db#db{fd=undefined, fd_monitor=undefined}}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-merge_updates([[{_,{#doc{id=X},_}}|_]=A|RestA], [[{_,{#doc{id=X},_}}|_]=B|RestB]) ->
-    [A++B | merge_updates(RestA, RestB)];
-merge_updates([[{_,{#doc{id=X},_}}|_]|_]=A, [[{_,{#doc{id=Y},_}}|_]|_]=B) when X < Y ->
-    [hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_,{#doc{id=X},_}}|_]|_]=A, [[{_,{#doc{id=Y},_}}|_]|_]=B) when X > Y ->
-    [hd(B) | merge_updates(A, tl(B))];
-merge_updates([], RestB) ->
-    RestB;
-merge_updates(RestA, []) ->
-    RestA.
-
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
-    receive
-        % Only collect updates with the same MergeConflicts flag and without
-        % local docs. It's easier to just avoid multiple _local doc
-        % updaters than to deal with their possible conflicts, and local doc
-        % writes are relatively rare. This can be optimized later if really needed.
-        {update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
-            GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
-                    || DocGroup <- GroupedDocs],
-            GroupedDocsAcc2 =
-                merge_updates(GroupedDocsAcc, GroupedDocs2),
-            collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
-                    MergeConflicts, (FullCommit or FullCommit2))
-    after 0 ->
-        {GroupedDocsAcc, ClientsAcc, FullCommit}
-    end.
-
-rev_tree(DiskTree) ->
-    couch_key_tree:mapfold(fun
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq}, leaf, _Acc) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {#leaf{deleted=?i2b(IsDeleted), ptr=BodyPointer, seq=UpdateSeq}, nil};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq}, branch, Acc) ->
-            {#leaf{deleted=?i2b(IsDeleted), ptr=BodyPointer, seq=UpdateSeq}, Acc};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, leaf, Acc) ->
-            Acc2 = sum_leaf_sizes(Acc, Size),
-            {#leaf{deleted=?i2b(IsDeleted), ptr=BodyPointer, seq=UpdateSeq, size=Size}, Acc2};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, branch, Acc) ->
-            {#leaf{deleted=?i2b(IsDeleted), ptr=BodyPointer, seq=UpdateSeq, size=Size}, Acc};
-        (_RevId, ?REV_MISSING, _Type, Acc) ->
-            {?REV_MISSING, Acc}
-    end, 0, DiskTree).
-
-disk_tree(RevTree) ->
-    couch_key_tree:map(fun
-        (_RevId, ?REV_MISSING) ->
-            ?REV_MISSING;
-        (_RevId, #leaf{deleted=IsDeleted, ptr=BodyPointer, seq=UpdateSeq, size=Size}) ->
-            {?b2i(IsDeleted), BodyPointer, UpdateSeq, Size}
-    end, RevTree).
-
-btree_by_seq_split(#full_doc_info{id=Id, update_seq=Seq, deleted=Del, rev_tree=T}) ->
-    {Seq, {Id, ?b2i(Del), disk_tree(T)}}.
-
-btree_by_seq_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
-    {RevTree, LeafsSize} = rev_tree(DiskTree),
-    #full_doc_info{
-        id = Id,
-        update_seq = Seq,
-        deleted = ?i2b(Del),
-        rev_tree = RevTree,
-        leafs_size = LeafsSize
-    };
-btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
-    % Older versions stored #doc_info records in the seq_tree.
-    % Compact to upgrade.
-    #doc_info{
-        id = Id,
-        high_seq=KeySeq,
-        revs =
-            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
-                {Rev, Seq, Bp} <- RevInfos] ++
-            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
-                {Rev, Seq, Bp} <- DeletedRevInfos]}.
-
-btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
-        deleted=Deleted, rev_tree=Tree}) ->
-    {Id, {Seq, ?b2i(Deleted), disk_tree(Tree)}}.
-
-btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
-    {Tree, LeafsSize} = rev_tree(DiskTree),
-    #full_doc_info{
-        id = Id,
-        update_seq = HighSeq,
-        deleted = ?i2b(Deleted),
-        rev_tree = Tree,
-        leafs_size = LeafsSize
-    }.
-
-btree_by_id_reduce(reduce, FullDocInfos) ->
-    lists:foldl(
-        fun(Info, {NotDeleted, Deleted, Size}) ->
-            Size2 = sum_leaf_sizes(Size, Info#full_doc_info.leafs_size),
-            case Info#full_doc_info.deleted of
-            true ->
-                {NotDeleted, Deleted + 1, Size2};
-            false ->
-                {NotDeleted + 1, Deleted, Size2}
-            end
-        end,
-        {0, 0, 0}, FullDocInfos);
-btree_by_id_reduce(rereduce, Reds) ->
-    lists:foldl(
-        fun({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSize}) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
-        ({NotDeleted, Deleted, Size}, {AccNotDeleted, AccDeleted, AccSize}) ->
-            AccSize2 = sum_leaf_sizes(AccSize, Size),
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSize2}
-        end,
-        {0, 0, 0}, Reds).
-
-sum_leaf_sizes(nil, _) ->
-    nil;
-sum_leaf_sizes(_, nil) ->
-    nil;
-sum_leaf_sizes(Size1, Size2) ->
-    Size1 + Size2.
-
-btree_by_seq_reduce(reduce, DocInfos) ->
-    % count the number of documents
-    length(DocInfos);
-btree_by_seq_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) ->
-    OldSz = tuple_size(Old),
-    NewValuesTail =
-        lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz),
-    list_to_tuple(tuple_to_list(Old) ++ NewValuesTail);
-simple_upgrade_record(Old, _New) ->
-    Old.
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions smaller than 0.10.0 are no longer supported").
-
-init_db(DbName, Filepath, Fd, Header0, Options) ->
-    Header1 = simple_upgrade_record(Header0, #db_header{}),
-    Header =
-    case element(2, Header1) of
-    1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11
-    5 -> Header1; % pre 1.2
-    ?LATEST_DISK_VERSION -> Header1;
-    _ -> throw({database_disk_version_error, "Incorrect disk header version"})
-    end,
-
-    {ok, FsyncOptions} = couch_util:parse_term(
-            config:get("couchdb", "fsync_options",
-                    "[before_header, after_header, on_file_open]")),
-
-    case lists:member(on_file_open, FsyncOptions) of
-    true -> ok = couch_file:sync(Fd);
-    _ -> ok
-    end,
-
-    Compression = couch_compress:get_compression_method(),
-
-    {ok, IdBtree} = couch_btree:open(Header#db_header.id_tree_state, Fd,
-        [{split, fun ?MODULE:btree_by_id_split/1},
-        {join, fun ?MODULE:btree_by_id_join/2},
-        {reduce, fun ?MODULE:btree_by_id_reduce/2},
-        {compression, Compression}]),
-    {ok, SeqBtree} = couch_btree:open(Header#db_header.seq_tree_state, Fd,
-            [{split, fun ?MODULE:btree_by_seq_split/1},
-            {join, fun ?MODULE:btree_by_seq_join/2},
-            {reduce, fun ?MODULE:btree_by_seq_reduce/2},
-            {compression, Compression}]),
-    {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_tree_state, Fd,
-        [{compression, Compression}]),
-    case Header#db_header.security_ptr of
-    nil ->
-        Security = [],
-        SecurityPtr = nil;
-    SecurityPtr ->
-        {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
-    end,
-    % convert start time tuple to microsecs and store as a binary string
-    {MegaSecs, Secs, MicroSecs} = now(),
-    StartTime = ?l2b(io_lib:format("~p",
-            [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
-    ok = couch_file:set_db_pid(Fd, self()),
-    #db{
-        fd=Fd,
-        fd_monitor = erlang:monitor(process, Fd),
-        header=Header,
-        id_tree = IdBtree,
-        seq_tree = SeqBtree,
-        local_tree = LocalDocsBtree,
-        committed_update_seq = Header#db_header.update_seq,
-        update_seq = Header#db_header.update_seq,
-        name = DbName,
-        filepath = Filepath,
-        security = Security,
-        security_ptr = SecurityPtr,
-        instance_start_time = StartTime,
-        revs_limit = Header#db_header.revs_limit,
-        fsync_options = FsyncOptions,
-        options = Options,
-        compression = Compression,
-        before_doc_update = couch_util:get_value(before_doc_update, Options, nil),
-        after_doc_read = couch_util:get_value(after_doc_read, Options, nil)
-        }.
-
-
-close_db(#db{fd_monitor = Ref}) ->
-    erlang:demonitor(Ref).
-
-
-refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
-    spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
-    Db#db{validate_doc_funs = undefined};
-refresh_validate_doc_funs(Db0) ->
-    Db = Db0#db{user_ctx = #user_ctx{roles=[<<"_admin">>]}},
-    {ok, DesignDocs} = couch_db:get_design_docs(Db),
-    ProcessDocFuns = lists:flatmap(
-        fun(DesignDocInfo) ->
-            {ok, DesignDoc} = couch_db:open_doc_int(
-                Db, DesignDocInfo, [ejson_body]),
-            case couch_doc:get_validate_doc_fun(DesignDoc) of
-            nil -> [];
-            Fun -> [Fun]
-            end
-        end, DesignDocs),
-    Db#db{validate_doc_funs=ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
-    {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{fd = Fd} = Db,
-        [InfoUnflushed | RestUnflushed], AccFlushed) ->
-    #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
-    {Flushed, LeafsSize} = couch_key_tree:mapfold(
-        fun(_Rev, Value, Type, Acc) ->
-            case Value of
-            #doc{deleted = IsDeleted, body = {summary, Summary, AttsFd}} ->
-                % this node value is actually an unwritten document summary;
-                % write it to disk.
-                % make sure the Fd in the written bins is the same Fd we are
-                % using, and convert the bins, removing the Fd.
-                % All bins should have been written to disk already.
-                case {AttsFd, Fd} of
-                {nil, _} ->
-                    ok;
-                {SameFd, SameFd} ->
-                    ok;
-                _ ->
-                    % Fd where the attachments were written is not the same
-                    % as our Fd. This can happen when a database is being
-                    % switched out during a compaction.
-                    ?LOG_DEBUG("File where the attachments are written has"
-                            " changed. Possibly retrying.", []),
-                    throw(retry)
-                end,
-                {ok, NewSummaryPointer, SummarySize} =
-                    couch_file:append_raw_chunk(Fd, Summary),
-                TotalSize = lists:foldl(
-                    fun(#att{att_len = L}, A) -> A + L end,
-                    SummarySize, Value#doc.atts),
-                NewValue = #leaf{deleted=IsDeleted, ptr=NewSummaryPointer,
-                                 seq=UpdateSeq, size=TotalSize},
-                case Type of
-                leaf ->
-                    {NewValue, Acc + TotalSize};
-                branch ->
-                    {NewValue, Acc}
-                end;
-             {_, _, _, LeafSize} when Type =:= leaf, LeafSize =/= nil ->
-                {Value, Acc + LeafSize};
-             _ ->
-                {Value, Acc}
-            end
-        end, 0, Unflushed),
-    InfoFlushed = InfoUnflushed#full_doc_info{
-        rev_tree = Flushed,
-        leafs_size = LeafsSize
-    },
-    flush_trees(Db, RestUnflushed, [InfoFlushed | AccFlushed]).
-
-
-send_result(Client, Ref, NewResult) ->
-    % used to send a result to the client
-    catch(Client ! {result, self(), {Ref, NewResult}}).
-
-merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
-    {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
-merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
-        [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
-    #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted0,update_seq=OldSeq}
-            = OldDocInfo,
-    {NewRevTree, _} = lists:foldl(
-        fun({Client, {#doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc, Ref}}, {AccTree, OldDeleted}) ->
-            if not MergeConflicts ->
-                case couch_key_tree:merge(AccTree, couch_doc:to_path(NewDoc),
-                    Limit) of
-                {_NewTree, conflicts} when (not OldDeleted) ->
-                    send_result(Client, Ref, conflict),
-                    {AccTree, OldDeleted};
-                {NewTree, conflicts} when PrevRevs /= [] ->
-                    % If a previous revision was specified, check that it is
-                    % a leaf node in the tree
-                    Leafs = couch_key_tree:get_all_leafs(AccTree),
-                    IsPrevLeaf = lists:any(fun({_, {LeafPos, [LeafRevId|_]}}) ->
-                            {LeafPos, LeafRevId} == {Pos-1, hd(PrevRevs)}
-                        end, Leafs),
-                    if IsPrevLeaf ->
-                        {NewTree, OldDeleted};
-                    true ->
-                        send_result(Client, Ref, conflict),
-                        {AccTree, OldDeleted}
-                    end;
-                {NewTree, no_conflicts} when AccTree == NewTree ->
-                    % the tree didn't change at all
-                    % meaning we are saving a rev that's already
-                    % been edited again.
-                    if (Pos == 1) and OldDeleted ->
-                        % this means we are recreating a brand new document
-                        % in a state that already existed before.
-                        % record the rev as a subsequent edit of the deletion
-                        #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
-                                couch_doc:to_doc_info(OldDocInfo),
-                        NewRevId = couch_db:new_revid(
-                                NewDoc#doc{revs={OldPos, [OldRev]}}),
-                        NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
-                        {NewTree2, _} = couch_key_tree:merge(AccTree,
-                                couch_doc:to_path(NewDoc2), Limit),
-                        % we changed the rev id; this tells the caller we did
-                        send_result(Client, Ref, {ok, {OldPos + 1, NewRevId}}),
-                        {NewTree2, OldDeleted};
-                    true ->
-                        send_result(Client, Ref, conflict),
-                        {AccTree, OldDeleted}
-                    end;
-                {NewTree, _} ->
-                    {NewTree, NewDoc#doc.deleted}
-                end;
-            true ->
-                {NewTree, _} = couch_key_tree:merge(AccTree,
-                            couch_doc:to_path(NewDoc), Limit),
-                {NewTree, OldDeleted}
-            end
-        end,
-        {OldTree, OldDeleted0}, NewDocs),
-    if NewRevTree == OldTree ->
-        % nothing changed
-        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
-            AccNewInfos, AccRemoveSeqs, AccSeq);
-    true ->
-        % we have updated the document, give it a new seq #
-        NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
-        RemoveSeqs = case OldSeq of
-            0 -> AccRemoveSeqs;
-            _ -> [OldSeq | AccRemoveSeqs]
-        end,
-        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
-            [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
-    end.
-
-
-
-new_index_entries([], AccById, AccDDocIds) ->
-    {AccById, AccDDocIds};
-new_index_entries([#full_doc_info{id=Id}=Info | Rest], AccById, AccDDocIds) ->
-    #doc_info{revs=[#rev_info{deleted=Del}|_]} = couch_doc:to_doc_info(Info),
-    AccById2 = [Info#full_doc_info{deleted=Del} | AccById],
-    AccDDocIds2 = case Id of
-        <<?DESIGN_DOC_PREFIX, _/binary>> -> [Id | AccDDocIds];
-        _ -> AccDDocIds
-    end,
-    new_index_entries(Rest, AccById2, AccDDocIds2).
-
-
-stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
-    [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
-            #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
-
-update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
-    #db{
-        id_tree = DocInfoByIdBTree,
-        seq_tree = DocInfoBySeqBTree,
-        update_seq = LastSeq,
-        revs_limit = RevsLimit
-        } = Db,
-    Ids = [Id || [{_Client, {#doc{id=Id}, _Ref}}|_] <- DocsList],
-    % look up the old documents, if they exist.
-    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
-    OldDocInfos = lists:zipwith(
-        fun(_Id, {ok, FullDocInfo}) ->
-            FullDocInfo;
-        (Id, not_found) ->
-            #full_doc_info{id=Id}
-        end,
-        Ids, OldDocLookups),
-    % Merge the new docs into the revision trees.
-    {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
-            MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
-
-    % All documents are now ready to write.
-
-    {ok, Db2} = update_local_docs(Db, NonRepDocs),
-
-    % Write out the document summaries (the bodies are stored in the nodes of
-    % the trees, the attachments are already written to disk)
-    {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
-
-    {IndexFullDocInfos, UpdatedDDocIds} =
-            new_index_entries(FlushedFullDocInfos, [], []),
-
-    % and the indexes
-    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
-    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexFullDocInfos, RemoveSeqs),
-
-    Db3 = Db2#db{
-        id_tree = DocInfoByIdBTree2,
-        seq_tree = DocInfoBySeqBTree2,
-        update_seq = NewSeq},
-
-    % Check if we just updated any design documents, and update the validation
-    % funs if we did.
-    Db4 = case length(UpdatedDDocIds) > 0 of
-        true ->
-            ddoc_cache:evict(Db3#db.name, UpdatedDDocIds),
-            refresh_validate_doc_funs(Db3);
-        false ->
-            Db3
-    end,
-
-    {ok, commit_data(Db4, not FullCommit), UpdatedDDocIds}.
-
-update_local_docs(Db, []) ->
-    {ok, Db};
-update_local_docs(#db{local_tree=Btree}=Db, Docs) ->
-    Ids = [Id || {_Client, {#doc{id=Id}, _Ref}} <- Docs],
-    OldDocLookups = couch_btree:lookup(Btree, Ids),
-    BtreeEntries = lists:zipwith(
-        fun({Client, {#doc{id=Id,deleted=Delete,revs={0,PrevRevs},body=Body}, Ref}}, _OldDocLookup) ->
-            case PrevRevs of
-            [RevStr|_] ->
-                PrevRev = list_to_integer(?b2l(RevStr));
-            [] ->
-                PrevRev = 0
-            end,
-            %% disabled conflict checking for local docs -- APK 16 June 2010
-            % OldRev =
-            % case OldDocLookup of
-            %     {ok, {_, {OldRev0, _}}} -> OldRev0;
-            %     not_found -> 0
-            % end,
-            % case OldRev == PrevRev of
-            % true ->
-                case Delete of
-                    false ->
-                        send_result(Client, Ref, {ok,
-                                {0, ?l2b(integer_to_list(PrevRev + 1))}}),
-                        {update, {Id, {PrevRev + 1, Body}}};
-                    true  ->
-                        send_result(Client, Ref,
-                                {ok, {0, <<"0">>}}),
-                        {remove, Id}
-                end%;
-            % false ->
-            %     send_result(Client, Ref, conflict),
-            %     ignore
-            % end
-        end, Docs, OldDocLookups),
-
-    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
-    BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],
-
-    {ok, Btree2} =
-        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
-
-    {ok, Db#db{local_tree = Btree2}}.
-
-db_to_header(Db, Header) ->
-    Header#db_header{
-        update_seq = Db#db.update_seq,
-        seq_tree_state = couch_btree:get_state(Db#db.seq_tree),
-        id_tree_state = couch_btree:get_state(Db#db.id_tree),
-        local_tree_state = couch_btree:get_state(Db#db.local_tree),
-        security_ptr = Db#db.security_ptr,
-        revs_limit = Db#db.revs_limit}.
-
-commit_data(Db) ->
-    commit_data(Db, false).
-
-commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
-    TRef = erlang:send_after(1000, self(), delayed_commit),
-    Db#db{waiting_delayed_commit=TRef};
-commit_data(Db, true) ->
-    Db;
-commit_data(Db, _) ->
-    #db{
-        header = OldHeader,
-        waiting_delayed_commit = Timer
-    } = Db,
-    if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
-    case db_to_header(Db, OldHeader) of
-        OldHeader -> Db#db{waiting_delayed_commit=nil};
-        NewHeader -> sync_header(Db, NewHeader)
-    end.
-
-sync_header(Db, NewHeader) ->
-    #db{
-        fd = Fd,
-        filepath = FilePath,
-        fsync_options = FsyncOptions,
-        waiting_delayed_commit = Timer
-    } = Db,
-
-    if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
-
-    Before = lists:member(before_header, FsyncOptions),
-    After = lists:member(after_header, FsyncOptions),
-
-    if Before -> couch_file:sync(FilePath); true -> ok end,
-    ok = couch_file:write_header(Fd, NewHeader),
-    if After -> couch_file:sync(FilePath); true -> ok end,
-
-    Db#db{
-        header=NewHeader,
-        committed_update_seq=Db#db.update_seq,
-        waiting_delayed_commit=nil
-    }.
-
-copy_doc_attachments(#db{fd = SrcFd} = SrcDb, SrcSp, DestFd) ->
-    {ok, {BodyData, BinInfos0}} = couch_db:read_doc(SrcDb, SrcSp),
-    BinInfos = case BinInfos0 of
-    _ when is_binary(BinInfos0) ->
-        couch_compress:decompress(BinInfos0);
-    _ when is_list(BinInfos0) ->
-        % pre 1.2 file format
-        BinInfos0
-    end,
-    % copy the bin values
-    NewBinInfos = lists:map(
-        fun({Name, Type, BinSp, AttLen, RevPos, Md5}) ->
-            % 010 UPGRADE CODE
-            {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
-                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
-            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity};
-        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) ->
-            {NewBinSp, AttLen, _, Md5, _IdentityMd5} =
-                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
-            Enc = case Enc1 of
-            true ->
-                % 0110 UPGRADE CODE
-                gzip;
-            false ->
-                % 0110 UPGRADE CODE
-                identity;
-            _ ->
-                Enc1
-            end,
-            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc}
-        end, BinInfos),
-    {BodyData, NewBinInfos}.
-
-merge_lookups(Infos, []) ->
-    Infos;
-merge_lookups([], _) ->
-    [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
-    % Assert we've matched our lookups
-    if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
-        erlang:error({mismatched_doc_infos, DI#doc_info.id})
-    end,
-    [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
-    [FDI | merge_lookups(RestInfos, Lookups)].
-
-copy_docs(Db, #db{fd = DestFd} = NewDb, MixedInfos, Retry) ->
-    DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
-    LookupResults = couch_btree:lookup(Db#db.id_tree, DocInfoIds),
-    % COUCHDB-968, make sure we prune duplicates during compaction
-    NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
-        A =< B
-    end, merge_lookups(MixedInfos, LookupResults)),
-
-    NewInfos1 = lists:map(
-        fun(#full_doc_info{rev_tree=RevTree}=Info) ->
-            Info#full_doc_info{rev_tree=couch_key_tree:map(
-                fun(_, _, branch) ->
-                    ?REV_MISSING;
-                (_Rev, #leaf{ptr=Sp}=Leaf, leaf) ->
-                    {_Body, AttsInfo} = Summary = copy_doc_attachments(
-                        Db, Sp, DestFd),
-                    SummaryChunk = make_doc_summary(NewDb, Summary),
-                    {ok, Pos, SummarySize} = couch_file:append_raw_chunk(
-                        DestFd, SummaryChunk),
-                    TotalLeafSize = lists:foldl(
-                        fun({_, _, _, AttLen, _, _, _, _}, S) -> S + AttLen end,
-                        SummarySize, AttsInfo),
-                    Leaf#leaf{ptr=Pos, size=TotalLeafSize}
-                end, RevTree)}
-        end, NewInfos0),
-
-    NewInfos = stem_full_doc_infos(Db, NewInfos1),
-    RemoveSeqs =
-    case Retry of
-    nil ->
-        [];
-    OldDocIdTree ->
-        % Compaction is being rerun to catch up to writes during the
-        % first pass. This means we may have docs that already exist
-        % in the seq_tree in the .data file. Here we look up any old
-        % update_seqs so that they can be removed.
-        Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
-        Existing = couch_btree:lookup(OldDocIdTree, Ids),
-        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
-    end,
-
-    {ok, SeqTree} = couch_btree:add_remove(
-            NewDb#db.seq_tree, NewInfos, RemoveSeqs),
-
-    FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
-        {{Id, Seq}, FDI}
-    end, NewInfos),
-    {ok, IdEms} = couch_emsort:add(NewDb#db.id_tree, FDIKVs),
-    update_compact_task(length(NewInfos)),
-    NewDb#db{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_compact(Db, NewDb0, Retry) ->
-    Compression = couch_compress:get_compression_method(),
-    NewDb = NewDb0#db{compression=Compression},
-    TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
-    BufferSize = list_to_integer(
-        config:get("database_compaction", "doc_buffer_size", "524288")),
-    CheckpointAfter = couch_util:to_integer(
-        config:get("database_compaction", "checkpoint_after",
-            BufferSize * 10)),
-
-    EnumBySeqFun =
-    fun(DocInfo, _Offset,
-            {AccNewDb, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
-        Seq = case DocInfo of
-            #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
-            #doc_info{} -> DocInfo#doc_info.high_seq
-        end,
-
-        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
-        if AccUncopiedSize2 >= BufferSize ->
-            NewDb2 = copy_docs(
-                Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
-            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
-            if AccCopiedSize2 >= CheckpointAfter ->
-                CommNewDb2 = commit_compaction_data(NewDb2#db{update_seq=Seq}),
-                {ok, {CommNewDb2, [], 0, 0}};
-            true ->
-                {ok, {NewDb2#db{update_seq = Seq}, [], 0, AccCopiedSize2}}
-            end;
-        true ->
-            {ok, {AccNewDb, [DocInfo | AccUncopied], AccUncopiedSize2,
-                AccCopiedSize}}
-        end
-    end,
-
-    TaskProps0 = [
-        {type, database_compaction},
-        {database, Db#db.name},
-        {progress, 0},
-        {changes_done, 0},
-        {total_changes, TotalChanges}
-    ],
-    case (Retry =/= nil) and couch_task_status:is_task_added() of
-    true ->
-        couch_task_status:update([
-            {retry, true},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, TotalChanges}
-        ]);
-    false ->
-        couch_task_status:add_task(TaskProps0),
-        couch_task_status:set_update_frequency(500)
-    end,
-
-    {ok, _, {NewDb2, Uncopied, _, _}} =
-        couch_btree:foldl(Db#db.seq_tree, EnumBySeqFun,
-            {NewDb, [], 0, 0},
-            [{start_key, NewDb#db.update_seq + 1}]),
-
-    NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
-
-    % copy misc header values
-    if NewDb3#db.security /= Db#db.security ->
-        {ok, Ptr, _} = couch_file:append_term(
-            NewDb3#db.fd, Db#db.security,
-            [{compression, NewDb3#db.compression}]),
-        NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
-    true ->
-        NewDb4 = NewDb3
-    end,
-
-    commit_compaction_data(NewDb4#db{update_seq=Db#db.update_seq}).
-
-
-start_copy_compact(#db{}=Db) ->
-    #db{name=Name, filepath=Filepath, options=Options} = Db,
-    ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
-
-    {ok, NewDb, DName, DFd, MFd, Retry} =
-        open_compaction_files(Name, Filepath, Options),
-    erlang:monitor(process, MFd),
-
-    % This is a bit worrisome. init_db/5 will monitor the data fd
-    % but it doesn't know about the meta fd. For now I'll maintain
-    % that the data fd is the old normal fd and meta fd is special
-    % and hope everything works out for the best.
-    unlink(DFd),
-
-    NewDb1 = copy_purge_info(Db, NewDb),
-    NewDb2 = copy_compact(Db, NewDb1, Retry),
-    NewDb3 = sort_meta_data(NewDb2),
-    NewDb4 = commit_compaction_data(NewDb3),
-    NewDb5 = copy_meta_data(NewDb4),
-    NewDb6 = sync_header(NewDb5, db_to_header(NewDb5, NewDb5#db.header)),
-    close_db(NewDb6),
-
-    ok = couch_file:close(MFd),
-    gen_server:cast(Db#db.main_pid, {compact_done, DName}).
-
-
-open_compaction_files(DbName, DbFilePath, Options) ->
-    DataFile = DbFilePath ++ ".compact.data",
-    MetaFile = DbFilePath ++ ".compact.meta",
-    {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
-    {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
-    case {DataHdr, MetaHdr} of
-        {#comp_header{}=A, #comp_header{}=A} ->
-            DbHeader = A#comp_header.db_header,
-            Db0 = init_db(DbName, DataFile, DataFd, DbHeader, Options),
-            Db1 = bind_emsort(Db0, MetaFd, A#comp_header.meta_state),
-            {ok, Db1, DataFile, DataFd, MetaFd, Db0#db.id_tree};
-        {#db_header{}, _} ->
-            ok = reset_compaction_file(MetaFd, #db_header{}),
-            Db0 = init_db(DbName, DataFile, DataFd, DataHdr, Options),
-            Db1 = bind_emsort(Db0, MetaFd, nil),
-            {ok, Db1, DataFile, DataFd, MetaFd, Db0#db.id_tree};
-        _ ->
-            Header = #db_header{},
-            ok = reset_compaction_file(DataFd, Header),
-            ok = reset_compaction_file(MetaFd, Header),
-            Db0 = init_db(DbName, DataFile, DataFd, Header, Options),
-            Db1 = bind_emsort(Db0, MetaFd, nil),
-            {ok, Db1, DataFile, DataFd, MetaFd, nil}
-    end.
-
-
-open_compaction_file(FilePath) ->
-    case couch_file:open(FilePath) of
-        {ok, Fd} ->
-            case couch_file:read_header(Fd) of
-                {ok, Header} -> {ok, Fd, Header};
-                no_valid_header -> {ok, Fd, nil}
-            end;
-        {error, enoent} ->
-            {ok, Fd} = couch_file:open(FilePath, [create]),
-            {ok, Fd, nil}
-    end.
-
-
-reset_compaction_file(Fd, Header) ->
-    ok = couch_file:truncate(Fd, 0),
-    ok = couch_file:write_header(Fd, Header).
-
-
-copy_purge_info(OldDb, NewDb) ->
-    OldHdr = OldDb#db.header,
-    NewHdr = NewDb#db.header,
-    if OldHdr#db_header.purge_seq > 0 ->
-        {ok, PurgedIdsRevs} = couch_db:get_last_purged(OldDb),
-        Opts = [{compression, NewDb#db.compression}],
-        {ok, Ptr, _} = couch_file:append_term(NewDb#db.fd, PurgedIdsRevs, Opts),
-        NewDb#db{
-            header=NewHdr#db_header{
-                purge_seq=OldHdr#db_header.purge_seq,
-                purged_docs=Ptr
-            }
-        };
-    true ->
-        NewDb
-    end.
-
-
-commit_compaction_data(#db{}=Db) ->
-    % Compaction needs to write headers to both the data file
-    % and the meta file so if we need to restart we can pick
-    % back up from where we left off.
-    commit_compaction_data(Db, couch_emsort:get_fd(Db#db.id_tree)),
-    commit_compaction_data(Db, Db#db.fd).
-
-
-commit_compaction_data(#db{header=OldHeader}=Db0, Fd) ->
-    % Mostly copied from commit_data/2, but the logic here
-    % commits and fsyncs to a specific fd instead of the
-    % Filepath that commit_data/2 uses.
-    DataState = OldHeader#db_header.id_tree_state,
-    MetaFd = couch_emsort:get_fd(Db0#db.id_tree),
-    MetaState = couch_emsort:get_state(Db0#db.id_tree),
-    Db1 = bind_id_tree(Db0, Db0#db.fd, DataState),
-    Header = db_to_header(Db1, OldHeader),
-    CompHeader = #comp_header{
-        db_header = Header,
-        meta_state = MetaState
-    },
-    ok = couch_file:sync(Fd),
-    ok = couch_file:write_header(Fd, CompHeader),
-    Db2 = Db1#db{
-        waiting_delayed_commit=nil,
-        header=Header,
-        committed_update_seq=Db1#db.update_seq
-    },
-    bind_emsort(Db2, MetaFd, MetaState).
-
-
-bind_emsort(Db, Fd, nil) ->
-    {ok, Ems} = couch_emsort:open(Fd),
-    Db#db{id_tree=Ems};
-bind_emsort(Db, Fd, State) ->
-    {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
-    Db#db{id_tree=Ems}.
-
-
-bind_id_tree(Db, Fd, State) ->
-    {ok, IdBtree} = couch_btree:open(State, Fd, [
-        {split, fun ?MODULE:btree_by_id_split/1},
-        {join, fun ?MODULE:btree_by_id_join/2},
-        {reduce, fun ?MODULE:btree_by_id_reduce/2}
-    ]),
-    Db#db{id_tree=IdBtree}.
-
-
-sort_meta_data(Db0) ->
-    {ok, Ems} = couch_emsort:merge(Db0#db.id_tree),
-    Db0#db{id_tree=Ems}.
-
-
-copy_meta_data(#db{fd=Fd, header=Header}=Db) ->
-    Src = Db#db.id_tree,
-    DstState = Header#db_header.id_tree_state,
-    {ok, IdTree0} = couch_btree:open(DstState, Fd, [
-        {split, fun ?MODULE:btree_by_id_split/1},
-        {join, fun ?MODULE:btree_by_id_join/2},
-        {reduce, fun ?MODULE:btree_by_id_reduce/2}
-    ]),
-    {ok, Iter} = couch_emsort:iter(Src),
-    Acc0 = #merge_st{
-        id_tree=IdTree0,
-        seq_tree=Db#db.seq_tree,
-        rem_seqs=[],
-        infos=[]
-    },
-    Acc = merge_docids(Iter, Acc0),
-    {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
-    {ok, SeqTree} = couch_btree:add_remove(
-        Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
-    ),
-    Db#db{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
-    #merge_st{
-        id_tree=IdTree0,
-        seq_tree=SeqTree0,
-        rem_seqs=RemSeqs
-    } = Acc,
-    {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
-    {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
-    Acc1 = Acc#merge_st{
-        id_tree=IdTree1,
-        seq_tree=SeqTree1,
-        rem_seqs=[],
-        infos=[]
-    },
-    merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
-    case next_info(Iter, Curr, []) of
-        {NextIter, NewCurr, FDI, Seqs} ->
-            Acc1 = Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = NewCurr
-            },
-            merge_docids(NextIter, Acc1);
-        {finished, FDI, Seqs} ->
-            Acc#merge_st{
-                infos = [FDI | Acc#merge_st.infos],
-                rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
-                curr = undefined
-            };
-        empty ->
-            Acc
-    end.
-
-
-next_info(Iter, undefined, []) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, Seq}, FDI}, NextIter} ->
-            next_info(NextIter, {Id, Seq, FDI}, []);
-        finished ->
-            empty
-    end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
-    case couch_emsort:next(Iter) of
-        {ok, {{Id, NSeq}, NFDI}, NextIter} ->
-            next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
-        {ok, {{NId, NSeq}, NFDI}, NextIter} ->
-            {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
-        finished ->
-            {finished, FDI, Seqs}
-    end.
-
-
-update_compact_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-    0 ->
-        0;
-    _ ->
-        (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
-
-make_doc_summary(#db{compression = Comp}, {Body0, Atts0}) ->
-    Body = case couch_compress:is_compressed(Body0, Comp) of
-    true ->
-        Body0;
-    false ->
-        % pre 1.2 database file format
-        couch_compress:compress(Body0, Comp)
-    end,
-    Atts = case couch_compress:is_compressed(Atts0, Comp) of
-    true ->
-        Atts0;
-    false ->
-        couch_compress:compress(Atts0, Comp)
-    end,
-    SummaryBin = ?term_to_bin({Body, Atts}),
-    couch_file:assemble_file_chunk(SummaryBin, couch_util:md5(SummaryBin)).

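One detail of the updater above worth calling out is the delayed-commit path: rather than writing and fsyncing a header on every update, commit_data/2 arms a one-second timer (erlang:send_after(1000, self(), delayed_commit)) and the later delayed_commit message flushes whatever has accumulated, so many updates share a single header write. Below is a minimal sketch of that same debounce idea in a standalone gen_server — the module, state record, and flush_to_disk/0 are illustrative stand-ins under that assumption, not code from the file above:

-module(example_delayed_commit).
-behaviour(gen_server).
-export([start_link/0, write/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-record(state, {dirty = false, timer = nil}).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

write() ->
    gen_server:cast(?MODULE, write).

init([]) ->
    {ok, #state{}}.

%% A write marks the state dirty and arms the timer only when no
%% commit is already pending, mirroring commit_data(Db, true) above.
handle_cast(write, #state{timer = nil} = State) ->
    TRef = erlang:send_after(1000, self(), delayed_commit),
    {noreply, State#state{dirty = true, timer = TRef}};
handle_cast(write, State) ->
    {noreply, State#state{dirty = true}}.

%% The timer message flushes everything accumulated since it was
%% armed; a stale message with nothing dirty is ignored.
handle_info(delayed_commit, #state{dirty = false} = State) ->
    {noreply, State#state{timer = nil}};
handle_info(delayed_commit, State) ->
    ok = flush_to_disk(),
    {noreply, State#state{dirty = false, timer = nil}}.

handle_call(_Msg, _From, State) ->
    {reply, ok, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% Hypothetical placeholder for writing and fsyncing a header.
flush_to_disk() ->
    ok.

The payoff of the pattern is that a burst of writes costs one fsync instead of one per write, at the price of up to a second of data not yet committed to the header — the same trade the updater makes explicit with its full_commit calls.
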
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_doc.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
deleted file mode 100644
index 6f2ca9b..0000000
--- a/src/couch/src/couch_doc.erl
+++ /dev/null
@@ -1,784 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc).
-
--export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
--export([att_foldl/3,range_att_foldl/5,att_foldl_decode/3,get_validate_doc_fun/1]).
--export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
--export([validate_docid/1]).
--export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
--export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
--export([abort_multi_part_stream/1, restart_open_doc_revs/3]).
--export([to_path/1]).
--export([mp_parse_doc/2]).
--export([with_ejson_body/1]).
-
--include_lib("couch/include/couch_db.hrl").
-
--spec to_path(#doc{}) -> path().
-to_path(#doc{revs={Start, RevIds}}=Doc) ->
-    [Branch] = to_branch(Doc, lists:reverse(RevIds)),
-    {Start - length(RevIds) + 1, Branch}.
-
--spec to_branch(#doc{}, [RevId::binary()]) -> [branch()].
-to_branch(Doc, [RevId]) ->
-    [{RevId, Doc, []}];
-to_branch(Doc, [RevId | Rest]) ->
-    [{RevId, ?REV_MISSING, to_branch(Doc, Rest)}].
-
-% helpers used by to_json_obj
-to_json_rev(0, []) ->
-    [];
-to_json_rev(Start, [FirstRevId|_]) ->
-    [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
-
-to_json_body(true, {Body}) ->
-    Body ++ [{<<"_deleted">>, true}];
-to_json_body(false, {Body}) ->
-    Body.
-
-to_json_revisions(Options, Start, RevIds) ->
-    case lists:member(revs, Options) of
-    false -> [];
-    true ->
-        [{<<"_revisions">>, {[{<<"start">>, Start},
-                {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
-    end.
-
-revid_to_str(RevId) when size(RevId) =:= 16 ->
-    ?l2b(couch_util:to_hex(RevId));
-revid_to_str(RevId) ->
-    RevId.
-
-rev_to_str({Pos, RevId}) ->
-    ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
-
-
-revs_to_strs([]) ->
-    [];
-revs_to_strs([{Pos, RevId}| Rest]) ->
-    [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
-
-to_json_meta(Meta) ->
-    lists:map(
-        fun({revs_info, Start, RevsInfo}) ->
-            {JsonRevsInfo, _Pos} = lists:mapfoldl(
-                fun({RevId, Status}, PosAcc) ->
-                    JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
-                        {<<"status">>, ?l2b(atom_to_list(Status))}]},
-                    {JsonObj, PosAcc - 1}
-                end, Start, RevsInfo),
-            {<<"_revs_info">>, JsonRevsInfo};
-        ({local_seq, Seq}) ->
-            {<<"_local_seq">>, Seq};
-        ({conflicts, Conflicts}) ->
-            {<<"_conflicts">>, revs_to_strs(Conflicts)};
-        ({deleted_conflicts, DConflicts}) ->
-            {<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}
-        end, Meta).
-
-to_json_attachments(Attachments, Options) ->
-    to_json_attachments(
-        Attachments,
-        lists:member(attachments, Options),
-        lists:member(follows, Options),
-        lists:member(att_encoding_info, Options)
-    ).
-
-to_json_attachments([], _OutputData, _DataToFollow, _ShowEncInfo) ->
-    [];
-to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) ->
-    AttProps = lists:map(
-        fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) ->
-            {Att#att.name, {[
-                {<<"content_type">>, Att#att.type},
-                {<<"revpos">>, Att#att.revpos}] ++
-                case Att#att.md5 of
-                    <<>> ->
-                        [];
-                    Md5 ->
-                        EncodedMd5 = base64:encode(Md5),
-                        [{<<"digest">>, <<"md5-",EncodedMd5/binary>>}]
-                end ++
-                if not OutputData orelse Att#att.data == stub ->
-                    [{<<"length">>, DiskLen}, {<<"stub">>, true}];
-                true ->
-                    if DataToFollow ->
-                        [{<<"length">>, DiskLen}, {<<"follows">>, true}];
-                    true ->
-                        AttData = case Enc of
-                        gzip ->
-                            zlib:gunzip(att_to_bin(Att));
-                        identity ->
-                            att_to_bin(Att)
-                        end,
-                        [{<<"data">>, base64:encode(AttData)}]
-                    end
-                end ++
-                    case {ShowEncInfo, Enc} of
-                    {false, _} ->
-                        [];
-                    {true, identity} ->
-                        [];
-                    {true, _} ->
-                        [
-                            {<<"encoding">>, couch_util:to_binary(Enc)},
-                            {<<"encoded_length">>, AttLen}
-                        ]
-                    end
-            }}
-        end, Atts),
-    [{<<"_attachments">>, {AttProps}}].
-
-to_json_obj(Doc, Options) ->
-    doc_to_json_obj(with_ejson_body(Doc), Options).
-
-doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
-            meta=Meta}=Doc,Options)->
-    {[{<<"_id">>, Id}]
-        ++ to_json_rev(Start, RevIds)
-        ++ to_json_body(Del, Body)
-        ++ to_json_revisions(Options, Start, RevIds)
-        ++ to_json_meta(Meta)
-        ++ to_json_attachments(Doc#doc.atts, Options)
-    }.
-
-from_json_obj({Props}) ->
-    transfer_fields(Props, #doc{body=[]});
-
-from_json_obj(_Other) ->
-    throw({bad_request, "Document must be a JSON object"}).
-
-parse_revid(RevId) when size(RevId) =:= 32 ->
-    RevInt = erlang:list_to_integer(?b2l(RevId), 16),
-    <<RevInt:128>>;
-parse_revid(RevId) when length(RevId) =:= 32 ->
-    RevInt = erlang:list_to_integer(RevId, 16),
-    <<RevInt:128>>;
-parse_revid(RevId) when is_binary(RevId) ->
-    RevId;
-parse_revid(RevId) when is_list(RevId) ->
-    ?l2b(RevId).
-
-
-parse_rev(Rev) when is_binary(Rev) ->
-    parse_rev(?b2l(Rev));
-parse_rev(Rev) when is_list(Rev) ->
-    SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
-    case SplitRev of
-        {Pos, [$- | RevId]} -> {list_to_integer(Pos), parse_revid(RevId)};
-        _Else -> throw({bad_request, <<"Invalid rev format">>})
-    end;
-parse_rev(_BadRev) ->
-    throw({bad_request, <<"Invalid rev format">>}).
-
-parse_revs([]) ->
-    [];
-parse_revs([Rev | Rest]) ->
-    [parse_rev(Rev) | parse_revs(Rest)].
-
-
-validate_docid(<<"">>) ->
-    throw({bad_request, <<"Document id must not be empty">>});
-validate_docid(Id) when is_binary(Id) ->
-    case couch_util:validate_utf8(Id) of
-        false -> throw({bad_request, <<"Document id must be valid UTF-8">>});
-        true -> ok
-    end,
-    case Id of
-    <<"_design/", _/binary>> -> ok;
-    <<"_local/", _/binary>> -> ok;
-    <<"_", _/binary>> ->
-        throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
-    _Else -> ok
-    end;
-validate_docid(Id) ->
-    ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
-    throw({bad_request, <<"Document id must be a string">>}).
-
-transfer_fields([], #doc{body=Fields}=Doc) ->
-    % convert fields back to json object
-    Doc#doc{body={lists:reverse(Fields)}};
-
-transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
-    validate_docid(Id),
-    transfer_fields(Rest, Doc#doc{id=Id});
-
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
-    {Pos, RevId} = parse_rev(Rev),
-    transfer_fields(Rest,
-            Doc#doc{revs={Pos, [RevId]}});
-
-transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
-    % we already got the rev from the _revisions
-    transfer_fields(Rest,Doc);
-
-transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
-    Atts = lists:map(fun({Name, {BinProps}}) ->
-        Md5 = case couch_util:get_value(<<"digest">>, BinProps) of
-            <<"md5-",EncodedMd5/binary>> ->
-                base64:decode(EncodedMd5);
-            _ ->
-               <<>>
-        end,
-        case couch_util:get_value(<<"stub">>, BinProps) of
-        true ->
-            Type = couch_util:get_value(<<"content_type">>, BinProps),
-            RevPos = couch_util:get_value(<<"revpos">>, BinProps, nil),
-            DiskLen = couch_util:get_value(<<"length">>, BinProps),
-            {Enc, EncLen} = att_encoding_info(BinProps),
-            #att{name=Name, data=stub, type=Type, att_len=EncLen,
-                disk_len=DiskLen, encoding=Enc, revpos=RevPos, md5=Md5};
-        _ ->
-            Type = couch_util:get_value(<<"content_type">>, BinProps,
-                    ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
-            RevPos = couch_util:get_value(<<"revpos">>, BinProps, 0),
-            case couch_util:get_value(<<"follows">>, BinProps) of
-            true ->
-                DiskLen = couch_util:get_value(<<"length">>, BinProps),
-                {Enc, EncLen} = att_encoding_info(BinProps),
-                #att{name=Name, data=follows, type=Type, encoding=Enc,
-                    att_len=EncLen, disk_len=DiskLen, revpos=RevPos, md5=Md5};
-            _ ->
-                Value = couch_util:get_value(<<"data">>, BinProps),
-                Bin = base64:decode(Value),
-                LenBin = size(Bin),
-                #att{name=Name, data=Bin, type=Type, att_len=LenBin,
-                        disk_len=LenBin, revpos=RevPos}
-            end
-        end
-    end, JsonBins),
-    transfer_fields(Rest, Doc#doc{atts=Atts});
-
-transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
-    RevIds = couch_util:get_value(<<"ids">>, Props),
-    Start = couch_util:get_value(<<"start">>, Props),
-    if not is_integer(Start) ->
-        throw({doc_validation, "_revisions.start isn't an integer."});
-    not is_list(RevIds) ->
-        throw({doc_validation, "_revisions.ids isn't a array."});
-    true ->
-        ok
-    end,
-    [throw({doc_validation, "RevId isn't a string"}) ||
-            RevId <- RevIds, not is_binary(RevId)],
-    RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
-    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
-
-transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
-    transfer_fields(Rest, Doc#doc{deleted=B});
-
-% ignored fields
-transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-
-% special fields for replication documents
-transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-
-% unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
-    throw({doc_validation,
-            ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
-
-att_encoding_info(BinProps) ->
-    DiskLen = couch_util:get_value(<<"length">>, BinProps),
-    case couch_util:get_value(<<"encoding">>, BinProps) of
-    undefined ->
-        {identity, DiskLen};
-    Enc ->
-        EncodedLen = couch_util:get_value(<<"encoded_length">>, BinProps, DiskLen),
-        {list_to_existing_atom(?b2l(Enc)), EncodedLen}
-    end.
-
-to_doc_info(FullDocInfo) ->
-    {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
-    DocInfo.
-
-max_seq(Tree, UpdateSeq) ->
-    FoldFun = fun({_Pos, _Key}, Value, _Type, MaxOldSeq) ->
-        case Value of
-            {_Deleted, _DiskPos, OldTreeSeq} ->
-                % Older versions didn't track data sizes.
-                erlang:max(MaxOldSeq, OldTreeSeq);
-            {_Deleted, _DiskPos, OldTreeSeq, _Size} -> % necessary clause?
-                % Older versions didn't store #leaf records.
-                erlang:max(MaxOldSeq, OldTreeSeq);
-            #leaf{seq=OldTreeSeq} ->
-                erlang:max(MaxOldSeq, OldTreeSeq);
-            _ ->
-                MaxOldSeq
-        end
-    end,
-    couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
-
-to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=FDISeq}) ->
-    RevInfosAndPath = [
-        {#rev_info{
-            deleted = Leaf#leaf.deleted,
-            body_sp = Leaf#leaf.ptr,
-            seq = Leaf#leaf.seq,
-            rev = {Pos, RevId}
-        }, Path} || {Leaf, {Pos, [RevId | _]} = Path} <-
-            couch_key_tree:get_all_leafs(Tree)
-    ],
-    SortedRevInfosAndPath = lists:sort(
-            fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
-                {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
-            % sort descending by {not deleted, rev}
-            {not DeletedA, RevA} > {not DeletedB, RevB}
-        end, RevInfosAndPath),
-    [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
-    RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
-    {#doc_info{id=Id, high_seq=max_seq(Tree, FDISeq), revs=RevInfos}, WinPath}.
-
-att_foldl(#att{data=Bin}, Fun, Acc) when is_binary(Bin) ->
-    Fun(Bin, Acc);
-att_foldl(#att{data={Fd,Sp},md5=Md5}, Fun, Acc) ->
-    couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
-att_foldl(#att{data=DataFun,att_len=Len}, Fun, Acc) when is_function(DataFun) ->
-    fold_streamed_data(DataFun, Len, Fun, Acc);
-att_foldl(#att{data={follows, Parser, Ref}}=Att, Fun, Acc) ->
-    ParserRef = erlang:monitor(process, Parser),
-    DataFun = fun() ->
-        Parser ! {get_bytes, Ref, self()},
-        receive
-            {started_open_doc_revs, NewRef} ->
-                couch_doc:restart_open_doc_revs(Parser, Ref, NewRef);
-            {bytes, Ref, Bytes} ->
-                Bytes;
-            {'DOWN', ParserRef, _, _, Reason} ->
-                throw({mp_parser_died, Reason})
-        end
-    end,
-    try
-        att_foldl(Att#att{data=DataFun}, Fun, Acc)
-    after
-        erlang:demonitor(ParserRef, [flush])
-    end.
-
-range_att_foldl(#att{data={Fd,Sp}}, From, To, Fun, Acc) ->
-    couch_stream:range_foldl(Fd, Sp, From, To, Fun, Acc).
-
-att_foldl_decode(#att{data={Fd,Sp},md5=Md5,encoding=Enc}, Fun, Acc) ->
-    couch_stream:foldl_decode(Fd, Sp, Md5, Enc, Fun, Acc);
-att_foldl_decode(#att{data=Fun2, att_len=Len, encoding=identity}, Fun, Acc) ->
-    fold_streamed_data(Fun2, Len, Fun, Acc).
-
-att_to_bin(#att{data=Bin}) when is_binary(Bin) ->
-    Bin;
-att_to_bin(#att{data=Iolist}) when is_list(Iolist) ->
-    iolist_to_binary(Iolist);
-att_to_bin(#att{data={_Fd,_Sp}}=Att) ->
-    iolist_to_binary(
-        lists:reverse(att_foldl(
-                Att,
-                fun(Bin,Acc) -> [Bin|Acc] end,
-                []
-        ))
-    );
-att_to_bin(#att{data=DataFun, att_len=Len}) when is_function(DataFun) ->
-    iolist_to_binary(
-        lists:reverse(fold_streamed_data(
-            DataFun,
-            Len,
-            fun(Data, Acc) -> [Data | Acc] end,
-            []
-        ))
-    ).
-
-get_validate_doc_fun({Props}) ->
-    get_validate_doc_fun(couch_doc:from_json_obj({Props}));
-get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
-    case couch_util:get_value(<<"validate_doc_update">>, Props) of
-    undefined ->
-        nil;
-    _Else ->
-        fun(EditDoc, DiskDoc, Ctx, SecObj) ->
-            couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
-        end
-    end.
-
-
-has_stubs(#doc{atts=Atts}) ->
-    has_stubs(Atts);
-has_stubs([]) ->
-    false;
-has_stubs([#att{data=stub}|_]) ->
-    true;
-has_stubs([_Att|Rest]) ->
-    has_stubs(Rest).
-
-merge_stubs(#doc{id = Id}, nil) ->
-    throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
-merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
-    BinDict = dict:from_list([{Name, Att} || #att{name=Name}=Att <- DiskBins]),
-    MergedBins = lists:map(
-        fun(#att{name=Name, data=stub, revpos=StubRevPos}) ->
-            case dict:find(Name, BinDict) of
-            {ok, #att{revpos=DiskRevPos}=DiskAtt}
-                    when DiskRevPos == StubRevPos orelse StubRevPos == nil ->
-                DiskAtt;
-            _ ->
-                throw({missing_stub,
-                        <<"id:", Id/binary, ", name:", Name/binary>>})
-            end;
-        (Att) ->
-            Att
-        end, MemBins),
-    StubsDoc#doc{atts=MergedBins}.
-
-fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
-    Acc;
-fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
-    Bin = RcvFun(),
-    ResultAcc = Fun(Bin, Acc),
-    fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
-
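-% Computes the content type and total byte length of the multipart
-% response without serializing it, so Content-Length can be sent up front.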
-len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
-    AttsSize = lists:foldl(fun(Att, AccAttsSize) ->
-            #att{
-                data=Data,
-                name=Name,
-                att_len=AttLen,
-                disk_len=DiskLen,
-                type=Type,
-                encoding=Encoding
-            } = Att,
-            case Data of
-            stub ->
-                AccAttsSize;
-            _ ->
-                AccAttsSize +
-                4 + % "\r\n\r\n"
-                case SendEncodedAtts of
-                true ->
-                    % header
-                    length(integer_to_list(AttLen)) +
-                    AttLen;
-                _ ->
-                    % header
-                    length(integer_to_list(DiskLen)) +
-                    DiskLen
-                end +
-                4 + % "\r\n--"
-                size(Boundary) +
-
-                % attachment headers
-                % (the Content-Length value's digits were counted above)
-                size(Name) +
-                size(Type) +
-                length("\r\nContent-Disposition: attachment; filename=\"\"") +
-                length("\r\nContent-Type: ") +
-                length("\r\nContent-Length: ") +
-                case Encoding of
-                identity ->
-                    0;
-                _ ->
-                    length(atom_to_list(Encoding)) +
-                    length("\r\nContent-Encoding: ")
-                end
-            end
-        end, 0, Atts),
-    if AttsSize == 0 ->
-        {<<"application/json">>, iolist_size(JsonBytes)};
-    true ->
-        {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
-            2 + % "--"
-            size(Boundary) +
-            36 + % "\r\ncontent-type: application/json\r\n\r\n"
-            iolist_size(JsonBytes) +
-            4 + % "\r\n--"
-            size(Boundary) +
-            + AttsSize +
-            2 % "--"
-            }
-    end.
-
-doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
-    SendEncodedAtts) ->
-    case lists:any(fun(#att{data=Data})-> Data /= stub end, Atts) of
-    true ->
-        WriteFun([<<"--", Boundary/binary,
-                "\r\nContent-Type: application/json\r\n\r\n">>,
-                JsonBytes, <<"\r\n--", Boundary/binary>>]),
-        atts_to_mp(Atts, Boundary, WriteFun, SendEncodedAtts);
-    false ->
-        WriteFun(JsonBytes)
-    end.
-
-atts_to_mp([], _Boundary, WriteFun, _SendEncAtts) ->
-    WriteFun(<<"--">>);
-atts_to_mp([#att{data=stub} | RestAtts], Boundary, WriteFun,
-        SendEncodedAtts) ->
-    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts);
-atts_to_mp([Att | RestAtts], Boundary, WriteFun,
-        SendEncodedAtts)  ->
-    #att{
-        name=Name,
-        att_len=AttLen,
-        disk_len=DiskLen,
-        type=Type,
-        encoding=Encoding
-    } = Att,
-
-    % write headers
-    LengthBin = case SendEncodedAtts of
-    true -> list_to_binary(integer_to_list(AttLen));
-    false -> list_to_binary(integer_to_list(DiskLen))
-    end,
-    WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
-    WriteFun(<<"\r\nContent-Type: ", Type/binary>>),
-    WriteFun(<<"\r\nContent-Length: ", LengthBin/binary>>),
-    case Encoding of
-    identity ->
-        ok;
-    _ ->
-        EncodingBin = atom_to_binary(Encoding, latin1),
-        WriteFun(<<"\r\nContent-Encoding: ", EncodingBin/binary>>)
-    end,
-
-    % write data
-    WriteFun(<<"\r\n\r\n">>),
-    AttFun = case SendEncodedAtts of
-    false ->
-        fun att_foldl_decode/3;
-    true ->
-        fun att_foldl/3
-    end,
-    AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
-    WriteFun(<<"\r\n--", Boundary/binary>>),
-    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts).
-
-
-doc_from_multi_part_stream(ContentType, DataFun) ->
-    doc_from_multi_part_stream(ContentType, DataFun, make_ref()).
-
-
-doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
-    Parent = self(),
-    NumMpWriters = num_mp_writers(),
-    Parser = spawn_link(fun() ->
-        ParentRef = erlang:monitor(process, Parent),
-        put(mp_parent_ref, ParentRef),
-        put(num_mp_writers, NumMpWriters),
-        {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request(
-            ContentType, DataFun,
-            fun(Next) -> mp_parse_doc(Next, []) end),
-        unlink(Parent)
-        end),
-    ParserRef = erlang:monitor(process, Parser),
-    Parser ! {get_doc_bytes, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        restart_open_doc_revs(Parser, Ref, NewRef);
-    {doc_bytes, Ref, DocBytes} ->
-        Doc = from_json_obj(?JSON_DECODE(DocBytes)),
-        % we'll send the Parser process ID to the remote nodes so they can
-        % retrieve their own copies of the attachment data
-        Atts2 = lists:map(
-            fun(#att{data=follows}=A) ->
-                A#att{data={follows, Parser, Ref}};
-            (A) ->
-                A
-            end, Doc#doc.atts),
-        WaitFun = fun() ->
-            receive {'DOWN', ParserRef, _, _, _} -> ok end,
-            erlang:put(mochiweb_request_recv, true)
-        end,
-        {ok, Doc#doc{atts=Atts2}, WaitFun, Parser}
-    end.
-
-
-mp_parse_doc({headers, H}, []) ->
-    case couch_util:get_value("content-type", H) of
-    {"application/json", _} ->
-        fun (Next) ->
-            mp_parse_doc(Next, [])
-        end
-    end;
-mp_parse_doc({body, Bytes}, AccBytes) ->
-    fun (Next) ->
-        mp_parse_doc(Next, [Bytes | AccBytes])
-    end;
-mp_parse_doc(body_end, AccBytes) ->
-    receive {get_doc_bytes, Ref, From} ->
-        From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
-    end,
-    fun(Next) ->
-        mp_parse_atts(Next, {Ref, [], 0, orddict:new(), []})
-    end.
-
-mp_parse_atts({headers, _}, Acc) ->
-    fun(Next) -> mp_parse_atts(Next, Acc) end;
-mp_parse_atts(body_end, Acc) ->
-    fun(Next) -> mp_parse_atts(Next, Acc) end;
-mp_parse_atts({body, Bytes}, {Ref, Chunks, Offset, Counters, Waiting}) ->
-    case maybe_send_data({Ref, Chunks++[Bytes], Offset, Counters, Waiting}) of
-        abort_parsing ->
-            fun(Next) -> mp_abort_parse_atts(Next, nil) end;
-        NewAcc ->
-            fun(Next) -> mp_parse_atts(Next, NewAcc) end
-    end;
-mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
-    N = num_mp_writers(),
-    M = length(Counters),
-    case (M == N) andalso Chunks == [] of
-    true ->
-        ok;
-    false ->
-        ParentRef = get(mp_parent_ref),
-        receive
-        abort_parsing ->
-            ok;
-        {get_bytes, Ref, From} ->
-            C2 = orddict:update_counter(From, 1, Counters),
-            NewAcc = maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}),
-            mp_parse_atts(eof, NewAcc);
-        {'DOWN', ParentRef, _, _, _} ->
-            exit(mp_reader_coordinator_died)
-        after 3600000 ->
-            ok
-        end
-    end.
-
-mp_abort_parse_atts(eof, _) ->
-    ok;
-mp_abort_parse_atts(_, _) ->
-    fun(Next) -> mp_abort_parse_atts(Next, nil) end.
-
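-% Tracks which chunk each writer has requested (Counters), buffers chunks
-% not yet delivered to every writer, and advances Offset once the head
-% chunk has been sent to all num_mp_writers() writers.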
-maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
-    receive {get_bytes, Ref, From} ->
-        NewCounters = orddict:update_counter(From, 1, Counters),
-        maybe_send_data({Ref, Chunks, Offset, NewCounters, [From|Waiting]})
-    after 0 ->
-        % reply to as many writers as possible
-        NewWaiting = lists:filter(fun(Writer) ->
-            WhichChunk = orddict:fetch(Writer, Counters),
-            ListIndex = WhichChunk - Offset,
-            if ListIndex =< length(Chunks) ->
-                Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
-                false;
-            true ->
-                true
-            end
-        end, Waiting),
-
-        % check if we can drop a chunk from the head of the list
-        case Counters of
-        [] ->
-            SmallestIndex = 0;
-        _ ->
-            SmallestIndex = lists:min(element(2, lists:unzip(Counters)))
-        end,
-        Size = length(Counters),
-        N = num_mp_writers(),
-        if Size == N andalso SmallestIndex == (Offset+1) ->
-            NewChunks = tl(Chunks),
-            NewOffset = Offset+1;
-        true ->
-            NewChunks = Chunks,
-            NewOffset = Offset
-        end,
-
-        % wait for a writer if no one has requested the last buffered chunk
-        LargestIndex = lists:max([0|element(2, lists:unzip(Counters))]),
-        if LargestIndex >= (Offset + length(Chunks)) ->
-            % some writer has requested every buffered chunk; keep moving
-            {Ref, NewChunks, NewOffset, Counters, NewWaiting};
-        true ->
-            ParentRef = get(mp_parent_ref),
-            receive
-            abort_parsing ->
-                abort_parsing;
-            {'DOWN', ParentRef, _, _, _} ->
-                exit(mp_reader_coordinator_died);
-            {get_bytes, Ref, X} ->
-                C2 = orddict:update_counter(X, 1, Counters),
-                maybe_send_data({Ref, NewChunks, NewOffset, C2, [X|NewWaiting]})
-            end
-        end
-    end.
-
-
-num_mp_writers() ->
-    case erlang:get(mp_att_writers) of
-        undefined -> 1;
-        Count -> Count
-    end.
-
-
-abort_multi_part_stream(Parser) ->
-    MonRef = erlang:monitor(process, Parser),
-    Parser ! abort_parsing,
-    receive
-        {'DOWN', MonRef, _, _, _} -> ok
-    after 60000 ->
-        % The one minute timeout is deliberate. We want to keep reading
-        % data so the socket stays open when possible, but we also don't
-        % want an overly long timeout, because users would otherwise have
-        % to wait that long to learn about an error such as a
-        % validate_doc_update failure.
-        throw(multi_part_abort_timeout)
-    end.
-
-
-restart_open_doc_revs(Parser, Ref, NewRef) ->
-    unlink(Parser),
-    exit(Parser, kill),
-    flush_parser_messages(Ref),
-    erlang:error({restart_open_doc_revs, NewRef}).
-
-
-flush_parser_messages(Ref) ->
-    receive
-        {headers, Ref, _} ->
-            flush_parser_messages(Ref);
-        {body_bytes, Ref, _} ->
-            flush_parser_messages(Ref);
-        {body_done, Ref} ->
-            flush_parser_messages(Ref);
-        {done, Ref} ->
-            flush_parser_messages(Ref)
-    after 0 ->
-        ok
-    end.
-
-
-with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
-    Doc#doc{body = couch_compress:decompress(Body)};
-with_ejson_body(#doc{body = {_}} = Doc) ->
-    Doc.
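
A quick sketch of how these parsing helpers behave, assuming couch_doc is
loaded and that parse_revid/1 (whose body is not part of this hunk)
hex-decodes the usual 32-character revision ids; the values are
illustrative:

    %% Reserved prefixes and plain ids pass validation:
    ok = couch_doc:validate_docid(<<"_design/example">>),
    ok = couch_doc:validate_docid(<<"plain-id">>),
    %% Unknown underscore-prefixed ids throw:
    %%   {bad_request, <<"Only reserved document ids may start with underscore.">>}
    %% parse_rev/1 splits "Pos-RevId" at the first dash:
    {1, _RevId} = couch_doc:parse_rev("1-967a00dff5e02add41819138abb3284d").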

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_drv.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_drv.erl b/src/couch/src/couch_drv.erl
deleted file mode 100644
index 7fe119a..0000000
--- a/src/couch/src/couch_drv.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_drv).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-    code_change/3]).
-
--export([start_link/0]).
-
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
-    LibDir = util_driver_dir(),
-    case erl_ddll:load(LibDir, "couch_icu_driver") of
-    ok ->
-        {ok, nil};
-    {error, already_loaded} ->
-        ?LOG_INFO("~p reloading couch_icu_driver", [?MODULE]),
-        ok = erl_ddll:reload(LibDir, "couch_icu_driver"),
-        {ok, nil};
-    {error, Error} ->
-        {stop, erl_ddll:format_error(Error)}
-    end.
-
-handle_call(_Request, _From, State) ->
-    {reply, ok, State}.
-
-handle_cast(_Request, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-% private API
-util_driver_dir() ->
-    case config:get("couchdb", "util_driver_dir", null) of
-    null ->
-        couch_util:priv_dir();
-    LibDir0 ->
-        LibDir0
-    end.
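
couch_drv is a one-shot gen_server: init/1 loads (or reloads) the
couch_icu_driver linked-in driver and the process then sits idle. A
minimal sketch of starting it, assuming the driver binary is present in
couch_util:priv_dir() or in the configured util_driver_dir:

    %% Loading the driver is a side effect of init/1; start_link/0 fails
    %% with the erl_ddll error if the driver cannot be loaded.
    {ok, Pid} = couch_drv:start_link(),
    true = is_process_alive(Pid).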

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ed98610c/src/couch/src/couch_ejson_compare.erl
----------------------------------------------------------------------
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
deleted file mode 100644
index 7b000fc..0000000
--- a/src/couch/src/couch_ejson_compare.erl
+++ /dev/null
@@ -1,113 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_compare).
-
--export([less/2, less_json_ids/2, less_json/2]).
-
--on_load(init/0).
-
-
-init() ->
-    LibDir = case config:get("couchdb", "util_driver_dir") of
-    undefined ->
-        filename:join(couch_util:priv_dir(), "lib");
-    LibDir0 ->
-        LibDir0
-    end,
-    NumScheds = erlang:system_info(schedulers),
-    (catch erlang:load_nif(filename:join([LibDir, ?MODULE]), NumScheds)),
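-    % The on_load return convention differs by release: R13B03 expected
-    % true, while later releases expect ok.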
-    case erlang:system_info(otp_release) of
-    "R13B03" -> true;
-    _ -> ok
-    end.
-
-
-less(A, B) ->
-    try
-        less_nif(A, B)
-    catch
-    error:badarg ->
-        % The EJSON structure may be too deep for the NIF; fall back to
-        % the Erlang implementation.
-        less_erl(A, B)
-    end.
-
-less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
-    case less(JsonA, JsonB) of
-    0 ->
-        IdA < IdB;
-    Result ->
-        Result < 0
-    end.
-
-less_json(A,B) ->
-    less(A, B) < 0.
-
-
-less_nif(A, B) ->
-    less_erl(A, B).
-
-
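-% Returns a number rather than a boolean: negative when A collates before
-% B, zero when they collate equal, positive when B collates first.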
-less_erl(A,A)                                 -> 0;
-
-less_erl(A,B) when is_atom(A), is_atom(B)     -> atom_sort(A) - atom_sort(B);
-less_erl(A,_) when is_atom(A)                 -> -1;
-less_erl(_,B) when is_atom(B)                 -> 1;
-
-less_erl(A,B) when is_number(A), is_number(B) -> A - B;
-less_erl(A,_) when is_number(A)               -> -1;
-less_erl(_,B) when is_number(B)               -> 1;
-
-less_erl(A,B) when is_binary(A), is_binary(B) -> couch_util:collate(A,B);
-less_erl(A,_) when is_binary(A)               -> -1;
-less_erl(_,B) when is_binary(B)               -> 1;
-
-less_erl(A,B) when is_list(A), is_list(B)     -> less_list(A,B);
-less_erl(A,_) when is_list(A)                 -> -1;
-less_erl(_,B) when is_list(B)                 -> 1;
-
-less_erl({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
-less_erl({A},_) when is_list(A)               -> -1;
-less_erl(_,{B}) when is_list(B)               -> 1.
-
-atom_sort(null) -> 1;
-atom_sort(false) -> 2;
-atom_sort(true) -> 3.
-
-less_props([], [_|_]) ->
-    -1;
-less_props(_, []) ->
-    1;
-less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
-    case couch_util:collate(AKey, BKey) of
-    0 ->
-        case less_erl(AValue, BValue) of
-        0 ->
-            less_props(RestA, RestB);
-        Result ->
-            Result
-        end;
-    Result ->
-        Result
-    end.
-
-less_list([], [_|_]) ->
-    -1;
-less_list(_, []) ->
-    1;
-less_list([A|RestA], [B|RestB]) ->
-    case less_erl(A,B) of
-    0 ->
-        less_list(RestA, RestB);
-    Result ->
-        Result
-    end.
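
The clause order in less_erl/2 encodes CouchDB's view collation:
null < false < true < numbers < binaries (strings) < lists (arrays) <
objects. A small sketch, noting that when the NIF is unavailable
less_nif/2 simply delegates to less_erl/2:

    Values = [{[{<<"a">>, 1}]}, [1, 2], <<"b">>, 10, true, false, null],
    Sorted = lists:sort(fun couch_ejson_compare:less_json/2, Values).
    %% Sorted =:= [null, false, true, 10, <<"b">>, [1, 2], {[{<<"a">>, 1}]}]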


[39/49] couchdb commit: updated refs/heads/1843-feature-bigcouch to 3069c01

Posted by da...@apache.org.
Remove src/oauth


Project: http://git-wip-us.apache.org/repos/asf/couchdb/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb/commit/21118e28
Tree: http://git-wip-us.apache.org/repos/asf/couchdb/tree/21118e28
Diff: http://git-wip-us.apache.org/repos/asf/couchdb/diff/21118e28

Branch: refs/heads/1843-feature-bigcouch
Commit: 21118e28c030e24045aae7dd972a81e57ee55bd1
Parents: acf8eaf
Author: Paul J. Davis <pa...@gmail.com>
Authored: Tue Feb 4 17:42:30 2014 -0600
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 4 17:42:30 2014 -0600

----------------------------------------------------------------------
 src/oauth/src/oauth.app.src |  20 ---
 src/oauth/src/oauth.erl     | 315 ---------------------------------------
 2 files changed, 335 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb/blob/21118e28/src/oauth/src/oauth.app.src
----------------------------------------------------------------------
diff --git a/src/oauth/src/oauth.app.src b/src/oauth/src/oauth.app.src
deleted file mode 100644
index a8ec17c..0000000
--- a/src/oauth/src/oauth.app.src
+++ /dev/null
@@ -1,20 +0,0 @@
-{application, oauth, [
-  {description, "Erlang OAuth implementation"},
-  {vsn, "7d85d3ef"},
-  {modules, [
-    oauth,
-    oauth_hmac_sha1,
-    oauth_http,
-    oauth_plaintext,
-    oauth_rsa_sha1,
-    oauth_unix,
-    oauth_uri
-  ]},
-  {registered, []},
-  {applications, [
-    kernel,
-    stdlib,
-    crypto,
-    inets
-  ]}
-]}.
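
The resource file only declares metadata; one way to confirm the declared
dependencies at runtime (a sketch, assuming the oauth application is on
the code path):

    ok = application:load(oauth),
    {ok, [kernel, stdlib, crypto, inets]} =
        application:get_key(oauth, applications).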

http://git-wip-us.apache.org/repos/asf/couchdb/blob/21118e28/src/oauth/src/oauth.erl
----------------------------------------------------------------------
diff --git a/src/oauth/src/oauth.erl b/src/oauth/src/oauth.erl
deleted file mode 100644
index e75d5fd..0000000
--- a/src/oauth/src/oauth.erl
+++ /dev/null
@@ -1,315 +0,0 @@
--module(oauth).
-
--export([get/3, get/5, get/6, post/3, post/5, post/6, put/6, put/7, uri/2, header/1,
-  sign/6, params_decode/1, token/1, token_secret/1, verify/6]).
-
--export([plaintext_signature/2, hmac_sha1_signature/5,
-  hmac_sha1_signature/3, rsa_sha1_signature/4, rsa_sha1_signature/2,
-  signature_base_string/3, params_encode/1]).
-
--export([plaintext_verify/3, hmac_sha1_verify/6, hmac_sha1_verify/4,
-  rsa_sha1_verify/5, rsa_sha1_verify/3]).
-
--export([header_params_encode/1, header_params_decode/1,
-  uri_params_encode/1, uri_params_decode/1]).
-
--include_lib("public_key/include/public_key.hrl").
-
-get(URL, ExtraParams, Consumer) ->
-  get(URL, ExtraParams, Consumer, "", "").
-
-get(URL, ExtraParams, Consumer, Token, TokenSecret) ->
-  get(URL, ExtraParams, Consumer, Token, TokenSecret, []).
-
-get(URL, ExtraParams, Consumer, Token, TokenSecret, HttpcOptions) ->
-  SignedParams = sign("GET", URL, ExtraParams, Consumer, Token, TokenSecret),
-  http_request(get, {uri(URL, SignedParams), []}, HttpcOptions).
-
-post(URL, ExtraParams, Consumer) ->
-  post(URL, ExtraParams, Consumer, "", "").
-
-post(URL, ExtraParams, Consumer, Token, TokenSecret) ->
-  post(URL, ExtraParams, Consumer, Token, TokenSecret, []).
-
-post(URL, ExtraParams, Consumer, Token, TokenSecret, HttpcOptions) ->
-  SignedParams = sign("POST", URL, ExtraParams, Consumer, Token, TokenSecret),
-  http_request(post, {URL, [], "application/x-www-form-urlencoded", uri_params_encode(SignedParams)}, HttpcOptions).
-
-put(URL, ExtraParams, {ContentType, Body}, Consumer, Token, TokenSecret) ->
-  put(URL, ExtraParams, {ContentType, Body}, Consumer, Token, TokenSecret, []).
-
-put(URL, ExtraParams, {ContentType, Body}, Consumer, Token, TokenSecret, HttpcOptions) ->
-  SignedParams = sign("PUT", URL, ExtraParams, Consumer, Token, TokenSecret),
-  http_request(put, {uri(URL, SignedParams), [], ContentType, Body}, HttpcOptions).
-
-uri(Base, []) ->
-  Base;
-uri(Base, Params) ->
-  lists:concat([Base, "?", uri_params_encode(Params)]).
-
-header(Params) ->
-  {"Authorization", "OAuth " ++ header_params_encode(Params)}.
-
-token(Params) ->
-  proplists:get_value("oauth_token", Params).
-
-token_secret(Params) ->
-  proplists:get_value("oauth_token_secret", Params).
-
-consumer_key(_Consumer={Key, _, _}) ->
-  Key.
-
-consumer_secret(_Consumer={_, Secret, _}) ->
-  Secret.
-
-signature_method(_Consumer={_, _, Method}) ->
-  Method.
-
-sign(HttpMethod, URL, Params, Consumer, Token, TokenSecret) ->
-  SignatureParams = signature_params(Consumer, Params, Token),
-  Signature = signature(HttpMethod, URL, SignatureParams, Consumer, TokenSecret),
-  [{"oauth_signature", Signature} | SignatureParams].
-
-signature_params(Consumer, Params, "") ->
-  signature_params(Consumer, Params);
-signature_params(Consumer, Params, Token) ->
-  signature_params(Consumer, [{"oauth_token", Token} | Params]).
-
-signature_params(Consumer, Params) ->
-  Timestamp = unix_timestamp(),
-  Nonce = base64:encode_to_string(crypto:rand_bytes(32)), % cf. ruby-oauth
-  [ {"oauth_version", "1.0"}
-  , {"oauth_nonce", Nonce}
-  , {"oauth_timestamp", integer_to_list(Timestamp)}
-  , {"oauth_signature_method", signature_method_string(Consumer)}
-  , {"oauth_consumer_key", consumer_key(Consumer)}
-  | Params
-  ].
-
-verify(Signature, HttpMethod, URL, Params, Consumer, TokenSecret) ->
-  case signature_method(Consumer) of
-    plaintext ->
-      plaintext_verify(Signature, Consumer, TokenSecret);
-    hmac_sha1 ->
-      hmac_sha1_verify(Signature, HttpMethod, URL, Params, Consumer, TokenSecret);
-    rsa_sha1 ->
-      rsa_sha1_verify(Signature, HttpMethod, URL, Params, Consumer)
-  end.
-
-signature(HttpMethod, URL, Params, Consumer, TokenSecret) ->
-  case signature_method(Consumer) of
-    plaintext ->
-      plaintext_signature(Consumer, TokenSecret);
-    hmac_sha1 ->
-      hmac_sha1_signature(HttpMethod, URL, Params, Consumer, TokenSecret);
-    rsa_sha1 ->
-      rsa_sha1_signature(HttpMethod, URL, Params, Consumer)
-  end.
-
-signature_method_string(Consumer) ->
-  case signature_method(Consumer) of
-    plaintext ->
-      "PLAINTEXT";
-    hmac_sha1 ->
-      "HMAC-SHA1";
-    rsa_sha1 ->
-      "RSA-SHA1"
-  end.
-
-plaintext_signature(Consumer, TokenSecret) ->
-  uri_join([consumer_secret(Consumer), TokenSecret]).
-
-plaintext_verify(Signature, Consumer, TokenSecret) ->
-  verify_in_constant_time(Signature, plaintext_signature(Consumer, TokenSecret)).
-
-hmac_sha1_signature(HttpMethod, URL, Params, Consumer, TokenSecret) ->
-  BaseString = signature_base_string(HttpMethod, URL, Params),
-  hmac_sha1_signature(BaseString, Consumer, TokenSecret).
-
-hmac_sha1_signature(BaseString, Consumer, TokenSecret) ->
-  Key = uri_join([consumer_secret(Consumer), TokenSecret]),
-  base64:encode_to_string(crypto:sha_mac(Key, BaseString)).
-
-hmac_sha1_verify(Signature, HttpMethod, URL, Params, Consumer, TokenSecret) ->
-  verify_in_constant_time(Signature, hmac_sha1_signature(HttpMethod, URL, Params, Consumer, TokenSecret)).
-
-hmac_sha1_verify(Signature, BaseString, Consumer, TokenSecret) ->
-  verify_in_constant_time(Signature, hmac_sha1_signature(BaseString, Consumer, TokenSecret)).
-
-rsa_sha1_signature(HttpMethod, URL, Params, Consumer) ->
-  BaseString = signature_base_string(HttpMethod, URL, Params),
-  rsa_sha1_signature(BaseString, Consumer).
-
-rsa_sha1_signature(BaseString, Consumer) ->
-  Key = read_private_key(consumer_secret(Consumer)),
-  base64:encode_to_string(public_key:sign(list_to_binary(BaseString), sha, Key)).
-
-rsa_sha1_verify(Signature, HttpMethod, URL, Params, Consumer) ->
-  BaseString = signature_base_string(HttpMethod, URL, Params),
-  rsa_sha1_verify(Signature, BaseString, Consumer).
-
-rsa_sha1_verify(Signature, BaseString, Consumer) ->
-  Key = read_cert_key(consumer_secret(Consumer)),
-  public_key:verify(to_binary(BaseString), sha, base64:decode(Signature), Key).
-
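-% Compare signatures without short-circuiting on the first mismatching
-% byte, so comparison time does not reveal how much of a signature matched.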
-verify_in_constant_time(<<X/binary>>, <<Y/binary>>) ->
-  verify_in_constant_time(binary_to_list(X), binary_to_list(Y));
-verify_in_constant_time(X, Y) when is_list(X) and is_list(Y) ->
-  case length(X) == length(Y) of
-    true ->
-      verify_in_constant_time(X, Y, 0);
-    false ->
-      false
-  end.
-
-verify_in_constant_time([X | RestX], [Y | RestY], Result) ->
-  verify_in_constant_time(RestX, RestY, (X bxor Y) bor Result);
-verify_in_constant_time([], [], Result) ->
-  Result == 0.
-
-signature_base_string(HttpMethod, URL, Params) ->
-  uri_join([HttpMethod, uri_normalize(URL), params_encode(Params)]).
-
-params_encode(Params) ->
-  % cf. http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
-  Encoded = [{uri_encode(K), uri_encode(V)} || {K, V} <- Params],
-  Sorted = lists:sort(Encoded),
-  Concatenated = [lists:concat([K, "=", V]) || {K, V} <- Sorted],
-  string:join(Concatenated, "&").
-
-params_decode(_Response={{_, _, _}, _, Body}) ->
-  uri_params_decode(Body).
-
-http_request(Method, Request, Options) ->
-  httpc:request(Method, Request, [{autoredirect, false}], Options).
-
-unix_timestamp() ->
-  unix_timestamp(calendar:universal_time()).
-
-unix_timestamp(DateTime) ->
-  unix_seconds(DateTime) - unix_epoch().
-
-unix_epoch() ->
-  unix_seconds({{1970,1,1},{00,00,00}}).
-
-unix_seconds(DateTime) ->
-  calendar:datetime_to_gregorian_seconds(DateTime).
-
-read_cert_key(Path) when is_list(Path) ->
-  {ok, Contents} = file:read_file(Path),
-  [{'Certificate', DerCert, not_encrypted}] = public_key:pem_decode(Contents),
-  read_cert_key(public_key:pkix_decode_cert(DerCert, otp));
-read_cert_key(#'OTPCertificate'{tbsCertificate=Cert}) ->
-  read_cert_key(Cert);
-read_cert_key(#'OTPTBSCertificate'{subjectPublicKeyInfo=Info}) ->
-  read_cert_key(Info);
-read_cert_key(#'OTPSubjectPublicKeyInfo'{subjectPublicKey=Key}) ->
-  Key.
-
-read_private_key(Path) ->
-  {ok, Contents} = file:read_file(Path),
-  [Info] = public_key:pem_decode(Contents),
-  public_key:pem_entry_decode(Info).
-
-to_binary(Term) when is_list(Term) ->
-  list_to_binary(Term);
-to_binary(Term) when is_binary(Term) ->
-  Term.
-
-header_params_encode(Params) ->
-  intercalate(", ", [lists:concat([uri_encode(K), "=\"", uri_encode(V), "\""]) || {K, V} <- Params]).
-
-header_params_decode(String) ->
-  [header_param_decode(Param) || Param <- re:split(String, ",\\s*", [{return, list}]), Param =/= ""].
-
-header_param_decode(Param) ->
-  [Key, QuotedValue] = string:tokens(Param, "="),
-  Value = string:substr(QuotedValue, 2, length(QuotedValue) - 2),
-  {uri_decode(Key), uri_decode(Value)}.
-
-uri_normalize(URI) ->
-  case http_uri:parse(URI) of
-    {ok, {Scheme, UserInfo, Host, Port, Path, _Query}} -> % R15B
-      uri_normalize(Scheme, UserInfo, string:to_lower(Host), Port, [Path]);
-    {Scheme, UserInfo, Host, Port, Path, _Query} ->
-      uri_normalize(Scheme, UserInfo, string:to_lower(Host), Port, [Path]);
-    Else ->
-      Else
-  end.
-
-uri_normalize(http, UserInfo, Host, 80, Acc) ->
-  uri_normalize(http, UserInfo, [Host|Acc]);
-uri_normalize(https, UserInfo, Host, 443, Acc) ->
-  uri_normalize(https, UserInfo, [Host|Acc]);
-uri_normalize(Scheme, UserInfo, Host, Port, Acc) ->
-  uri_normalize(Scheme, UserInfo, [Host, ":", Port|Acc]).
-
-uri_normalize(Scheme, [], Acc) ->
-  lists:concat([Scheme, "://" | Acc]);
-uri_normalize(Scheme, UserInfo, Acc) ->
-  lists:concat([Scheme, "://", UserInfo, "@" | Acc]).
-
-uri_params_encode(Params) ->
-  intercalate("&", [uri_join([K, V], "=") || {K, V} <- Params]).
-
-uri_params_decode(String) ->
-  [uri_param_decode(Substring) || Substring <- string:tokens(String, "&")].
-
-uri_param_decode(String) ->
-  [Key, Value] = string:tokens(String, "="),
-  {uri_decode(Key), uri_decode(Value)}.
-
-uri_join(Values) ->
-  uri_join(Values, "&").
-
-uri_join(Values, Separator) ->
-  string:join([uri_encode(Value) || Value <- Values], Separator).
-
-intercalate(Sep, Xs) ->
-  lists:concat(intersperse(Sep, Xs)).
-
-intersperse(_, []) ->
-  [];
-intersperse(_, [X]) ->
-  [X];
-intersperse(Sep, [X | Xs]) ->
-  [X, Sep | intersperse(Sep, Xs)].
-
-uri_encode(Term) when is_integer(Term) ->
-  integer_to_list(Term);
-uri_encode(Term) when is_atom(Term) ->
-  uri_encode(atom_to_list(Term));
-uri_encode(Term) when is_list(Term) ->
-  uri_encode(lists:reverse(Term, []), []).
-
--define(is_alphanum(C), C >= $A, C =< $Z; C >= $a, C =< $z; C >= $0, C =< $9).
-
-uri_encode([X | T], Acc) when ?is_alphanum(X); X =:= $-; X =:= $_; X =:= $.; X =:= $~ ->
-  uri_encode(T, [X | Acc]);
-uri_encode([X | T], Acc) ->
-  NewAcc = [$%, dec2hex(X bsr 4), dec2hex(X band 16#0f) | Acc],
-  uri_encode(T, NewAcc);
-uri_encode([], Acc) ->
-  Acc.
-
-uri_decode(Str) when is_list(Str) ->
-  uri_decode(Str, []).
-
-uri_decode([$%, A, B | T], Acc) ->
-  uri_decode(T, [(hex2dec(A) bsl 4) + hex2dec(B) | Acc]);
-uri_decode([X | T], Acc) ->
-  uri_decode(T, [X | Acc]);
-uri_decode([], Acc) ->
-  lists:reverse(Acc, []).
-
--compile({inline, [{dec2hex, 1}, {hex2dec, 1}]}).
-
-dec2hex(N) when N >= 10 andalso N =< 15 ->
-  N + $A - 10;
-dec2hex(N) when N >= 0 andalso N =< 9 ->
-  N + $0.
-
-hex2dec(C) when C >= $A andalso C =< $F ->
-  C - $A + 10;
-hex2dec(C) when C >= $0 andalso C =< $9 ->
-  C - $0.
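
Tying the module together: sign/6 assembles the oauth_* protocol
parameters, signs the base string for the consumer's configured method,
and prepends the resulting oauth_signature. A minimal signing sketch with
an HMAC-SHA1 consumer (key and secret are illustrative):

    Consumer = {"consumer_key", "consumer_secret", hmac_sha1},
    URL = "http://example.org/resource",
    SignedParams = oauth:sign("GET", URL, [{"q", "couchdb"}], Consumer, "", ""),
    %% uri/2 folds the signed parameters back into a request URL:
    RequestURL = oauth:uri(URL, SignedParams).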