Posted to commits@couchdb.apache.org by da...@apache.org on 2014/02/11 09:07:03 UTC

[01/41] initial move to rebar compilation

Updated Branches:
  refs/heads/import-rcouch e2dbc7928 -> f07bbfcc0 (forced update)


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/json_stream_parse.erl
----------------------------------------------------------------------
diff --git a/src/json_stream_parse.erl b/src/json_stream_parse.erl
new file mode 100644
index 0000000..b63e011
--- /dev/null
+++ b/src/json_stream_parse.erl
@@ -0,0 +1,432 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(json_stream_parse).
+
+
+-export([events/2, to_ejson/1, collect_object/2]).
+
+-define(IS_WS(X), (X == $\  orelse X == $\t orelse X == $\n orelse X == $\r)).
+-define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
+-define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
+
+
+
+% Parses the JSON input into events.
+%
+% The DataFun param is a function that produces the data for parsing. When
+% called, it must yield a tuple or the atom done. The first element of the
+% tuple is the data itself, and the second element is a function to be called
+% next to get the next chunk of data in the stream.
+%
+% The EventFun is called every time a JSON element is parsed. It must return
+% a new function to be called for the next event.
+%
+% Events happen each time a new element in the JSON string is parsed.
+% For simple value types, the data itself is returned:
+% Strings
+% Integers
+% Floats
+% true
+% false
+% null
+%
+% For arrays, the start of the array is signaled by the event array_start
+% atom. The end is signaled by array_end. The events before the end are the
+% values, or nested values.
+%
+% For objects, the start of the object is signaled by the event object_start
+% atom. The end is signaled by object_end. Each key is signaled by
+% {key, KeyString}, and the following event is the value, or start of the
+% value (array_start, object_start).
+%
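+% For example (derived from the rules above), streaming the binary
+% <<"{\"a\": [1, true]}">> through events/2 passes this event sequence to
+% the chain of event functions:
+%
+%   object_start, {key, <<"a">>}, array_start, 1, true, array_end, object_end
+%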
+events(Data,EventFun) when is_list(Data)->
+    events(list_to_binary(Data),EventFun);
+events(Data,EventFun) when is_binary(Data)->
+    events(fun() -> {Data, fun() -> done end} end,EventFun);
+events(DataFun,EventFun) ->
+    parse_one(DataFun, EventFun, <<>>).
+
+% Converts the JSON directly to the Erlang representation of JSON (EJSON).
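+%
+% For example, to_ejson(<<"{\"a\": [1, true]}">>) returns
+% {[{<<"a">>, [1, true]}]}.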
+to_ejson(DF) ->
+    {_DF2, EF, _Rest} = events(DF, fun(Ev) -> collect_events(Ev, []) end),
+    [[EJson]] = make_ejson(EF(get_results), [[]]),
+    EJson.
+
+
+% This function is used to return complete objects while parsing streams.
+%
+% Return this function from inside an event function right after getting an
+% object_start event. It then collects the remaining events for that object
+% and converts them to the Erlang representation of JSON.
+%
+% It then calls your ReturnControl function with the Erlang object. Your
+% ReturnControl function should then yield another event function.
+%
+% This example stream parses an array of objects, calling
+% fun do_something_with_the_object/1 for each object.
+%
+%    ev_array(array_start) ->
+%        fun(Ev) -> ev_object_loop(Ev) end.
+%
+%    ev_object_loop(object_start) ->
+%        fun(Ev) ->
+%            json_stream_parse:collect_object(Ev,
+%                fun(Obj) ->
+%                    do_something_with_the_object(Obj),
+%                    fun(Ev2) -> ev_object_loop(Ev2) end
+%                end)
+%        end;
+%    ev_object_loop(array_end) ->
+%        ok.
+%
+%    % invoke the parse
+%    main() ->
+%        ...
+%        events(Data, fun(Ev) -> ev_array(Ev) end).
+
+collect_object(Ev, ReturnControl) ->
+    collect_object(Ev, 0, ReturnControl, [object_start]).
+
+
+
+% Internal functions
+
+parse_one(DF,EF,Acc) ->
+    case toke(DF, Acc) of
+    none ->
+        none;
+    {Token, DF2, Rest} ->
+        case Token of
+        "{" ->
+            EF2 = EF(object_start),
+            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+            {DF3, EF3(object_end), Rest2};
+        "[" ->
+            EF2 = EF(array_start),
+            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+            {DF3, EF3(array_end), Rest2};
+        Int when is_integer(Int)->
+            {DF2, EF(Int), Rest};
+        Float when is_float(Float)->
+            {DF2, EF(Float), Rest};
+        Atom when is_atom(Atom)->
+            {DF2, EF(Atom), Rest};
+        String when is_binary(String)->
+            {DF2, EF(String), Rest};
+        _OtherToken ->
+            err(unexpected_token)
+        end
+    end.
+
+must_parse_one(DF,EF,Acc,Error)->
+    case parse_one(DF, EF, Acc) of
+    none ->
+        err(Error);
+    Else ->
+        Else
+    end.
+
+must_toke(DF, Data, Error) ->
+    case toke(DF, Data) of
+    none ->
+        err(Error);
+    Result ->
+        Result
+    end.
+
+toke(DF, <<>>) ->
+    case DF() of
+    done ->
+        none;
+    {Data, DF2} ->
+        toke(DF2, Data)
+    end;
+toke(DF, <<C,Rest/binary>>) when ?IS_WS(C)->
+    toke(DF, Rest);
+toke(DF, <<${,Rest/binary>>) ->
+    {"{", DF, Rest};
+toke(DF, <<$},Rest/binary>>) ->
+    {"}", DF, Rest};
+toke(DF, <<$[,Rest/binary>>) ->
+    {"[", DF, Rest};
+toke(DF, <<$],Rest/binary>>) ->
+    {"]", DF, Rest};
+toke(DF, <<$",Rest/binary>>) ->
+    toke_string(DF,Rest,[]);
+toke(DF, <<$,,Rest/binary>>) ->
+    {",", DF, Rest};
+toke(DF, <<$:,Rest/binary>>) ->
+    {":", DF, Rest};
+toke(DF, <<$-,Rest/binary>>) ->
+    {<<C,_/binary>> = Data, DF2} = must_df(DF,1,Rest,expected_number),
+    case ?IS_DIGIT(C) of
+    true ->
+        toke_number_leading(DF2, Data, "-");
+    false ->
+        err(expected_number)
+    end;
+toke(DF, <<C,_/binary>> = Data) when ?IS_DIGIT(C) ->
+    toke_number_leading(DF, Data, []);
+toke(DF, <<$t,Rest/binary>>) ->
+    {Data, DF2} = must_match(<<"rue">>, DF, Rest),
+    {true, DF2, Data};
+toke(DF, <<$f,Rest/binary>>) ->
+    {Data, DF2} = must_match(<<"alse">>, DF, Rest),
+    {false, DF2, Data};
+toke(DF, <<$n,Rest/binary>>) ->
+    {Data, DF2} = must_match(<<"ull">>, DF, Rest),
+    {null, DF2, Data};
+toke(_, _) ->
+    err(bad_token).
+
+
+must_match(Pattern, DF, Data) ->
+    Size = size(Pattern),
+    case must_df(DF, Size, Data, bad_token) of
+    {<<Pattern:Size/binary,Data2/binary>>, DF2} ->
+        {Data2, DF2};
+    {_, _} ->
+        err(bad_token)
+    end.
+
+must_df(DF,Error)->
+    case DF() of
+    done ->
+        err(Error);
+    {Data, DF2} ->
+        {Data, DF2}
+    end.
+
+
+must_df(DF,NeedLen,Acc,Error)->
+    if size(Acc) >= NeedLen ->
+        {Acc, DF};
+    true ->
+        case DF() of
+        done ->
+            err(Error);
+        {Data, DF2} ->
+            must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
+        end
+    end.
+
+
+parse_object(DF,EF,Acc) ->
+    case must_toke(DF, Acc, unterminated_object) of
+    {String, DF2, Rest} when is_binary(String)->
+        EF2 = EF({key,String}),
+        case must_toke(DF2,Rest,unterminated_object) of
+        {":", DF3, Rest2} ->
+            {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
+            case must_toke(DF4,Rest3, unterminated_object) of
+            {",", DF5, Rest4} ->
+                parse_object(DF5, EF3, Rest4);
+            {"}", DF5, Rest4} ->
+                {DF5, EF3, Rest4};
+            {_, _, _} ->
+                err(unexpected_token)
+            end;
+        _Else ->
+            err(expected_colon)
+        end;
+    {"}", DF2, Rest} ->
+        {DF2, EF, Rest};
+    {_, _, _} ->
+        err(unexpected_token)
+    end.
+
+parse_array0(DF,EF,Acc) ->
+    case toke(DF, Acc) of
+    none ->
+        err(unterminated_array);
+    {",", DF2, Rest} ->
+        parse_array(DF2,EF,Rest);
+    {"]", DF2, Rest} ->
+        {DF2,EF,Rest};
+    _ ->
+        err(unexpected_token)
+    end.
+
+parse_array(DF,EF,Acc) ->
+    case toke(DF, Acc) of
+    none ->
+         err(unterminated_array);
+    {Token, DF2, Rest} ->
+        case Token of
+        "{" ->
+            EF2 = EF(object_start),
+            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+            parse_array0(DF3, EF3(object_end), Rest2);
+        "[" ->
+            EF2 = EF(array_start),
+            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+            parse_array0(DF3, EF3(array_end), Rest2);
+        Int when is_integer(Int)->
+            parse_array0(DF2, EF(Int), Rest);
+        Float when is_float(Float)->
+            parse_array0(DF2, EF(Float), Rest);
+        Atom when is_atom(Atom)->
+            parse_array0(DF2, EF(Atom), Rest);
+        String when is_binary(String)->
+            parse_array0(DF2, EF(String), Rest);
+        "]" ->
+            {DF2, EF, Rest};
+        _ ->
+            err(unexpected_token)
+        end
+    end.
+
+
+toke_string(DF, <<>>, Acc) ->
+    {Data, DF2} = must_df(DF, unterminated_string),
+    toke_string(DF2, Data, Acc);
+toke_string(DF, <<$\\,$",Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$" | Acc]);
+toke_string(DF, <<$\\,$\\,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\\ | Acc]);
+toke_string(DF, <<$\\,$/,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$/ | Acc]);
+toke_string(DF, <<$\\,$b,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\b | Acc]);
+toke_string(DF, <<$\\,$f,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\f | Acc]);
+toke_string(DF, <<$\\,$n,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\n | Acc]);
+toke_string(DF, <<$\\,$r,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\r | Acc]);
+toke_string(DF, <<$\\,$t,Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [$\t | Acc]);
+toke_string(DF, <<$\\,$u,Rest/binary>>, Acc) ->
+    {<<A,B,C,D,Data/binary>>, DF2} = must_df(DF,4,Rest,missing_hex),
+    UTFChar = erlang:list_to_integer([A, B, C, D], 16),
+    if UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
+        err(invalid_utf_char);
+    true ->
+        ok
+    end,
+    Chars = xmerl_ucs:to_utf8(UTFChar),
+    toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
+toke_string(DF, <<$\\>>, Acc) ->
+    {Data, DF2} = must_df(DF, unterminated_string),
+    toke_string(DF2, <<$\\,Data/binary>>, Acc);
+toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
+    err(bad_escape);
+toke_string(DF, <<$", Rest/binary>>, Acc) ->
+    {list_to_binary(lists:reverse(Acc)), DF, Rest};
+toke_string(DF, <<C, Rest/binary>>, Acc) ->
+    toke_string(DF, Rest, [C | Acc]).
+
+
+toke_number_leading(DF, <<Digit,Rest/binary>>, Acc)
+        when ?IS_DIGIT(Digit) ->
+    toke_number_leading(DF, Rest, [Digit | Acc]);
+toke_number_leading(DF, <<C,_/binary>>=Rest, Acc)
+        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+    {list_to_integer(lists:reverse(Acc)), DF, Rest};
+toke_number_leading(DF, <<>>, Acc) ->
+    case DF() of
+    done ->
+         {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
+    {Data, DF2} ->
+        toke_number_leading(DF2, Data, Acc)
+    end;
+toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
+    toke_number_trailing(DF, Rest, [$.|Acc]);
+toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
+    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
+    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+toke_number_leading(_, _, _) ->
+    err(unexpected_character_in_number).
+
+toke_number_trailing(DF, <<Digit,Rest/binary>>, Acc)
+        when ?IS_DIGIT(Digit) ->
+    toke_number_trailing(DF, Rest, [Digit | Acc]);
+toke_number_trailing(DF, <<C,_/binary>>=Rest, Acc)
+        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+    {list_to_float(lists:reverse(Acc)), DF, Rest};
+toke_number_trailing(DF, <<>>, Acc) ->
+    case DF() of
+    done ->
+        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+    {Data, DF2} ->
+        toke_number_trailing(DF2, Data, Acc)
+    end;
+toke_number_trailing(DF, <<"e", Rest/binary>>, [C|_]=Acc) when C /= $. ->
+    toke_number_exponent(DF, Rest, [$e|Acc]);
+toke_number_trailing(DF, <<"E", Rest/binary>>, [C|_]=Acc) when C /= $. ->
+    toke_number_exponent(DF, Rest, [$e|Acc]);
+toke_number_trailing(_, _, _) ->
+    err(unexpected_character_in_number).
+
+
+toke_number_exponent(DF, <<Digit,Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
+    toke_number_exponent(DF, Rest, [Digit | Acc]);
+toke_number_exponent(DF, <<Sign,Rest/binary>>, [$e|_]=Acc)
+        when Sign == $+ orelse Sign == $- ->
+    toke_number_exponent(DF, Rest, [Sign | Acc]);
+toke_number_exponent(DF, <<C,_/binary>>=Rest, Acc)
+        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+    {list_to_float(lists:reverse(Acc)), DF, Rest};
+toke_number_exponent(DF, <<>>, Acc) ->
+    case DF() of
+    done ->
+        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+    {Data, DF2} ->
+        toke_number_exponent(DF2, Data, Acc)
+    end;
+toke_number_exponent(_, _, _) ->
+        err(unexpected_character_in_number).
+
+
+err(Error)->
+    throw({parse_error,Error}).
+
+
+make_ejson([], Stack) ->
+    Stack;
+make_ejson([array_start | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
+    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
+make_ejson([array_end | RevEvs], Stack) ->
+    make_ejson(RevEvs, [[] | Stack]);
+make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
+    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
+make_ejson([object_end | RevEvs], Stack) ->
+    make_ejson(RevEvs, [[] | Stack]);
+make_ejson([{key, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
+    make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
+make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
+    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
+
+collect_events(get_results, Acc) ->
+    Acc;
+collect_events(Ev, Acc) ->
+    fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
+
+
+collect_object(object_end, 0, ReturnControl, Acc) ->
+    [[Obj]] = make_ejson([object_end | Acc], [[]]),
+    ReturnControl(Obj);
+collect_object(object_end, NestCount, ReturnControl, Acc) ->
+    fun(Ev) ->
+        collect_object(Ev, NestCount - 1, ReturnControl, [object_end | Acc])
+    end;
+collect_object(object_start, NestCount, ReturnControl, Acc) ->
+    fun(Ev) ->
+        collect_object(Ev, NestCount + 1, ReturnControl, [object_start | Acc])
+    end;
+collect_object(Ev, NestCount, ReturnControl, Acc) ->
+    fun(Ev2) ->
+        collect_object(Ev2, NestCount, ReturnControl, [Ev | Acc])
+    end.


[11/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_log.erl
----------------------------------------------------------------------
diff --git a/couch_log.erl b/couch_log.erl
deleted file mode 100644
index cd4bbbb..0000000
--- a/couch_log.erl
+++ /dev/null
@@ -1,254 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log).
--behaviour(gen_event).
-
-% public API
--export([start_link/0, stop/0]).
--export([debug/2, info/2, warn/2, error/2]).
--export([debug_on/0, info_on/0, warn_on/0, get_level/0, get_level_integer/0, set_level/1]).
--export([debug_on/1, info_on/1, warn_on/1, get_level/1, get_level_integer/1, set_level/2]).
--export([read/2]).
-
-% gen_event callbacks
--export([init/1, handle_event/2, terminate/2, code_change/3]).
--export([handle_info/2, handle_call/2]).
-
--define(LEVEL_ERROR, 4).
--define(LEVEL_WARN, 3).
--define(LEVEL_INFO, 2).
--define(LEVEL_DEBUG, 1).
-
--record(state, {
-    fd,
-    level,
-    sasl
-}).
-
-debug(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg}).
-
-info(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg}).
-
-warn(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), warn, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_warn, ConsoleMsg, FileMsg}).
-
-error(Format, Args) ->
-    {ConsoleMsg, FileMsg} = get_log_messages(self(), error, Format, Args),
-    gen_event:sync_notify(error_logger, {couch_error, ConsoleMsg, FileMsg}).
-
-
-level_integer(error)    -> ?LEVEL_ERROR;
-level_integer(warn)     -> ?LEVEL_WARN;
-level_integer(info)     -> ?LEVEL_INFO;
-level_integer(debug)    -> ?LEVEL_DEBUG;
-level_integer(_Else)    -> ?LEVEL_ERROR. % anything else defaults to ERROR level
-
-level_atom(?LEVEL_ERROR) -> error;
-level_atom(?LEVEL_WARN) -> warn;
-level_atom(?LEVEL_INFO) -> info;
-level_atom(?LEVEL_DEBUG) -> debug.
-
-
-start_link() ->
-    couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, []).
-
-stop() ->
-    couch_event_sup:stop(couch_log).
-
-init([]) ->
-    % read config and register for configuration changes
-
-    % just stop if one of the config settings change. couch_server_sup
-    % will restart us and then we will pick up the new settings.
-    ok = couch_config:register(
-        fun("log", "file") ->
-            ?MODULE:stop();
-        ("log", "level") ->
-            ?MODULE:stop();
-        ("log", "include_sasl") ->
-            ?MODULE:stop();
-        ("log_level_by_module", _) ->
-            ?MODULE:stop()
-        end),
-
-    Filename = couch_config:get("log", "file", "couchdb.log"),
-    Level = level_integer(list_to_atom(couch_config:get("log", "level", "info"))),
-    Sasl = couch_config:get("log", "include_sasl", "true") =:= "true",
-    LevelByModule = couch_config:get("log_level_by_module"),
-
-    case ets:info(?MODULE) of
-    undefined -> ets:new(?MODULE, [named_table]);
-    _ -> ok
-    end,
-    ets:insert(?MODULE, {level, Level}),
-    lists:foreach(fun({Module, ModuleLevel}) ->
-        ModuleLevelInteger = level_integer(list_to_atom(ModuleLevel)),
-        ets:insert(?MODULE, {Module, ModuleLevelInteger})
-    end, LevelByModule),
-
-
-    case file:open(Filename, [append]) of
-    {ok, Fd} ->
-        {ok, #state{fd = Fd, level = Level, sasl = Sasl}};
-    {error, Reason} ->
-        ReasonStr = file:format_error(Reason),
-        io:format("Error opening log file ~s: ~s", [Filename, ReasonStr]),
-        {stop, {error, ReasonStr, Filename}}
-    end.
-
-debug_on() ->
-    get_level_integer() =< ?LEVEL_DEBUG.
-
-info_on() ->
-    get_level_integer() =< ?LEVEL_INFO.
-
-warn_on() ->
-    get_level_integer() =< ?LEVEL_WARN.
-
-debug_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_DEBUG.
-
-info_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_INFO.
-
-warn_on(Module) ->
-    get_level_integer(Module) =< ?LEVEL_WARN.
-
-set_level(LevelAtom) ->
-    set_level_integer(level_integer(LevelAtom)).
-
-set_level(Module, LevelAtom) ->
-    set_level_integer(Module, level_integer(LevelAtom)).
-
-get_level() ->
-    level_atom(get_level_integer()).
-
-get_level(Module) ->
-    level_atom(get_level_integer(Module)).
-
-get_level_integer() ->
-    try
-        ets:lookup_element(?MODULE, level, 2)
-    catch error:badarg ->
-        ?LEVEL_ERROR
-    end.
-
-get_level_integer(Module0) ->
-    Module = atom_to_list(Module0),
-    try
-        [{_Module, Level}] = ets:lookup(?MODULE, Module),
-        Level
-    catch error:_ ->
-        get_level_integer()
-    end.
-
-set_level_integer(Int) ->
-    gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
-
-set_level_integer(Module, Int) ->
-    gen_event:call(error_logger, couch_log, {set_level_integer, Module, Int}).
-
-handle_event({couch_error, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_warn, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_info, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({couch_debug, ConMsg, FileMsg}, State) ->
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event({error_report, _, {Pid, _, _}}=Event, #state{sasl = true} = St) ->
-    {ConMsg, FileMsg} = get_log_messages(Pid, error, "~p", [Event]),
-    log(St, ConMsg, FileMsg),
-    {ok, St};
-handle_event({error, _, {Pid, Format, Args}}, #state{sasl = true} = State) ->
-    {ConMsg, FileMsg} = get_log_messages(Pid, error, Format, Args),
-    log(State, ConMsg, FileMsg),
-    {ok, State};
-handle_event(_Event, State) ->
-    {ok, State}.
-
-handle_call({set_level_integer, NewLevel}, State) ->
-    ets:insert(?MODULE, {level, NewLevel}),
-    {ok, ok, State#state{level = NewLevel}};
-
-handle_call({set_level_integer, Module, NewLevel}, State) ->
-    ets:insert(?MODULE, {Module, NewLevel}),
-    {ok, ok, State#state{level = NewLevel}}.
-
-handle_info(_Info, State) ->
-    {ok, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-terminate(_Arg, #state{fd = Fd}) ->
-    file:close(Fd).
-
-log(#state{fd = Fd}, ConsoleMsg, FileMsg) ->
-    ok = io:put_chars(ConsoleMsg),
-    ok = io:put_chars(Fd, FileMsg).
-
-get_log_messages(Pid, Level, Format, Args) ->
-    ConsoleMsg = unicode:characters_to_binary(io_lib:format(
-        "[~s] [~p] " ++ Format ++ "~n", [Level, Pid | Args])),
-    FileMsg = ["[", couch_util:rfc1123_date(), "] ", ConsoleMsg],
-    {ConsoleMsg, iolist_to_binary(FileMsg)}.
-
-
-% Read Bytes bytes from the end of the log file, jumping Offset bytes towards
-% the beginning of the file first.
-%
-%  Log File    FilePos
-%  ----------
-% |          |  10
-% |          |  20
-% |          |  30
-% |          |  40
-% |          |  50
-% |          |  60
-% |          |  70 -- Bytes = 20  --
-% |          |  80                 | Chunk
-% |          |  90 -- Offset = 10 --
-% |__________| 100
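-%
-% For example, with the hypothetical 100-byte file above, read(20, 10)
-% returns the 20-byte chunk covering file positions 70 through 90 (the read
-% starts at max(FileSize - Bytes - Offset, 0)).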
-
-read(Bytes, Offset) ->
-    LogFileName = couch_config:get("log", "file"),
-    LogFileSize = filelib:file_size(LogFileName),
-    MaxChunkSize = list_to_integer(
-        couch_config:get("httpd", "log_max_chunk_size", "1000000")),
-    case Bytes > MaxChunkSize of
-    true ->
-        throw({bad_request, "'bytes' cannot exceed " ++
-            integer_to_list(MaxChunkSize)});
-    false ->
-        ok
-    end,
-
-    {ok, Fd} = file:open(LogFileName, [read]),
-    Start = lists:max([LogFileSize - Bytes - Offset, 0]),
-
-    % TODO: truncate chopped first line
-    % TODO: make streaming
-
-    {ok, Chunk} = file:pread(Fd, Start, Bytes),
-    ok = file:close(Fd),
-    Chunk.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_native_process.erl
----------------------------------------------------------------------
diff --git a/couch_native_process.erl b/couch_native_process.erl
deleted file mode 100644
index 5a32e75..0000000
--- a/couch_native_process.erl
+++ /dev/null
@@ -1,409 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License");
-% you may not use this file except in compliance with the License.
-%
-% You may obtain a copy of the License at
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing,
-% software distributed under the License is distributed on an
-% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-% either express or implied.
-%
-% See the License for the specific language governing permissions
-% and limitations under the License.
-%
-% This file drew much inspiration from erlview, which was written by and
-% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
-%
-%
-% This module provides the smallest possible native view-server.
-% With this module in-place, you can add the following to your couch INI files:
-%  [native_query_servers]
-%  erlang={couch_native_process, start_link, []}
-%
-% This then allows the following example map function to be used:
-%
-%  fun({Doc}) ->
-%    % Below, we emit a single record - the _id as key, null as value
-%    DocId = couch_util:get_value(<<"_id">>, Doc, null),
-%    Emit(DocId, null)
-%  end.
-%
-% which should be roughly the same as the JavaScript:
-%    emit(doc._id, null);
-%
-% This module exposes enough functions such that a native erlang server can
-% act as a fully-fledged view server, but no 'helper' functions specifically
-% for simplifying your erlang view code.  It is expected other third-party
-% extensions will evolve which offer useful layers on top of this view server
-% to help simplify your view code.
--module(couch_native_process).
--behaviour(gen_server).
-
--export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
-         handle_info/2]).
--export([set_timeout/2, prompt/2]).
-
--define(STATE, native_proc_state).
--record(evstate, {ddocs, funs=[], query_config=[], list_pid=nil, timeout=5000}).
-
--include("couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link(?MODULE, [], []).
-
-% this is a bit messy, see also couch_query_servers handle_info
-% stop(_Pid) ->
-%     ok.
-
-set_timeout(Pid, TimeOut) ->
-    gen_server:call(Pid, {set_timeout, TimeOut}).
-
-prompt(Pid, Data) when is_list(Data) ->
-    gen_server:call(Pid, {prompt, Data}).
-
-% gen_server callbacks
-init([]) ->
-    {ok, #evstate{ddocs=dict:new()}}.
-
-handle_call({set_timeout, TimeOut}, _From, State) ->
-    {reply, ok, State#evstate{timeout=TimeOut}};
-
-handle_call({prompt, Data}, _From, State) ->
-    ?LOG_DEBUG("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
-    {NewState, Resp} = try run(State, to_binary(Data)) of
-        {S, R} -> {S, R}
-        catch
-            throw:{error, Why} ->
-                {State, [<<"error">>, Why, Why]}
-        end,
-
-    case Resp of
-        {error, Reason} ->
-            Msg = io_lib:format("couch native server error: ~p", [Reason]),
-            {reply, [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)], NewState};
-        [<<"error">> | Rest] ->
-            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
-            % TODO: markh? (jan)
-            {reply, [<<"error">> | Rest], NewState};
-        [<<"fatal">> | Rest] ->
-            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
-            % TODO: markh? (jan)
-            {stop, fatal, [<<"error">> | Rest], NewState};
-        Resp ->
-            {reply, Resp, NewState}
-    end.
-
-handle_cast(foo, State) -> {noreply, State}.
-handle_info({'EXIT',_,normal}, State) -> {noreply, State};
-handle_info({'EXIT',_,Reason}, State) ->
-    {stop, Reason, State}.
-terminate(_Reason, _State) -> ok.
-code_change(_OldVersion, State, _Extra) -> {ok, State}.
-
-run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
-    Pid ! {self(), list_row, Row},
-    receive
-        {Pid, chunks, Data} ->
-            {State, [<<"chunks">>, Data]};
-        {Pid, list_end, Data} ->
-            receive
-                {'EXIT', Pid, normal} -> ok
-            after State#evstate.timeout ->
-                throw({timeout, list_cleanup})
-            end,
-            process_flag(trap_exit, erlang:get(do_trap)),
-            {State#evstate{list_pid=nil}, [<<"end">>, Data]}
-    after State#evstate.timeout ->
-        throw({timeout, list_row})
-    end;
-run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
-    Pid ! {self(), list_end},
-    Resp =
-    receive
-        {Pid, list_end, Data} ->
-            receive
-                {'EXIT', Pid, normal} -> ok
-            after State#evstate.timeout ->
-                throw({timeout, list_cleanup})
-            end,
-            [<<"end">>, Data]
-    after State#evstate.timeout ->
-        throw({timeout, list_end})
-    end,
-    process_flag(trap_exit, erlang:get(do_trap)),
-    {State#evstate{list_pid=nil}, Resp};
-run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
-    {State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
-    {#evstate{ddocs=DDocs}, true};
-run(#evstate{ddocs=DDocs}, [<<"reset">>, QueryConfig]) ->
-    {#evstate{ddocs=DDocs, query_config=QueryConfig}, true};
-run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
-    FunInfo = makefun(State, BinFunc),
-    {State#evstate{funs=Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">> , Doc]) ->
-    Resp = lists:map(fun({Sig, Fun}) ->
-        erlang:put(Sig, []),
-        Fun(Doc),
-        lists:reverse(erlang:get(Sig))
-    end, State#evstate.funs),
-    {State, Resp};
-run(State, [<<"reduce">>, Funs, KVs]) ->
-    {Keys, Vals} =
-    lists:foldl(fun([K, V], {KAcc, VAcc}) ->
-        {[K | KAcc], [V | VAcc]}
-    end, {[], []}, KVs),
-    Keys2 = lists:reverse(Keys),
-    Vals2 = lists:reverse(Vals),
-    {State, catch reduce(State, Funs, Keys2, Vals2, false)};
-run(State, [<<"rereduce">>, Funs, Vals]) ->
-    {State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
-    DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
-    {State#evstate{ddocs=DDocs2}, true};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
-    DDoc = load_ddoc(DDocs, DDocId),
-    ddoc(State, DDoc, Rest);
-run(_, Unknown) ->
-    ?LOG_ERROR("Native Process: Unknown command: ~p~n", [Unknown]),
-    throw({error, unknown_command}).
-    
-ddoc(State, {DDoc}, [FunPath, Args]) ->
-    % load fun from the FunPath
-    BFun = lists:foldl(fun
-        (Key, {Props}) when is_list(Props) ->
-            couch_util:get_value(Key, Props, nil);
-        (_Key, Fun) when is_binary(Fun) ->
-            Fun;
-        (_Key, nil) ->
-            throw({error, not_found});
-        (_Key, _Fun) ->
-            throw({error, malformed_ddoc})
-        end, {DDoc}, FunPath),
-    ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
-
-ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
-    {State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
-    FilterFunWrapper = fun(Doc) ->
-        case catch Fun(Doc, Req) of
-        true -> true;
-        false -> false;
-        {'EXIT', Error} -> ?LOG_ERROR("~p", [Error])
-        end
-    end,
-    Resp = lists:map(FilterFunWrapper, Docs),
-    {State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
-    Resp = case (catch apply(Fun, Args)) of
-        FunResp when is_list(FunResp) ->
-            FunResp;
-        {FunResp} ->
-            [<<"resp">>, {FunResp}];
-        FunResp ->
-            FunResp
-    end,
-    {State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
-    Resp = case (catch apply(Fun, Args)) of
-        [JsonDoc, JsonResp]  ->
-            [<<"up">>, JsonDoc, JsonResp]
-    end,
-    {State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
-    Self = self(),
-    SpawnFun = fun() ->
-        LastChunk = (catch apply(Fun, Args)),
-        case start_list_resp(Self, Sig) of
-            started ->
-                receive
-                    {Self, list_row, _Row} -> ignore;
-                    {Self, list_end} -> ignore
-                after State#evstate.timeout ->
-                    throw({timeout, list_cleanup_pid})
-                end;
-            _ ->
-                ok
-        end,
-        LastChunks =
-        case erlang:get(Sig) of
-            undefined -> [LastChunk];
-            OtherChunks -> [LastChunk | OtherChunks]
-        end,
-        Self ! {self(), list_end, lists:reverse(LastChunks)}
-    end,
-    erlang:put(do_trap, process_flag(trap_exit, true)),
-    Pid = spawn_link(SpawnFun),
-    Resp =
-    receive
-        {Pid, start, Chunks, JsonResp} ->
-            [<<"start">>, Chunks, JsonResp]
-    after State#evstate.timeout ->
-        throw({timeout, list_start})
-    end,
-    {State#evstate{list_pid=Pid}, Resp}.
-
-store_ddoc(DDocs, DDocId, DDoc) ->
-    dict:store(DDocId, DDoc, DDocs).
-load_ddoc(DDocs, DDocId) ->
-    try dict:fetch(DDocId, DDocs) of
-        {DDoc} -> {DDoc}
-    catch
-        _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
-    end.
-
-bindings(State, Sig) ->
-    bindings(State, Sig, nil).
-bindings(State, Sig, DDoc) ->
-    Self = self(),
-
-    Log = fun(Msg) ->
-        ?LOG_INFO(Msg, [])
-    end,
-
-    Emit = fun(Id, Value) ->
-        Curr = erlang:get(Sig),
-        erlang:put(Sig, [[Id, Value] | Curr])
-    end,
-
-    Start = fun(Headers) ->
-        erlang:put(list_headers, Headers)
-    end,
-
-    Send = fun(Chunk) ->
-        Curr =
-        case erlang:get(Sig) of
-            undefined -> [];
-            Else -> Else
-        end,
-        erlang:put(Sig, [Chunk | Curr])
-    end,
-
-    GetRow = fun() ->
-        case start_list_resp(Self, Sig) of
-            started ->
-                ok;
-            _ ->
-                Chunks =
-                case erlang:get(Sig) of
-                    undefined -> [];
-                    CurrChunks -> CurrChunks
-                end,
-                Self ! {self(), chunks, lists:reverse(Chunks)}
-        end,
-        erlang:put(Sig, []),
-        receive
-            {Self, list_row, Row} -> Row;
-            {Self, list_end} -> nil
-        after State#evstate.timeout ->
-            throw({timeout, list_pid_getrow})
-        end
-    end,
-   
-    FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
-
-    Bindings = [
-        {'Log', Log},
-        {'Emit', Emit},
-        {'Start', Start},
-        {'Send', Send},
-        {'GetRow', GetRow},
-        {'FoldRows', FoldRows}
-    ],
-    case DDoc of
-        {_Props} ->
-            Bindings ++ [{'DDoc', DDoc}];
-        _Else -> Bindings
-    end.
-
-% thanks to erlview, via:
-% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
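-%
-% In short: a binary source string such as <<"fun(A) -> A + 1 end.">> is
-% scanned, parsed, and evaluated via erl_scan/erl_parse/erl_eval, with the
-% bindings built above (Log, Emit, Start, Send, GetRow, FoldRows, and
-% optionally DDoc) visible as variables inside the resulting fun.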
-makefun(State, Source) ->
-    Sig = couch_util:md5(Source),
-    BindFuns = bindings(State, Sig),
-    {Sig, makefun(State, Source, BindFuns)}.
-makefun(State, Source, {DDoc}) ->
-    Sig = couch_util:md5(lists:flatten([Source, term_to_binary(DDoc)])),
-    BindFuns = bindings(State, Sig, {DDoc}),
-    {Sig, makefun(State, Source, BindFuns)};
-makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
-    FunStr = binary_to_list(Source),
-    {ok, Tokens, _} = erl_scan:string(FunStr),
-    Form = case (catch erl_parse:parse_exprs(Tokens)) of
-        {ok, [ParsedForm]} ->
-            ParsedForm;
-        {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
-            io:format(standard_error, "Syntax error on line: ~p~n", [LineNum]),
-            io:format(standard_error, "~s~p~n", [Mesg, Params]),
-            throw(Error)
-    end,
-    Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
-        erl_eval:add_binding(Name, Fun, Acc)
-    end, erl_eval:new_bindings(), BindFuns),
-    {value, Fun, _} = erl_eval:expr(Form, Bindings),
-    Fun.
-
-reduce(State, BinFuns, Keys, Vals, ReReduce) ->
-    Funs = case is_list(BinFuns) of
-        true ->
-            lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
-        _ ->
-            [makefun(State, BinFuns)]
-    end,
-    Reds = lists:map(fun({_Sig, Fun}) ->
-        Fun(Keys, Vals, ReReduce)
-    end, Funs),
-    [true, Reds].
-
-foldrows(GetRow, ProcRow, Acc) ->
-    case GetRow() of
-        nil ->
-            {ok, Acc};
-        Row ->
-            case (catch ProcRow(Row, Acc)) of
-                {ok, Acc2} ->
-                    foldrows(GetRow, ProcRow, Acc2);
-                {stop, Acc2} ->
-                    {ok, Acc2}
-            end
-    end.
-
-start_list_resp(Self, Sig) ->
-    case erlang:get(list_started) of
-        undefined ->
-            Headers =
-            case erlang:get(list_headers) of
-                undefined -> {[{<<"headers">>, {[]}}]};
-                CurrHdrs -> CurrHdrs
-            end,
-            Chunks =
-            case erlang:get(Sig) of
-                undefined -> [];
-                CurrChunks -> CurrChunks
-            end,
-            Self ! {self(), start, lists:reverse(Chunks), Headers},
-            erlang:put(list_started, true),
-            erlang:put(Sig, []),
-            started;
-        _ ->
-            ok
-    end.
-
-to_binary({Data}) ->
-    Pred = fun({Key, Value}) ->
-        {to_binary(Key), to_binary(Value)}
-    end,
-    {lists:map(Pred, Data)};
-to_binary(Data) when is_list(Data) ->
-    [to_binary(D) || D <- Data];
-to_binary(null) ->
-    null;
-to_binary(true) ->
-    true;
-to_binary(false) ->
-    false;
-to_binary(Data) when is_atom(Data) ->
-    list_to_binary(atom_to_list(Data));
-to_binary(Data) ->
-    Data.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_os_daemons.erl
----------------------------------------------------------------------
diff --git a/couch_os_daemons.erl b/couch_os_daemons.erl
deleted file mode 100644
index cac031a..0000000
--- a/couch_os_daemons.erl
+++ /dev/null
@@ -1,374 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_os_daemons).
--behaviour(gen_server).
-
--export([start_link/0, info/0, info/1, config_change/2]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include("couch_db.hrl").
-
--record(daemon, {
-    port,
-    name,
-    cmd,
-    kill,
-    status=running,
-    cfg_patterns=[],
-    errors=[],
-    buf=[]
-}).
-
--define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
--define(TIMEOUT, 5000).
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-info() ->
-    info([]).
-
-info(Options) ->
-    gen_server:call(?MODULE, {daemon_info, Options}).
-
-config_change(Section, Key) ->
-    gen_server:cast(?MODULE, {config_change, Section, Key}).
-
-init(_) ->
-    process_flag(trap_exit, true),
-    ok = couch_config:register(fun ?MODULE:config_change/2),
-    Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
-    reload_daemons(Table),
-    {ok, Table}.
-
-terminate(_Reason, Table) ->
-    [stop_port(D) || D <- ets:tab2list(Table)],
-    ok.
-
-handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
-    case lists:member(table, Options) of
-        true ->
-            {reply, {ok, ets:tab2list(Table)}, Table};
-        _ ->
-            {reply, {ok, Table}, Table}
-    end;
-handle_call(Msg, From, Table) ->
-    ?LOG_ERROR("Unknown call message to ~p from ~p: ~p", [?MODULE, From, Msg]),
-    {stop, error, Table}.
-
-handle_cast({config_change, Sect, Key}, Table) ->
-    restart_daemons(Table, Sect, Key),
-    case Sect of
-        "os_daemons" -> reload_daemons(Table);
-        _ -> ok
-    end,
-    {noreply, Table};
-handle_cast(stop, Table) ->
-    {stop, normal, Table};
-handle_cast(Msg, Table) ->
-    ?LOG_ERROR("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
-    {stop, error, Table}.
-
-handle_info({'EXIT', Port, Reason}, Table) ->
-    case ets:lookup(Table, Port) of
-        [] ->
-            ?LOG_INFO("Port ~p exited after stopping: ~p~n", [Port, Reason]);
-        [#daemon{status=stopping}] ->
-            true = ets:delete(Table, Port);
-        [#daemon{name=Name, status=restarting}=D] ->
-            ?LOG_INFO("Daemon ~P restarting after config change.", [Name]),
-            true = ets:delete(Table, Port),
-            {ok, Port2} = start_port(D#daemon.cmd),
-            true = ets:insert(Table, D#daemon{
-                port=Port2, status=running, kill=undefined, buf=[]
-            });
-        [#daemon{name=Name, status=halted}] ->
-            ?LOG_ERROR("Halted daemon process: ~p", [Name]);
-        [D] ->
-            ?LOG_ERROR("Invalid port state at exit: ~p", [D])
-    end,
-    {noreply, Table};
-handle_info({Port, closed}, Table) ->
-    handle_info({Port, {exit_status, closed}}, Table);
-handle_info({Port, {exit_status, Status}}, Table) ->
-    case ets:lookup(Table, Port) of
-        [] ->
-            ?LOG_ERROR("Unknown port ~p exiting ~p", [Port, Status]),
-            {stop, {error, unknown_port_died, Status}, Table};
-        [#daemon{name=Name, status=restarting}=D] ->
-            ?LOG_INFO("Daemon ~P restarting after config change.", [Name]),
-            true = ets:delete(Table, Port),
-            {ok, Port2} = start_port(D#daemon.cmd),
-            true = ets:insert(Table, D#daemon{
-                port=Port2, status=running, kill=undefined, buf=[]
-            }),
-            {noreply, Table};
-        [#daemon{status=stopping}=D] ->
-            % The configuration changed and this daemon is no
-            % longer needed.
-            ?LOG_DEBUG("Port ~p shut down.", [D#daemon.name]),
-            true = ets:delete(Table, Port),
-            {noreply, Table};
-        [D] ->
-            % Port died for unknown reason. Check to see if it has
-            % died too many times or if we should boot it back up.
-            case should_halt([now() | D#daemon.errors]) of
-                {true, _} ->
-                    % Halting the process. We won't try and reboot
-                    % until the configuration changes.
-                    Fmt = "Daemon ~p halted with exit_status ~p",
-                    ?LOG_ERROR(Fmt, [D#daemon.name, Status]),
-                    D2 = D#daemon{status=halted, errors=nil, buf=nil},
-                    true = ets:insert(Table, D2),
-                    {noreply, Table};
-                {false, Errors} ->
-                    % We're guessing it was a transient error; this daemon
-                    % has behaved so far, so we'll give it another chance.
-                    Fmt = "Daemon ~p is being rebooted after exit_status ~p",
-                    ?LOG_INFO(Fmt, [D#daemon.name, Status]),
-                    true = ets:delete(Table, Port),
-                    {ok, Port2} = start_port(D#daemon.cmd),
-                    true = ets:insert(Table, D#daemon{
-                        port=Port2, status=running, kill=undefined,
-                                                errors=Errors, buf=[]
-                    }),
-                    {noreply, Table}
-            end;
-        _Else ->
-            throw(error)
-    end;
-handle_info({Port, {data, {noeol, Data}}}, Table) ->
-    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
-    true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
-    {noreply, Table};
-handle_info({Port, {data, {eol, Data}}}, Table) ->
-    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
-    Line = lists:reverse(Buf, Data),
-    % The first line echoed back is the kill command
-    % for when we go to get rid of the port. Lines after
-    % that are considered part of the stdio API.
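-    % For example (see handle_port_message/2 below), a daemon can write
-    %   ["get", "log", "level"]
-    % to read a config value back as JSON, or
-    %   ["log", "some message"]
-    % to log at the default info level.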
-    case D#daemon.kill of
-        undefined ->
-            true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
-        _Else ->
-            D2 = case (catch ?JSON_DECODE(Line)) of
-                {invalid_json, Rejected} ->
-                    ?LOG_ERROR("Ignoring OS daemon request: ~p", [Rejected]),
-                    D;
-                JSON ->
-                    {ok, D3} = handle_port_message(D, JSON),
-                    D3
-            end,
-            true = ets:insert(Table, D2#daemon{buf=[]})
-    end,
-    {noreply, Table};
-handle_info({Port, Error}, Table) ->
-    ?LOG_ERROR("Unexpectd message from port ~p: ~p", [Port, Error]),
-    stop_port(Port),
-    [D] = ets:lookup(Table, Port),
-    true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
-    {noreply, Table};
-handle_info(Msg, Table) ->
-    ?LOG_ERROR("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
-    {stop, error, Table}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-% Internal API
-
-%
-% Port management helpers
-%
-
-start_port(Command) ->
-    start_port(Command, []).
-
-start_port(Command, EnvPairs) ->
-    PrivDir = couch_util:priv_dir(),
-    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
-    Opts = case lists:keytake(env, 1, ?PORT_OPTIONS) of
-        false ->
-            ?PORT_OPTIONS ++ [ {env,EnvPairs} ];
-        {value, {env,OldPairs}, SubOpts} ->
-            AllPairs = lists:keymerge(1, EnvPairs, OldPairs),
-            SubOpts ++ [ {env,AllPairs} ]
-    end,
-    Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, Opts),
-    {ok, Port}.
-
-
-stop_port(#daemon{port=Port, kill=undefined}=D) ->
-    ?LOG_ERROR("Stopping daemon without a kill command: ~p", [D#daemon.name]),
-    catch port_close(Port);
-stop_port(#daemon{port=Port}=D) ->
-    ?LOG_DEBUG("Stopping daemon: ~p", [D#daemon.name]),
-    os:cmd(D#daemon.kill),
-    catch port_close(Port).
-
-
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
-    KVs = couch_config:get(Section),
-    Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
-    Json = iolist_to_binary(?JSON_ENCODE({Data})),
-    port_command(Port, <<Json/binary, "\n">>),
-    {ok, Daemon};
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
-    Value = case couch_config:get(Section, Key, null) of
-        null -> null;
-        String -> ?l2b(String)
-    end,
-    Json = iolist_to_binary(?JSON_ENCODE(Value)),
-    port_command(Port, <<Json/binary, "\n">>),
-    {ok, Daemon};
-handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
-    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
-    {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(Daemon, [<<"register">>, Sec, Key])
-                        when is_binary(Sec) andalso is_binary(Key) ->
-    Pattern = {?b2l(Sec), ?b2l(Key)},
-    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
-    {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
-    handle_log_message(Name, Msg, <<"info">>),
-    {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
-    Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
-    handle_log_message(Name, Msg, Level),
-    {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, Else) ->
-    ?LOG_ERROR("Daemon ~p made invalid request: ~p", [Name, Else]),
-    {ok, Daemon}.
-
-
-handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
-    ?LOG_ERROR("Invalid log message from daemon ~p: ~p", [Name, Msg]);
-handle_log_message(Name, Msg, <<"debug">>) ->
-    ?LOG_DEBUG("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"info">>) ->
-    ?LOG_INFO("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"error">>) ->
-    ?LOG_ERROR("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, Level) ->
-    ?LOG_ERROR("Invalid log level from daemon: ~p", [Level]),
-    ?LOG_INFO("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]).
-
-%
-% Daemon management helpers
-%
-
-reload_daemons(Table) ->
-    % List of daemons we want to have running.
-    Configured = lists:sort(couch_config:get("os_daemons")),
-    
-    % Remove records for daemons that were halted.
-    MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
-    Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
-    ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
-    
-    % Stop daemons that are running
-    % Start newly configured daemons
-    MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
-    Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
-    ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
-    ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
-    ok.
-
-
-restart_daemons(Table, Sect, Key) ->
-    restart_daemons(Table, Sect, Key, ets:first(Table)).
-
-restart_daemons(_, _, _, '$end_of_table') ->
-    ok;
-restart_daemons(Table, Sect, Key, Port) ->
-    [D] = ets:lookup(Table, Port),
-    HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
-    HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
-    case HasSect or HasKey of
-        true ->
-            stop_port(D),
-            D2 = D#daemon{status=restarting, buf=nil},
-            true = ets:insert(Table, D2);
-        _ ->
-            ok
-    end,
-    restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
-    
-
-stop_os_daemons(_Table, []) ->
-    ok;
-stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
-    [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
-    [D] = ets:lookup(Table, Port),
-    case D#daemon.status of
-        halted ->
-            ets:delete(Table, Port);
-        _ ->
-            stop_port(D),
-            D2 = D#daemon{status=stopping, errors=nil, buf=nil},
-            true = ets:insert(Table, D2)
-    end,
-    stop_os_daemons(Table, Rest).
-    
-boot_os_daemons(_Table, []) ->
-    ok;
-boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
-    {ok, Port} = start_port(Cmd),
-    true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
-    boot_os_daemons(Table, Rest).
-    
-% Elements unique to the configured set need to be booted.
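-% For example, with illustrative atoms in place of the {Name, Cmd} pairs and
-% both lists sorted, find_to_boot([a, b, c], [b], []) returns [c, a]:
-% b is already running, so only a and c need booting.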
-find_to_boot([], _Rest, Acc) ->
-    % Nothing else configured.
-    Acc;
-find_to_boot([D | R1], [D | R2], Acc) ->
-    % Elements are equal, daemon already running.
-    find_to_boot(R1, R2, Acc);
-find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
-    find_to_boot(R1, A2, [D1 | Acc]);
-find_to_boot(A1, [_ | R2], Acc) ->
-    find_to_boot(A1, R2, Acc);
-find_to_boot(Rest, [], Acc) ->
-    % No more candidates for already running. Boot all.
-    Rest ++ Acc.
-
-% Elements unique to the running set need to be killed.
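-% For example, find_to_stop([b], [a, b, c], []) returns [c, a]: only b is
-% still configured, so a and c are ready to stop.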
-find_to_stop([], Rest, Acc) ->
-    % The rest haven't been found, so they must all
-    % be ready to die.
-    Rest ++ Acc;
-find_to_stop([D | R1], [D | R2], Acc) ->
-    % Elements are equal, daemon already running.
-    find_to_stop(R1, R2, Acc);
-find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
-    find_to_stop(R1, A2, Acc);
-find_to_stop(A1, [D2 | R2], Acc) ->
-    find_to_stop(A1, R2, [D2 | Acc]);
-find_to_stop(_, [], Acc) ->
-    % No more running daemons to worry about.
-    Acc.
-
-should_halt(Errors) ->
-    RetryTimeCfg = couch_config:get("os_daemon_settings", "retry_time", "5"),
-    RetryTime = list_to_integer(RetryTimeCfg),
-
-    Now = now(),
-    RecentErrors = lists:filter(fun(Time) ->
-        timer:now_diff(Now, Time) =< RetryTime * 1000000
-    end, Errors),
-
-    RetryCfg = couch_config:get("os_daemon_settings", "max_retries", "3"),
-    Retries = list_to_integer(RetryCfg),
-
-    {length(RecentErrors) >= Retries, RecentErrors}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_os_process.erl
----------------------------------------------------------------------
diff --git a/couch_os_process.erl b/couch_os_process.erl
deleted file mode 100644
index db62d49..0000000
--- a/couch_os_process.erl
+++ /dev/null
@@ -1,216 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_os_process).
--behaviour(gen_server).
-
--export([start_link/1, start_link/2, start_link/3, stop/1]).
--export([set_timeout/2, prompt/2]).
--export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
--include("couch_db.hrl").
-
--define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
-
--record(os_proc,
-    {command,
-     port,
-     writer,
-     reader,
-     timeout=5000
-    }).
-
-start_link(Command) ->
-    start_link(Command, []).
-start_link(Command, Options) ->
-    start_link(Command, Options, ?PORT_OPTIONS).
-start_link(Command, Options, PortOptions) ->
-    gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-% Read/Write API
-set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
-    ok = gen_server:call(Pid, {set_timeout, TimeOut}, infinity).
-
-% Used by couch_db_update_notifier.erl
-send(Pid, Data) ->
-    gen_server:cast(Pid, {send, Data}).
-
-prompt(Pid, Data) ->
-    case gen_server:call(Pid, {prompt, Data}, infinity) of
-        {ok, Result} ->
-            Result;
-        Error ->
-            ?LOG_ERROR("OS Process Error ~p :: ~p",[Pid,Error]),
-            throw(Error)
-    end.
-
-% Utility functions for reading and writing
-% in custom functions
-writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
-    port_command(OsProc#os_proc.port, [Data, $\n]).
-
-readline(#os_proc{} = OsProc) ->
-    readline(OsProc, []).
-readline(#os_proc{port = Port} = OsProc, Acc) ->
-    receive
-    {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
-        readline(OsProc, <<Acc/binary,Data/binary>>);
-    {Port, {data, {noeol, Data}}} when is_binary(Data) ->
-        readline(OsProc, Data);
-    {Port, {data, {noeol, Data}}} ->
-        readline(OsProc, [Data|Acc]);
-    {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
-        [<<Acc/binary,Data/binary>>];
-    {Port, {data, {eol, Data}}} when is_binary(Data) ->
-        [Data];
-    {Port, {data, {eol, Data}}} ->
-        lists:reverse(Acc, Data);
-    {Port, Err} ->
-        catch port_close(Port),
-        throw({os_process_error, Err})
-    after OsProc#os_proc.timeout ->
-        catch port_close(Port),
-        throw({os_process_error, "OS process timed out."})
-    end.
-
-% Standard JSON functions
-writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
-    JsonData = ?JSON_ENCODE(Data),
-    ?LOG_DEBUG("OS Process ~p Input  :: ~s", [OsProc#os_proc.port, JsonData]),
-    true = writeline(OsProc, JsonData).
-
-readjson(OsProc) when is_record(OsProc, os_proc) ->
-    Line = iolist_to_binary(readline(OsProc)),
-    ?LOG_DEBUG("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
-    try
-        % Don't actually parse the whole JSON. Just try to see if it's
-        % a command or a doc map/reduce/filter/show/list/update output.
-        % If it's a command then parse the whole JSON and execute the
-        % command, otherwise return the raw JSON line to the caller.
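-        %
-        % For example, pick_command(<<"[\"log\", \"hi\"]">>) throws
-        % {cmd, <<"log">>} after the first two parse events, while a plain
-        % result row such as <<"[[1,2]]">> throws abort.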
-        pick_command(Line)
-    catch
-    throw:abort ->
-        {json, Line};
-    throw:{cmd, _Cmd} ->
-        case ?JSON_DECODE(Line) of
-        [<<"log">>, Msg] when is_binary(Msg) ->
-            % we got a message to log. Log it and continue
-            ?LOG_INFO("OS Process ~p Log :: ~s", [OsProc#os_proc.port, Msg]),
-            readjson(OsProc);
-        [<<"error">>, Id, Reason] ->
-            throw({error, {couch_util:to_existing_atom(Id),Reason}});
-        [<<"fatal">>, Id, Reason] ->
-            ?LOG_INFO("OS Process ~p Fatal Error :: ~s ~p",
-                [OsProc#os_proc.port, Id, Reason]),
-            throw({couch_util:to_existing_atom(Id),Reason});
-        _Result ->
-            {json, Line}
-        end
-    end.
-
-pick_command(Line) ->
-    json_stream_parse:events(Line, fun pick_command0/1).
-
-pick_command0(array_start) ->
-    fun pick_command1/1;
-pick_command0(_) ->
-    throw(abort).
-
-pick_command1(<<"log">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(<<"error">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(<<"fatal">> = Cmd) ->
-    throw({cmd, Cmd});
-pick_command1(_) ->
-    throw(abort).
-
-
-% gen_server API
-init([Command, Options, PortOptions]) ->
-    PrivDir = couch_util:priv_dir(),
-    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
-    BaseProc = #os_proc{
-        command=Command,
-        port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
-        writer=fun writejson/2,
-        reader=fun readjson/1
-    },
-    KillCmd = iolist_to_binary(readline(BaseProc)),
-    Pid = self(),
-    ?LOG_DEBUG("OS Process Start :: ~p", [BaseProc#os_proc.port]),
-    spawn(fun() ->
-            % this ensures the real OS process is killed when this process dies.
-            erlang:monitor(process, Pid),
-            receive _ -> ok end,
-            os:cmd(?b2l(iolist_to_binary(KillCmd)))
-        end),
-    OsProc =
-    lists:foldl(fun(Opt, Proc) ->
-        case Opt of
-        {writer, Writer} when is_function(Writer) ->
-            Proc#os_proc{writer=Writer};
-        {reader, Reader} when is_function(Reader) ->
-            Proc#os_proc{reader=Reader};
-        {timeout, TimeOut} when is_integer(TimeOut) ->
-            Proc#os_proc{timeout=TimeOut}
-        end
-    end, BaseProc, Options),
-    {ok, OsProc}.
-
-terminate(_Reason, #os_proc{port=Port}) ->
-    catch port_close(Port),
-    ok.
-
-handle_call({set_timeout, TimeOut}, _From, OsProc) ->
-    {reply, ok, OsProc#os_proc{timeout=TimeOut}};
-handle_call({prompt, Data}, _From, OsProc) ->
-    #os_proc{writer=Writer, reader=Reader} = OsProc,
-    try
-        Writer(OsProc, Data),
-        {reply, {ok, Reader(OsProc)}, OsProc}
-    catch
-        throw:{error, OsError} ->
-            {reply, OsError, OsProc};
-        throw:OtherError ->
-            {stop, normal, OtherError, OsProc}
-    end.
-
-handle_cast({send, Data}, #os_proc{writer=Writer}=OsProc) ->
-    try
-        Writer(OsProc, Data),
-        {noreply, OsProc}
-    catch
-        throw:OsError ->
-            ?LOG_ERROR("Failed sending data: ~p -> ~p", [Data, OsError]),
-            {stop, normal, OsProc}
-    end;
-handle_cast(stop, OsProc) ->
-    {stop, normal, OsProc};
-handle_cast(Msg, OsProc) ->
-    ?LOG_DEBUG("OS Proc: Unknown cast: ~p", [Msg]),
-    {noreply, OsProc}.
-
-handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
-    ?LOG_INFO("OS Process terminated normally", []),
-    {stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
-    ?LOG_ERROR("OS Process died with status: ~p", [Status]),
-    {stop, {exit_status, Status}, OsProc}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
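For context, couch_os_process speaks a newline-delimited JSON protocol over
its port: writejson/2 emits one JSON line, and readjson/1 reads lines back,
handling "log"/"error"/"fatal" commands itself and returning any other JSON
line to the caller as {json, Line}. A minimal usage sketch (the command
string is a placeholder for any line-oriented query server executable):

    %% Hypothetical usage; assumes couchspawnkillable exists in the priv
    %% dir, as init/1 above requires.
    {ok, Pid} = couch_os_process:start_link("couchjs main.js"),
    ok = couch_os_process:set_timeout(Pid, 10000),
    %% prompt/2 writes one JSON line and returns the reply; raw
    %% (non-command) replies come back as {json, Line}.
    {json, _Line} = couch_os_process:prompt(Pid, [<<"reset">>, {[]}]),
    couch_os_process:stop(Pid).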

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_passwords.erl
----------------------------------------------------------------------
diff --git a/couch_passwords.erl b/couch_passwords.erl
deleted file mode 100644
index d9e6836..0000000
--- a/couch_passwords.erl
+++ /dev/null
@@ -1,119 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_passwords).
-
--export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
--export([hash_admin_password/1, get_unhashed_admins/0]).
-
--include("couch_db.hrl").
-
--define(MAX_DERIVED_KEY_LENGTH, (1 bsl 32 - 1)).
--define(SHA1_OUTPUT_LENGTH, 20).
-
-%% legacy scheme, not used for new passwords.
--spec simple(binary(), binary()) -> binary().
-simple(Password, Salt) ->
-    ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).
-
-%% CouchDB utility functions
--spec hash_admin_password(binary()) -> binary().
-hash_admin_password(ClearPassword) ->
-    Iterations = couch_config:get("couch_httpd_auth", "iterations", "10000"),
-    Salt = couch_uuids:random(),
-    DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword),
-                                        Salt ,list_to_integer(Iterations)),
-    ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
-        ++ ?b2l(Salt) ++ ","
-        ++ Iterations).
-
--spec get_unhashed_admins() -> list().
-get_unhashed_admins() ->
-    lists:filter(
-        fun({_User, "-hashed-" ++ _}) ->
-            false; % already hashed
-        ({_User, "-pbkdf2-" ++ _}) ->
-            false; % already hashed
-        ({_User, _ClearPassword}) ->
-            true
-        end,
-    couch_config:get("admins")).
-
-%% Current scheme, much stronger.
--spec pbkdf2(binary(), binary(), integer()) -> binary().
-pbkdf2(Password, Salt, Iterations) ->
-    {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
-    Result.
-
--spec pbkdf2(binary(), binary(), integer(), integer())
-    -> {ok, binary()} | {error, derived_key_too_long}.
-pbkdf2(_Password, _Salt, _Iterations, DerivedLength)
-    when DerivedLength > ?MAX_DERIVED_KEY_LENGTH ->
-    {error, derived_key_too_long};
-pbkdf2(Password, Salt, Iterations, DerivedLength) ->
-    L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
-    <<Bin:DerivedLength/binary,_/binary>> =
-        iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
-    {ok, ?l2b(couch_util:to_hex(Bin))}.
-
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist())
-    -> iolist().
-pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc)
-    when BlockIndex > BlockCount ->
-    lists:reverse(Acc);
-pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
-    Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
-    pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]).
-
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(),
-    binary(), binary()) -> binary().
-pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc)
-    when Iteration > Iterations ->
-    Acc;
-pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
-    InitialBlock = crypto:sha_mac(Password,
-        <<Salt/binary,BlockIndex:32/integer>>),
-    pbkdf2(Password, Salt, Iterations, BlockIndex, 2,
-        InitialBlock, InitialBlock);
-pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
-    Next = crypto:sha_mac(Password, Prev),
-    pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
-                   Next, crypto:exor(Next, Acc)).
-
-%% Compare two lists for equality without short-circuiting, to avoid timing attacks.
--spec verify(string(), string(), integer()) -> boolean().
-verify([X|RestX], [Y|RestY], Result) ->
-    verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
-    Result == 0.
-
--spec verify(binary(), binary()) -> boolean();
-            (list(), list()) -> boolean().
-verify(<<X/binary>>, <<Y/binary>>) ->
-    verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
-    case length(X) == length(Y) of
-        true ->
-            verify(X, Y, 0);
-        false ->
-            false
-    end;
-verify(_X, _Y) -> false.
-
--spec ceiling(number()) -> integer().
-ceiling(X) ->
-    T = erlang:trunc(X),
-    case (X - T) of
-        Neg when Neg < 0 -> T;
-        Pos when Pos > 0 -> T + 1;
-        _ -> T
-    end.
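A short usage sketch of the API above (values illustrative; real salts come
from couch_uuids:random/0, and pbkdf2/3 returns a hex-encoded binary):

    %% Derive a SHA-1-sized key with 10000 iterations:
    Key = couch_passwords:pbkdf2(<<"secret">>, <<"salt">>, 10000),
    %% Compare in constant time to avoid timing attacks:
    true  = couch_passwords:verify(Key, Key),
    false = couch_passwords:verify(Key, <<"other">>).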

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/couch_primary_sup.erl b/couch_primary_sup.erl
deleted file mode 100644
index 150b92e..0000000
--- a/couch_primary_sup.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_primary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
-    supervisor:start_link({local,couch_primary_services}, ?MODULE, []).
-
-init([]) ->
-    Children = [
-        {collation_driver,
-            {couch_drv, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_drv]},
-        {couch_task_status,
-            {couch_task_status, start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_task_status]},
-        {couch_server,
-            {couch_server, sup_start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_server]},
-        {couch_db_update_event,
-            {gen_event, start_link, [{local, couch_db_update}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replication_event,
-            {gen_event, start_link, [{local, couch_replication}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replicator_job_sup,
-            {couch_replicator_job_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_replicator_job_sup]},
-        {couch_log,
-            {couch_log, start_link, []},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_log]}
-    ],
-    {ok, {{one_for_one, 10, 3600}, Children}}.
-
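For reference, each entry above is a standard OTP child specification,
{Id, {M, F, A}, Restart, Shutdown, Type, Modules}, and {one_for_one, 10,
3600} permits at most 10 restarts within 3600 seconds before the supervisor
itself gives up. A minimal template with placeholder names:

    {my_worker,                      % child id
     {my_worker, start_link, []},    % {M, F, A} used to start the child
     permanent,                      % always restart on exit
     brutal_kill,                    % terminate immediately on shutdown
     worker,                         % a worker, not a supervisor
     [my_worker]}                    % callback modules, for upgrades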

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_query_servers.erl
----------------------------------------------------------------------
diff --git a/couch_query_servers.erl b/couch_query_servers.erl
deleted file mode 100644
index 3b58cbe..0000000
--- a/couch_query_servers.erl
+++ /dev/null
@@ -1,616 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_query_servers).
--behaviour(gen_server).
-
--export([start_link/0, config_change/1]).
-
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
--export([start_doc_map/3, map_docs/2, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3,validate_doc_update/5]).
--export([filter_docs/5]).
--export([filter_view/3]).
-
--export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
-
-% For 210-os-proc-pool.t
--export([get_os_process/1, ret_os_process/1]).
-
--include("couch_db.hrl").
-
--record(proc, {
-    pid,
-    lang,
-    ddoc_keys = [],
-    prompt_fun,
-    set_timeout_fun,
-    stop_fun
-}).
-
--record(qserver, {
-    langs, % Keyed by language name, value is {Mod,Func,Arg}
-    pid_procs, % Keyed by PID, value is a #proc record.
-    lang_procs, % Keyed by language name, value is a #proc record
-    lang_limits, % Keyed by language name, value is {Lang, Limit, Current}
-    waitlist = [],
-    config
-}).
-
-start_link() ->
-    gen_server:start_link({local, couch_query_servers}, couch_query_servers, [], []).
-
-start_doc_map(Lang, Functions, Lib) ->
-    Proc = get_os_process(Lang),
-    case Lib of
-    {[]} -> ok;
-    Lib ->
-        true = proc_prompt(Proc, [<<"add_lib">>, Lib])
-    end,
-    lists:foreach(fun(FunctionSource) ->
-        true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
-    end, Functions),
-    {ok, Proc}.
-
-map_docs(Proc, Docs) ->
-    % send the documents
-    Results = lists:map(
-        fun(Doc) ->
-            Json = couch_doc:to_json_obj(Doc, []),
-
-            FunsResults = proc_prompt(Proc, [<<"map_doc">>, Json]),
-            % the results are a json array of function map yields like this:
-            % [FunResults1, FunResults2 ...]
-            % where each FunResults is a json array of key value pairs:
-            % [[Key1, Value1], [Key2, Value2]]
-            % Convert the key, value pairs to tuples like
-            % [{Key1, Value1}, {Key2, Value2}]
-            lists:map(
-                fun(FunRs) ->
-                    [list_to_tuple(FunResult) || FunResult <- FunRs]
-                end,
-            FunsResults)
-        end,
-        Docs),
-    {ok, Results}.
-
-map_doc_raw(Proc, Doc) ->
-    Json = couch_doc:to_json_obj(Doc, []),
-    {ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
-
-
-stop_doc_map(nil) ->
-    ok;
-stop_doc_map(Proc) ->
-    ok = ret_os_process(Proc).
-
-group_reductions_results([]) ->
-    [];
-group_reductions_results(List) ->
-    {Heads, Tails} = lists:foldl(
-        fun([H|T], {HAcc,TAcc}) ->
-            {[H|HAcc], [T|TAcc]}
-        end, {[], []}, List),
-    case Tails of
-    [[]|_] -> % no tails left
-        [Heads];
-    _ ->
-        [Heads | group_reductions_results(Tails)]
-    end.
-
-rereduce(_Lang, [], _ReducedValues) ->
-    {ok, []};
-rereduce(Lang, RedSrcs, ReducedValues) ->
-    Grouped = group_reductions_results(ReducedValues),
-    Results = lists:zipwith(
-        fun
-        (<<"_", _/binary>> = FunSrc, Values) ->
-            {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
-            Result;
-        (FunSrc, Values) ->
-            os_rereduce(Lang, [FunSrc], Values)
-        end, RedSrcs, Grouped),
-    {ok, Results}.
-
-reduce(_Lang, [], _KVs) ->
-    {ok, []};
-reduce(Lang, RedSrcs, KVs) ->
-    {OsRedSrcs, BuiltinReds} = lists:partition(fun
-        (<<"_", _/binary>>) -> false;
-        (_OsFun) -> true
-    end, RedSrcs),
-    {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
-    {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
-recombine_reduce_results([], [], [], Acc) ->
-    {ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
-recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
-    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
-
-os_reduce(_Lang, [], _KVs) ->
-    {ok, []};
-os_reduce(Lang, OsRedSrcs, KVs) ->
-    Proc = get_os_process(Lang),
-    OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
-        [true, Reductions] -> Reductions
-    after
-        ok = ret_os_process(Proc)
-    end,
-    {ok, OsResults}.
-
-os_rereduce(Lang, OsRedSrcs, KVs) ->
-    Proc = get_os_process(Lang),
-    try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
-        [true, [Reduction]] -> Reduction
-    after
-        ok = ret_os_process(Proc)
-    end.
-
-
-builtin_reduce(_Re, [], _KVs, Acc) ->
-    {ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Sum = builtin_sum_rows(KVs),
-    builtin_reduce(Re, BuiltinReds, KVs, [Sum|Acc]);
-builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Count = length(KVs),
-    builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Count = builtin_sum_rows(KVs),
-    builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
-    Stats = builtin_stats(Re, KVs),
-    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
-
-builtin_sum_rows(KVs) ->
-    lists:foldl(fun
-        ([_Key, Value], Acc) when is_number(Value), is_number(Acc) ->
-            Acc + Value;
-        ([_Key, Value], Acc) when is_list(Value), is_list(Acc) ->
-            sum_terms(Acc, Value);
-        ([_Key, Value], Acc) when is_number(Value), is_list(Acc) ->
-            sum_terms(Acc, [Value]);
-        ([_Key, Value], Acc) when is_list(Value), is_number(Acc) ->
-            sum_terms([Acc], Value);
-        (_Else, _Acc) ->
-            throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>})
-    end, 0, KVs).
-
-sum_terms([], []) ->
-    [];
-sum_terms([_|_]=Xs, []) ->
-    Xs;
-sum_terms([], [_|_]=Ys) ->
-    Ys;
-sum_terms([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
-    [X+Y | sum_terms(Xs,Ys)];
-sum_terms(_, _) ->
-    throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>}).
-
-builtin_stats(reduce, []) ->
-    {[]};
-builtin_stats(reduce, [[_,First]|Rest]) when is_number(First) ->
-    Stats = lists:foldl(fun([_K,V], {S,C,Mi,Ma,Sq}) when is_number(V) ->
-        {S+V, C+1, lists:min([Mi, V]), lists:max([Ma, V]), Sq+(V*V)};
-    (_, _) ->
-        throw({invalid_value,
-            <<"builtin _stats function requires map values to be numbers">>})
-    end, {First,1,First,First,First*First}, Rest),
-    {Sum, Cnt, Min, Max, Sqr} = Stats,
-    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]};
-
-builtin_stats(rereduce, [[_,First]|Rest]) ->
-    {[{sum,Sum0}, {count,Cnt0}, {min,Min0}, {max,Max0}, {sumsqr,Sqr0}]} = First,
-    Stats = lists:foldl(fun([_K,Red], {S,C,Mi,Ma,Sq}) ->
-        {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]} = Red,
-        {Sum+S, Cnt+C, lists:min([Min, Mi]), lists:max([Max, Ma]), Sqr+Sq}
-    end, {Sum0,Cnt0,Min0,Max0,Sqr0}, Rest),
-    {Sum, Cnt, Min, Max, Sqr} = Stats,
-    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]}.
-
-% use the function stored in ddoc.validate_doc_update to test an update.
-validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
-    JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
-    JsonDiskDoc = json_doc(DiskDoc),
-    case ddoc_prompt(DDoc, [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]) of
-        1 ->
-            ok;
-        {[{<<"forbidden">>, Message}]} ->
-            throw({forbidden, Message});
-        {[{<<"unauthorized">>, Message}]} ->
-            throw({unauthorized, Message})
-    end.
-
-json_doc(nil) -> null;
-json_doc(Doc) ->
-    couch_doc:to_json_obj(Doc, [revs]).
-
-filter_view(DDoc, VName, Docs) ->
-    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
-    [true, Passes] = ddoc_prompt(DDoc, [<<"views">>, VName, <<"map">>], [JsonDocs]),
-    {ok, Passes}.
-
-filter_docs(Req, Db, DDoc, FName, Docs) ->
-    JsonReq = case Req of
-    {json_req, JsonObj} ->
-        JsonObj;
-    #httpd{} = HttpReq ->
-        couch_httpd_external:json_req_obj(HttpReq, Db)
-    end,
-    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
-    [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
-        [JsonDocs, JsonReq]),
-    {ok, Passes}.
-
-ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
-    proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
-
-ddoc_prompt(DDoc, FunPath, Args) ->
-    with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
-        proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
-    end).
-
-with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
-    Rev = couch_doc:rev_to_str({Start, DiskRev}),
-    DDocKey = {DDocId, Rev},
-    Proc = get_ddoc_process(DDoc, DDocKey),
-    try Fun({Proc, DDocId})
-    after
-        ok = ret_os_process(Proc)
-    end.
-
-init([]) ->
-    % register async to avoid deadlock on restart_child
-    Self = self(),
-    spawn(couch_config, register, [fun ?MODULE:config_change/1, Self]),
-
-    Langs = ets:new(couch_query_server_langs, [set, private]),
-    LangLimits = ets:new(couch_query_server_lang_limits, [set, private]),
-    PidProcs = ets:new(couch_query_server_pid_langs, [set, private]),
-    LangProcs = ets:new(couch_query_server_procs, [set, private]),
-
-    ProcTimeout = list_to_integer(couch_config:get(
-                        "couchdb", "os_process_timeout", "5000")),
-    ReduceLimit = list_to_atom(
-        couch_config:get("query_server_config","reduce_limit","true")),
-    OsProcLimit = list_to_integer(
-        couch_config:get("query_server_config","os_process_limit","10")),
-
-    % 'query_servers' specifies an OS command-line to execute.
-    lists:foreach(fun({Lang, Command}) ->
-        true = ets:insert(LangLimits, {?l2b(Lang), OsProcLimit, 0}),
-        true = ets:insert(Langs, {?l2b(Lang),
-                          couch_os_process, start_link, [Command]})
-    end, couch_config:get("query_servers")),
-    % 'native_query_servers' specifies a {Module, Func, Arg} tuple.
-    lists:foreach(fun({Lang, SpecStr}) ->
-        {ok, {Mod, Fun, SpecArg}} = couch_util:parse_term(SpecStr),
-        true = ets:insert(LangLimits, {?l2b(Lang), 0, 0}), % 0 means no limit
-        true = ets:insert(Langs, {?l2b(Lang),
-                          Mod, Fun, SpecArg})
-    end, couch_config:get("native_query_servers")),
-
-
-    process_flag(trap_exit, true),
-    {ok, #qserver{
-        langs = Langs, % Keyed by language name, value is {Mod,Func,Arg}
-        pid_procs = PidProcs, % Keyed by PID, value is a #proc record.
-        lang_procs = LangProcs, % Keyed by language name, value is a #proc record
-        lang_limits = LangLimits, % Keyed by language name, value is {Lang, Limit, Current}
-        config = {[{<<"reduce_limit">>, ReduceLimit},{<<"timeout">>, ProcTimeout}]}
-    }}.
-
-terminate(_Reason, #qserver{pid_procs=PidProcs}) ->
-    [couch_util:shutdown_sync(P) || {P,_} <- ets:tab2list(PidProcs)],
-    ok.
-
-handle_call({get_proc, DDoc1, DDocKey}, From, Server) ->
-    #doc{body = {Props}} = DDoc = couch_doc:with_ejson_body(DDoc1),
-    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
-    case lang_proc(Lang, Server, fun(Procs) ->
-            % find a proc in the set that has the DDoc
-            proc_with_ddoc(DDoc, DDocKey, Procs)
-        end) of
-    {ok, Proc} ->
-        {reply, {ok, Proc, Server#qserver.config}, Server};
-    wait ->
-        {noreply, add_to_waitlist({DDoc, DDocKey}, From, Server)};
-    Error ->
-        {reply, Error, Server}
-    end;
-handle_call({get_proc, Lang}, From, Server) ->
-    case lang_proc(Lang, Server, fun([P|_Procs]) ->
-            {ok, P}
-        end) of
-    {ok, Proc} ->
-        {reply, {ok, Proc, Server#qserver.config}, Server};
-    wait ->
-        {noreply, add_to_waitlist({Lang}, From, Server)};
-    Error ->
-        {reply, Error, Server}
-    end;
-handle_call({unlink_proc, Pid}, _From, Server) ->
-    unlink(Pid),
-    {reply, ok, Server};
-handle_call({ret_proc, Proc}, _From, #qserver{
-        pid_procs=PidProcs,
-        lang_procs=LangProcs}=Server) ->
-    % Along with the max process limit, we should check here
-    % whether we're over the limit and discard the proc when we are.
-    case is_process_alive(Proc#proc.pid) of
-        true ->
-            add_value(PidProcs, Proc#proc.pid, Proc),
-            add_to_list(LangProcs, Proc#proc.lang, Proc),
-            link(Proc#proc.pid);
-        false ->
-            ok
-    end,
-    {reply, true, service_waitlist(Server)}.
-
-handle_cast(_Whatever, Server) ->
-    {noreply, Server}.
-
-handle_info({'EXIT', _, _}, Server) ->
-    {noreply, Server};
-handle_info({'DOWN', _, process, Pid, Status}, #qserver{
-        pid_procs=PidProcs,
-        lang_procs=LangProcs,
-        lang_limits=LangLimits}=Server) ->
-    case ets:lookup(PidProcs, Pid) of
-    [{Pid, Proc}] ->
-        case Status of
-        normal -> ok;
-        _ -> ?LOG_DEBUG("Linked process died abnormally: ~p (reason: ~p)", [Pid, Status])
-        end,
-        rem_value(PidProcs, Pid),
-        catch rem_from_list(LangProcs, Proc#proc.lang, Proc),
-        [{Lang, Lim, Current}] = ets:lookup(LangLimits, Proc#proc.lang),
-        true = ets:insert(LangLimits, {Lang, Lim, Current-1}),
-        {noreply, service_waitlist(Server)};
-    [] ->
-        case Status of
-        normal ->
-            {noreply, Server};
-        _ ->
-            {stop, Status, Server}
-        end
-    end.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-config_change("query_servers") ->
-    supervisor:terminate_child(couch_secondary_services, query_servers),
-    supervisor:restart_child(couch_secondary_services, query_servers);
-config_change("native_query_servers") ->
-    supervisor:terminate_child(couch_secondary_services, query_servers),
-    supervisor:restart_child(couch_secondary_services, query_servers);
-config_change("query_server_config") ->
-    supervisor:terminate_child(couch_secondary_services, query_servers),
-    supervisor:restart_child(couch_secondary_services, query_servers).
-
-% Private API
-
-add_to_waitlist(Info, From, #qserver{waitlist=Waitlist}=Server) ->
-    Server#qserver{waitlist=[{Info, From}|Waitlist]}.
-
-service_waitlist(#qserver{waitlist=[]}=Server) ->
-    Server;
-service_waitlist(#qserver{waitlist=Waitlist}=Server) ->
-    [Oldest|RevWList] = lists:reverse(Waitlist),
-    case service_waiting(Oldest, Server) of
-    ok ->
-        Server#qserver{waitlist=lists:reverse(RevWList)};
-    wait ->
-        Server#qserver{waitlist=Waitlist}
-    end.
-
-% todo get rid of duplication
-service_waiting({{#doc{body={Props}}=DDoc, DDocKey}, From}, Server) ->
-    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
-    case lang_proc(Lang, Server, fun(Procs) ->
-            % find a proc in the set that has the DDoc
-            proc_with_ddoc(DDoc, DDocKey, Procs)
-        end) of
-    {ok, Proc} ->
-        gen_server:reply(From, {ok, Proc, Server#qserver.config}),
-        ok;
-    wait -> % this should never happen
-        wait;
-    Error ->
-        gen_server:reply(From, Error),
-        ok
-    end;
-service_waiting({{Lang}, From}, Server) ->
-    case lang_proc(Lang, Server, fun([P|_Procs]) ->
-            {ok, P}
-        end) of
-    {ok, Proc} ->
-        gen_server:reply(From, {ok, Proc, Server#qserver.config}),
-        ok;
-    wait -> % this should never happen
-        wait;
-    Error ->
-        gen_server:reply(From, Error),
-        ok
-    end.
-
-lang_proc(Lang, #qserver{
-        langs=Langs,
-        pid_procs=PidProcs,
-        lang_procs=LangProcs,
-        lang_limits=LangLimits}, PickFun) ->
-    % Note to future self. Add max process limit.
-    case ets:lookup(LangProcs, Lang) of
-    [{Lang, [P|Procs]}] ->
-        {ok, Proc} = PickFun([P|Procs]),
-        rem_from_list(LangProcs, Lang, Proc),
-        {ok, Proc};
-    _ ->
-        case (catch new_process(Langs, LangLimits, Lang)) of
-        {ok, Proc} ->
-            add_value(PidProcs, Proc#proc.pid, Proc),
-            PickFun([Proc]);
-        ErrorOrWait ->
-            ErrorOrWait
-        end
-    end.
-
-new_process(Langs, LangLimits, Lang) ->
-    [{Lang, Lim, Current}] = ets:lookup(LangLimits, Lang),
-    if (Lim == 0) or (Current < Lim) -> % Lim == 0 means no limit
-        % we are below the limit for our language, make a new one
-        case ets:lookup(Langs, Lang) of
-        [{Lang, Mod, Func, Arg}] ->
-            {ok, Pid} = apply(Mod, Func, Arg),
-            erlang:monitor(process, Pid),
-            true = ets:insert(LangLimits, {Lang, Lim, Current+1}),
-            {ok, #proc{lang=Lang,
-                       pid=Pid,
-                       % Called via proc_prompt, proc_set_timeout, and proc_stop
-                       prompt_fun={Mod, prompt},
-                       set_timeout_fun={Mod, set_timeout},
-                       stop_fun={Mod, stop}}};
-        _ ->
-            {unknown_query_language, Lang}
-        end;
-    true ->
-        wait
-    end.
-
-proc_with_ddoc(DDoc, DDocKey, LangProcs) ->
-    DDocProcs = lists:filter(fun(#proc{ddoc_keys=Keys}) ->
-            lists:any(fun(Key) ->
-                Key == DDocKey
-            end, Keys)
-        end, LangProcs),
-    case DDocProcs of
-        [DDocProc|_] ->
-            ?LOG_DEBUG("DDocProc found for DDocKey: ~p",[DDocKey]),
-            {ok, DDocProc};
-        [] ->
-            [TeachProc|_] = LangProcs,
-            ?LOG_DEBUG("Teach ddoc to new proc ~p with DDocKey: ~p",[TeachProc, DDocKey]),
-            {ok, SmartProc} = teach_ddoc(DDoc, DDocKey, TeachProc),
-            {ok, SmartProc}
-    end.
-
-proc_prompt(Proc, Args) ->
-     case proc_prompt_raw(Proc, Args) of
-     {json, Json} ->
-         ?JSON_DECODE(Json);
-     EJson ->
-         EJson
-     end.
-
-proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
-    apply(Mod, Func, [Proc#proc.pid, Args]).
-
-raw_to_ejson({json, Json}) ->
-    ?JSON_DECODE(Json);
-raw_to_ejson(EJson) ->
-    EJson.
-
-proc_stop(Proc) ->
-    {Mod, Func} = Proc#proc.stop_fun,
-    apply(Mod, Func, [Proc#proc.pid]).
-
-proc_set_timeout(Proc, Timeout) ->
-    {Mod, Func} = Proc#proc.set_timeout_fun,
-    apply(Mod, Func, [Proc#proc.pid, Timeout]).
-
-teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc{ddoc_keys=Keys}=Proc) ->
-    % send the ddoc over the wire
-    % we share the rev so the client knows when to update its code,
-    % but it only keeps the latest copy of each ddoc around.
-    true = proc_prompt(Proc, [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
-    % we should remove any other ddocs keys for this docid
-    % because the query server overwrites without the rev
-    Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
-    % add ddoc to the proc
-    {ok, Proc#proc{ddoc_keys=[DDocKey|Keys2]}}.
-
-get_ddoc_process(#doc{} = DDoc, DDocKey) ->
-    % remove this case statement
-    case gen_server:call(couch_query_servers, {get_proc, DDoc, DDocKey}, infinity) of
-    {ok, Proc, {QueryConfig}} ->
-        % process knows the ddoc
-        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
-        true ->
-            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
-            link(Proc#proc.pid),
-            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}, infinity),
-            Proc;
-        _ ->
-            catch proc_stop(Proc),
-            get_ddoc_process(DDoc, DDocKey)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-get_os_process(Lang) ->
-    case gen_server:call(couch_query_servers, {get_proc, Lang}, infinity) of
-    {ok, Proc, {QueryConfig}} ->
-        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
-        true ->
-            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
-            link(Proc#proc.pid),
-            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}, infinity),
-            Proc;
-        _ ->
-            catch proc_stop(Proc),
-            get_os_process(Lang)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-ret_os_process(Proc) ->
-    true = gen_server:call(couch_query_servers, {ret_proc, Proc}, infinity),
-    catch unlink(Proc#proc.pid),
-    ok.
-
-add_value(Tid, Key, Value) ->
-    true = ets:insert(Tid, {Key, Value}).
-
-rem_value(Tid, Key) ->
-    true = ets:delete(Tid, Key).
-
-add_to_list(Tid, Key, Value) ->
-    case ets:lookup(Tid, Key) of
-    [{Key, Vals}] ->
-        true = ets:insert(Tid, {Key, [Value|Vals]});
-    [] ->
-        true = ets:insert(Tid, {Key, [Value]})
-    end.
-
-rem_from_list(Tid, Key, Value) when is_record(Value, proc)->
-    Pid = Value#proc.pid,
-    case ets:lookup(Tid, Key) of
-    [{Key, Vals}] ->
-        % make a new values list that doesn't include the Value arg
-        NewValues = [Val || #proc{pid=P}=Val <- Vals, P /= Pid],
-        ets:insert(Tid, {Key, NewValues});
-    [] -> ok
-    end;
-rem_from_list(Tid, Key, Value) ->
-    case ets:lookup(Tid, Key) of
-    [{Key, Vals}] ->
-        % make a new values list that doesn't include the Value arg
-        NewValues = [Val || Val <- Vals, Val /= Value],
-        ets:insert(Tid, {Key, NewValues});
-    [] -> ok
-    end.
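The built-in reducers above run in Erlang and never touch an OS process. A
small worked sketch of reduce/3 with builtins only, where KVs has the
[[Key, Value], ...] shape produced by map:

    KVs = [[<<"a">>, 1], [<<"b">>, 2], [<<"c">>, 3]],
    %% _sum folds the numeric values and _count counts the rows; both are
    %% handled by builtin_reduce/4, so os_reduce/3 has nothing to do here.
    {ok, [6, 3]} = couch_query_servers:reduce(
        <<"javascript">>, [<<"_sum">>, <<"_count">>], KVs).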

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_ref_counter.erl
----------------------------------------------------------------------
diff --git a/couch_ref_counter.erl b/couch_ref_counter.erl
deleted file mode 100644
index a774f46..0000000
--- a/couch_ref_counter.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ref_counter).
--behaviour(gen_server).
-
--export([start/1, init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
--export([drop/1,drop/2,add/1,add/2,count/1]).
-
-start(ChildProcs) ->
-    gen_server:start(couch_ref_counter, {self(), ChildProcs}, []).
-
-
-drop(RefCounterPid) ->
-    drop(RefCounterPid, self()).
-
-drop(RefCounterPid, Pid) ->
-    gen_server:call(RefCounterPid, {drop, Pid}, infinity).
-
-
-add(RefCounterPid) ->
-    add(RefCounterPid, self()).
-
-add(RefCounterPid, Pid) ->
-    gen_server:call(RefCounterPid, {add, Pid}, infinity).
-
-count(RefCounterPid) ->
-    gen_server:call(RefCounterPid, count).
-
-% server functions
-
--record(srv,
-    {
-    referrers=dict:new(), % maps referrer pid -> {MonitorRef, RefCount}
-    child_procs=[]
-    }).
-
-init({Pid, ChildProcs}) ->
-    [link(ChildProc) || ChildProc <- ChildProcs],
-    Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]),
-    {ok, #srv{referrers=Referrers, child_procs=ChildProcs}}.
-
-
-terminate(_Reason, #srv{child_procs=ChildProcs}) ->
-    [couch_util:shutdown_sync(Pid) || Pid <- ChildProcs],
-    ok.
-
-
-handle_call({add, Pid},_From, #srv{referrers=Referrers}=Srv) ->
-    Referrers2 =
-    case dict:find(Pid, Referrers) of
-    error ->
-        dict:store(Pid, {erlang:monitor(process, Pid), 1}, Referrers);
-    {ok, {MonRef, RefCnt}} ->
-        dict:store(Pid, {MonRef, RefCnt + 1}, Referrers)
-    end,
-    {reply, ok, Srv#srv{referrers=Referrers2}};
-handle_call(count, _From, Srv) ->
-    {monitors, Monitors} =  process_info(self(), monitors),
-    {reply, length(Monitors), Srv};
-handle_call({drop, Pid}, _From, #srv{referrers=Referrers}=Srv) ->
-    Referrers2 =
-    case dict:find(Pid, Referrers) of
-    {ok, {MonRef, 1}} ->
-        erlang:demonitor(MonRef, [flush]),
-        dict:erase(Pid, Referrers);
-    {ok, {MonRef, Num}} ->
-        dict:store(Pid, {MonRef, Num-1}, Referrers);
-    error ->
-        Referrers
-    end,
-    Srv2 = Srv#srv{referrers=Referrers2},
-    case should_close() of
-    true ->
-        {stop,normal,ok,Srv2};
-    false ->
-        {reply, ok, Srv2}
-    end.
-
-handle_cast(Msg, _Srv)->
-    exit({unknown_msg,Msg}).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info({'DOWN', MonRef, _, Pid, _}, #srv{referrers=Referrers}=Srv) ->
-    {ok, {MonRef, _RefCount}} = dict:find(Pid, Referrers),
-    Srv2 = Srv#srv{referrers=dict:erase(Pid, Referrers)},
-    case should_close() of
-    true ->
-        {stop,normal,Srv2};
-    false ->
-        {noreply,Srv2}
-    end.
-
-
-should_close() ->
-    case process_info(self(), monitors) of
-    {monitors, []} ->   true;
-    _ ->                false
-    end.
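A usage sketch of the ref counter above; count/1 reports the number of live
monitors (one per referring pid), and the server exits normally once the
last referrer drops or goes down:

    {ok, RC} = couch_ref_counter:start([]),  % caller starts at refcount 1
    ok = couch_ref_counter:add(RC),          % same pid again: refcount 2
    1  = couch_ref_counter:count(RC),        % still one monitored pid
    ok = couch_ref_counter:drop(RC),         % back down to refcount 1
    ok = couch_ref_counter:drop(RC).         % last ref gone, server stops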

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_secondary_sup.erl
----------------------------------------------------------------------
diff --git a/couch_secondary_sup.erl b/couch_secondary_sup.erl
deleted file mode 100644
index 6dd5604..0000000
--- a/couch_secondary_sup.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_secondary_sup).
--behaviour(supervisor).
--export([init/1, start_link/0]).
-
-start_link() ->
-    supervisor:start_link({local,couch_secondary_services}, ?MODULE, []).
-
-init([]) ->
-    SecondarySupervisors = [
-        {couch_db_update_notifier_sup,
-            {couch_db_update_notifier_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_db_update_notifier_sup]},
-
-        {couch_plugin_event,
-            {gen_event, start_link, [{local, couch_plugin}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic}
-    ],
-    Children = SecondarySupervisors ++ [
-        begin
-            {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
-
-            {list_to_atom(Name),
-                {Module, Fun, Args},
-                permanent,
-                brutal_kill,
-                worker,
-                [Module]}
-        end
-        || {Name, SpecStr}
-        <- couch_config:get("daemons"), SpecStr /= ""],
-    {ok, {{one_for_one, 10, 3600}, Children}}.


[03/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_native_process.erl
----------------------------------------------------------------------
diff --git a/src/couch_native_process.erl b/src/couch_native_process.erl
new file mode 100644
index 0000000..5a32e75
--- /dev/null
+++ b/src/couch_native_process.erl
@@ -0,0 +1,409 @@
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+%
+% You may obtain a copy of the License at
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing,
+% software distributed under the License is distributed on an
+% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+% either express or implied.
+%
+% See the License for the specific language governing permissions
+% and limitations under the License.
+%
+% This file drew much inspiration from erlview, which was written by and
+% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
+%
+%
+% This module provides the smallest possible native view-server.
+% With this module in place, you can add the following to your couch INI files:
+%  [native_query_servers]
+%  erlang={couch_native_process, start_link, []}
+%
+% This will then allow the following example map function to be used:
+%
+%  fun({Doc}) ->
+%    % Below, we emit a single record - the _id as key, null as value
+%    DocId = couch_util:get_value(<<"_id">>, Doc, null),
+%    Emit(DocId, null)
+%  end.
+%
+% which should be roughly the same as the javascript:
+%    emit(doc._id, null);
+%
+% This module exposes enough functions such that a native erlang server can
+% act as a fully-fledged view server, but no 'helper' functions specifically
+% for simplifying your erlang view code. It is expected that other third-party
+% extensions will evolve that offer useful layers on top of this view server
+% to help simplify your view code.
+-module(couch_native_process).
+-behaviour(gen_server).
+
+-export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
+         handle_info/2]).
+-export([set_timeout/2, prompt/2]).
+
+-define(STATE, native_proc_state).
+-record(evstate, {ddocs, funs=[], query_config=[], list_pid=nil, timeout=5000}).
+
+-include("couch_db.hrl").
+
+start_link() ->
+    gen_server:start_link(?MODULE, [], []).
+
+% this is a bit messy, see also couch_query_servers handle_info
+% stop(_Pid) ->
+%     ok.
+
+set_timeout(Pid, TimeOut) ->
+    gen_server:call(Pid, {set_timeout, TimeOut}).
+
+prompt(Pid, Data) when is_list(Data) ->
+    gen_server:call(Pid, {prompt, Data}).
+
+% gen_server callbacks
+init([]) ->
+    {ok, #evstate{ddocs=dict:new()}}.
+
+handle_call({set_timeout, TimeOut}, _From, State) ->
+    {reply, ok, State#evstate{timeout=TimeOut}};
+
+handle_call({prompt, Data}, _From, State) ->
+    ?LOG_DEBUG("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
+    {NewState, Resp} = try run(State, to_binary(Data)) of
+        {S, R} -> {S, R}
+        catch
+            throw:{error, Why} ->
+                {State, [<<"error">>, Why, Why]}
+        end,
+
+    case Resp of
+        {error, Reason} ->
+            Msg = io_lib:format("couch native server error: ~p", [Reason]),
+            {reply, [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)], NewState};
+        [<<"error">> | Rest] ->
+            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+            % TODO: markh? (jan)
+            {reply, [<<"error">> | Rest], NewState};
+        [<<"fatal">> | Rest] ->
+            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+            % TODO: markh? (jan)
+            {stop, fatal, [<<"error">> | Rest], NewState};
+        Resp ->
+            {reply, Resp, NewState}
+    end.
+
+handle_cast(foo, State) -> {noreply, State}.
+handle_info({'EXIT',_,normal}, State) -> {noreply, State};
+handle_info({'EXIT',_,Reason}, State) ->
+    {stop, Reason, State}.
+terminate(_Reason, _State) -> ok.
+code_change(_OldVersion, State, _Extra) -> {ok, State}.
+
+run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+    Pid ! {self(), list_row, Row},
+    receive
+        {Pid, chunks, Data} ->
+            {State, [<<"chunks">>, Data]};
+        {Pid, list_end, Data} ->
+            receive
+                {'EXIT', Pid, normal} -> ok
+            after State#evstate.timeout ->
+                throw({timeout, list_cleanup})
+            end,
+            process_flag(trap_exit, erlang:get(do_trap)),
+            {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+    after State#evstate.timeout ->
+        throw({timeout, list_row})
+    end;
+run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+    Pid ! {self(), list_end},
+    Resp =
+    receive
+        {Pid, list_end, Data} ->
+            receive
+                {'EXIT', Pid, normal} -> ok
+            after State#evstate.timeout ->
+                throw({timeout, list_cleanup})
+            end,
+            [<<"end">>, Data]
+    after State#evstate.timeout ->
+        throw({timeout, list_end})
+    end,
+    process_flag(trap_exit, erlang:get(do_trap)),
+    {State#evstate{list_pid=nil}, Resp};
+run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+    {State, [<<"error">>, list_error, list_error]};
+run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
+    {#evstate{ddocs=DDocs}, true};
+run(#evstate{ddocs=DDocs}, [<<"reset">>, QueryConfig]) ->
+    {#evstate{ddocs=DDocs, query_config=QueryConfig}, true};
+run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+    FunInfo = makefun(State, BinFunc),
+    {State#evstate{funs=Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">> , Doc]) ->
+    Resp = lists:map(fun({Sig, Fun}) ->
+        erlang:put(Sig, []),
+        Fun(Doc),
+        lists:reverse(erlang:get(Sig))
+    end, State#evstate.funs),
+    {State, Resp};
+run(State, [<<"reduce">>, Funs, KVs]) ->
+    {Keys, Vals} =
+    lists:foldl(fun([K, V], {KAcc, VAcc}) ->
+        {[K | KAcc], [V | VAcc]}
+    end, {[], []}, KVs),
+    Keys2 = lists:reverse(Keys),
+    Vals2 = lists:reverse(Vals),
+    {State, catch reduce(State, Funs, Keys2, Vals2, false)};
+run(State, [<<"rereduce">>, Funs, Vals]) ->
+    {State, catch reduce(State, Funs, null, Vals, true)};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+    DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
+    {State#evstate{ddocs=DDocs2}, true};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+    DDoc = load_ddoc(DDocs, DDocId),
+    ddoc(State, DDoc, Rest);
+run(_, Unknown) ->
+    ?LOG_ERROR("Native Process: Unknown command: ~p~n", [Unknown]),
+    throw({error, unknown_command}).
+    
+ddoc(State, {DDoc}, [FunPath, Args]) ->
+    % load fun from the FunPath
+    BFun = lists:foldl(fun
+        (Key, {Props}) when is_list(Props) ->
+            couch_util:get_value(Key, Props, nil);
+        (_Key, Fun) when is_binary(Fun) ->
+            Fun;
+        (_Key, nil) ->
+            throw({error, not_found});
+        (_Key, _Fun) ->
+            throw({error, malformed_ddoc})
+        end, {DDoc}, FunPath),
+    ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
+
+ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
+    {State, (catch apply(Fun, Args))};
+ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+    FilterFunWrapper = fun(Doc) ->
+        case catch Fun(Doc, Req) of
+        true -> true;
+        false -> false;
+        {'EXIT', Error} -> ?LOG_ERROR("~p", [Error])
+        end
+    end,
+    Resp = lists:map(FilterFunWrapper, Docs),
+    {State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
+    Resp = case (catch apply(Fun, Args)) of
+        FunResp when is_list(FunResp) ->
+            FunResp;
+        {FunResp} ->
+            [<<"resp">>, {FunResp}];
+        FunResp ->
+            FunResp
+    end,
+    {State, Resp};
+ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
+    Resp = case (catch apply(Fun, Args)) of
+        [JsonDoc, JsonResp]  ->
+            [<<"up">>, JsonDoc, JsonResp]
+    end,
+    {State, Resp};
+ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+    Self = self(),
+    SpawnFun = fun() ->
+        LastChunk = (catch apply(Fun, Args)),
+        case start_list_resp(Self, Sig) of
+            started ->
+                receive
+                    {Self, list_row, _Row} -> ignore;
+                    {Self, list_end} -> ignore
+                after State#evstate.timeout ->
+                    throw({timeout, list_cleanup_pid})
+                end;
+            _ ->
+                ok
+        end,
+        LastChunks =
+        case erlang:get(Sig) of
+            undefined -> [LastChunk];
+            OtherChunks -> [LastChunk | OtherChunks]
+        end,
+        Self ! {self(), list_end, lists:reverse(LastChunks)}
+    end,
+    erlang:put(do_trap, process_flag(trap_exit, true)),
+    Pid = spawn_link(SpawnFun),
+    Resp =
+    receive
+        {Pid, start, Chunks, JsonResp} ->
+            [<<"start">>, Chunks, JsonResp]
+    after State#evstate.timeout ->
+        throw({timeout, list_start})
+    end,
+    {State#evstate{list_pid=Pid}, Resp}.
+
+store_ddoc(DDocs, DDocId, DDoc) ->
+    dict:store(DDocId, DDoc, DDocs).
+load_ddoc(DDocs, DDocId) ->
+    try dict:fetch(DDocId, DDocs) of
+        {DDoc} -> {DDoc}
+    catch
+        _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+    end.
+
+bindings(State, Sig) ->
+    bindings(State, Sig, nil).
+bindings(State, Sig, DDoc) ->
+    Self = self(),
+
+    Log = fun(Msg) ->
+        ?LOG_INFO(Msg, [])
+    end,
+
+    Emit = fun(Id, Value) ->
+        Curr = erlang:get(Sig),
+        erlang:put(Sig, [[Id, Value] | Curr])
+    end,
+
+    Start = fun(Headers) ->
+        erlang:put(list_headers, Headers)
+    end,
+
+    Send = fun(Chunk) ->
+        Curr =
+        case erlang:get(Sig) of
+            undefined -> [];
+            Else -> Else
+        end,
+        erlang:put(Sig, [Chunk | Curr])
+    end,
+
+    GetRow = fun() ->
+        case start_list_resp(Self, Sig) of
+            started ->
+                ok;
+            _ ->
+                Chunks =
+                case erlang:get(Sig) of
+                    undefined -> [];
+                    CurrChunks -> CurrChunks
+                end,
+                Self ! {self(), chunks, lists:reverse(Chunks)}
+        end,
+        erlang:put(Sig, []),
+        receive
+            {Self, list_row, Row} -> Row;
+            {Self, list_end} -> nil
+        after State#evstate.timeout ->
+            throw({timeout, list_pid_getrow})
+        end
+    end,
+   
+    FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
+
+    Bindings = [
+        {'Log', Log},
+        {'Emit', Emit},
+        {'Start', Start},
+        {'Send', Send},
+        {'GetRow', GetRow},
+        {'FoldRows', FoldRows}
+    ],
+    case DDoc of
+        {_Props} ->
+            Bindings ++ [{'DDoc', DDoc}];
+        _Else -> Bindings
+    end.
+
+% thanks to erlview, via:
+% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
+makefun(State, Source) ->
+    Sig = couch_util:md5(Source),
+    BindFuns = bindings(State, Sig),
+    {Sig, makefun(State, Source, BindFuns)}.
+makefun(State, Source, {DDoc}) ->
+    Sig = couch_util:md5(lists:flatten([Source, term_to_binary(DDoc)])),
+    BindFuns = bindings(State, Sig, {DDoc}),
+    {Sig, makefun(State, Source, BindFuns)};
+makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
+    FunStr = binary_to_list(Source),
+    {ok, Tokens, _} = erl_scan:string(FunStr),
+    Form = case (catch erl_parse:parse_exprs(Tokens)) of
+        {ok, [ParsedForm]} ->
+            ParsedForm;
+        {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
+            io:format(standard_error, "Syntax error on line: ~p~n", [LineNum]),
+            io:format(standard_error, "~s~p~n", [Mesg, Params]),
+            throw(Error)
+    end,
+    Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
+        erl_eval:add_binding(Name, Fun, Acc)
+    end, erl_eval:new_bindings(), BindFuns),
+    {value, Fun, _} = erl_eval:expr(Form, Bindings),
+    Fun.
+
+reduce(State, BinFuns, Keys, Vals, ReReduce) ->
+    Funs = case is_list(BinFuns) of
+        true ->
+            lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+        _ ->
+            [makefun(State, BinFuns)]
+    end,
+    Reds = lists:map(fun({_Sig, Fun}) ->
+        Fun(Keys, Vals, ReReduce)
+    end, Funs),
+    [true, Reds].
+
+foldrows(GetRow, ProcRow, Acc) ->
+    case GetRow() of
+        nil ->
+            {ok, Acc};
+        Row ->
+            case (catch ProcRow(Row, Acc)) of
+                {ok, Acc2} ->
+                    foldrows(GetRow, ProcRow, Acc2);
+                {stop, Acc2} ->
+                    {ok, Acc2}
+            end
+    end.
+
+start_list_resp(Self, Sig) ->
+    case erlang:get(list_started) of
+        undefined ->
+            Headers =
+            case erlang:get(list_headers) of
+                undefined -> {[{<<"headers">>, {[]}}]};
+                CurrHdrs -> CurrHdrs
+            end,
+            Chunks =
+            case erlang:get(Sig) of
+                undefined -> [];
+                CurrChunks -> CurrChunks
+            end,
+            Self ! {self(), start, lists:reverse(Chunks), Headers},
+            erlang:put(list_started, true),
+            erlang:put(Sig, []),
+            started;
+        _ ->
+            ok
+    end.
+
+to_binary({Data}) ->
+    Pred = fun({Key, Value}) ->
+        {to_binary(Key), to_binary(Value)}
+    end,
+    {lists:map(Pred, Data)};
+to_binary(Data) when is_list(Data) ->
+    [to_binary(D) || D <- Data];
+to_binary(null) ->
+    null;
+to_binary(true) ->
+    true;
+to_binary(false) ->
+    false;
+to_binary(Data) when is_atom(Data) ->
+    list_to_binary(atom_to_list(Data));
+to_binary(Data) ->
+    Data.
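In the same spirit as the map example in the module header, a reduce
function for this native server is a source string that parses to a
three-argument fun; run/2 compiles it via makefun/2 and reduce/5 calls it
as Fun(Keys, Values, ReReduce). An illustrative sketch:

    fun(_Keys, Values, _ReReduce) ->
        % lists:sum/1 works for both phases: plain values on reduce,
        % partial sums on rereduce.
        lists:sum(Values)
    end.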

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_os_daemons.erl
----------------------------------------------------------------------
diff --git a/src/couch_os_daemons.erl b/src/couch_os_daemons.erl
new file mode 100644
index 0000000..cac031a
--- /dev/null
+++ b/src/couch_os_daemons.erl
@@ -0,0 +1,374 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_os_daemons).
+-behaviour(gen_server).
+
+-export([start_link/0, info/0, info/1, config_change/2]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-include("couch_db.hrl").
+
+-record(daemon, {
+    port,
+    name,
+    cmd,
+    kill,
+    status=running,
+    cfg_patterns=[],
+    errors=[],
+    buf=[]
+}).
+
+-define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
+-define(TIMEOUT, 5000).
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+info() ->
+    info([]).
+
+info(Options) ->
+    gen_server:call(?MODULE, {daemon_info, Options}).
+
+config_change(Section, Key) ->
+    gen_server:cast(?MODULE, {config_change, Section, Key}).
+
+init(_) ->
+    process_flag(trap_exit, true),
+    ok = couch_config:register(fun ?MODULE:config_change/2),
+    Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
+    reload_daemons(Table),
+    {ok, Table}.
+
+terminate(_Reason, Table) ->
+    [stop_port(D) || D <- ets:tab2list(Table)],
+    ok.
+
+handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
+    case lists:member(table, Options) of
+        true ->
+            {reply, {ok, ets:tab2list(Table)}, Table};
+        _ ->
+            {reply, {ok, Table}, Table}
+    end;
+handle_call(Msg, From, Table) ->
+    ?LOG_ERROR("Unknown call message to ~p from ~p: ~p", [?MODULE, From, Msg]),
+    {stop, error, Table}.
+
+handle_cast({config_change, Sect, Key}, Table) ->
+    restart_daemons(Table, Sect, Key),
+    case Sect of
+        "os_daemons" -> reload_daemons(Table);
+        _ -> ok
+    end,
+    {noreply, Table};
+handle_cast(stop, Table) ->
+    {stop, normal, Table};
+handle_cast(Msg, Table) ->
+    ?LOG_ERROR("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
+    {stop, error, Table}.
+
+handle_info({'EXIT', Port, Reason}, Table) ->
+    case ets:lookup(Table, Port) of
+        [] ->
+            ?LOG_INFO("Port ~p exited after stopping: ~p~n", [Port, Reason]);
+        [#daemon{status=stopping}] ->
+            true = ets:delete(Table, Port);
+        [#daemon{name=Name, status=restarting}=D] ->
+            ?LOG_INFO("Daemon ~P restarting after config change.", [Name]),
+            true = ets:delete(Table, Port),
+            {ok, Port2} = start_port(D#daemon.cmd),
+            true = ets:insert(Table, D#daemon{
+                port=Port2, status=running, kill=undefined, buf=[]
+            });
+        [#daemon{name=Name, status=halted}] ->
+            ?LOG_ERROR("Halted daemon process: ~p", [Name]);
+        [D] ->
+            ?LOG_ERROR("Invalid port state at exit: ~p", [D])
+    end,
+    {noreply, Table};
+handle_info({Port, closed}, Table) ->
+    handle_info({Port, {exit_status, closed}}, Table);
+handle_info({Port, {exit_status, Status}}, Table) ->
+    case ets:lookup(Table, Port) of
+        [] ->
+            ?LOG_ERROR("Unknown port ~p exiting ~p", [Port, Status]),
+            {stop, {error, unknown_port_died, Status}, Table};
+        [#daemon{name=Name, status=restarting}=D] ->
+            ?LOG_INFO("Daemon ~P restarting after config change.", [Name]),
+            true = ets:delete(Table, Port),
+            {ok, Port2} = start_port(D#daemon.cmd),
+            true = ets:insert(Table, D#daemon{
+                port=Port2, status=running, kill=undefined, buf=[]
+            }),
+            {noreply, Table};
+        [#daemon{status=stopping}=D] ->
+            % The configuration changed and this daemon is no
+            % longer needed.
+            ?LOG_DEBUG("Port ~p shut down.", [D#daemon.name]),
+            true = ets:delete(Table, Port),
+            {noreply, Table};
+        [D] ->
+            % Port died for an unknown reason. Check whether it has
+            % died too many times or whether we should boot it back up.
+            case should_halt([now() | D#daemon.errors]) of
+                {true, _} ->
+                    % Halting the process. We won't try to reboot it
+                    % until the configuration changes.
+                    Fmt = "Daemon ~p halted with exit_status ~p",
+                    ?LOG_ERROR(Fmt, [D#daemon.name, Status]),
+                    D2 = D#daemon{status=halted, errors=nil, buf=nil},
+                    true = ets:insert(Table, D2),
+                    {noreply, Table};
+                {false, Errors} ->
+                    % We're guessing it was a transient error; this daemon
+                    % has behaved well so far, so we'll give it another chance.
+                    Fmt = "Daemon ~p is being rebooted after exit_status ~p",
+                    ?LOG_INFO(Fmt, [D#daemon.name, Status]),
+                    true = ets:delete(Table, Port),
+                    {ok, Port2} = start_port(D#daemon.cmd),
+                    true = ets:insert(Table, D#daemon{
+                        port=Port2, status=running, kill=undefined,
+                        errors=Errors, buf=[]
+                    }),
+                    {noreply, Table}
+            end;
+        _Else ->
+            throw(error)
+    end;
+handle_info({Port, {data, {noeol, Data}}}, Table) ->
+    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
+    true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
+    {noreply, Table};
+handle_info({Port, {data, {eol, Data}}}, Table) ->
+    [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
+    Line = lists:reverse(Buf, Data),
+    % The first line echoed back is the kill command
+    % for when we go to get rid of the port. Lines after
+    % that are considered part of the stdio API.
+    case D#daemon.kill of
+        undefined ->
+            true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
+        _Else ->
+            D2 = case (catch ?JSON_DECODE(Line)) of
+                {invalid_json, Rejected} ->
+                    ?LOG_ERROR("Ignoring OS daemon request: ~p", [Rejected]),
+                    D;
+                JSON ->
+                    {ok, D3} = handle_port_message(D, JSON),
+                    D3
+            end,
+            true = ets:insert(Table, D2#daemon{buf=[]})
+    end,
+    {noreply, Table};
+handle_info({Port, Error}, Table) ->
+    ?LOG_ERROR("Unexpectd message from port ~p: ~p", [Port, Error]),
+    stop_port(Port),
+    [D] = ets:lookup(Table, Port),
+    true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
+    {noreply, Table};
+handle_info(Msg, Table) ->
+    ?LOG_ERROR("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
+    {stop, error, Table}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+% Internal API
+
+%
+% Port management helpers
+%
+
+start_port(Command) ->
+    start_port(Command, []).
+
+start_port(Command, EnvPairs) ->
+    PrivDir = couch_util:priv_dir(),
+    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
+    Opts = case lists:keytake(env, 1, ?PORT_OPTIONS) of
+        false ->
+            ?PORT_OPTIONS ++ [ {env,EnvPairs} ];
+        {value, {env,OldPairs}, SubOpts} ->
+            AllPairs = lists:keymerge(1, EnvPairs, OldPairs),
+            SubOpts ++ [ {env,AllPairs} ]
+    end,
+    Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, Opts),
+    {ok, Port}.
+
+
+stop_port(#daemon{port=Port, kill=undefined}=D) ->
+    ?LOG_ERROR("Stopping daemon without a kill command: ~p", [D#daemon.name]),
+    catch port_close(Port);
+stop_port(#daemon{port=Port}=D) ->
+    ?LOG_DEBUG("Stopping daemon: ~p", [D#daemon.name]),
+    os:cmd(D#daemon.kill),
+    catch port_close(Port).
+
+
+handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
+    KVs = couch_config:get(Section),
+    Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
+    Json = iolist_to_binary(?JSON_ENCODE({Data})),
+    port_command(Port, <<Json/binary, "\n">>),
+    {ok, Daemon};
+handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
+    Value = case couch_config:get(Section, Key, null) of
+        null -> null;
+        String -> ?l2b(String)
+    end,
+    Json = iolist_to_binary(?JSON_ENCODE(Value)),
+    port_command(Port, <<Json/binary, "\n">>),
+    {ok, Daemon};
+handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
+    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
+    {ok, Daemon#daemon{cfg_patterns=Patterns}};
+handle_port_message(Daemon, [<<"register">>, Sec, Key])
+                        when is_binary(Sec) andalso is_binary(Key) ->
+    Pattern = {?b2l(Sec), ?b2l(Key)},
+    Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
+    {ok, Daemon#daemon{cfg_patterns=Patterns}};
+handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
+    handle_log_message(Name, Msg, <<"info">>),
+    {ok, Daemon};
+handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
+    Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
+    handle_log_message(Name, Msg, Level),
+    {ok, Daemon};
+handle_port_message(#daemon{name=Name}=Daemon, Else) ->
+    ?LOG_ERROR("Daemon ~p made invalid request: ~p", [Name, Else]),
+    {ok, Daemon}.
+
+
+handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
+    ?LOG_ERROR("Invalid log message from daemon ~p: ~p", [Name, Msg]);
+handle_log_message(Name, Msg, <<"debug">>) ->
+    ?LOG_DEBUG("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, <<"info">>) ->
+    ?LOG_INFO("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, <<"error">>) ->
+    ?LOG_ERROR("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, Level) ->
+    ?LOG_ERROR("Invalid log level from daemon: ~p", [Level]),
+    ?LOG_INFO("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]).
+
+%
+% Daemon management helpers
+%
+
+reload_daemons(Table) ->
+    % List of daemons we want to have running.
+    Configured = lists:sort(couch_config:get("os_daemons")),
+    
+    % Remove records for daemons that were halted.
+    MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
+    Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
+    ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
+    
+    % Stop daemons that are running
+    % Start newly configured daemons
+    MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
+    Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
+    ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
+    ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
+    ok.
+
+
+restart_daemons(Table, Sect, Key) ->
+    restart_daemons(Table, Sect, Key, ets:first(Table)).
+
+restart_daemons(_, _, _, '$end_of_table') ->
+    ok;
+restart_daemons(Table, Sect, Key, Port) ->
+    [D] = ets:lookup(Table, Port),
+    HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
+    HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
+    case HasSect or HasKey of
+        true ->
+            stop_port(D),
+            D2 = D#daemon{status=restarting, buf=nil},
+            true = ets:insert(Table, D2);
+        _ ->
+            ok
+    end,
+    restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
+    
+
+stop_os_daemons(_Table, []) ->
+    ok;
+stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
+    [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
+    [D] = ets:lookup(Table, Port),
+    case D#daemon.status of
+        halted ->
+            ets:delete(Table, Port);
+        _ ->
+            stop_port(D),
+            D2 = D#daemon{status=stopping, errors=nil, buf=nil},
+            true = ets:insert(Table, D2)
+    end,
+    stop_os_daemons(Table, Rest).
+    
+boot_os_daemons(_Table, []) ->
+    ok;
+boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
+    {ok, Port} = start_port(Cmd),
+    true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
+    boot_os_daemons(Table, Rest).
+    
+% Elements unique to the configured set need to be booted.
+find_to_boot([], _Rest, Acc) ->
+    % Nothing else configured.
+    Acc;
+find_to_boot([D | R1], [D | R2], Acc) ->
+    % Elements are equal, daemon already running.
+    find_to_boot(R1, R2, Acc);
+find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
+    find_to_boot(R1, A2, [D1 | Acc]);
+find_to_boot(A1, [_ | R2], Acc) ->
+    find_to_boot(A1, R2, Acc);
+find_to_boot(Rest, [], Acc) ->
+    % No more candidates for already running. Boot all.
+    Rest ++ Acc.
+
+% Elements unique to the running set need to be killed.
+find_to_stop([], Rest, Acc) ->
+    % The rest haven't been found, so they must all
+    % be ready to die.
+    Rest ++ Acc;
+find_to_stop([D | R1], [D | R2], Acc) ->
+    % Elements are equal, daemon already running.
+    find_to_stop(R1, R2, Acc);
+find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
+    find_to_stop(R1, A2, Acc);
+find_to_stop(A1, [D2 | R2], Acc) ->
+    find_to_stop(A1, R2, [D2 | Acc]);
+find_to_stop(_, [], Acc) ->
+    % No more running daemons to worry about.
+    Acc.
+
+should_halt(Errors) ->
+    RetryTimeCfg = couch_config:get("os_daemon_settings", "retry_time", "5"),
+    RetryTime = list_to_integer(RetryTimeCfg),
+
+    Now = now(),
+    RecentErrors = lists:filter(fun(Time) ->
+        timer:now_diff(Now, Time) =< RetryTime * 1000000
+    end, Errors),
+
+    RetryCfg = couch_config:get("os_daemon_settings", "max_retries", "3"),
+    Retries = list_to_integer(RetryCfg),
+
+    {length(RecentErrors) >= Retries, RecentErrors}.
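
A hedged sketch of the stdio protocol this module implements. The
lines below are the JSON requests handle_port_message/2 accepts; a
daemon only has to print them to stdout one per line (the kill
command on the very first line comes from the couchspawnkillable
wrapper, not from the daemon itself). The escript is illustrative,
not part of this commit:

    #!/usr/bin/env escript
    %% hypothetical OS daemon speaking the line-delimited JSON API
    main(_) ->
        % request a whole config section; the reply arrives as one
        % JSON object on a single line of stdin
        io:format("[\"get\", \"couchdb\"]~n"),
        % ask to be restarted whenever this section changes
        io:format("[\"register\", \"couchdb\"]~n"),
        % log through CouchDB at an explicit level
        io:format("[\"log\", \"daemon booted\", {\"level\": \"info\"}]~n"),
        timer:sleep(infinity).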

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_os_process.erl
----------------------------------------------------------------------
diff --git a/src/couch_os_process.erl b/src/couch_os_process.erl
new file mode 100644
index 0000000..db62d49
--- /dev/null
+++ b/src/couch_os_process.erl
@@ -0,0 +1,216 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_os_process).
+-behaviour(gen_server).
+
+-export([start_link/1, start_link/2, start_link/3, stop/1]).
+-export([set_timeout/2, prompt/2]).
+-export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
+
+-record(os_proc,
+    {command,
+     port,
+     writer,
+     reader,
+     timeout=5000
+    }).
+
+start_link(Command) ->
+    start_link(Command, []).
+start_link(Command, Options) ->
+    start_link(Command, Options, ?PORT_OPTIONS).
+start_link(Command, Options, PortOptions) ->
+    gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []).
+
+stop(Pid) ->
+    gen_server:cast(Pid, stop).
+
+% Read/Write API
+set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
+    ok = gen_server:call(Pid, {set_timeout, TimeOut}, infinity).
+
+% Used by couch_db_update_notifier.erl
+send(Pid, Data) ->
+    gen_server:cast(Pid, {send, Data}).
+
+prompt(Pid, Data) ->
+    case gen_server:call(Pid, {prompt, Data}, infinity) of
+        {ok, Result} ->
+            Result;
+        Error ->
+            ?LOG_ERROR("OS Process Error ~p :: ~p",[Pid,Error]),
+            throw(Error)
+    end.
+
+% Utility functions for reading and writing
+% in custom functions
+writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
+    port_command(OsProc#os_proc.port, [Data, $\n]).
+
+readline(#os_proc{} = OsProc) ->
+    readline(OsProc, []).
+readline(#os_proc{port = Port} = OsProc, Acc) ->
+    receive
+    {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
+        readline(OsProc, <<Acc/binary,Data/binary>>);
+    {Port, {data, {noeol, Data}}} when is_binary(Data) ->
+        readline(OsProc, Data);
+    {Port, {data, {noeol, Data}}} ->
+        readline(OsProc, [Data|Acc]);
+    {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
+        [<<Acc/binary,Data/binary>>];
+    {Port, {data, {eol, Data}}} when is_binary(Data) ->
+        [Data];
+    {Port, {data, {eol, Data}}} ->
+        lists:reverse(Acc, Data);
+    {Port, Err} ->
+        catch port_close(Port),
+        throw({os_process_error, Err})
+    after OsProc#os_proc.timeout ->
+        catch port_close(Port),
+        throw({os_process_error, "OS process timed out."})
+    end.
+
+% Standard JSON functions
+writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
+    JsonData = ?JSON_ENCODE(Data),
+    ?LOG_DEBUG("OS Process ~p Input  :: ~s", [OsProc#os_proc.port, JsonData]),
+    true = writeline(OsProc, JsonData).
+
+readjson(OsProc) when is_record(OsProc, os_proc) ->
+    Line = iolist_to_binary(readline(OsProc)),
+    ?LOG_DEBUG("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
+    try
+        % Don't actually parse the whole JSON. Just try to see if it's
+        % a command or a doc map/reduce/filter/show/list/update output.
+        % If it's a command then parse the whole JSON and execute the
+        % command, otherwise return the raw JSON line to the caller.
+        pick_command(Line)
+    catch
+    throw:abort ->
+        {json, Line};
+    throw:{cmd, _Cmd} ->
+        case ?JSON_DECODE(Line) of
+        [<<"log">>, Msg] when is_binary(Msg) ->
+            % we got a message to log. Log it and continue
+            ?LOG_INFO("OS Process ~p Log :: ~s", [OsProc#os_proc.port, Msg]),
+            readjson(OsProc);
+        [<<"error">>, Id, Reason] ->
+            throw({error, {couch_util:to_existing_atom(Id),Reason}});
+        [<<"fatal">>, Id, Reason] ->
+            ?LOG_INFO("OS Process ~p Fatal Error :: ~s ~p",
+                [OsProc#os_proc.port, Id, Reason]),
+            throw({couch_util:to_existing_atom(Id),Reason});
+        _Result ->
+            {json, Line}
+        end
+    end.
+
+pick_command(Line) ->
+    json_stream_parse:events(Line, fun pick_command0/1).
+
+pick_command0(array_start) ->
+    fun pick_command1/1;
+pick_command0(_) ->
+    throw(abort).
+
+pick_command1(<<"log">> = Cmd) ->
+    throw({cmd, Cmd});
+pick_command1(<<"error">> = Cmd) ->
+    throw({cmd, Cmd});
+pick_command1(<<"fatal">> = Cmd) ->
+    throw({cmd, Cmd});
+pick_command1(_) ->
+    throw(abort).
+
+
+% gen_server API
+init([Command, Options, PortOptions]) ->
+    PrivDir = couch_util:priv_dir(),
+    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
+    BaseProc = #os_proc{
+        command=Command,
+        port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+        writer=fun writejson/2,
+        reader=fun readjson/1
+    },
+    KillCmd = iolist_to_binary(readline(BaseProc)),
+    Pid = self(),
+    ?LOG_DEBUG("OS Process Start :: ~p", [BaseProc#os_proc.port]),
+    spawn(fun() ->
+            % this ensures the real OS process is killed when this process dies.
+            erlang:monitor(process, Pid),
+            receive _ -> ok end,
+            os:cmd(?b2l(iolist_to_binary(KillCmd)))
+        end),
+    OsProc =
+    lists:foldl(fun(Opt, Proc) ->
+        case Opt of
+        {writer, Writer} when is_function(Writer) ->
+            Proc#os_proc{writer=Writer};
+        {reader, Reader} when is_function(Reader) ->
+            Proc#os_proc{reader=Reader};
+        {timeout, TimeOut} when is_integer(TimeOut) ->
+            Proc#os_proc{timeout=TimeOut}
+        end
+    end, BaseProc, Options),
+    {ok, OsProc}.
+
+terminate(_Reason, #os_proc{port=Port}) ->
+    catch port_close(Port),
+    ok.
+
+handle_call({set_timeout, TimeOut}, _From, OsProc) ->
+    {reply, ok, OsProc#os_proc{timeout=TimeOut}};
+handle_call({prompt, Data}, _From, OsProc) ->
+    #os_proc{writer=Writer, reader=Reader} = OsProc,
+    try
+        Writer(OsProc, Data),
+        {reply, {ok, Reader(OsProc)}, OsProc}
+    catch
+        throw:{error, OsError} ->
+            {reply, OsError, OsProc};
+        throw:OtherError ->
+            {stop, normal, OtherError, OsProc}
+    end.
+
+handle_cast({send, Data}, #os_proc{writer=Writer}=OsProc) ->
+    try
+        Writer(OsProc, Data),
+        {noreply, OsProc}
+    catch
+        throw:OsError ->
+            ?LOG_ERROR("Failed sending data: ~p -> ~p", [Data, OsError]),
+            {stop, normal, OsProc}
+    end;
+handle_cast(stop, OsProc) ->
+    {stop, normal, OsProc};
+handle_cast(Msg, OsProc) ->
+    ?LOG_DEBUG("OS Proc: Unknown cast: ~p", [Msg]),
+    {noreply, OsProc}.
+
+handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+    ?LOG_INFO("OS Process terminated normally", []),
+    {stop, normal, OsProc};
+handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+    ?LOG_ERROR("OS Process died with status: ~p", [Status]),
+    {stop, {exit_status, Status}, OsProc}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
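
A hedged usage sketch for this module; the external command is an
assumption and must speak the same line-delimited JSON protocol
(couchspawnkillable wraps it and echoes the kill command first):

    % illustrative; ./my_query_server is not part of this commit
    {ok, Pid} = couch_os_process:start_link("./my_query_server"),
    ok = couch_os_process:set_timeout(Pid, 10000),
    % prompt/2 writes one JSON request and blocks for one JSON reply
    Reply = couch_os_process:prompt(Pid, [<<"reset">>, {[]}]),
    couch_os_process:stop(Pid).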

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_passwords.erl
----------------------------------------------------------------------
diff --git a/src/couch_passwords.erl b/src/couch_passwords.erl
new file mode 100644
index 0000000..d9e6836
--- /dev/null
+++ b/src/couch_passwords.erl
@@ -0,0 +1,119 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_passwords).
+
+-export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
+-export([hash_admin_password/1, get_unhashed_admins/0]).
+
+-include("couch_db.hrl").
+
+-define(MAX_DERIVED_KEY_LENGTH, (1 bsl 32 - 1)).
+-define(SHA1_OUTPUT_LENGTH, 20).
+
+%% legacy scheme, not used for new passwords.
+-spec simple(binary(), binary()) -> binary().
+simple(Password, Salt) ->
+    ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).
+
+%% CouchDB utility functions
+-spec hash_admin_password(binary()) -> binary().
+hash_admin_password(ClearPassword) ->
+    Iterations = couch_config:get("couch_httpd_auth", "iterations", "10000"),
+    Salt = couch_uuids:random(),
+    DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword),
+                                        Salt, list_to_integer(Iterations)),
+    ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
+        ++ ?b2l(Salt) ++ ","
+        ++ Iterations).
+
+-spec get_unhashed_admins() -> list().
+get_unhashed_admins() ->
+    lists:filter(
+        fun({_User, "-hashed-" ++ _}) ->
+            false; % already hashed
+        ({_User, "-pbkdf2-" ++ _}) ->
+            false; % already hashed
+        ({_User, _ClearPassword}) ->
+            true
+        end,
+    couch_config:get("admins")).
+
+%% Current scheme, much stronger.
+-spec pbkdf2(binary(), binary(), integer()) -> binary().
+pbkdf2(Password, Salt, Iterations) ->
+    {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
+    Result.
+
+-spec pbkdf2(binary(), binary(), integer(), integer())
+    -> {ok, binary()} | {error, derived_key_too_long}.
+pbkdf2(_Password, _Salt, _Iterations, DerivedLength)
+    when DerivedLength > ?MAX_DERIVED_KEY_LENGTH ->
+    {error, derived_key_too_long};
+pbkdf2(Password, Salt, Iterations, DerivedLength) ->
+    L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
+    <<Bin:DerivedLength/binary,_/binary>> =
+        iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
+    {ok, ?l2b(couch_util:to_hex(Bin))}.
+
+-spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist())
+    -> iolist().
+pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc)
+    when BlockIndex > BlockCount ->
+    lists:reverse(Acc);
+pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
+    Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
+    pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]).
+
+-spec pbkdf2(binary(), binary(), integer(), integer(), integer(),
+    binary(), binary()) -> binary().
+pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc)
+    when Iteration > Iterations ->
+    Acc;
+pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
+    InitialBlock = crypto:sha_mac(Password,
+        <<Salt/binary,BlockIndex:32/integer>>),
+    pbkdf2(Password, Salt, Iterations, BlockIndex, 2,
+        InitialBlock, InitialBlock);
+pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
+    Next = crypto:sha_mac(Password, Prev),
+    pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
+                   Next, crypto:exor(Next, Acc)).
+
+%% Verify two lists for equality without short-circuiting, to avoid timing attacks.
+-spec verify(string(), string(), integer()) -> boolean().
+verify([X|RestX], [Y|RestY], Result) ->
+    verify(RestX, RestY, (X bxor Y) bor Result);
+verify([], [], Result) ->
+    Result == 0.
+
+-spec verify(binary(), binary()) -> boolean();
+            (list(), list()) -> boolean().
+verify(<<X/binary>>, <<Y/binary>>) ->
+    verify(?b2l(X), ?b2l(Y));
+verify(X, Y) when is_list(X) and is_list(Y) ->
+    case length(X) == length(Y) of
+        true ->
+            verify(X, Y, 0);
+        false ->
+            false
+    end;
+verify(_X, _Y) -> false.
+
+-spec ceiling(number()) -> integer().
+ceiling(X) ->
+    T = erlang:trunc(X),
+    case (X - T) of
+        Neg when Neg < 0 -> T;
+        Pos when Pos > 0 -> T + 1;
+        _ -> T
+    end.
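
A hedged sketch of the API above; the salt and iteration count are
illustrative values rather than anything mandated by this commit:

    Salt = <<"00112233445566778899aabbccddeeff">>,
    % pbkdf2/3 yields a hex-encoded 20-byte derived key
    Key = couch_passwords:pbkdf2(<<"s3cret">>, Salt, 10000),
    % verify/2 compares without short-circuiting (timing-safe)
    true  = couch_passwords:verify(Key, Key),
    false = couch_passwords:verify(Key, <<"not-the-key">>).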

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_primary_sup.erl b/src/couch_primary_sup.erl
new file mode 100644
index 0000000..150b92e
--- /dev/null
+++ b/src/couch_primary_sup.erl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_primary_sup).
+-behaviour(supervisor).
+-export([init/1, start_link/0]).
+
+start_link() ->
+    supervisor:start_link({local,couch_primary_services}, ?MODULE, []).
+
+init([]) ->
+    Children = [
+        {collation_driver,
+            {couch_drv, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_drv]},
+        {couch_task_status,
+            {couch_task_status, start_link, []},
+            permanent,
+            brutal_kill,
+            worker,
+            [couch_task_status]},
+        {couch_server,
+            {couch_server, sup_start_link, []},
+            permanent,
+            brutal_kill,
+            worker,
+            [couch_server]},
+        {couch_db_update_event,
+            {gen_event, start_link, [{local, couch_db_update}]},
+            permanent,
+            brutal_kill,
+            worker,
+            dynamic},
+        {couch_replication_event,
+            {gen_event, start_link, [{local, couch_replication}]},
+            permanent,
+            brutal_kill,
+            worker,
+            dynamic},
+        {couch_replicator_job_sup,
+            {couch_replicator_job_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_replicator_job_sup]},
+        {couch_log,
+            {couch_log, start_link, []},
+            permanent,
+            brutal_kill,
+            worker,
+            [couch_log]}
+    ],
+    {ok, {{one_for_one, 10, 3600}, Children}}.
+
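
A hedged note on the restart spec above: {one_for_one, 10, 3600}
allows at most 10 child restarts within any 3600-second window
before the supervisor itself gives up. With the couch application
running, the tree can be probed from a shell:

    % illustrative shell probe, not part of this commit
    supervisor:which_children(couch_primary_services).
    %% => [{couch_log, <0.123.0>, worker, [couch_log]}, ...]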

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_query_servers.erl
----------------------------------------------------------------------
diff --git a/src/couch_query_servers.erl b/src/couch_query_servers.erl
new file mode 100644
index 0000000..3b58cbe
--- /dev/null
+++ b/src/couch_query_servers.erl
@@ -0,0 +1,616 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_query_servers).
+-behaviour(gen_server).
+
+-export([start_link/0, config_change/1]).
+
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+-export([start_doc_map/3, map_docs/2, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
+-export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([filter_docs/5]).
+-export([filter_view/3]).
+
+-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
+
+% For 210-os-proc-pool.t
+-export([get_os_process/1, ret_os_process/1]).
+
+-include("couch_db.hrl").
+
+-record(proc, {
+    pid,
+    lang,
+    ddoc_keys = [],
+    prompt_fun,
+    set_timeout_fun,
+    stop_fun
+}).
+
+-record(qserver, {
+    langs, % Keyed by language name, value is {Mod,Func,Arg}
+    pid_procs, % Keyed by PID, value is a #proc record.
+    lang_procs, % Keyed by language name, value is a #proc record
+    lang_limits, % Keyed by language name, value is {Lang, Limit, Current}
+    waitlist = [],
+    config
+}).
+
+start_link() ->
+    gen_server:start_link({local, couch_query_servers}, couch_query_servers, [], []).
+
+start_doc_map(Lang, Functions, Lib) ->
+    Proc = get_os_process(Lang),
+    case Lib of
+    {[]} -> ok;
+    Lib ->
+        true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+    end,
+    lists:foreach(fun(FunctionSource) ->
+        true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+    end, Functions),
+    {ok, Proc}.
+
+map_docs(Proc, Docs) ->
+    % send the documents
+    Results = lists:map(
+        fun(Doc) ->
+            Json = couch_doc:to_json_obj(Doc, []),
+
+            FunsResults = proc_prompt(Proc, [<<"map_doc">>, Json]),
+            % the results are a json array of function map yields like this:
+            % [FunResults1, FunResults2 ...]
+            % where each FunResults is a json array of key-value pairs:
+            % [[Key1, Value1], [Key2, Value2]]
+            % Convert the key, value pairs to tuples like
+            % [{Key1, Value1}, {Key2, Value2}]
+            lists:map(
+                fun(FunRs) ->
+                    [list_to_tuple(FunResult) || FunResult <- FunRs]
+                end,
+            FunsResults)
+        end,
+        Docs),
+    {ok, Results}.
+
+map_doc_raw(Proc, Doc) ->
+    Json = couch_doc:to_json_obj(Doc, []),
+    {ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
+
+
+stop_doc_map(nil) ->
+    ok;
+stop_doc_map(Proc) ->
+    ok = ret_os_process(Proc).
+
+group_reductions_results([]) ->
+    [];
+group_reductions_results(List) ->
+    {Heads, Tails} = lists:foldl(
+        fun([H|T], {HAcc,TAcc}) ->
+            {[H|HAcc], [T|TAcc]}
+        end, {[], []}, List),
+    case Tails of
+    [[]|_] -> % no tails left
+        [Heads];
+    _ ->
+        [Heads | group_reductions_results(Tails)]
+    end.
+
+rereduce(_Lang, [], _ReducedValues) ->
+    {ok, []};
+rereduce(Lang, RedSrcs, ReducedValues) ->
+    Grouped = group_reductions_results(ReducedValues),
+    Results = lists:zipwith(
+        fun
+        (<<"_", _/binary>> = FunSrc, Values) ->
+            {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+            Result;
+        (FunSrc, Values) ->
+            os_rereduce(Lang, [FunSrc], Values)
+        end, RedSrcs, Grouped),
+    {ok, Results}.
+
+reduce(_Lang, [], _KVs) ->
+    {ok, []};
+reduce(Lang, RedSrcs, KVs) ->
+    {OsRedSrcs, BuiltinReds} = lists:partition(fun
+        (<<"_", _/binary>>) -> false;
+        (_OsFun) -> true
+    end, RedSrcs),
+    {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
+    {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
+    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
+
+recombine_reduce_results([], [], [], Acc) ->
+    {ok, lists:reverse(Acc)};
+recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
+    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
+recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
+    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+
+os_reduce(_Lang, [], _KVs) ->
+    {ok, []};
+os_reduce(Lang, OsRedSrcs, KVs) ->
+    Proc = get_os_process(Lang),
+    OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+        [true, Reductions] -> Reductions
+    after
+        ok = ret_os_process(Proc)
+    end,
+    {ok, OsResults}.
+
+os_rereduce(Lang, OsRedSrcs, KVs) ->
+    Proc = get_os_process(Lang),
+    try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
+        [true, [Reduction]] -> Reduction
+    after
+        ok = ret_os_process(Proc)
+    end.
+
+
+builtin_reduce(_Re, [], _KVs, Acc) ->
+    {ok, lists:reverse(Acc)};
+builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+    Sum = builtin_sum_rows(KVs),
+    builtin_reduce(Re, BuiltinReds, KVs, [Sum|Acc]);
+builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+    Count = length(KVs),
+    builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+    Count = builtin_sum_rows(KVs),
+    builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+    Stats = builtin_stats(Re, KVs),
+    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
+
+builtin_sum_rows(KVs) ->
+    lists:foldl(fun
+        ([_Key, Value], Acc) when is_number(Value), is_number(Acc) ->
+            Acc + Value;
+        ([_Key, Value], Acc) when is_list(Value), is_list(Acc) ->
+            sum_terms(Acc, Value);
+        ([_Key, Value], Acc) when is_number(Value), is_list(Acc) ->
+            sum_terms(Acc, [Value]);
+        ([_Key, Value], Acc) when is_list(Value), is_number(Acc) ->
+            sum_terms([Acc], Value);
+        (_Else, _Acc) ->
+            throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>})
+    end, 0, KVs).
+
+sum_terms([], []) ->
+    [];
+sum_terms([_|_]=Xs, []) ->
+    Xs;
+sum_terms([], [_|_]=Ys) ->
+    Ys;
+sum_terms([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
+    [X+Y | sum_terms(Xs,Ys)];
+sum_terms(_, _) ->
+    throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>}).
+
+builtin_stats(reduce, []) ->
+    {[]};
+builtin_stats(reduce, [[_,First]|Rest]) when is_number(First) ->
+    Stats = lists:foldl(fun([_K,V], {S,C,Mi,Ma,Sq}) when is_number(V) ->
+        {S+V, C+1, lists:min([Mi, V]), lists:max([Ma, V]), Sq+(V*V)};
+    (_, _) ->
+        throw({invalid_value,
+            <<"builtin _stats function requires map values to be numbers">>})
+    end, {First,1,First,First,First*First}, Rest),
+    {Sum, Cnt, Min, Max, Sqr} = Stats,
+    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]};
+
+builtin_stats(rereduce, [[_,First]|Rest]) ->
+    {[{sum,Sum0}, {count,Cnt0}, {min,Min0}, {max,Max0}, {sumsqr,Sqr0}]} = First,
+    Stats = lists:foldl(fun([_K,Red], {S,C,Mi,Ma,Sq}) ->
+        {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]} = Red,
+        {Sum+S, Cnt+C, lists:min([Min, Mi]), lists:max([Max, Ma]), Sqr+Sq}
+    end, {Sum0,Cnt0,Min0,Max0,Sqr0}, Rest),
+    {Sum, Cnt, Min, Max, Sqr} = Stats,
+    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]}.
+
+% use the function stored in ddoc.validate_doc_update to test an update.
+validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
+    JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
+    JsonDiskDoc = json_doc(DiskDoc),
+    case ddoc_prompt(DDoc, [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]) of
+        1 ->
+            ok;
+        {[{<<"forbidden">>, Message}]} ->
+            throw({forbidden, Message});
+        {[{<<"unauthorized">>, Message}]} ->
+            throw({unauthorized, Message})
+    end.
+
+json_doc(nil) -> null;
+json_doc(Doc) ->
+    couch_doc:to_json_obj(Doc, [revs]).
+
+filter_view(DDoc, VName, Docs) ->
+    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
+    [true, Passes] = ddoc_prompt(DDoc, [<<"views">>, VName, <<"map">>], [JsonDocs]),
+    {ok, Passes}.
+
+filter_docs(Req, Db, DDoc, FName, Docs) ->
+    JsonReq = case Req of
+    {json_req, JsonObj} ->
+        JsonObj;
+    #httpd{} = HttpReq ->
+        couch_httpd_external:json_req_obj(HttpReq, Db)
+    end,
+    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
+    [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
+        [JsonDocs, JsonReq]),
+    {ok, Passes}.
+
+ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
+    proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
+
+ddoc_prompt(DDoc, FunPath, Args) ->
+    with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
+        proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
+    end).
+
+with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
+    Rev = couch_doc:rev_to_str({Start, DiskRev}),
+    DDocKey = {DDocId, Rev},
+    Proc = get_ddoc_process(DDoc, DDocKey),
+    try Fun({Proc, DDocId})
+    after
+        ok = ret_os_process(Proc)
+    end.
+
+init([]) ->
+    % register async to avoid deadlock on restart_child
+    Self = self(),
+    spawn(couch_config, register, [fun ?MODULE:config_change/1, Self]),
+
+    Langs = ets:new(couch_query_server_langs, [set, private]),
+    LangLimits = ets:new(couch_query_server_lang_limits, [set, private]),
+    PidProcs = ets:new(couch_query_server_pid_langs, [set, private]),
+    LangProcs = ets:new(couch_query_server_procs, [set, private]),
+
+    ProcTimeout = list_to_integer(couch_config:get(
+                        "couchdb", "os_process_timeout", "5000")),
+    ReduceLimit = list_to_atom(
+        couch_config:get("query_server_config","reduce_limit","true")),
+    OsProcLimit = list_to_integer(
+        couch_config:get("query_server_config","os_process_limit","10")),
+
+    % 'query_servers' specifies an OS command-line to execute.
+    lists:foreach(fun({Lang, Command}) ->
+        true = ets:insert(LangLimits, {?l2b(Lang), OsProcLimit, 0}),
+        true = ets:insert(Langs, {?l2b(Lang),
+                          couch_os_process, start_link, [Command]})
+    end, couch_config:get("query_servers")),
+    % 'native_query_servers' specifies a {Module, Func, Arg} tuple.
+    lists:foreach(fun({Lang, SpecStr}) ->
+        {ok, {Mod, Fun, SpecArg}} = couch_util:parse_term(SpecStr),
+        true = ets:insert(LangLimits, {?l2b(Lang), 0, 0}), % 0 means no limit
+        true = ets:insert(Langs, {?l2b(Lang),
+                          Mod, Fun, SpecArg})
+    end, couch_config:get("native_query_servers")),
+
+
+    process_flag(trap_exit, true),
+    {ok, #qserver{
+        langs = Langs, % Keyed by language name, value is {Mod,Func,Arg}
+        pid_procs = PidProcs, % Keyed by PID, value is a #proc record.
+        lang_procs = LangProcs, % Keyed by language name, value is a #proc record
+        lang_limits = LangLimits, % Keyed by language name, value is {Lang, Limit, Current}
+        config = {[{<<"reduce_limit">>, ReduceLimit},{<<"timeout">>, ProcTimeout}]}
+    }}.
+
+terminate(_Reason, #qserver{pid_procs=PidProcs}) ->
+    [couch_util:shutdown_sync(P) || {P,_} <- ets:tab2list(PidProcs)],
+    ok.
+
+handle_call({get_proc, DDoc1, DDocKey}, From, Server) ->
+    #doc{body = {Props}} = DDoc = couch_doc:with_ejson_body(DDoc1),
+    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+    case lang_proc(Lang, Server, fun(Procs) ->
+            % find a proc in the set that has the DDoc
+            proc_with_ddoc(DDoc, DDocKey, Procs)
+        end) of
+    {ok, Proc} ->
+        {reply, {ok, Proc, Server#qserver.config}, Server};
+    wait ->
+        {noreply, add_to_waitlist({DDoc, DDocKey}, From, Server)};
+    Error ->
+        {reply, Error, Server}
+    end;
+handle_call({get_proc, Lang}, From, Server) ->
+    case lang_proc(Lang, Server, fun([P|_Procs]) ->
+            {ok, P}
+        end) of
+    {ok, Proc} ->
+        {reply, {ok, Proc, Server#qserver.config}, Server};
+    wait ->
+        {noreply, add_to_waitlist({Lang}, From, Server)};
+    Error ->
+        {reply, Error, Server}
+    end;
+handle_call({unlink_proc, Pid}, _From, Server) ->
+    unlink(Pid),
+    {reply, ok, Server};
+handle_call({ret_proc, Proc}, _From, #qserver{
+        pid_procs=PidProcs,
+        lang_procs=LangProcs}=Server) ->
+    % Along with the max process limit, we should check here whether
+    % we're over the limit and discard the proc when we are.
+    case is_process_alive(Proc#proc.pid) of
+        true ->
+            add_value(PidProcs, Proc#proc.pid, Proc),
+            add_to_list(LangProcs, Proc#proc.lang, Proc),
+            link(Proc#proc.pid);
+        false ->
+            ok
+    end,
+    {reply, true, service_waitlist(Server)}.
+
+handle_cast(_Whatever, Server) ->
+    {noreply, Server}.
+
+handle_info({'EXIT', _, _}, Server) ->
+    {noreply, Server};
+handle_info({'DOWN', _, process, Pid, Status}, #qserver{
+        pid_procs=PidProcs,
+        lang_procs=LangProcs,
+        lang_limits=LangLimits}=Server) ->
+    case ets:lookup(PidProcs, Pid) of
+    [{Pid, Proc}] ->
+        case Status of
+        normal -> ok;
+        _ -> ?LOG_DEBUG("Linked process died abnormally: ~p (reason: ~p)", [Pid, Status])
+        end,
+        rem_value(PidProcs, Pid),
+        catch rem_from_list(LangProcs, Proc#proc.lang, Proc),
+        [{Lang, Lim, Current}] = ets:lookup(LangLimits, Proc#proc.lang),
+        true = ets:insert(LangLimits, {Lang, Lim, Current-1}),
+        {noreply, service_waitlist(Server)};
+    [] ->
+        case Status of
+        normal ->
+            {noreply, Server};
+        _ ->
+            {stop, Status, Server}
+        end
+    end.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+config_change("query_servers") ->
+    supervisor:terminate_child(couch_secondary_services, query_servers),
+    supervisor:restart_child(couch_secondary_services, query_servers);
+config_change("native_query_servers") ->
+    supervisor:terminate_child(couch_secondary_services, query_servers),
+    supervisor:restart_child(couch_secondary_services, query_servers);
+config_change("query_server_config") ->
+    supervisor:terminate_child(couch_secondary_services, query_servers),
+    supervisor:restart_child(couch_secondary_services, query_servers).
+
+% Private API
+
+add_to_waitlist(Info, From, #qserver{waitlist=Waitlist}=Server) ->
+    Server#qserver{waitlist=[{Info, From}|Waitlist]}.
+
+service_waitlist(#qserver{waitlist=[]}=Server) ->
+    Server;
+service_waitlist(#qserver{waitlist=Waitlist}=Server) ->
+    [Oldest|RevWList] = lists:reverse(Waitlist),
+    case service_waiting(Oldest, Server) of
+    ok ->
+        Server#qserver{waitlist=lists:reverse(RevWList)};
+    wait ->
+        Server#qserver{waitlist=Waitlist}
+    end.
+
+% todo get rid of duplication
+service_waiting({{#doc{body={Props}}=DDoc, DDocKey}, From}, Server) ->
+    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+    case lang_proc(Lang, Server, fun(Procs) ->
+            % find a proc in the set that has the DDoc
+            proc_with_ddoc(DDoc, DDocKey, Procs)
+        end) of
+    {ok, Proc} ->
+        gen_server:reply(From, {ok, Proc, Server#qserver.config}),
+        ok;
+    wait -> % this should never happen
+        wait;
+    Error ->
+        gen_server:reply(From, Error),
+        ok
+    end;
+service_waiting({{Lang}, From}, Server) ->
+    case lang_proc(Lang, Server, fun([P|_Procs]) ->
+            {ok, P}
+        end) of
+    {ok, Proc} ->
+        gen_server:reply(From, {ok, Proc, Server#qserver.config}),
+        ok;
+    wait -> % this should never happen
+        wait;
+    Error ->
+        gen_server:reply(From, Error),
+        ok
+    end.
+
+lang_proc(Lang, #qserver{
+        langs=Langs,
+        pid_procs=PidProcs,
+        lang_procs=LangProcs,
+        lang_limits=LangLimits}, PickFun) ->
+    % Note to future self. Add max process limit.
+    case ets:lookup(LangProcs, Lang) of
+    [{Lang, [P|Procs]}] ->
+        {ok, Proc} = PickFun([P|Procs]),
+        rem_from_list(LangProcs, Lang, Proc),
+        {ok, Proc};
+    _ ->
+        case (catch new_process(Langs, LangLimits, Lang)) of
+        {ok, Proc} ->
+            add_value(PidProcs, Proc#proc.pid, Proc),
+            PickFun([Proc]);
+        ErrorOrWait ->
+            ErrorOrWait
+        end
+    end.
+
+new_process(Langs, LangLimits, Lang) ->
+    [{Lang, Lim, Current}] = ets:lookup(LangLimits, Lang),
+    if (Lim == 0) or (Current < Lim) -> % Lim == 0 means no limit
+        % we are below the limit for our language, make a new one
+        case ets:lookup(Langs, Lang) of
+        [{Lang, Mod, Func, Arg}] ->
+            {ok, Pid} = apply(Mod, Func, Arg),
+            erlang:monitor(process, Pid),
+            true = ets:insert(LangLimits, {Lang, Lim, Current+1}),
+            {ok, #proc{lang=Lang,
+                       pid=Pid,
+                       % Called via proc_prompt, proc_set_timeout, and proc_stop
+                       prompt_fun={Mod, prompt},
+                       set_timeout_fun={Mod, set_timeout},
+                       stop_fun={Mod, stop}}};
+        _ ->
+            {unknown_query_language, Lang}
+        end;
+    true ->
+        wait
+    end.
+
+proc_with_ddoc(DDoc, DDocKey, LangProcs) ->
+    DDocProcs = lists:filter(fun(#proc{ddoc_keys=Keys}) ->
+            lists:any(fun(Key) ->
+                Key == DDocKey
+            end, Keys)
+        end, LangProcs),
+    case DDocProcs of
+        [DDocProc|_] ->
+            ?LOG_DEBUG("DDocProc found for DDocKey: ~p",[DDocKey]),
+            {ok, DDocProc};
+        [] ->
+            [TeachProc|_] = LangProcs,
+            ?LOG_DEBUG("Teach ddoc to new proc ~p with DDocKey: ~p",[TeachProc, DDocKey]),
+            {ok, SmartProc} = teach_ddoc(DDoc, DDocKey, TeachProc),
+            {ok, SmartProc}
+    end.
+
+proc_prompt(Proc, Args) ->
+     case proc_prompt_raw(Proc, Args) of
+     {json, Json} ->
+         ?JSON_DECODE(Json);
+     EJson ->
+         EJson
+     end.
+
+proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
+    apply(Mod, Func, [Proc#proc.pid, Args]).
+
+raw_to_ejson({json, Json}) ->
+    ?JSON_DECODE(Json);
+raw_to_ejson(EJson) ->
+    EJson.
+
+proc_stop(Proc) ->
+    {Mod, Func} = Proc#proc.stop_fun,
+    apply(Mod, Func, [Proc#proc.pid]).
+
+proc_set_timeout(Proc, Timeout) ->
+    {Mod, Func} = Proc#proc.set_timeout_fun,
+    apply(Mod, Func, [Proc#proc.pid, Timeout]).
+
+teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc{ddoc_keys=Keys}=Proc) ->
+    % send ddoc over the wire
+    % we only share the rev with the client so it knows to update its code,
+    % but it only keeps the latest copy of each ddoc around.
+    true = proc_prompt(Proc, [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+    % we should remove any other ddocs keys for this docid
+    % because the query server overwrites without the rev
+    Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+    % add ddoc to the proc
+    {ok, Proc#proc{ddoc_keys=[DDocKey|Keys2]}}.
+
+get_ddoc_process(#doc{} = DDoc, DDocKey) ->
+    % remove this case statement
+    case gen_server:call(couch_query_servers, {get_proc, DDoc, DDocKey}, infinity) of
+    {ok, Proc, {QueryConfig}} ->
+        % process knows the ddoc
+        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+        true ->
+            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+            link(Proc#proc.pid),
+            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}, infinity),
+            Proc;
+        _ ->
+            catch proc_stop(Proc),
+            get_ddoc_process(DDoc, DDocKey)
+        end;
+    Error ->
+        throw(Error)
+    end.
+
+get_os_process(Lang) ->
+    case gen_server:call(couch_query_servers, {get_proc, Lang}, infinity) of
+    {ok, Proc, {QueryConfig}} ->
+        case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+        true ->
+            proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+            link(Proc#proc.pid),
+            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}, infinity),
+            Proc;
+        _ ->
+            catch proc_stop(Proc),
+            get_os_process(Lang)
+        end;
+    Error ->
+        throw(Error)
+    end.
+
+ret_os_process(Proc) ->
+    true = gen_server:call(couch_query_servers, {ret_proc, Proc}, infinity),
+    catch unlink(Proc#proc.pid),
+    ok.
+
+add_value(Tid, Key, Value) ->
+    true = ets:insert(Tid, {Key, Value}).
+
+rem_value(Tid, Key) ->
+    true = ets:delete(Tid, Key).
+
+add_to_list(Tid, Key, Value) ->
+    case ets:lookup(Tid, Key) of
+    [{Key, Vals}] ->
+        true = ets:insert(Tid, {Key, [Value|Vals]});
+    [] ->
+        true = ets:insert(Tid, {Key, [Value]})
+    end.
+
+rem_from_list(Tid, Key, Value) when is_record(Value, proc)->
+    Pid = Value#proc.pid,
+    case ets:lookup(Tid, Key) of
+    [{Key, Vals}] ->
+        % make a new values list that doesn't include the Value arg
+        NewValues = [Val || #proc{pid=P}=Val <- Vals, P /= Pid],
+        ets:insert(Tid, {Key, NewValues});
+    [] -> ok
+    end;
+rem_from_list(Tid, Key, Value) ->
+    case ets:lookup(Tid, Key) of
+    [{Key, Vals}] ->
+        % make a new values list that doesn't include the Value arg
+        NewValues = [Val || Val <- Vals, Val /= Value],
+        ets:insert(Tid, {Key, NewValues});
+    [] -> ok
+    end.
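
A hedged sketch of the builtin reducers above, which run in Erlang
without ever touching an OS process; the KVs shape is the
[Key, Value] list form that builtin_sum_rows/1 pattern matches:

    KVs = [[<<"a">>, 1], [<<"b">>, 2], [<<"c">>, 3]],
    {ok, [6]} = couch_query_servers:reduce(<<"javascript">>, [<<"_sum">>], KVs),
    {ok, [3]} = couch_query_servers:reduce(<<"javascript">>, [<<"_count">>], KVs).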

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_ref_counter.erl
----------------------------------------------------------------------
diff --git a/src/couch_ref_counter.erl b/src/couch_ref_counter.erl
new file mode 100644
index 0000000..a774f46
--- /dev/null
+++ b/src/couch_ref_counter.erl
@@ -0,0 +1,111 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ref_counter).
+-behaviour(gen_server).
+
+-export([start/1, init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+-export([drop/1,drop/2,add/1,add/2,count/1]).
+
+start(ChildProcs) ->
+    gen_server:start(couch_ref_counter, {self(), ChildProcs}, []).
+
+
+drop(RefCounterPid) ->
+    drop(RefCounterPid, self()).
+
+drop(RefCounterPid, Pid) ->
+    gen_server:call(RefCounterPid, {drop, Pid}, infinity).
+
+
+add(RefCounterPid) ->
+    add(RefCounterPid, self()).
+
+add(RefCounterPid, Pid) ->
+    gen_server:call(RefCounterPid, {add, Pid}, infinity).
+
+count(RefCounterPid) ->
+    gen_server:call(RefCounterPid, count).
+
+% server functions
+
+-record(srv,
+    {
+    referrers=dict:new(), % a dict of each ref counting proc.
+    child_procs=[]
+    }).
+
+init({Pid, ChildProcs}) ->
+    [link(ChildProc) || ChildProc <- ChildProcs],
+    Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]),
+    {ok, #srv{referrers=Referrers, child_procs=ChildProcs}}.
+
+
+terminate(_Reason, #srv{child_procs=ChildProcs}) ->
+    [couch_util:shutdown_sync(Pid) || Pid <- ChildProcs],
+    ok.
+
+
+handle_call({add, Pid},_From, #srv{referrers=Referrers}=Srv) ->
+    Referrers2 =
+    case dict:find(Pid, Referrers) of
+    error ->
+        dict:store(Pid, {erlang:monitor(process, Pid), 1}, Referrers);
+    {ok, {MonRef, RefCnt}} ->
+        dict:store(Pid, {MonRef, RefCnt + 1}, Referrers)
+    end,
+    {reply, ok, Srv#srv{referrers=Referrers2}};
+handle_call(count, _From, Srv) ->
+    {monitors, Monitors} =  process_info(self(), monitors),
+    {reply, length(Monitors), Srv};
+handle_call({drop, Pid}, _From, #srv{referrers=Referrers}=Srv) ->
+    Referrers2 =
+    case dict:find(Pid, Referrers) of
+    {ok, {MonRef, 1}} ->
+        erlang:demonitor(MonRef, [flush]),
+        dict:erase(Pid, Referrers);
+    {ok, {MonRef, Num}} ->
+        dict:store(Pid, {MonRef, Num-1}, Referrers);
+    error ->
+        Referrers
+    end,
+    Srv2 = Srv#srv{referrers=Referrers2},
+    case should_close() of
+    true ->
+        {stop,normal,ok,Srv2};
+    false ->
+        {reply, ok, Srv2}
+    end.
+
+handle_cast(Msg, _Srv)->
+    exit({unknown_msg,Msg}).
+
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info({'DOWN', MonRef, _, Pid, _}, #srv{referrers=Referrers}=Srv) ->
+    {ok, {MonRef, _RefCount}} = dict:find(Pid, Referrers),
+    Srv2 = Srv#srv{referrers=dict:erase(Pid, Referrers)},
+    case should_close() of
+    true ->
+        {stop,normal,Srv2};
+    false ->
+        {noreply,Srv2}
+    end.
+
+
+should_close() ->
+    case process_info(self(), monitors) of
+    {monitors, []} ->   true;
+    _ ->                false
+    end.
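
A hedged usage sketch; count/1 reports monitors, so repeated add/1
calls from one process still show a single referrer:

    {ok, RC} = couch_ref_counter:start([]),
    1 = couch_ref_counter:count(RC),   % the caller is monitored once
    ok = couch_ref_counter:add(RC),    % caller's refcount rises to 2
    ok = couch_ref_counter:drop(RC),   % back down to 1
    ok = couch_ref_counter:drop(RC).   % last reference; server stops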

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_secondary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_secondary_sup.erl b/src/couch_secondary_sup.erl
new file mode 100644
index 0000000..6dd5604
--- /dev/null
+++ b/src/couch_secondary_sup.erl
@@ -0,0 +1,49 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_secondary_sup).
+-behaviour(supervisor).
+-export([init/1, start_link/0]).
+
+start_link() ->
+    supervisor:start_link({local,couch_secondary_services}, ?MODULE, []).
+
+init([]) ->
+    SecondarySupervisors = [
+        {couch_db_update_notifier_sup,
+            {couch_db_update_notifier_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_db_update_notifier_sup]},
+
+        {couch_plugin_event,
+            {gen_event, start_link, [{local, couch_plugin}]},
+            permanent,
+            brutal_kill,
+            worker,
+            dynamic}
+    ],
+    Children = SecondarySupervisors ++ [
+        begin
+            {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
+
+            {list_to_atom(Name),
+                {Module, Fun, Args},
+                permanent,
+                brutal_kill,
+                worker,
+                [Module]}
+        end
+        || {Name, SpecStr}
+        <- couch_config:get("daemons"), SpecStr /= ""],
+    {ok, {{one_for_one, 10, 3600}, Children}}.
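
A hedged sketch of the config this supervisor consumes: every entry
in the "daemons" section is a string {Module, Fun, Args} term turned
into a worker child spec. The entry below is illustrative, though
couch_query_servers:config_change/1 above assumes a child by this
name exists:

    ; local.ini (illustrative)
    [daemons]
    query_servers = {couch_query_servers, start_link, []}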


[19/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
fix version number

display the release number when available instead of the couch
application version.


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/70ce4007
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/70ce4007
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/70ce4007

Branch: refs/heads/import-rcouch
Commit: 70ce4007b7f39f1f820eeb3cf3f932cd9ef2cb3f
Parents: 661d430
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 17:27:28 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/couch_js/help.h    |  2 +-
 src/couch.app.src        | 23 ---------------
 src/couch.app.src.script | 67 +++++++++++++++++++++++++++++++++++++++++++
 src/couch.erl            |  9 ++++++
 src/couch_server.erl     | 10 ++-----
 5 files changed, 79 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/70ce4007/c_src/couch_js/help.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/help.h b/c_src/couch_js/help.h
index 81bae4d..f4ddb24 100644
--- a/c_src/couch_js/help.h
+++ b/c_src/couch_js/help.h
@@ -73,7 +73,7 @@ static const char USAGE_TEMPLATE[] =
             USAGE_TEMPLATE,                     \
             basename,                           \
             basename,                           \
-            VENDOR_NAME,                       \
+            PACKAGE_NAME,                       \
             basename,                           \
             PACKAGE_BUGREPORT)
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/70ce4007/src/couch.app.src
----------------------------------------------------------------------
diff --git a/src/couch.app.src b/src/couch.app.src
index 53cee43..e69de29 100644
--- a/src/couch.app.src
+++ b/src/couch.app.src
@@ -1,23 +0,0 @@
-{application, couch, [
-    {description, "Apache CouchDB"},
-    {vsn, "1.6.1"},
-    {registered, [
-        couch_config,
-        couch_db_update,
-        couch_db_update_notifier_sup,
-        couch_external_manager,
-        couch_httpd,
-        couch_log,
-        couch_primary_services,
-        couch_query_servers,
-        couch_secondary_services,
-        couch_server,
-        couch_server_sup,
-        couch_stats_aggregator,
-        couch_stats_collector,
-        couch_task_status
-    ]},
-    {mod, {couch_app, []}},
-    {applications, [kernel, stdlib, crypto, sasl, public_key, ssl,
-                    inets, oauth, ibrowse, mochiweb, os_mon]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/70ce4007/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
new file mode 100644
index 0000000..45b5333
--- /dev/null
+++ b/src/couch.app.src.script
@@ -0,0 +1,67 @@
+%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ft=erlang ts=4 sw=4 et
+
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+
+Cfg = case file:consult("../../pkg.vars.config") of
+          {ok, Terms} ->
+              Terms;
+          _Err ->
+              []
+      end,
+
+%% get version infos
+MajorVersion = integer_to_list(proplists:get_value(version_major, Cfg, 0)),
+MinorVersion = integer_to_list(proplists:get_value(version_minor, Cfg, 0)),
+RevVersion = integer_to_list(proplists:get_value(version_revision, Cfg, 0)),
+StageVersion = proplists:get_value(version_stage, Cfg, ""),
+RelVersion = proplists:get_value(version_release, Cfg, ""),
+
+%% build the version
+BaseVersion = MajorVersion ++ "." ++ MinorVersion ++ "." ++ RevVersion,
+SecondaryVersion = StageVersion ++ RelVersion,
+RelVsn = case os:getenv("RELEASE") of
+    "1" ->
+        BaseVersion;
+    _ ->
+        BaseVersion ++ SecondaryVersion
+end,
+
+
+[
+    {application, couch, [
+        {description, "Apache CouchDB"},
+        {vsn, "1.6.1"},
+        {registered, [
+            couch_config,
+            couch_db_update,
+            couch_db_update_notifier_sup,
+            couch_external_manager,
+            couch_httpd,
+            couch_log,
+            couch_primary_services,
+            couch_query_servers,
+            couch_secondary_services,
+            couch_server,
+            couch_server_sup,
+            couch_stats_aggregator,
+            couch_stats_collector,
+            couch_task_status
+        ]},
+        {mod, {couch_app, []}},
+        {env, [{couch_rel, RelVsn}]},
+        {applications, [kernel, stdlib, crypto, sasl, public_key, ssl,
+                        inets, oauth, ibrowse, mochiweb, os_mon]}
+    ]}
+].
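
A hedged sketch of the version assembly above, using hypothetical pkg.vars.config values (the values are assumptions for illustration, not taken from the repository):

    %% assumed values for illustration only
    Cfg = [{version_major, 1}, {version_minor, 6}, {version_revision, 1},
           {version_stage, "+dev"}, {version_release, ""}],
    Base = integer_to_list(proplists:get_value(version_major, Cfg, 0))
        ++ "." ++ integer_to_list(proplists:get_value(version_minor, Cfg, 0))
        ++ "." ++ integer_to_list(proplists:get_value(version_revision, Cfg, 0)),
    %% Base =:= "1.6.1"
    Vsn = case os:getenv("RELEASE") of
        "1" -> Base;                                   %% release build
        _   -> Base ++ proplists:get_value(version_stage, Cfg, "")
                    ++ proplists:get_value(version_release, Cfg, "")
    end.
    %% Vsn =:= "1.6.1+dev" unless RELEASE=1 is exported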

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/70ce4007/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch.erl b/src/couch.erl
index 80e3261..92c2b74 100644
--- a/src/couch.erl
+++ b/src/couch.erl
@@ -14,6 +14,7 @@
 
 -export([get_app_env/2,
          version/0,
+         release_version/0,
          start/0,
          stop/0,
          restart/0,
@@ -33,6 +34,14 @@ version() ->
             "0.0.0"
     end.
 
+release_version() ->
+    case application:get_env(couch, couch_rel) of
+        {ok, Vsn} ->
+            Vsn;
+        _ ->
+            "0.0.0"
+    end.
+
 start() ->
     application:start(couch).
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/70ce4007/src/couch_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_server.erl b/src/couch_server.erl
index 7cee0f5..4b80dfb 100644
--- a/src/couch_server.erl
+++ b/src/couch_server.erl
@@ -35,13 +35,7 @@ dev_start() ->
     couch:start().
 
 get_version() ->
-    Apps = application:loaded_applications(),
-    case lists:keysearch(couch, 1, Apps) of
-    {value, {_, _, Vsn}} ->
-        Vsn;
-    false ->
-        "0.0.0"
-    end.
+    couch:release_version().
 get_version(short) ->
   %% strip git hash from version string
   [Version|_Rest] = string:tokens(get_version(), "+"),
@@ -428,7 +422,7 @@ handle_cast(Msg, _Server) ->
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
-    
+
 handle_info({'EXIT', _Pid, config_change}, Server) ->
     {noreply, shutdown, Server};
 handle_info({'EXIT', Pid, Reason}, Server) ->
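
A hedged usage sketch of the new accessor (assumes the app env above has been loaded; the version shown is illustrative):

    1> application:load(couch).
    ok
    2> couch:release_version().
    "1.6.1+dev"    %% falls back to "0.0.0" when couch_rel is absent
    3> couch_server:get_version(short).
    "1.6.1"        %% strips anything after "+" from the version string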


[30/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
make the couch_replicator a full Erlang application


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/0d3662ad
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/0d3662ad
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/0d3662ad

Branch: refs/heads/import-rcouch
Commit: 0d3662ad0b35545cba62588abb8caf53c556cc0b
Parents: 8c6a64d
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 00:15:26 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 src/couch_primary_sup.erl | 12 ------------
 1 file changed, 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/0d3662ad/src/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_primary_sup.erl b/src/couch_primary_sup.erl
index 150b92e..3bb5875 100644
--- a/src/couch_primary_sup.erl
+++ b/src/couch_primary_sup.erl
@@ -43,18 +43,6 @@ init([]) ->
             brutal_kill,
             worker,
             dynamic},
-        {couch_replication_event,
-            {gen_event, start_link, [{local, couch_replication}]},
-            permanent,
-            brutal_kill,
-            worker,
-            dynamic},
-        {couch_replicator_job_sup,
-            {couch_replicator_job_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_replicator_job_sup]},
         {couch_log,
             {couch_log, start_link, []},
             permanent,
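
With these children removed from couch_primary_sup, a standalone couch_replicator application would start them from its own application callback instead; a hypothetical sketch (module names assumed, not shown in this commit):

    -module(couch_replicator_app).
    -behaviour(application).
    -export([start/2, stop/1]).

    %% started via application:start(couch_replicator)
    start(_Type, _Args) ->
        couch_replicator_sup:start_link().

    stop(_State) ->
        ok.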


[27/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
move sources to src


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/e9a8fe83
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/e9a8fe83
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/e9a8fe83

Branch: refs/heads/import-rcouch
Commit: e9a8fe839742a69fc0f9c8de930fc1fd8ba1b95a
Parents: 5ead928
Author: benoitc <be...@apache.org>
Authored: Wed Jan 8 03:16:05 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 rebar.config.script      | 2 +-
 src/couch.app.src.script | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e9a8fe83/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index ca79b39..2b187b6 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -14,7 +14,7 @@
 %% the License.
 
 
-Cfg = case file:consult("../../pkg.vars.config") of
+Cfg = case file:consult("../../../pkg.vars.config") of
           {ok, Terms} ->
               Terms;
           _Err ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/e9a8fe83/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index 45b5333..599efee 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -14,7 +14,7 @@
 %% the License.
 
 
-Cfg = case file:consult("../../pkg.vars.config") of
+Cfg = case file:consult("../../../pkg.vars.config") of
           {ok, Terms} ->
               Terms;
           _Err ->


[12/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_oauth.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_oauth.erl b/couch_httpd_oauth.erl
deleted file mode 100644
index 2094c08..0000000
--- a/couch_httpd_oauth.erl
+++ /dev/null
@@ -1,387 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_oauth).
-
--include("couch_db.hrl").
--include("couch_js_functions.hrl").
-
--export([oauth_authentication_handler/1, handle_oauth_req/1]).
-
--define(OAUTH_DDOC_ID, <<"_design/oauth">>).
--define(OAUTH_VIEW_NAME, <<"oauth_credentials">>).
-
--record(callback_params, {
-    consumer,
-    token,
-    token_secret,
-    url,
-    signature,
-    params,
-    username
-}).
-
-% OAuth auth handler using per-node user db
-oauth_authentication_handler(Req) ->
-    serve_oauth(Req, fun oauth_auth_callback/2, true).
-
-
-oauth_auth_callback(Req, #callback_params{token_secret = undefined}) ->
-    couch_httpd:send_error(
-         Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>);
-
-oauth_auth_callback(#httpd{mochi_req = MochiReq} = Req, CbParams) ->
-    Method = atom_to_list(MochiReq:get(method)),
-    #callback_params{
-        consumer = Consumer,
-        token_secret = TokenSecret,
-        url = Url,
-        signature = Sig,
-        params = Params,
-        username = User
-    } = CbParams,
-    case oauth:verify(Sig, Method, Url, Params, Consumer, TokenSecret) of
-    true ->
-        set_user_ctx(Req, User);
-    false ->
-        ?LOG_DEBUG("OAuth handler: signature verification failed for user `~p`~n"
-            "Received signature is `~p`~n"
-            "HTTP method is `~p`~n"
-            "URL is `~p`~n"
-            "Parameters are `~p`~n"
-            "Consumer is `~p`, token secret is `~p`~n"
-            "Expected signature was `~p`~n",
-            [User, Sig, Method, Url, Params, Consumer, TokenSecret,
-                oauth:signature(Method, Url, Params, Consumer, TokenSecret)]),
-        Req
-    end.
-
-
-% Look up the consumer key and get the roles to give the consumer
-set_user_ctx(_Req, undefined) ->
-    throw({bad_request, unknown_oauth_token});
-set_user_ctx(Req, Name) ->
-    case couch_auth_cache:get_user_creds(Name) of
-        nil ->
-            ?LOG_DEBUG("OAuth handler: user `~p` credentials not found", [Name]),
-            Req;
-        User ->
-            Roles = couch_util:get_value(<<"roles">>, User, []),
-            Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
-    end.
-
-% OAuth request_token
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token_secret = TokenSecret,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case oauth:verify(
-            Sig, atom_to_list(Method), Url, Params, Consumer, TokenSecret) of
-        true ->
-            ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
-        false ->
-            invalid_signature(Req)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
-    {ok, serve_oauth_authorize(Req)};
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token = Token,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case Token of
-        "requestkey" ->
-            case oauth:verify(
-                Sig, "GET", Url, Params, Consumer, "requestsecret") of
-            true ->
-                ok(Req,
-                    <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
-            false ->
-                invalid_signature(Req)
-            end;
-        _ ->
-            couch_httpd:send_error(
-                Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
-    couch_httpd:send_method_not_allowed(Req, "GET").
-
-invalid_signature(Req) ->
-    couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
-
-% This needs to be protected, i.e. force the user to log in using HTTP Basic Auth or form-based login.
-serve_oauth_authorize(#httpd{method=Method}=Req1) ->
-    case Method of
-        'GET' ->
-            % Confirm with the User that they want to authenticate the Consumer
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "GET", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        'POST' ->
-            % If the User has confirmed, we direct the User back to the Consumer with a verification code
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "POST", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    %redirect(oauth_callback, oauth_token, oauth_verifier),
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        _ ->
-            couch_httpd:send_method_not_allowed(Req1, "GET,POST")
-    end.
-
-serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
-    % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
-    % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
-    % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
-    AuthHeader = case MochiReq:get_header_value("authorization") of
-        undefined ->
-            "";
-        Else ->
-            [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
-            case [string:to_lower(Head) | Tail] of
-                ["oauth", Rest] -> Rest;
-                _ -> ""
-            end
-    end,
-    HeaderParams = oauth:header_params_decode(AuthHeader),
-    %Realm = couch_util:get_value("realm", HeaderParams),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-requested-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                undefined ->
-                    MochiReq:get(raw_path);
-                VHostPath ->
-                    VHostPath
-            end;
-        RequestedPath0 ->
-           RequestedPath0
-    end,
-    {_, QueryString, _} = mochiweb_util:urlsplit_path(RequestedPath),
-
-    Params = proplists:delete("realm", HeaderParams) ++ mochiweb_util:parse_qs(QueryString),
-
-    ?LOG_DEBUG("OAuth Params: ~p", [Params]),
-    case couch_util:get_value("oauth_version", Params, "1.0") of
-        "1.0" ->
-            case couch_util:get_value("oauth_consumer_key", Params, undefined) of
-                undefined ->
-                    case FailSilently of
-                        true -> Req;
-                        false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
-                    end;
-                ConsumerKey ->
-                    Url = couch_httpd:absolute_uri(Req, RequestedPath),
-                    case get_callback_params(ConsumerKey, Params, Url) of
-                        {ok, CallbackParams} ->
-                            Fun(Req, CallbackParams);
-                        invalid_consumer_token_pair ->
-                            couch_httpd:send_error(
-                                Req, 400,
-                                <<"invalid_consumer_token_pair">>,
-                                <<"Invalid consumer and token pair.">>);
-                        {error, {Error, Reason}} ->
-                            couch_httpd:send_error(Req, 400, Error, Reason)
-                    end
-            end;
-        _ ->
-            couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
-    end.
-
-
-get_callback_params(ConsumerKey, Params, Url) ->
-    Token = couch_util:get_value("oauth_token", Params),
-    SigMethod = sig_method(Params),
-    CbParams0 = #callback_params{
-        token = Token,
-        signature = couch_util:get_value("oauth_signature", Params),
-        params = proplists:delete("oauth_signature", Params),
-        url = Url
-    },
-    case oauth_credentials_info(Token, ConsumerKey) of
-    nil ->
-        invalid_consumer_token_pair;
-    {error, _} = Err ->
-        Err;
-    {OauthCreds} ->
-        User = couch_util:get_value(<<"username">>, OauthCreds, []),
-        ConsumerSecret = ?b2l(couch_util:get_value(
-            <<"consumer_secret">>, OauthCreds, <<>>)),
-        TokenSecret = ?b2l(couch_util:get_value(
-            <<"token_secret">>, OauthCreds, <<>>)),
-        case (User =:= []) orelse (ConsumerSecret =:= []) orelse
-            (TokenSecret =:= []) of
-        true ->
-            invalid_consumer_token_pair;
-        false ->
-            CbParams = CbParams0#callback_params{
-                consumer = {ConsumerKey, ConsumerSecret, SigMethod},
-                token_secret = TokenSecret,
-                username = User
-            },
-            ?LOG_DEBUG("Got OAuth credentials, for ConsumerKey `~p` and "
-                "Token `~p`, from the views, User: `~p`, "
-                "ConsumerSecret: `~p`, TokenSecret: `~p`",
-                [ConsumerKey, Token, User, ConsumerSecret, TokenSecret]),
-            {ok, CbParams}
-        end
-    end.
-
-
-sig_method(Params) ->
-    sig_method_1(couch_util:get_value("oauth_signature_method", Params)).
-sig_method_1("PLAINTEXT") ->
-    plaintext;
-% sig_method_1("RSA-SHA1") ->
-%    rsa_sha1;
-sig_method_1("HMAC-SHA1") ->
-    hmac_sha1;
-sig_method_1(_) ->
-    undefined.
-
-
-ok(#httpd{mochi_req=MochiReq}, Body) ->
-    {ok, MochiReq:respond({200, [], Body})}.
-
-
-oauth_credentials_info(Token, ConsumerKey) ->
-    case use_auth_db() of
-    {ok, Db} ->
-        Result = case query_oauth_view(Db, [?l2b(ConsumerKey), ?l2b(Token)]) of
-        [] ->
-            nil;
-        [Creds] ->
-            Creds;
-        [_ | _] ->
-            Reason = iolist_to_binary(
-                io_lib:format("Found multiple OAuth credentials for the pair "
-                    " (consumer_key: `~p`, token: `~p`)", [ConsumerKey, Token])),
-            {error, {<<"oauth_token_consumer_key_pair">>, Reason}}
-        end,
-        couch_db:close(Db),
-        Result;
-    nil ->
-        {
-            case couch_config:get("oauth_consumer_secrets", ConsumerKey) of
-            undefined -> [];
-            ConsumerSecret -> [{<<"consumer_secret">>, ?l2b(ConsumerSecret)}]
-            end
-            ++
-            case couch_config:get("oauth_token_secrets", Token) of
-            undefined -> [];
-            TokenSecret -> [{<<"token_secret">>, ?l2b(TokenSecret)}]
-            end
-            ++
-            case couch_config:get("oauth_token_users", Token) of
-            undefined -> [];
-            User -> [{<<"username">>, ?l2b(User)}]
-            end
-        }
-    end.
-
-
-use_auth_db() ->
-    case couch_config:get("couch_httpd_oauth", "use_users_db", "false") of
-    "false" ->
-        nil;
-    "true" ->
-        AuthDb = open_auth_db(),
-        {ok, _AuthDb2} = ensure_oauth_views_exist(AuthDb)
-    end.
-
-
-open_auth_db() ->
-    DbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
-    DbOptions = [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}],
-    {ok, AuthDb} = couch_db:open_int(DbName, DbOptions),
-    AuthDb.
-
-
-ensure_oauth_views_exist(AuthDb) ->
-    case couch_db:open_doc(AuthDb, ?OAUTH_DDOC_ID, []) of
-    {ok, _DDoc} ->
-        {ok, AuthDb};
-    _ ->
-        {ok, DDoc} = get_oauth_ddoc(),
-        {ok, _Rev} = couch_db:update_doc(AuthDb, DDoc, []),
-        {ok, _AuthDb2} = couch_db:reopen(AuthDb)
-    end.
-
-
-get_oauth_ddoc() ->
-    Json = {[
-        {<<"_id">>, ?OAUTH_DDOC_ID},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>,
-            {[
-                {?OAUTH_VIEW_NAME,
-                    {[
-                        {<<"map">>, ?OAUTH_MAP_FUN}
-                    ]}
-                }
-            ]}
-        }
-    ]},
-    {ok, couch_doc:from_json_obj(Json)}.
-
-
-query_oauth_view(Db, Key) ->
-    ViewOptions = [
-        {start_key, Key},
-        {end_key, Key}
-    ],
-    Callback = fun({row, Row}, Acc) ->
-            {ok, [couch_util:get_value(value, Row) | Acc]};
-        (_, Acc) ->
-            {ok, Acc}
-    end,
-    {ok, Result} = couch_mrview:query_view(
-        Db, ?OAUTH_DDOC_ID, ?OAUTH_VIEW_NAME, ViewOptions, Callback, []),
-    Result.
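
As the serve_oauth/3 comments above note, OAuth parameters can arrive in the Authorization header; a hedged sketch of the decode step done by oauth:header_params_decode/1 (the erlang-oauth call this module already uses), which returns roughly:

    1> oauth:header_params_decode("oauth_consumer_key=\"key\", oauth_token=\"tok\"").
    [{"oauth_consumer_key","key"},{"oauth_token","tok"}]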

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_proxy.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_proxy.erl b/couch_httpd_proxy.erl
deleted file mode 100644
index dec3f55..0000000
--- a/couch_httpd_proxy.erl
+++ /dev/null
@@ -1,426 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_httpd_proxy).
-
--export([handle_proxy_req/2]).
-
--include("couch_db.hrl").
--include("../ibrowse/ibrowse.hrl").
-
--define(TIMEOUT, infinity).
--define(PKT_SIZE, 4096).
-
-
-handle_proxy_req(Req, ProxyDest) ->
-    Method = get_method(Req),
-    Url = get_url(Req, ProxyDest),
-    Version = get_version(Req),
-    Headers = get_headers(Req),
-    Body = get_body(Req),
-    Options = [
-        {http_vsn, Version},
-        {headers_as_is, true},
-        {response_format, binary},
-        {stream_to, {self(), once}}
-    ],
-    case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
-        {ibrowse_req_id, ReqId} ->
-            stream_response(Req, ProxyDest, ReqId);
-        {error, Reason} ->
-            throw({error, Reason})
-    end.
-    
-
-get_method(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(method) of
-        Method when is_atom(Method) ->
-            list_to_atom(string:to_lower(atom_to_list(Method)));
-        Method when is_list(Method) ->
-            list_to_atom(string:to_lower(Method));
-        Method when is_binary(Method) ->
-            list_to_atom(string:to_lower(?b2l(Method)))
-    end.
-
-
-get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
-    get_url(Req, ?b2l(ProxyDest));
-get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
-    BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
-        {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
-        _ -> ProxyDest
-    end,
-    ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
-    RequestedPath = MochiReq:get(raw_path),
-    case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
-        {[], ProxyPrefix, []} ->
-            BaseUrl;
-        {[], ProxyPrefix, [$/ | DestPath]} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        {[], ProxyPrefix, DestPath} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        _Else ->
-            throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
-    end.
-
-get_version(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(version).
-
-
-get_headers(#httpd{mochi_req=MochiReq}) ->
-    to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
-
-to_ibrowse_headers([], Acc) ->
-    lists:reverse(Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
-    to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
-        % This appears to make ibrowse too smart.
-        %"transfer-encoding" ->
-        %    to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
-        _ ->
-            to_ibrowse_headers(Rest, [{K, V} | Acc])
-    end.
-
-get_body(#httpd{method='GET'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='HEAD'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='DELETE'}) ->
-    fun() -> eof end;
-get_body(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(body_length) of
-        undefined ->
-            <<>>;
-        {unknown_transfer_encoding, Unknown} ->
-            exit({unknown_transfer_encoding, Unknown});
-        chunked ->
-            {fun stream_chunked_body/1, {init, MochiReq, 0}};
-        0 ->
-            <<>>;
-        Length when is_integer(Length) andalso Length > 0 ->
-            {fun stream_length_body/1, {init, MochiReq, Length}};
-        Length ->
-            exit({invalid_body_length, Length})
-    end.
-
-
-remove_trailing_slash(Url) ->
-    rem_slash(lists:reverse(Url)).
-
-rem_slash([]) ->
-    [];
-rem_slash([$\s | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\t | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\r | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\n | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$/ | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash(RevUrl) ->
-    lists:reverse(RevUrl).
-
-
-stream_chunked_body({init, MReq, 0}) ->
-    % First chunk, do expect-continue dance.
-    init_body_stream(MReq),
-    stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
-stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
-    % Finished a chunk, get next length. If the next length
-    % is 0, it's time to try to read the trailers.
-    {CRem, Data} = read_chunk_length(MReq),
-    case CRem of
-        0 ->
-            BodyData = lists:reverse(Buf, Data),
-            {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}}; 
-        _ ->
-            stream_chunked_body(
-                {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
-            )
-    end;
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
-    % Time to empty our buffers to the upstream socket.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
-    % Buffer some more data from the client.
-    Length = lists:min([CRem, BRem]),
-    Socket = MReq:get(socket),
-    NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
-        {ok, Data} when size(Data) == CRem ->
-            case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
-                {ok, <<"\r\n">>} ->
-                    {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
-                _ ->
-                    exit(normal)
-            end;
-        {ok, Data} ->
-            {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
-        _ ->
-            exit(normal)
-    end,
-    stream_chunked_body(NewState);
-stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
-    % Empty our buffers and send data upstream.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-stream_chunked_body({trailers, MReq, Buf, BRem}) ->
-    % Read another trailer into the buffer or stop on an
-    % empty line.
-    Socket = MReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, <<"\r\n">>} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            BodyData = lists:reverse(Buf, <<"\r\n">>),
-            {ok, BodyData, eof};
-        {ok, Footer} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
-            stream_chunked_body(NewState);
-        _ ->
-            exit(normal)
-    end;
-stream_chunked_body(eof) ->
-    % Tell ibrowse we're done sending data.
-    eof.
-
-
-stream_length_body({init, MochiReq, Length}) ->
-    % Do the expect-continue dance
-    init_body_stream(MochiReq),
-    stream_length_body({stream, MochiReq, Length});
-stream_length_body({stream, _MochiReq, 0}) ->
-    % Finished streaming.
-    eof;
-stream_length_body({stream, MochiReq, Length}) ->
-    BufLen = lists:min([Length, ?PKT_SIZE]),
-    case MochiReq:recv(BufLen) of
-        <<>> -> eof;
-        Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
-    end.
-
-
-init_body_stream(MochiReq) ->
-    Expect = case MochiReq:get_header_value("expect") of
-        undefined ->
-            undefined;
-        Value when is_list(Value) ->
-            string:to_lower(Value)
-    end,
-    case Expect of
-        "100-continue" ->
-            MochiReq:start_raw_response({100, gb_trees:empty()});
-        _Else ->
-            ok
-    end.
-
-
-read_chunk_length(MochiReq) ->
-    Socket = MochiReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, Header} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            Splitter = fun(C) ->
-                C =/= $\r andalso C =/= $\n andalso C =/= $\s
-            end,
-            {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
-            {mochihex:to_int(Hex), Header};
-        _ ->
-            exit(normal)
-    end.
-
-
-stream_response(Req, ProxyDest, ReqId) ->
-    receive
-        {ibrowse_async_headers, ReqId, "100", _} ->
-            % ibrowse doesn't handle 100 Continue responses which
-            % means we have to discard them so the proxy client
-            % doesn't get confused.
-            ibrowse:stream_next(ReqId),
-            stream_response(Req, ProxyDest, ReqId);
-        {ibrowse_async_headers, ReqId, Status, Headers} ->
-            {Source, Dest} = get_urls(Req, ProxyDest),
-            FixedHeaders = fix_headers(Source, Dest, Headers, []),
-            case body_length(FixedHeaders) of
-                chunked ->
-                    {ok, Resp} = couch_httpd:start_chunked_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_chunked_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                Length when is_integer(Length) ->
-                    {ok, Resp} = couch_httpd:start_response_length(
-                        Req, list_to_integer(Status), FixedHeaders, Length
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                _ ->
-                    {ok, Resp} = couch_httpd:start_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    % XXX: MochiWeb apparently doesn't look at the
-                    % response to see if it must force close the
-                    % connection. So we help it out here.
-                    erlang:put(mochiweb_request_force_close, true),
-                    {ok, Resp}
-            end
-    end.
-
-
-stream_chunked_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send_chunk(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_chunked_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            couch_httpd:last_chunk(Resp)
-    end.
-
-
-stream_length_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_length_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            ok
-    end.
-
-
-get_urls(Req, ProxyDest) ->
-    SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
-    Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
-    case (catch parse_url(ProxyDest)) of
-        Dest when is_record(Dest, url) ->
-            {Source, Dest};
-        _ ->
-            DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
-            {Source, parse_url(DestUrl)}
-    end.
-
-
-fix_headers(_, _, [], Acc) ->
-    lists:reverse(Acc);
-fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
-    Fixed = case string:to_lower(K) of
-        "location" -> rewrite_location(Source, Dest, V);
-        "content-location" -> rewrite_location(Source, Dest, V);
-        "uri" -> rewrite_location(Source, Dest, V);
-        "destination" -> rewrite_location(Source, Dest, V);
-        "set-cookie" -> rewrite_cookie(Source, Dest, V);
-        _ -> V
-    end,
-    fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
-
-
-rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
-    case (catch parse_url(Url)) of
-        #url{host=Host, port=Port, protocol=Proto} = Location ->
-            DestLoc = #url{
-                protocol=Source#url.protocol,
-                host=Source#url.host,
-                port=Source#url.port,
-                path=join_url_path(Source#url.path, Location#url.path)
-            },
-            url_to_url(DestLoc);
-        #url{} ->
-            Url;
-        _ ->
-            url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
-    end.
-
-
-rewrite_cookie(_Source, _Dest, Cookie) ->
-    Cookie.
-
-
-parse_url(Url) when is_binary(Url) ->
-    ibrowse_lib:parse_url(?b2l(Url));
-parse_url(Url) when is_list(Url) ->
-    ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
-
-
-join_url_path(Src, Dst) ->
-    Src2 = case lists:reverse(Src) of
-        "/" ++ RestSrc -> lists:reverse(RestSrc);
-        _ -> Src
-    end,
-    Dst2 = case Dst of
-        "/" ++ RestDst -> RestDst;
-        _ -> Dst
-    end,
-    Src2 ++ "/" ++ Dst2.
-
-
-url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
-    LPort = case {Proto, Port} of
-        {http, 80} -> "";
-        {https, 443} -> "";
-        _ -> ":" ++ integer_to_list(Port)
-    end,
-    LPath = case Path of
-        "/" ++ _RestPath -> Path;
-        _ -> "/" ++ Path
-    end,
-    HostPart = case Url#url.host_type of
-        ipv6_address ->
-            "[" ++ Host ++ "]";
-        _ ->
-            Host
-    end,
-    atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
-
-
-body_length(Headers) ->
-    case is_chunked(Headers) of
-        true -> chunked;
-        _ -> content_length(Headers)
-    end.
-
-
-is_chunked([]) ->
-    false;
-is_chunked([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "transfer-encoding" ->
-            string:to_lower(V) == "chunked";
-        _ ->
-            is_chunked(Rest)
-    end.
-
-content_length([]) ->
-    undefined;
-content_length([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            list_to_integer(V);
-        _ ->
-            content_length(Rest)
-    end.
-
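
A hedged illustration of the body_length/1 decision above (these are module-internal helpers, shown as expressions for illustration only):

    body_length([{"Transfer-Encoding", "chunked"}]),  %% -> chunked
    body_length([{"Content-Length", "42"}]),          %% -> 42
    body_length([]).                                  %% -> undefined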

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_rewrite.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_rewrite.erl b/couch_httpd_rewrite.erl
deleted file mode 100644
index 1187397..0000000
--- a/couch_httpd_rewrite.erl
+++ /dev/null
@@ -1,484 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(couch_httpd_rewrite).
--export([handle_rewrite_req/3]).
--include("couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-%% doc The http rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the rewrites member of the design doc.
-%% Example of a complete rule:
-%%
-%%  {
-%%      ....
-%%      "rewrites": [
-%%      {
-%%          "from": "",
-%%          "to": "index.html",
-%%          "method": "GET",
-%%          "query": {}
-%%      }
-%%      ]
-%%  }
-%%
-%%  from: the path rule used to bind the current URI to the rule. It
-%% uses pattern matching for that.
-%%
-%%  to: the rule used to rewrite a URL. It can contain variables depending on
-%% binding variables discovered during pattern matching and query args (URL
-%% args and the query member).
-%%
-%%  method: the method used to bind the request method to the rule; by
-%% default "*".
-%%  query: the query args you want to define; they can contain dynamic
-%% variables bound by key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting
-%% with ":" or "*", e.g.:
-%% /somepath/:var/*
-%%
-%% This path is converted to an Erlang list by splitting on "/". Each var is
-%% converted to an atom. "*" is converted to the '*' atom. The pattern
-%% matching is done by splitting the request URL on "/" into a list of
-%% tokens. A string pattern will match an equal token. The star atom ('*' in
-%% single quotes) will match any number of tokens, but may only be present as
-%% the last pathterm in a pathspec. If all tokens are matched and all
-%% pathterms are used, then the pathspec matches. It works like webmachine.
-%% Each identified token will be reused in the to rule and in the query.
-%%
-%% The pattern matching is done by first matching the request method to a
-%% rule. By default all methods match a rule (method is equal to "*" by
-%% default). Then it will try to match the path to one rule. If no rule
-%% matches, a 404 error is returned.
-%%
-%% Once a rule is found we rewrite the request URL using the "to" and
-%% "query" members. The identified tokens are matched to the rule and
-%% will replace vars. If '*' is found in the rule, it will contain the
-%% remaining part, if it exists.
-%%
-%% Examples:
-%%
-%% Dispatch rule            URL             TO                  Tokens
-%%
-%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
-%% "to": "/some/"}                                              k = v
-%%
-%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*",         /a/b/c          /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo",     /a/b             /some/?k=b&foo=b    foo =:= b
-%% "to": "/some",
-%%  "query": {
-%%      "k": ":foo"
-%%  }}
-%%
-%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
-%% "to": "/some/:foo",
-%%  }}
-
-
-
-handle_rewrite_req(#httpd{
-        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
-        method=Method,
-        mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
-    % we are in a design handler
-    DesignId = <<"_design/", DesignName/binary>>,
-    Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
-    QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
-    RewritesSoFar = erlang:get(?REWRITE_COUNT),
-    MaxRewrites = list_to_integer(couch_config:get("httpd", "rewrite_limit", "100")),
-    case RewritesSoFar >= MaxRewrites of
-        true ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
-        false ->
-            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
-    end,
-
-    #doc{body={Props}} = DDoc,
-
-    % get rules from ddoc
-    case couch_util:get_value(<<"rewrites">>, Props) of
-        undefined ->
-            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>);
-        Bin when is_binary(Bin) ->
-            couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
-                <<"Rewrite rules are a String. They must be a JSON Array.">>);
-        Rules ->
-            % create dispatch list from rules
-            DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-            Method1 = couch_util:to_binary(Method),
-
-            %% get raw path by matching url to a rule.
-            RawPath = case try_bind_path(DispatchList, Method1, 
-                    PathParts, QueryList) of
-                no_dispatch_path ->
-                    throw(not_found);
-                {NewPathParts, Bindings} ->
-                    Parts = [quote_plus(X) || X <- NewPathParts],
-
-                    % build the new path, re-encode query args, possibly
-                    % converting them to JSON
-                    Bindings1 = maybe_encode_bindings(Bindings),
-                    Path = binary_to_list(
-                        iolist_to_binary([
-                                string:join(Parts, [?SEPARATOR]),
-                                [["?", mochiweb_util:urlencode(Bindings1)] 
-                                    || Bindings1 =/= [] ]
-                            ])),
-                    
-                    % if path is relative detect it and rewrite path
-                    case mochiweb_util:safe_relative_path(Path) of
-                        undefined ->
-                            ?b2l(Prefix) ++ "/" ++ Path;
-                        P1 ->
-                            ?b2l(Prefix) ++ "/" ++ P1
-                    end
-
-                end,
-
-            % normalize final path (fix levels "." and "..")
-            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
-
-            % In order to do OAuth correctly, we have to save the
-            % requested path. We use default so chained rewriting
-            % won't replace the original header.
-            Headers = mochiweb_headers:default("x-couchdb-requested-path",
-                                             MochiReq:get(raw_path),
-                                             MochiReq:get(headers)),
-
-            ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
-
-            % build a new mochiweb request
-            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                             MochiReq:get(method),
-                                             RawPath1,
-                                             MochiReq:get(version),
-                                             Headers),
-
-            % Cleanup; it forces mochiweb to reparse the raw URI.
-            MochiReq1:cleanup(),
-
-            #httpd{
-                db_url_handlers = DbUrlHandlers,
-                design_url_handlers = DesignUrlHandlers,
-                default_fun = DefaultFun,
-                url_handlers = UrlHandlers,
-                user_ctx = UserCtx,
-               auth = Auth
-            } = Req,
-
-            erlang:put(pre_rewrite_auth, Auth),
-            erlang:put(pre_rewrite_user_ctx, UserCtx),
-            couch_httpd:handle_request_int(MochiReq1, DefaultFun,
-                    UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
-        end.
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % we parse query args from the rule and fill
-                    % them in, possibly with binding vars
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % remove params in QueryLists1 that are already in
-                    % QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by one binding or an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-                    
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-   Value. 
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 -> 
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method fits the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% depending on the HTTP method.
-bind_method(?MATCH_ALL, _Method ) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc bind path. Using the from rule we try to bind the variables given
-%% in the current URL by pattern matching.
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
-    {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path)  ->
-    "/" ++ string:join(normalize_path1(string:tokens(Path,
-                "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc transform a JSON rule into Erlang terms for pattern matching
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to binding tuples for pattern matching.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case couch_config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) -> 
-    lists:foldl(fun 
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-                
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.
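
A hedged sketch of the rule translation described in the module doc above: the JSON rule {"from": "/a/:foo/*", "to": "/some/:foo/*"} would come out of make_rule/1 roughly as:

    [{[<<"a">>, {bind, <<"foo">>}, {bind, <<"*">>}],    %% FromParts
      {bind, <<"*">>}},                                 %% Method (default "*")
     [<<"some">>, {bind, <<"foo">>}, {bind, <<"*">>}],  %% ToParts
     [],                                                %% QueryArgs
     []]                                                %% Formats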

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_stats_handlers.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_stats_handlers.erl b/couch_httpd_stats_handlers.erl
deleted file mode 100644
index d6973f6..0000000
--- a/couch_httpd_stats_handlers.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_stats_handlers).
--include("couch_db.hrl").
-
--export([handle_stats_req/1]).
--import(couch_httpd, [
-    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
-    start_json_response/2, send_chunk/2, end_json_response/1,
-    start_chunked_response/3, send_error/4
-]).
-
-handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    flush(Req),
-    send_json(Req, couch_stats_aggregator:all(range(Req)));
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
-    throw({bad_request, <<"Stat names must have exactly two parts.">>});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
-    flush(Req),
-    Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
-        list_to_atom(binary_to_list(Key))}, range(Req)),
-    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
-    throw({bad_request, <<"Stat names must have exactly two parts.">>});
-
-handle_stats_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-range(Req) ->
-    case couch_util:get_value("range", couch_httpd:qs(Req)) of
-        undefined ->
-            0;
-        Value ->
-            list_to_integer(Value)
-    end.
-
-flush(Req) ->
-    case couch_util:get_value("flush", couch_httpd:qs(Req)) of
-        "true" ->
-            couch_stats_aggregator:collect_sample();
-        _Else ->
-            ok
-    end.
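-
-% Illustrative request (path form assumed from the GET clauses above)
-% exercising both query parameters handled here:
-%   GET /_stats/couchdb/request_time?range=60&flush=true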

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_vhost.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_vhost.erl b/couch_httpd_vhost.erl
deleted file mode 100644
index 4c3ebfe..0000000
--- a/couch_httpd_vhost.erl
+++ /dev/null
@@ -1,383 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_vhost).
--behaviour(gen_server).
-
--export([start_link/0, config_change/2, reload/0, get_state/0, dispatch_host/1]).
--export([urlsplit_netloc/2, redirect_to_vhost/2]).
--export([host/1, split_host_port/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-
--include("couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, '*'}).
-
--record(vhosts_state, {
-        vhosts,
-        vhost_globals,
-        vhosts_fun}).
-
-%% @doc the vhost manager.
-%% This gen_server keeps track of the vhosts added to the ini file and
-%% tries to match the Host header (or the forwarded host) against the
-%% rules built from the vhost list.
-%%
-%% Declaration of vhosts take place in the configuration file :
-%%
-%% [vhosts]
-%% example.com = /example
-%% *.example.com = /example
-%%
-%% The first line will rewrite the request to display the content of the
-%% example database. This rule works only if the Host header is
-%% 'example.com' and won't work for CNAMEs. The second rule, on the other
-%% hand, matches all CNAMEs to the example db, so www.example.com or
-%% db.example.com will work.
-%%
-%% The wildcard ('*') should always be the leftmost label of the cname:
-%%
-%%      "*.db.example.com = /"  will match all cnames on top of
-%% db.example.com and map them to the root of the machine.
-%%
-%%
-%% Rewriting Hosts to path
-%% -----------------------
-%%
-%% As in the _rewrite handler, you can match variables and use
-%% them to create the target path. Some examples:
-%%
-%%    [vhosts]
-%%    *.example.com = /*
-%%    :dbname.example.com = /:dbname
-%%    :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
-%%
-%% The first rule passes the wildcard as dbname, the second does the
-%% same but uses a variable name, and the third one allows you to use
-%% any app with :ddocname in any db with :dbname.
-%%
-%% You can also change the default function used to handle the request
-%% by changing the `redirect_vhost_handler` setting in the `httpd`
-%% section of the ini:
-%%
-%%    [httpd]
-%%    redirect_vhost_handler = {Module, Fun}
-%%
-%% The function takes two arguments: the mochiweb request object and the
-%% target path.
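-%%
-%% As an illustrative sketch (not part of the original docs), a custom
-%% handler that just delegates to the default behaviour could be:
-%%
-%%    my_redirect(MochiReq, VhostTarget) ->
-%%        couch_httpd_vhost:redirect_to_vhost(MochiReq, VhostTarget).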
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc reload vhosts rules
-reload() ->
-    gen_server:call(?MODULE, reload).
-
-get_state() ->
-    gen_server:call(?MODULE, get_state).
-
-%% @doc Try to find a rule matching the current Host header. If a rule
-%% is found, rewrite the mochiweb request; otherwise return the request
-%% unchanged.
-dispatch_host(MochiReq) ->
-    #vhosts_state{
-        vhost_globals = VHostGlobals,
-        vhosts = VHosts,
-        vhosts_fun=Fun} = get_state(),
-
-    {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
-    VPathParts =  string:tokens(VPath, "/"),
-
-    VHost = host(MochiReq),
-    {VHostParts, VhostPort} = split_host_port(VHost),
-    FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
-            VhostPort, VPathParts) of
-        no_vhost_matched -> MochiReq;
-        {VhostTarget, NewPath} ->
-            case vhost_global(VHostGlobals, MochiReq) of
-                true ->
-                    MochiReq;
-                _Else ->
-                    NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
-                                          Fragment}),
-                    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      NewPath1,
-                                      MochiReq:get(version),
-                                      MochiReq:get(headers)),
-                    Fun(MochiReq1, VhostTarget)
-            end
-    end,
-    FinalMochiReq.
-
-append_path("/"=_Target, "/"=_Path) ->
-    "/";
-append_path(Target, Path) ->
-    Target ++ Path.
-
-% default redirect vhost handler
-redirect_to_vhost(MochiReq, VhostTarget) ->
-    Path = MochiReq:get(raw_path),
-    Target = append_path(VhostTarget, Path),
-
-    ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]),
-
-    Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
-        MochiReq:get(headers)),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      Target,
-                                      MochiReq:get(version),
-                                      Headers),
-    % cleanup; this forces mochiweb to reparse the raw uri.
-    MochiReq1:cleanup(),
-    MochiReq1.
-
-%% Check if the request should be handled globally rather than rewritten
-%% for a vhost. If so, it will not be rewritten, but will run as a normal
-%% couchdb request. Normally you'd use this for _uuids, _utils and a few
-%% of the others you want to keep available on vhosts. You can also use
-%% it to make databases 'global'.
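-%%
-%% Illustrative configuration (the key is read in load_conf/0 below):
-%%
-%%    [httpd]
-%%    vhost_global_handlers = _utils, _uuids, _session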
-vhost_global( VhostGlobals, MochiReq) ->
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Front = case couch_httpd:partition(Path) of
-    {"", "", ""} ->
-        "/"; % Special case the root url handler
-    {FirstPart, _, _} ->
-        FirstPart
-    end,
-    [true] == [true||V <- VhostGlobals, V == Front].
-
-%% bind host:
-%% first try to bind the port, then the hostname.
-try_bind_vhost([], _HostParts, _Port, _PathParts) ->
-    no_vhost_matched;
-try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
-    {{VHostParts, VPort, VPath}, Path} = VhostSpec,
-    case bind_port(VPort, Port) of
-        ok ->
-            case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
-                {ok, Bindings, Remainings} ->
-                    case bind_path(VPath, PathParts) of
-                        {ok, PathParts1} ->
-                            Path1 = make_target(Path, Bindings, Remainings, []),
-                            {make_path(Path1), make_path(PathParts1)};
-                        fail ->
-                            try_bind_vhost(Rest, HostParts, Port,
-                                PathParts)
-                    end;
-                fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
-            end;
-        fail ->  try_bind_vhost(Rest, HostParts, Port, PathParts)
-    end.
-
-%% @doc build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-%% TODO: merge this code with the rewrite handler. But we need to make
-%% sure we are working with strings here.
-make_target([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined ->  "undefined";
-        P1 -> P1
-    end,
-    make_target(Rest, Bindings, Remaining, [P2|Acc]);
-make_target([P|Rest], Bindings, Remaining, Acc) ->
-    make_target(Rest, Bindings, Remaining, [P|Acc]).
-
-%% bind port
-bind_port(Port, Port) -> ok;
-bind_port('*', _) -> ok;
-bind_port(_,_) -> fail.
-
-%% bind vhost
-bind_vhost([],[], Bindings) -> {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) -> fail;
-bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
-bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) -> fail.
-
-%% bind path
-bind_path([], PathParts) ->
-    {ok, PathParts};
-bind_path(_VPathParts, []) ->
-    fail;
-bind_path([Path|VRest],[Path|Rest]) ->
-   bind_path(VRest, Rest);
-bind_path(_, _) ->
-    fail.
-
-% utilities
-
-
-host(MochiReq) ->
-    XHost = couch_config:get("httpd", "x_forwarded_host",
-                             "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined -> [];
-                Value1 -> Value1
-            end;
-        Value -> Value
-    end.
-
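-%% create vhost list from ini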
-make_vhosts() ->
-    Vhosts = lists:foldl(fun
-                ({_, ""}, Acc) ->
-                    Acc;
-                ({Vhost, Path}, Acc) ->
-                    [{parse_vhost(Vhost), split_path(Path)}|Acc]
-            end, [], couch_config:get("vhosts")),
-
-    lists:reverse(lists:usort(Vhosts)).
-
-
-parse_vhost(Vhost) ->
-    case urlsplit_netloc(Vhost, []) of
-        {[], Path} ->
-            {make_spec("*", []), '*', Path};
-        {HostPort, []} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, []};
-        {HostPort, Path} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, string:tokens(Path, "/")}
-    end.
-
-
-split_host_port(HostAsString) ->
-    case string:rchr(HostAsString, $:) of
-        0 ->
-            {split_host(HostAsString), '*'};
-        N ->
-            HostPart = string:substr(HostAsString, 1, N-1),
-            case (catch erlang:list_to_integer(string:substr(HostAsString,
-                            N+1, length(HostAsString)))) of
-                {'EXIT', _} ->
-                    {split_host(HostAsString), '*'};
-                Port ->
-                    {split_host(HostPart), Port}
-            end
-    end.
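-
-%% Illustrative examples: split_host_port("db.example.com:5984") gives
-%% {["db", "example", "com"], 5984}; with no port, or a non-numeric
-%% port, the port defaults to '*'.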
-
-split_host(HostAsString) ->
-    string:tokens(HostAsString, "\.").
-
-split_path(Path) ->
-    make_spec(string:tokens(Path, "/"), []).
-
-
-make_spec([], Acc) ->
-    lists:reverse(Acc);
-make_spec([""|R], Acc) ->
-    make_spec(R, Acc);
-make_spec(["*"|R], Acc) ->
-    make_spec(R, [?MATCH_ALL|Acc]);
-make_spec([P|R], Acc) ->
-    P1 = parse_var(P),
-    make_spec(R, [P1|Acc]).
-
-
-parse_var(P) ->
-    case P of
-        ":" ++ Var ->
-            {bind, Var};
-        _ -> P
-    end.
-
-
-% mochiweb doesn't export it.
-urlsplit_netloc("", Acc) ->
-    {lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
-    {lists:reverse(Acc), Rest};
-urlsplit_netloc([C | Rest], Acc) ->
-    urlsplit_netloc(Rest, [C | Acc]).
-
-make_path(Parts) ->
-     "/" ++ string:join(Parts,[?SEPARATOR]).
-
-init(_) ->
-    ok = couch_config:register(fun ?MODULE:config_change/2),
-
-    %% load configuration
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    State = #vhosts_state{
-        vhost_globals=VHostGlobals,
-        vhosts=VHosts,
-        vhosts_fun=Fun},
-    {ok, State}.
-
-handle_call(reload, _From, _State) ->
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    {reply, ok, #vhosts_state{
-            vhost_globals=VHostGlobals,
-            vhosts=VHosts,
-            vhosts_fun=Fun}};
-handle_call(get_state, _From, State) ->
-    {reply, State, State};
-handle_call(_Msg, _From, State) ->
-    {noreply, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-config_change("httpd", "vhost_global_handlers") ->
-    ?MODULE:reload();
-config_change("httpd", "redirect_vhost_handler") ->
-    ?MODULE:reload();
-config_change("vhosts", _) ->
-    ?MODULE:reload().
-
-load_conf() ->
-    %% get vhost globals
-    VHostGlobals = re:split(couch_config:get("httpd",
-            "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
-
-    %% build vhosts matching rules
-    VHosts = make_vhosts(),
-
-    %% build vhosts handler fun
-    DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
-    Fun = couch_httpd:make_arity_2_fun(couch_config:get("httpd",
-            "redirect_vhost_handler", DefaultVHostFun)),
-
-    {VHostGlobals, VHosts, Fun}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/couch_js_functions.hrl b/couch_js_functions.hrl
deleted file mode 100644
index a48feae..0000000
--- a/couch_js_functions.hrl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
-    function(newDoc, oldDoc, userCtx, secObj) {
-        if (newDoc._deleted === true) {
-            // allow deletes by admins and matching users
-            // without checking the other fields
-            if ((userCtx.roles.indexOf('_admin') !== -1) ||
-                (userCtx.name == oldDoc.name)) {
-                return;
-            } else {
-                throw({forbidden: 'Only admins may delete other user docs.'});
-            }
-        }
-
-        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
-            throw({forbidden : 'doc.type must be user'});
-        } // we only allow user docs for now
-
-        if (!newDoc.name) {
-            throw({forbidden: 'doc.name is required'});
-        }
-
-        if (!newDoc.roles) {
-            throw({forbidden: 'doc.roles must exist'});
-        }
-
-        if (!isArray(newDoc.roles)) {
-            throw({forbidden: 'doc.roles must be an array'});
-        }
-
-        for (var idx = 0; idx < newDoc.roles.length; idx++) {
-            if (typeof newDoc.roles[idx] !== 'string') {
-                throw({forbidden: 'doc.roles can only contain strings'});
-            }
-        }
-
-        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
-            throw({
-                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
-            });
-        }
-
-        if (oldDoc) { // validate all updates
-            if (oldDoc.name !== newDoc.name) {
-                throw({forbidden: 'Usernames can not be changed.'});
-            }
-        }
-
-        if (newDoc.password_sha && !newDoc.salt) {
-            throw({
-                forbidden: 'Users with password_sha must have a salt. ' +
-                    'See /_utils/script/couch.js for example code.'
-            });
-        }
-
-        if (newDoc.password_scheme === \"pbkdf2\") {
-            if (typeof(newDoc.iterations) !== \"number\") {
-               throw({forbidden: \"iterations must be a number.\"});
-            }
-            if (typeof(newDoc.derived_key) !== \"string\") {
-               throw({forbidden: \"derived_key must be a string.\"});
-            }
-        }
-
-        var is_server_or_database_admin = function(userCtx, secObj) {
-            // see if the user is a server admin
-            if(userCtx.roles.indexOf('_admin') !== -1) {
-                return true; // a server admin
-            }
-
-            // see if the user a database admin specified by name
-            if(secObj && secObj.admins && secObj.admins.names) {
-                if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
-                    return true; // database admin
-                }
-            }
-
-            // see if the user a database admin specified by role
-            if(secObj && secObj.admins && secObj.admins.roles) {
-                var db_roles = secObj.admins.roles;
-                for(var idx = 0; idx < userCtx.roles.length; idx++) {
-                    var user_role = userCtx.roles[idx];
-                    if(db_roles.indexOf(user_role) !== -1) {
-                        return true; // role matches!
-                    }
-                }
-            }
-
-            return false; // default to no admin
-        };
-
-        if (!is_server_or_database_admin(userCtx, secObj)) {
-            if (oldDoc) { // validate non-admin updates
-                if (userCtx.name !== newDoc.name) {
-                    throw({
-                        forbidden: 'You may only update your own user document.'
-                    });
-                }
-                // validate role updates
-                var oldRoles = oldDoc.roles.sort();
-                var newRoles = newDoc.roles.sort();
-
-                if (oldRoles.length !== newRoles.length) {
-                    throw({forbidden: 'Only _admin may edit roles'});
-                }
-
-                for (var i = 0; i < oldRoles.length; i++) {
-                    if (oldRoles[i] !== newRoles[i]) {
-                        throw({forbidden: 'Only _admin may edit roles'});
-                    }
-                }
-            } else if (newDoc.roles.length > 0) {
-                throw({forbidden: 'Only _admin may set roles'});
-            }
-        }
-
-        // no system roles in users db
-        for (var i = 0; i < newDoc.roles.length; i++) {
-            if (newDoc.roles[i][0] === '_') {
-                throw({
-                    forbidden:
-                    'No system roles (starting with underscore) in users db.'
-                });
-            }
-        }
-
-        // no system names as names
-        if (newDoc.name[0] === '_') {
-            throw({forbidden: 'Username may not start with underscore.'});
-        }
-
-        var badUserNameChars = [':'];
-
-        for (var i = 0; i < badUserNameChars.length; i++) {
-            if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
-                throw({forbidden: 'Character `' + badUserNameChars[i] +
-                        '` is not allowed in usernames.'});
-            }
-        }
-    }
-">>).
-
-
--define(OAUTH_MAP_FUN, <<"
-    function(doc) {
-        if (doc.type === 'user' && doc.oauth && doc.oauth.consumer_keys) {
-            for (var consumer_key in doc.oauth.consumer_keys) {
-                for (var token in doc.oauth.tokens) {
-                    var obj = {
-                        'consumer_secret': doc.oauth.consumer_keys[consumer_key],
-                        'token_secret': doc.oauth.tokens[token],
-                        'username': doc.name
-                    };
-                    emit([consumer_key, token], obj);
-                }
-            }
-        }
-    }
-">>).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_key_tree.erl
----------------------------------------------------------------------
diff --git a/couch_key_tree.erl b/couch_key_tree.erl
deleted file mode 100644
index ce45ab8..0000000
--- a/couch_key_tree.erl
+++ /dev/null
@@ -1,422 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Data structure used to represent document edit histories.
-
-%% A key tree is used to represent the edit history of a document. Each node of
-%% the tree represents a particular version. Relations between nodes represent
-%% the order that these edits were applied. For instance, a set of three edits
-%% would produce a tree of versions A->B->C indicating that edit C was based on
-%% version B which was in turn based on A. In a world without replication (and
-%% no ability to disable MVCC checks), all histories would be forced to be
-%% linear lists of edits due to constraints imposed by MVCC (ie, new edits must
-%% be based on the current version). However, we have replication, so we must
-%% deal with cases that are not so simple, and those lead to trees.
-%%
-%% Consider a document in state A. This doc is replicated to a second node. We
-%% then edit the document on each node leaving it in two different states, B
-%% and C. We now have two key trees, A->B and A->C. When we go to replicate a
-%% second time, the key tree must combine these two trees which gives us
-%% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we
-%% say that we have two leaves (B and C) that are not deleted. The presence of
-%% multiple leaves indicates a conflict. To remove a conflict, one of the
-%% edits (B or C) can be deleted, which results in A->(B|C->D) where D is an
-%% edit that is specially marked with a deleted=true flag.
-%%
-%% What makes this a bit more complicated is that there is a limit to the
-%% number of revisions kept, specified in couch_db.hrl (default is 1000). When
-%% this limit is exceeded only the last 1000 are kept. This comes into play
-%% when branches are merged. The comparison has to begin at the same place in
-%% the branches. A revision id is of the form N-XXXXXXX where N is the current
-%% revision number. So each path will have a start number, calculated in
-%% couch_doc:to_path using the formula N - length(RevIds) + 1. So, e.g., if a
-%% doc was edited 1003 times this start number would be 4, indicating that 3
-%% revisions were truncated.
-%%
-%% This comes into play in @see merge_at/3 which recursively walks down one
-%% tree or the other until they begin at the same revision.
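-%%
-%% As an illustrative sketch of the representation used below, a history
-%% A->B with a conflicting edit C could be stored as the single entry
-%%
-%%     [{1, {"A", ValA, [{"B", ValB, []}, {"C", ValC, []}]}}]
-%%
-%% i.e. a list of {StartPos, Tree} pairs where each node in a tree is a
-%% {Key, Value, ChildList} tuple.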
-
--module(couch_key_tree).
-
--export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
--export([get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1, stem/2]).
--export([map/2, mapfold/3, map_leafs/2, fold/3]).
-
--include("couch_db.hrl").
-
-%% @doc Merge a path with a list of paths and stem to the given length.
--spec merge([path()], path(), pos_integer()) -> {[path()],
-    conflicts | no_conflicts}.
-merge(Paths, Path, Depth) ->
-    {Merged, Conflicts} = merge(Paths, Path),
-    {stem(Merged, Depth), Conflicts}.
-
-%% @doc Merge a path with an existing list of paths, returning a new list of
-%% paths. A return of conflicts indicates a new conflict was discovered in this
-%% merge. Conflicts may already exist in the original list of paths.
--spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}.
-merge(Paths, Path) ->
-    {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false),
-    if HasConflicts ->
-        Conflicts = conflicts;
-    (length(Merged) =/= length(Paths)) and (length(Merged) =/= 1) ->
-        Conflicts = conflicts;
-    true ->
-        Conflicts = no_conflicts
-    end,
-    {lists:sort(Merged), Conflicts}.
-
--spec merge_one(Original::[path()], Inserted::path(), [path()], boolean()) ->
-    {ok, Merged::[path()], NewConflicts::boolean()}.
-merge_one([], Insert, OutAcc, ConflictsAcc) ->
-    {ok, [Insert | OutAcc], ConflictsAcc};
-merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) ->
-    case merge_at([Tree], StartInsert - Start, [TreeInsert]) of
-    {ok, [Merged], Conflicts} ->
-        MergedStart = lists:min([Start, StartInsert]),
-        {ok, Rest ++ [{MergedStart, Merged} | Acc], Conflicts or HasConflicts};
-    no ->
-        AccOut = [{Start, Tree} | Acc],
-        merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts)
-    end.
-
--spec merge_at(tree(), Place::integer(), tree()) ->
-    {ok, Merged::tree(), HasConflicts::boolean()} | no.
-merge_at(_Ours, _Place, []) ->
-    no;
-merge_at([], _Place, _Insert) ->
-    no;
-merge_at([{Key, Value, SubTree}|Sibs], Place, InsertTree) when Place > 0 ->
-    % inserted starts later than committed, need to drill into committed subtree
-    case merge_at(SubTree, Place - 1, InsertTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [{Key, Value, Merged} | Sibs], Conflicts};
-    no ->
-        % first branch didn't merge, move to next branch
-        case merge_at(Sibs, Place, InsertTree) of
-        {ok, Merged, Conflicts} ->
-            {ok, [{Key, Value, SubTree} | Merged], Conflicts};
-        no ->
-            no
-        end
-    end;
-merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 ->
-    % inserted starts earlier than committed, need to drill into insert subtree
-    case merge_at(OurTree, Place + 1, SubTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [{Key, Value, Merged}], Conflicts};
-    no ->
-        no
-    end;
-merge_at([{Key, V1, SubTree}|Sibs], 0, [{Key, V2, InsertSubTree}]) ->
-    {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree),
-    {ok, [{Key, value_pref(V1, V2), Merged} | Sibs], Conflicts};
-merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key ->
-    % siblings keys are ordered, no point in continuing
-    no;
-merge_at([Tree | Sibs], 0, InsertTree) ->
-    case merge_at(Sibs, 0, InsertTree) of
-    {ok, Merged, Conflicts} ->
-        {ok, [Tree | Merged], Conflicts};
-    no ->
-        no
-    end.
-
-% key tree functions
-
--spec merge_simple(tree(), tree()) -> {Merged::tree(), NewConflicts::boolean()}.
-merge_simple([], B) ->
-    {B, false};
-merge_simple(A, []) ->
-    {A, false};
-merge_simple([{Key, V1, SubA} | NextA], [{Key, V2, SubB} | NextB]) ->
-    {MergedSubTree, Conflict1} = merge_simple(SubA, SubB),
-    {MergedNextTree, Conflict2} = merge_simple(NextA, NextB),
-    Value = value_pref(V1, V2),
-    {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2};
-merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B ->
-    {Merged, Conflict} = merge_simple(Next, Insert),
-    % if Merged has more branches than the input we added a new conflict
-    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))};
-merge_simple(Ours, [Tree | Next]) ->
-    {Merged, Conflict} = merge_simple(Ours, Next),
-    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))}.
-
-find_missing(_Tree, []) ->
-    [];
-find_missing([], SearchKeys) ->
-    SearchKeys;
-find_missing([{Start, {Key, Value, SubTree}} | RestTree], SearchKeys) ->
-    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Start],
-    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Start],
-    Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
-    find_missing(RestTree, ImpossibleKeys ++ Missing).
-
-find_missing_simple(_Pos, _Tree, []) ->
-    [];
-find_missing_simple(_Pos, [], SearchKeys) ->
-    SearchKeys;
-find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SearchKeys) ->
-    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Pos],
-    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Pos],
-
-    SrcKeys2 = PossibleKeys -- [{Pos, Key}],
-    SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
-    ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
-
-
-filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
-    {FilteredAcc, RemovedKeysAcc};
-filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
-    FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
-    if FilteredKeys == Keys ->
-        % this leaf is not a key we are looking to remove
-        filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
-    true ->
-        % this did match a key, remove both the node and the input key
-        filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
-    end.
-
-% Removes any branches from the tree whose leaf node(s) are in the Keys
-remove_leafs(Trees, Keys) ->
-    % flatten each branch in a tree into a tree path
-    Paths = get_all_leafs_full(Trees),
-
-    % filter out any that are in the keys list.
-    {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
-
-    SortedPaths = lists:sort(
-        [{Pos + 1 - length(Path), Path} || {Pos, Path} <- FilteredPaths]
-    ),
-
-    % convert paths back to trees
-    NewTree = lists:foldl(
-        fun({StartPos, Path},TreeAcc) ->
-            [SingleTree] = lists:foldl(
-                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
-            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
-            NewTrees
-        end, [], SortedPaths),
-    {NewTree, RemovedKeys}.
-
-
-% get the leafs in the tree matching the keys. The matching key nodes can be
-% leafs or inner nodes. If an inner node, then the leafs for that node
-% are returned.
-get_key_leafs(Tree, Keys) ->
-    get_key_leafs(Tree, Keys, []).
-
-get_key_leafs(_, [], Acc) ->
-    {Acc, []};
-get_key_leafs([], Keys, Acc) ->
-    {Acc, Keys};
-get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
-    {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
-    get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
-
-get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
-    {[], []};
-get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
-    {[], KeysToGet};
-get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
-    case lists:delete({Pos, Key}, KeysToGet) of
-    KeysToGet -> % same list, key not found
-        {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
-        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
-        {LeafsFound ++ RestLeafsFound, KeysRemaining};
-    KeysToGet2 ->
-        LeafsFound = get_all_leafs_simple(Pos, [Tree], KeyPathAcc),
-        LeafKeysFound = [{LeafPos, LeafRev} || {_, {LeafPos, [LeafRev|_]}}
-            <- LeafsFound],
-        KeysToGet3 = KeysToGet2 -- LeafKeysFound,
-        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet3, KeyPathAcc),
-        {LeafsFound ++ RestLeafsFound, KeysRemaining}
-    end.
-
-get(Tree, KeysToGet) ->
-    {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
-    FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
-    {FixedResults, KeysNotFound}.
-
-get_full_key_paths(Tree, Keys) ->
-    get_full_key_paths(Tree, Keys, []).
-
-get_full_key_paths(_, [], Acc) ->
-    {Acc, []};
-get_full_key_paths([], Keys, Acc) ->
-    {Acc, Keys};
-get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
-    {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
-    get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
-
-get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
-    {[], []};
-get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
-    {[], KeysToGet};
-get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
-    KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
-    CurrentNodeResult =
-    case length(KeysToGet2) =:= length(KeysToGet) of
-    true -> % not in the key list.
-        [];
-    false -> % this node is in the key list. return it
-        [{Pos, [{KeyId, Value} | KeyPathAcc]}]
-    end,
-    {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
-    {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
-    {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
-
-get_all_leafs_full(Tree) ->
-    get_all_leafs_full(Tree, []).
-
-get_all_leafs_full([], Acc) ->
-    Acc;
-get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
-    get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
-    [];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
-    [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
-    get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
-
-get_all_leafs(Trees) ->
-    get_all_leafs(Trees, []).
-
-get_all_leafs([], Acc) ->
-    Acc;
-get_all_leafs([{Pos, Tree}|Rest], Acc) ->
-    get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
-
-get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
-    [];
-get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
-    [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
-get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
-    get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
-
-
-count_leafs([]) ->
-    0;
-count_leafs([{_Pos,Tree}|Rest]) ->
-    count_leafs_simple([Tree]) + count_leafs(Rest).
-
-count_leafs_simple([]) ->
-    0;
-count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
-    1 + count_leafs_simple(RestTree);
-count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
-    count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
-
-fold(_Fun, Acc, []) ->
-    Acc;
-fold(Fun, Acc0, [{Pos, Tree}|Rest]) ->
-    Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
-    fold(Fun, Acc1, Rest).
-
-fold_simple(_Fun, Acc, _Pos, []) ->
-    Acc;
-fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    Type = if SubTree == [] -> leaf; true -> branch end,
-    Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
-    Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree),
-    fold_simple(Fun, Acc2, Pos, RestTree).
-
-
-map(_Fun, []) ->
-    [];
-map(Fun, [{Pos, Tree}|Rest]) ->
-    case erlang:fun_info(Fun, arity) of
-    {arity, 2} ->
-        [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
-        [{Pos, NewTree} | map(Fun, Rest)];
-    {arity, 3} ->
-        [NewTree] = map_simple(Fun, Pos, [Tree]),
-        [{Pos, NewTree} | map(Fun, Rest)]
-    end.
-
-map_simple(_Fun, _Pos, []) ->
-    [];
-map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    Value2 = Fun({Pos, Key}, Value,
-            if SubTree == [] -> leaf; true -> branch end),
-    [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
-
-mapfold(_Fun, Acc, []) ->
-    {[], Acc};
-mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
-    {[NewTree], Acc2} = mapfold_simple(Fun, Acc, Pos, [Tree]),
-    {Rest2, Acc3} = mapfold(Fun, Acc2, Rest),
-    {[{Pos, NewTree} | Rest2], Acc3}.
-
-mapfold_simple(_Fun, Acc, _Pos, []) ->
-    {[], Acc};
-mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    {Value2, Acc2} = Fun({Pos, Key}, Value,
-            if SubTree == [] -> leaf; true -> branch end, Acc),
-    {SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
-    {RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
-    {[{Key, Value2, SubTree2} | RestTree2], Acc4}.
-
-
-map_leafs(_Fun, []) ->
-    [];
-map_leafs(Fun, [{Pos, Tree}|Rest]) ->
-    [NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
-    [{Pos, NewTree} | map_leafs(Fun, Rest)].
-
-map_leafs_simple(_Fun, _Pos, []) ->
-    [];
-map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
-    Value2 = Fun({Pos, Key}, Value),
-    [{Key, Value2, []} | map_leafs_simple(Fun, Pos, RestTree)];
-map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
-    [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
-
-
-stem(Trees, Limit) ->
-    % flatten each branch in a tree into a tree path, sort by starting rev #
-    Paths = lists:sort(lists:map(fun({Pos, Path}) ->
-        StemmedPath = lists:sublist(Path, Limit),
-        {Pos + 1 - length(StemmedPath), StemmedPath}
-    end, get_all_leafs_full(Trees))),
-
-    % convert paths back to trees
-    lists:foldl(
-        fun({StartPos, Path},TreeAcc) ->
-            [SingleTree] = lists:foldl(
-                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
-            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
-            NewTrees
-        end, [], Paths).
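-
-%% Illustrative example: stemming a linear path A->B->C (start position
-%% 1) with Limit = 2 keeps only the two leaf-most revisions, giving the
-%% path B->C with start position 2.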
-
-
-value_pref(Tuple, _) when is_tuple(Tuple),
-        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
-    Tuple;
-value_pref(_, Tuple) when is_tuple(Tuple),
-        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
-    Tuple;
-value_pref(?REV_MISSING, Other) ->
-    Other;
-value_pref(Other, ?REV_MISSING) ->
-    Other;
-value_pref(Last, _) ->
-    Last.
-
-
-% Tests moved to test/etap/06?-*.t
-


[32/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
add javascript test and make check target

now it's possible to run javascript tests from the console:

    make testjs

While I was here I added the make check target to launch all tests.
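
To run everything at once:

    make check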


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/fd93bf9c
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/fd93bf9c
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/fd93bf9c

Branch: refs/heads/import-rcouch
Commit: fd93bf9cc830f063f991303366f2a4db9b4cd8c2
Parents: 731f840
Author: benoitc <be...@apache.org>
Authored: Fri Jan 10 02:07:57 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch_app.erl | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/fd93bf9c/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_app.erl b/src/couch_app.erl
index 414a5c9..2e1e5bd 100644
--- a/src/couch_app.erl
+++ b/src/couch_app.erl
@@ -33,4 +33,6 @@ get_ini_files() ->
     Defaults = lists:map(fun(FName) ->
                     filename:join(DefaultConfDir, FName)
             end, ?CONF_FILES),
+    io:format("default files ~p~n", [couch:get_app_env(config_files,
+                                                       Defaults)]),
     couch:get_app_env(config_files, Defaults).


[02/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_server.erl b/src/couch_server.erl
new file mode 100644
index 0000000..7cee0f5
--- /dev/null
+++ b/src/couch_server.erl
@@ -0,0 +1,499 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server).
+-behaviour(gen_server).
+
+-export([open/2,create/2,delete/2,get_version/0,get_version/1,get_uuid/0]).
+-export([all_databases/0, all_databases/2]).
+-export([init/1, handle_call/3,sup_start_link/0]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
+
+-include("couch_db.hrl").
+
+-record(server,{
+    root_dir = [],
+    dbname_regexp,
+    max_dbs_open=100,
+    dbs_open=0,
+    start_time=""
+    }).
+
+dev_start() ->
+    couch:stop(),
+    up_to_date = make:all([load, debug_info]),
+    couch:start().
+
+get_version() ->
+    Apps = application:loaded_applications(),
+    case lists:keysearch(couch, 1, Apps) of
+    {value, {_, _, Vsn}} ->
+        Vsn;
+    false ->
+        "0.0.0"
+    end.
+get_version(short) ->
+    %% strip git hash from version string
+    [Version|_Rest] = string:tokens(get_version(), "+"),
+    Version.
+
+
+get_uuid() ->
+    case couch_config:get("couchdb", "uuid", nil) of
+        nil ->
+            UUID = couch_uuids:random(),
+            couch_config:set("couchdb", "uuid", ?b2l(UUID)),
+            UUID;
+        UUID -> ?l2b(UUID)
+    end.
+
+get_stats() ->
+    {ok, #server{start_time=Time,dbs_open=Open}} =
+            gen_server:call(couch_server, get_server),
+    [{start_time, ?l2b(Time)}, {dbs_open, Open}].
+
+sup_start_link() ->
+    gen_server:start_link({local, couch_server}, couch_server, [], []).
+
+open(DbName, Options0) ->
+    Options = maybe_add_sys_db_callbacks(DbName, Options0),
+    case gen_server:call(couch_server, {open, DbName, Options}, infinity) of
+    {ok, Db} ->
+        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
+        {ok, Db#db{user_ctx=Ctx}};
+    Error ->
+        Error
+    end.
+
+create(DbName, Options0) ->
+    Options = maybe_add_sys_db_callbacks(DbName, Options0),
+    case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
+    {ok, Db} ->
+        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
+        {ok, Db#db{user_ctx=Ctx}};
+    Error ->
+        Error
+    end.
+
+delete(DbName, Options) ->
+    gen_server:call(couch_server, {delete, DbName, Options}, infinity).
+
+maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
+    maybe_add_sys_db_callbacks(?b2l(DbName), Options);
+maybe_add_sys_db_callbacks(DbName, Options) ->
+    case couch_config:get("replicator", "db", "_replicator") of
+    DbName ->
+        [
+            {before_doc_update, fun couch_replicator_manager:before_doc_update/2},
+            {after_doc_read, fun couch_replicator_manager:after_doc_read/2},
+            sys_db | Options
+        ];
+    _ ->
+        case couch_config:get("couch_httpd_auth", "authentication_db", "_users") of
+        DbName ->
+        [
+            {before_doc_update, fun couch_users_db:before_doc_update/2},
+            {after_doc_read, fun couch_users_db:after_doc_read/2},
+            sys_db | Options
+        ];
+        _ ->
+            Options
+        end
+    end.
+
+check_dbname(#server{dbname_regexp=RegExp}, DbName) ->
+    case re:run(DbName, RegExp, [{capture, none}]) of
+    nomatch ->
+        case DbName of
+        "_users" -> ok;
+        "_replicator" -> ok;
+        _Else ->
+            {error, illegal_database_name, DbName}
+        end;
+    match ->
+        ok
+    end.
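+
+% Illustrative examples against the regexp above: "my_db-1" is legal,
+% "1db" is not (names must start with a lowercase letter), and
+% "_users"/"_replicator" are allowed via the explicit special cases.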
+
+is_admin(User, ClearPwd) ->
+    case couch_config:get("admins", User) of
+    "-hashed-" ++ HashedPwdAndSalt ->
+        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+        couch_util:to_hex(crypto:sha(ClearPwd ++ Salt)) == HashedPwd;
+    _Else ->
+        false
+    end.
+
+has_admins() ->
+    couch_config:get("admins") /= [].
+
+get_full_filename(Server, DbName) ->
+    filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
+
+hash_admin_passwords() ->
+    hash_admin_passwords(true).
+
+hash_admin_passwords(Persist) ->
+    lists:foreach(
+        fun({User, ClearPassword}) ->
+            HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
+            couch_config:set("admins", User, ?b2l(HashedPassword), Persist)
+        end, couch_passwords:get_unhashed_admins()).
+
+init([]) ->
+    % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+    % will restart us and then we will pick up the new settings.
+
+    RootDir = couch_config:get("couchdb", "database_dir", "."),
+    MaxDbsOpen = list_to_integer(
+            couch_config:get("couchdb", "max_dbs_open")),
+    Self = self(),
+    ok = couch_config:register(
+        fun("couchdb", "database_dir") ->
+            exit(Self, config_change)
+        end),
+    ok = couch_config:register(
+        fun("couchdb", "max_dbs_open", Max) ->
+            gen_server:call(couch_server,
+                    {set_max_dbs_open, list_to_integer(Max)})
+        end),
+    ok = couch_file:init_delete_dir(RootDir),
+    hash_admin_passwords(),
+    ok = couch_config:register(
+        fun("admins", _Key, _Value, Persist) ->
+            % spawn here so couch_config doesn't try to call itself
+            spawn(fun() -> hash_admin_passwords(Persist) end)
+        end, false),
+    {ok, RegExp} = re:compile("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
+    ets:new(couch_dbs_by_name, [set, private, named_table]),
+    ets:new(couch_dbs_by_pid, [set, private, named_table]),
+    ets:new(couch_dbs_by_lru, [ordered_set, private, named_table]),
+    ets:new(couch_sys_dbs, [set, private, named_table]),
+    process_flag(trap_exit, true),
+    {ok, #server{root_dir=RootDir,
+                dbname_regexp=RegExp,
+                max_dbs_open=MaxDbsOpen,
+                start_time=couch_util:rfc1123_date()}}.
+
+terminate(_Reason, _Srv) ->
+    lists:foreach(
+        fun({_, {_, Pid, _}}) ->
+                couch_util:shutdown_sync(Pid)
+        end,
+        ets:tab2list(couch_dbs_by_name)).
+
+all_databases() ->
+    {ok, DbList} = all_databases(
+        fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
+    {ok, lists:usort(DbList)}.
+
+all_databases(Fun, Acc0) ->
+    {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
+    NormRoot = couch_util:normpath(Root),
+    FinalAcc = try
+        filelib:fold_files(Root, "^[a-z0-9\\_\\$()\\+\\-]*[\\.]couch$", true,
+            fun(Filename, AccIn) ->
+                NormFilename = couch_util:normpath(Filename),
+                case NormFilename -- NormRoot of
+                [$/ | RelativeFilename] -> ok;
+                RelativeFilename -> ok
+                end,
+                case Fun(?l2b(filename:rootname(RelativeFilename, ".couch")), AccIn) of
+                {ok, NewAcc} -> NewAcc;
+                {stop, NewAcc} -> throw({stop, Fun, NewAcc})
+                end
+            end, Acc0)
+    catch throw:{stop, Fun, Acc1} ->
+         Acc1
+    end,
+    {ok, FinalAcc}.
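+
+%% Illustrative usage (this is essentially what all_databases/0 above
+%% does): collect every database name into a list.
+%%
+%%     {ok, Names} = couch_server:all_databases(
+%%         fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []).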
+
+
+maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
+        when NumOpen < MaxOpen ->
+    {ok, Server};
+maybe_close_lru_db(#server{dbs_open=NumOpen}=Server) ->
+    % must free up the lru db.
+    case try_close_lru(now()) of
+    ok ->
+        {ok, Server#server{dbs_open=NumOpen - 1}};
+    Error -> Error
+    end.
+
+try_close_lru(StartTime) ->
+    LruTime = get_lru(),
+    if LruTime > StartTime ->
+        % this means we've looped through all our opened dbs and found them
+        % all in use.
+        {error, all_dbs_active};
+    true ->
+        [{_, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
+        [{_, {opened, MainPid, LruTime}}] = ets:lookup(couch_dbs_by_name, DbName),
+        case couch_db:is_idle(MainPid) of
+        true ->
+            ok = shutdown_idle_db(DbName, MainPid, LruTime);
+        false ->
+            % this still has referrers. Go ahead and give it a current lru time
+            % and try the next one in the table.
+            NewLruTime = now(),
+            true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, NewLruTime}}),
+            true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
+            true = ets:delete(couch_dbs_by_lru, LruTime),
+            true = ets:insert(couch_dbs_by_lru, {NewLruTime, DbName}),
+            try_close_lru(StartTime)
+        end
+    end.
+
+get_lru() ->
+    get_lru(ets:first(couch_dbs_by_lru)).
+
+get_lru(LruTime) ->
+    [{LruTime, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
+    case ets:member(couch_sys_dbs, DbName) of
+    false ->
+        LruTime;
+    true ->
+        [{_, {opened, MainPid, _}}] = ets:lookup(couch_dbs_by_name, DbName),
+        case couch_db:is_idle(MainPid) of
+        true ->
+            NextLru = ets:next(couch_dbs_by_lru, LruTime),
+            ok = shutdown_idle_db(DbName, MainPid, LruTime),
+            get_lru(NextLru);
+        false ->
+            get_lru(ets:next(couch_dbs_by_lru, LruTime))
+        end
+    end.
+
+shutdown_idle_db(DbName, MainPid, LruTime) ->
+    couch_util:shutdown_sync(MainPid),
+    true = ets:delete(couch_dbs_by_lru, LruTime),
+    true = ets:delete(couch_dbs_by_name, DbName),
+    true = ets:delete(couch_dbs_by_pid, MainPid),
+    true = ets:delete(couch_sys_dbs, DbName),
+    ok.
+
+open_async(Server, From, DbName, Filepath, Options) ->
+    Parent = self(),
+    Opener = spawn_link(fun() ->
+            Res = couch_db:start_link(DbName, Filepath, Options),
+            gen_server:call(
+                Parent, {open_result, DbName, Res, Options}, infinity
+            ),
+            unlink(Parent),
+            case Res of
+            {ok, DbReader} ->
+                unlink(DbReader);
+            _ ->
+                ok
+            end
+        end),
+    true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From]}}),
+    true = ets:insert(couch_dbs_by_pid, {Opener, DbName}),
+    DbsOpen = case lists:member(sys_db, Options) of
+    true ->
+        true = ets:insert(couch_sys_dbs, {DbName, true}),
+        Server#server.dbs_open;
+    false ->
+        Server#server.dbs_open + 1
+    end,
+    Server#server{dbs_open = DbsOpen}.
+
+handle_call({set_max_dbs_open, Max}, _From, Server) ->
+    {reply, ok, Server#server{max_dbs_open=Max}};
+handle_call(get_server, _From, Server) ->
+    {reply, {ok, Server}, Server};
+handle_call({open_result, DbName, {ok, OpenedDbPid}, Options}, _From, Server) ->
+    link(OpenedDbPid),
+    [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
+    lists:foreach(fun({FromPid,_}=From) ->
+        gen_server:reply(From,
+                catch couch_db:open_ref_counted(OpenedDbPid, FromPid))
+    end, Froms),
+    LruTime = now(),
+    true = ets:insert(couch_dbs_by_name,
+            {DbName, {opened, OpenedDbPid, LruTime}}),
+    true = ets:delete(couch_dbs_by_pid, Opener),
+    true = ets:insert(couch_dbs_by_pid, {OpenedDbPid, DbName}),
+    true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+    case lists:member(create, Options) of
+    true ->
+        couch_db_update_notifier:notify({created, DbName});
+    false ->
+        ok
+    end,
+    {reply, ok, Server};
+handle_call({open_result, DbName, {error, eexist}, Options}, From, Server) ->
+    handle_call({open_result, DbName, file_exists, Options}, From, Server);
+handle_call({open_result, DbName, Error, Options}, _From, Server) ->
+    [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
+    lists:foreach(fun(From) ->
+        gen_server:reply(From, Error)
+    end, Froms),
+    true = ets:delete(couch_dbs_by_name, DbName),
+    true = ets:delete(couch_dbs_by_pid, Opener),
+    DbsOpen = case lists:member(sys_db, Options) of
+    true ->
+        true = ets:delete(couch_sys_dbs, DbName),
+        Server#server.dbs_open;
+    false ->
+        Server#server.dbs_open - 1
+    end,
+    {reply, ok, Server#server{dbs_open = DbsOpen}};
+handle_call({open, DbName, Options}, {FromPid,_}=From, Server) ->
+    LruTime = now(),
+    case ets:lookup(couch_dbs_by_name, DbName) of
+    [] ->
+        open_db(DbName, Server, Options, From);
+    [{_, {opening, Opener, Froms}}] ->
+        true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From|Froms]}}),
+        {noreply, Server};
+    [{_, {opened, MainPid, PrevLruTime}}] ->
+        true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, LruTime}}),
+        true = ets:delete(couch_dbs_by_lru, PrevLruTime),
+        true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+        {reply, couch_db:open_ref_counted(MainPid, FromPid), Server}
+    end;
+handle_call({create, DbName, Options}, From, Server) ->
+    case ets:lookup(couch_dbs_by_name, DbName) of
+    [] ->
+        open_db(DbName, Server, [create | Options], From);
+    [_AlreadyRunningDb] ->
+        {reply, file_exists, Server}
+    end;
+handle_call({delete, DbName, _Options}, _From, Server) ->
+    DbNameList = binary_to_list(DbName),
+    case check_dbname(Server, DbNameList) of
+    ok ->
+        FullFilepath = get_full_filename(Server, DbNameList),
+        UpdateState =
+        case ets:lookup(couch_dbs_by_name, DbName) of
+        [] -> false;
+        [{_, {opening, Pid, Froms}}] ->
+            couch_util:shutdown_sync(Pid),
+            true = ets:delete(couch_dbs_by_name, DbName),
+            true = ets:delete(couch_dbs_by_pid, Pid),
+            [gen_server:reply(F, not_found) || F <- Froms],
+            true;
+        [{_, {opened, Pid, LruTime}}] ->
+            couch_util:shutdown_sync(Pid),
+            true = ets:delete(couch_dbs_by_name, DbName),
+            true = ets:delete(couch_dbs_by_pid, Pid),
+            true = ets:delete(couch_dbs_by_lru, LruTime),
+            true
+        end,
+        Server2 = case UpdateState of
+        true ->
+            DbsOpen = case ets:member(couch_sys_dbs, DbName) of
+            true ->
+                true = ets:delete(couch_sys_dbs, DbName),
+                Server#server.dbs_open;
+            false ->
+                Server#server.dbs_open - 1
+            end,
+            Server#server{dbs_open = DbsOpen};
+        false ->
+            Server
+        end,
+
+        %% Delete any leftover .compact files.  If we don't do this a subsequent
+        %% request for this DB will try to open the .compact file and use it.
+        couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),
+
+        case couch_file:delete(Server#server.root_dir, FullFilepath) of
+        ok ->
+            couch_db_update_notifier:notify({deleted, DbName}),
+            {reply, ok, Server2};
+        {error, enoent} ->
+            {reply, not_found, Server2};
+        Else ->
+            {reply, Else, Server2}
+        end;
+    Error ->
+        {reply, Error, Server}
+    end.
+
+handle_cast(Msg, _Server) ->
+    exit({unknown_cast_message, Msg}).
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+    
+handle_info({'EXIT', _Pid, config_change}, Server) ->
+    {noreply, shutdown, Server};
+handle_info({'EXIT', Pid, Reason}, Server) ->
+    Server2 = case ets:lookup(couch_dbs_by_pid, Pid) of
+    [{Pid, DbName}] ->
+
+        % If the Pid is known, the name should be as well.
+        % If not, that's an error, which is why there is no [] clause.
+        case ets:lookup(couch_dbs_by_name, DbName) of
+        [{_, {opening, Pid, Froms}}] ->
+            Msg = case Reason of
+            snappy_nif_not_loaded ->
+                io_lib:format(
+                    "To open the database `~s`, Apache CouchDB "
+                    "must be built with Erlang OTP R13B04 or higher.",
+                    [DbName]
+                );
+            _ ->
+                io_lib:format("Error opening database ~p: ~p", [DbName, Reason])
+            end,
+            ?LOG_ERROR(Msg, []),
+            lists:foreach(
+              fun(F) -> gen_server:reply(F, {bad_otp_release, Msg}) end,
+              Froms
+            );
+        [{_, {opened, Pid, LruTime}}] ->
+            ?LOG_ERROR(
+                "Unexpected exit of database process ~p [~p]: ~p",
+                [Pid, DbName, Reason]
+            ),
+            true = ets:delete(couch_dbs_by_lru, LruTime)
+        end,
+
+        true = ets:delete(couch_dbs_by_pid, Pid),
+        true = ets:delete(couch_dbs_by_name, DbName),
+
+        case ets:lookup(couch_sys_dbs, DbName) of
+        [{DbName, _}] ->
+            true = ets:delete(couch_sys_dbs, DbName),
+            Server;
+        [] ->
+            Server#server{dbs_open = Server#server.dbs_open - 1}
+        end
+    end,
+    {noreply, Server2};
+handle_info(Error, _Server) ->
+    ?LOG_ERROR("Unexpected message, restarting couch_server: ~p", [Error]),
+    exit(kill).
+
+open_db(DbName, Server, Options, From) ->
+    DbNameList = binary_to_list(DbName),
+    case check_dbname(Server, DbNameList) of
+    ok ->
+        Filepath = get_full_filename(Server, DbNameList),
+        case lists:member(sys_db, Options) of
+        true ->
+            {noreply, open_async(Server, From, DbName, Filepath, Options)};
+        false ->
+            case maybe_close_lru_db(Server) of
+            {ok, Server2} ->
+                {noreply, open_async(Server2, From, DbName, Filepath, Options)};
+            CloseError ->
+                {reply, CloseError, Server}
+            end
+        end;
+     Error ->
+        {reply, Error, Server}
+     end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_server_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_server_sup.erl b/src/couch_server_sup.erl
new file mode 100644
index 0000000..be3c3a3
--- /dev/null
+++ b/src/couch_server_sup.erl
@@ -0,0 +1,164 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server_sup).
+-behaviour(supervisor).
+
+
+-export([start_link/1,stop/0, couch_config_start_link_wrapper/2,
+        restart_core_server/0, config_change/2]).
+
+-include("couch_db.hrl").
+
+%% supervisor callbacks
+-export([init/1]).
+
+start_link(IniFiles) ->
+    case whereis(couch_server_sup) of
+    undefined ->
+        start_server(IniFiles);
+    _Else ->
+        {error, already_started}
+    end.
+
+restart_core_server() ->
+    init:restart().
+
+couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
+    case is_process_alive(FirstConfigPid) of
+        true ->
+            link(FirstConfigPid),
+            {ok, FirstConfigPid};
+        false -> couch_config:start_link(IniFiles)
+    end.
+
+start_server(IniFiles) ->
+    case init:get_argument(pidfile) of
+    {ok, [PidFile]} ->
+        case file:write_file(PidFile, os:getpid()) of
+        ok -> ok;
+        {error, Reason} ->
+            io:format("Failed to write PID file ~s: ~s",
+                [PidFile, file:format_error(Reason)])
+        end;
+    _ -> ok
+    end,
+
+    {ok, ConfigPid} = couch_config:start_link(IniFiles),
+
+    LogLevel = couch_config:get("log", "level", "info"),
+    % announce startup
+    io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
+        couch_server:get_version(),
+        LogLevel
+    ]),
+    case LogLevel of
+    "debug" ->
+        io:format("Configuration Settings ~p:~n", [IniFiles]),
+        [io:format("  [~s] ~s=~p~n", [Module, Variable, Value])
+            || {{Module, Variable}, Value} <- couch_config:all()];
+    _ -> ok
+    end,
+
+    BaseChildSpecs =
+    {{one_for_all, 10, 3600},
+        [{couch_config,
+            {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
+            permanent,
+            brutal_kill,
+            worker,
+            [couch_config]},
+        {couch_primary_services,
+            {couch_primary_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_primary_sup]},
+        {couch_secondary_services,
+            {couch_secondary_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_secondary_sup]}
+        ]},
+
+    % ensure these applications are running
+    application:start(ibrowse),
+    application:start(crypto),
+
+    {ok, Pid} = supervisor:start_link(
+        {local, couch_server_sup}, couch_server_sup, BaseChildSpecs),
+
+    % register for configuration changes; the config_change/2 callback
+    % restarts the affected services when a relevant setting changes.
+    couch_config:register(fun ?MODULE:config_change/2, Pid),
+
+    unlink(ConfigPid),
+
+    Ip = couch_config:get("httpd", "bind_address"),
+    io:format("Apache CouchDB has started. Time to relax.~n"),
+    Uris = [get_uri(Name, Ip) || Name <- [couch_httpd, https]],
+    [begin
+        case Uri of
+            undefined -> ok;
+            Uri -> ?LOG_INFO("Apache CouchDB has started on ~s", [Uri])
+        end
+    end
+    || Uri <- Uris],
+    case couch_config:get("couchdb", "uri_file", null) of 
+    null -> ok;
+    UriFile ->
+        Lines = [begin case Uri of
+            undefined -> [];
+            Uri -> io_lib:format("~s~n", [Uri])
+            end end || Uri <- Uris],
+        case file:write_file(UriFile, Lines) of
+        ok -> ok;
+        {error, Reason2} = Error ->
+            ?LOG_ERROR("Failed to write to URI file ~s: ~s",
+                [UriFile, file:format_error(Reason2)]),
+            throw(Error)
+        end
+    end,
+
+    {ok, Pid}.
+
+stop() ->
+    catch exit(whereis(couch_server_sup), normal).
+
+config_change("daemons", _) ->
+    supervisor:terminate_child(couch_server_sup, couch_secondary_services),
+    supervisor:restart_child(couch_server_sup, couch_secondary_services);
+config_change("couchdb", "util_driver_dir") ->
+    init:restart().
+
+init(ChildSpecs) ->
+    {ok, ChildSpecs}.
+
+get_uri(Name, Ip) ->
+    case get_port(Name) of
+        undefined ->
+            undefined;
+        Port ->
+            io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
+    end.
+
+get_scheme(couch_httpd) -> "http";
+get_scheme(https) -> "https".
+
+get_port(Name) ->
+    try
+        mochiweb_socket_server:get(Name, port)
+    catch
+        exit:{noproc, _}->
+            undefined
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_stats_aggregator.erl
----------------------------------------------------------------------
diff --git a/src/couch_stats_aggregator.erl b/src/couch_stats_aggregator.erl
new file mode 100644
index 0000000..6090355
--- /dev/null
+++ b/src/couch_stats_aggregator.erl
@@ -0,0 +1,297 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stats_aggregator).
+-behaviour(gen_server).
+
+-export([start/0, start/1, stop/0]).
+-export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-record(aggregate, {
+    description = <<"">>,
+    seconds = 0,
+    count = 0,
+    current = null,
+    sum = null,
+    mean = null,
+    variance = null,
+    stddev = null,
+    min = null,
+    max = null,
+    samples = []
+}).
+
+
+start() ->
+    PrivDir = couch_util:priv_dir(),
+    start(filename:join(PrivDir, "stat_descriptions.cfg")).
+    
+start(FileName) ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [FileName], []).
+
+stop() ->
+    gen_server:cast(?MODULE, stop).
+
+all() ->
+    ?MODULE:all(0).
+all(Time) when is_binary(Time) ->
+    ?MODULE:all(list_to_integer(binary_to_list(Time)));
+all(Time) when is_atom(Time) ->
+    ?MODULE:all(list_to_integer(atom_to_list(Time)));
+all(Time) when is_integer(Time) ->
+    Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}),
+    Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs),
+    case Stats of
+        [] ->
+            {[]};
+        _ ->
+            Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) ->
+                CurrKeys = case proplists:lookup(Mod, Acc) of
+                    none -> [];
+                    {Mod, {Keys}} -> Keys
+                end,
+                NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]},
+                [{Mod, NewMod} | proplists:delete(Mod, Acc)]
+            end, [], Stats),
+            {Ret}
+    end.
+
+get(Key) ->
+    ?MODULE:get(Key, 0).
+get(Key, Time) when is_binary(Time) ->
+    ?MODULE:get(Key, list_to_integer(binary_to_list(Time)));
+get(Key, Time) when is_atom(Time) ->
+    ?MODULE:get(Key, list_to_integer(atom_to_list(Time)));
+get(Key, Time) when is_integer(Time) ->
+    case ets:lookup(?MODULE, {make_key(Key), Time}) of
+        [] -> #aggregate{seconds=Time};
+        [{_, Agg}] -> Agg
+    end.
+
+get_json(Key) ->
+    get_json(Key, 0).
+get_json(Key, Time) ->
+    to_json_term(?MODULE:get(Key, Time)).
+
+collect_sample() ->
+    gen_server:call(?MODULE, collect_sample, infinity).
+
+
+init(StatDescsFileName) ->
+    % Create an aggregate entry for each {description, rate} pair.
+    ets:new(?MODULE, [named_table, set, protected]),
+    SampleStr = couch_config:get("stats", "samples", "[0]"),
+    {ok, Samples} = couch_util:parse_term(SampleStr),
+    {ok, Descs} = file:consult(StatDescsFileName),
+    lists:foreach(fun({Sect, Key, Value}) ->
+        lists:foreach(fun(Secs) ->
+            Agg = #aggregate{
+                description=list_to_binary(Value),
+                seconds=Secs
+            },
+            ets:insert(?MODULE, {{{Sect, Key}, Secs}, Agg})
+        end, Samples)
+    end, Descs),
+    
+    Self = self(),
+    ok = couch_config:register(
+        fun("stats", _) -> exit(Self, config_change) end
+    ),
+    
+    Rate = list_to_integer(couch_config:get("stats", "rate", "1000")),
+    % TODO: Add timer_start to kernel start options.
+    {ok, TRef} = timer:apply_after(Rate, ?MODULE, collect_sample, []),
+    {ok, {TRef, Rate}}.
+    
+terminate(_Reason, {TRef, _Rate}) ->
+    timer:cancel(TRef),
+    ok.
+
+handle_call(collect_sample, _, {OldTRef, SampleInterval}) ->
+    timer:cancel(OldTRef),
+    {ok, TRef} = timer:apply_after(SampleInterval, ?MODULE, collect_sample, []),
+    % Gather new stats values to add.
+    Incs = lists:map(fun({Key, Value}) ->
+        {Key, {incremental, Value}}
+    end, couch_stats_collector:all(incremental)),
+    Abs = lists:map(fun({Key, Values}) ->
+        couch_stats_collector:clear(Key),
+        Values2 = case Values of
+            X when is_list(X) -> X;
+            Else -> [Else]
+        end,
+        {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) ->
+            {Count+1, Curr + (Val - Curr) / (Count+1)}
+        end, {0, 0}, Values2),
+        {Key, {absolute, Mean}}
+    end, couch_stats_collector:all(absolute)),
+    
+    Values = Incs ++ Abs,
+    Now = erlang:now(),
+    lists:foreach(fun({{Key, Rate}, Agg}) ->
+        NewAgg = case proplists:lookup(Key, Values) of
+            none ->
+                rem_values(Now, Agg);
+            {Key, {Type, Value}} ->
+                NewValue = new_value(Type, Value, Agg#aggregate.current),
+                Agg2 = add_value(Now, NewValue, Agg),
+                rem_values(Now, Agg2)
+        end,
+        ets:insert(?MODULE, {{Key, Rate}, NewAgg})
+    end, ets:tab2list(?MODULE)),
+    {reply, ok, {TRef, SampleInterval}}.
+
+handle_cast(stop, State) ->
+    {stop, normal, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+code_change(_OldVersion, State, _Extra) ->
+    {ok, State}.
+
+
+new_value(incremental, Value, null) ->
+    Value;
+new_value(incremental, Value, Current) ->
+    Value - Current;
+new_value(absolute, Value, _Current) ->
+    Value.
+
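+% add_value/3 maintains a running count, mean and variance using the
+% standard online (Welford-style) update; stddev is derived from the
+% sample variance once more than one value has been seen.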
+add_value(Time, Value, #aggregate{count=Count, seconds=Secs}=Agg) when Count < 1 ->
+    Samples = case Secs of
+        0 -> [];
+        _ -> [{Time, Value}]
+    end,
+    Agg#aggregate{
+        count=1,
+        current=Value,
+        sum=Value,
+        mean=Value,
+        variance=0.0,
+        stddev=null,
+        min=Value,
+        max=Value,
+        samples=Samples
+    };
+add_value(Time, Value, Agg) ->
+    #aggregate{
+        count=Count,
+        current=Current,
+        sum=Sum,
+        mean=Mean,
+        variance=Variance,
+        samples=Samples
+    } = Agg,
+    
+    NewCount = Count + 1,
+    NewMean = Mean + (Value - Mean) / NewCount,
+    NewVariance = Variance + (Value - Mean) * (Value - NewMean),
+    StdDev = case NewCount > 1 of
+        false -> null;
+        _ -> math:sqrt(NewVariance / (NewCount - 1))
+    end,
+    Agg2 = Agg#aggregate{
+        count=NewCount,
+        current=Current + Value,
+        sum=Sum + Value,
+        mean=NewMean,
+        variance=NewVariance,
+        stddev=StdDev,
+        min=lists:min([Agg#aggregate.min, Value]),
+        max=lists:max([Agg#aggregate.max, Value])
+    },
+    case Agg2#aggregate.seconds of
+        0 -> Agg2;
+        _ -> Agg2#aggregate{samples=[{Time, Value} | Samples]}
+    end.
+
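+% rem_values/2 drops samples that have aged out of the aggregate's time
+% window; rem_value/3 reverses the online mean/variance update for each
+% expired sample, with clamp_value/1 flooring tiny negative rounding
+% errors to zero.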
+rem_values(Time, Agg) ->
+    Seconds = Agg#aggregate.seconds,
+    Samples = Agg#aggregate.samples,
+    Pred = fun({When, _Value}) ->
+        timer:now_diff(Time, When) =< (Seconds * 1000000)
+    end,
+    {Keep, Remove} = lists:splitwith(Pred, Samples),
+    Agg2 = lists:foldl(fun({_, Value}, Acc) ->
+        rem_value(Value, Acc)
+    end, Agg, Remove),
+    Agg2#aggregate{samples=Keep}.
+
+rem_value(_Value, #aggregate{count=Count, seconds=Secs}) when Count =< 1 ->
+    #aggregate{seconds=Secs};
+rem_value(Value, Agg) ->
+    #aggregate{
+        count=Count,
+        sum=Sum,
+        mean=Mean,
+        variance=Variance
+    } = Agg,
+
+    OldMean = (Mean * Count - Value) / (Count - 1),
+    OldVariance = Variance - (Value - OldMean) * (Value - Mean),
+    OldCount = Count - 1,
+    StdDev = case OldCount > 1 of
+        false -> null;
+        _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
+    end,
+    Agg#aggregate{
+        count=OldCount,
+        sum=Sum-Value,
+        mean=clamp_value(OldMean),
+        variance=clamp_value(OldVariance),
+        stddev=StdDev
+    }.
+
+to_json_term(Agg) ->
+    {Min, Max} = case Agg#aggregate.seconds > 0 of
+        false ->
+            {Agg#aggregate.min, Agg#aggregate.max};
+        _ ->
+            case length(Agg#aggregate.samples) > 0 of
+                true ->
+                    Extract = fun({_Time, Value}) -> Value end,
+                    Samples = lists:map(Extract, Agg#aggregate.samples),
+                    {lists:min(Samples), lists:max(Samples)};
+                _ ->
+                    {null, null}
+            end
+    end,
+    {[
+        {description, Agg#aggregate.description},
+        {current, round_value(Agg#aggregate.sum)},
+        {sum, round_value(Agg#aggregate.sum)},
+        {mean, round_value(Agg#aggregate.mean)},
+        {stddev, round_value(Agg#aggregate.stddev)},
+        {min, Min},
+        {max, Max}
+    ]}.
+
+make_key({Mod, Val}) when is_integer(Val) ->
+    {Mod, list_to_atom(integer_to_list(Val))};
+make_key(Key) ->
+    Key.
+
+round_value(Val) when not is_number(Val) ->
+    Val;
+round_value(Val) when Val == 0 ->
+    Val;
+round_value(Val) ->
+    erlang:round(Val * 1000.0) / 1000.0.
+
+clamp_value(Val) when Val > 0.00000000000001 ->
+    Val;
+clamp_value(_) ->
+    0.0.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_stats_collector.erl
----------------------------------------------------------------------
diff --git a/src/couch_stats_collector.erl b/src/couch_stats_collector.erl
new file mode 100644
index 0000000..f7b9bb4
--- /dev/null
+++ b/src/couch_stats_collector.erl
@@ -0,0 +1,136 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% todo
+% - remove existence check on increment(), decrement() and record(). have
+%   modules initialize counters on startup.
+
+-module(couch_stats_collector).
+
+-behaviour(gen_server).
+
+-export([start/0, stop/0]).
+-export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
+-export([track_process_count/1, track_process_count/2]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-define(HIT_TABLE, stats_hit_table).
+-define(ABS_TABLE, stats_abs_table).
+
+start() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+stop() ->
+    gen_server:call(?MODULE, stop).
+
+all() ->
+    ets:tab2list(?HIT_TABLE) ++ abs_to_list().
+
+all(Type) ->
+    case Type of
+        incremental -> ets:tab2list(?HIT_TABLE);
+        absolute -> abs_to_list()
+    end.
+
+get(Key) ->
+    case ets:lookup(?HIT_TABLE, Key) of
+        [] ->
+            case ets:lookup(?ABS_TABLE, Key) of
+                [] ->
+                    nil;
+                AbsVals ->
+                    lists:map(fun({_, Value}) -> Value end, AbsVals)
+            end;
+        [{_, Counter}] ->
+            Counter
+    end.
+
+increment(Key) ->
+    Key2 = make_key(Key),
+    case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
+        {'EXIT', {badarg, _}} ->
+            catch ets:insert(?HIT_TABLE, {Key2, 1}),
+            ok;
+        _ ->
+            ok
+    end.
+
+decrement(Key) ->
+    Key2 = make_key(Key),
+    case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
+        {'EXIT', {badarg, _}} ->
+            catch ets:insert(?HIT_TABLE, {Key2, -1}),
+            ok;
+        _ -> ok
+    end.
+
+record(Key, Value) ->
+    catch ets:insert(?ABS_TABLE, {make_key(Key), Value}).
+
+clear(Key) ->
+    catch ets:delete(?ABS_TABLE, make_key(Key)).
+
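+% track_process_count/1,2 increments the given stat and spawns a monitor
+% process that decrements it again when the tracked process dies.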
+track_process_count(Stat) ->
+    track_process_count(self(), Stat).
+
+track_process_count(Pid, Stat) ->
+    MonitorFun = fun() ->
+        Ref = erlang:monitor(process, Pid),
+        receive {'DOWN', Ref, _, _, _} -> ok end,
+        couch_stats_collector:decrement(Stat)
+    end,
+    case (catch couch_stats_collector:increment(Stat)) of
+        ok -> spawn(MonitorFun);
+        _ -> ok
+    end.
+
+
+init(_) ->
+    ets:new(?HIT_TABLE, [named_table, set, public]),
+    ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
+    {ok, nil}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+handle_call(stop, _, State) ->
+    {stop, normal, stopped, State}.
+
+handle_cast(foo, State) ->
+    {noreply, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+code_change(_OldVersion, State, _Extra) ->
+    {ok, State}.
+
+
+make_key({Module, Key}) when is_integer(Key) ->
+    {Module, list_to_atom(integer_to_list(Key))};
+make_key(Key) ->
+    Key.
+
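+% abs_to_list/0 sorts the duplicate_bag contents and folds them into one
+% {Key, [Values]} entry per key by grouping consecutive equal keys.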
+abs_to_list() ->
+    SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
+    lists:foldl(fun({Key, Val}, Acc) ->
+        case Acc of
+            [] ->
+                [{Key, [Val]}];
+            [{Key, Prev} | Rest] ->
+                [{Key, [Val | Prev]} | Rest];
+            Others ->
+                [{Key, [Val]} | Others]
+        end
+    end, [], SortedKVs).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_stream.erl
----------------------------------------------------------------------
diff --git a/src/couch_stream.erl b/src/couch_stream.erl
new file mode 100644
index 0000000..959feef
--- /dev/null
+++ b/src/couch_stream.erl
@@ -0,0 +1,299 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stream).
+-behaviour(gen_server).
+
+% public API
+-export([open/1, open/2, close/1]).
+-export([foldl/4, foldl/5, foldl_decode/6, range_foldl/6]).
+-export([copy_to_new_stream/3, write/2]).
+
+% gen_server callbacks
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_cast/2, handle_call/3, handle_info/2]).
+
+-include("couch_db.hrl").
+
+-define(DEFAULT_BUFFER_SIZE, 4096).
+
+-record(stream,
+    {fd = 0,
+    written_pointers=[],
+    buffer_list = [],
+    buffer_len = 0,
+    max_buffer,
+    written_len = 0,
+    md5,
+    % md5 of the content without any transformation applied (e.g. compression)
+    % needed for the attachment upload integrity check (ticket 558)
+    identity_md5,
+    identity_len = 0,
+    encoding_fun,
+    end_encoding_fun
+    }).
+
+
+%%% Interface functions %%%
+
+open(Fd) ->
+    open(Fd, []).
+
+open(Fd, Options) ->
+    gen_server:start_link(couch_stream, {Fd, Options}, []).
+
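+% close/1 flushes any buffered data and returns
+% {WrittenPointers, WrittenLen, IdentityLen, Md5, IdentityMd5}
+% (see the close clause of handle_call/3 below).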
+close(Pid) ->
+    gen_server:call(Pid, close, infinity).
+
+copy_to_new_stream(Fd, PosList, DestFd) ->
+    {ok, Dest} = open(DestFd),
+    foldl(Fd, PosList,
+        fun(Bin, _) ->
+            ok = write(Dest, Bin)
+        end, ok),
+    close(Dest).
+
+foldl(_Fd, [], _Fun, Acc) ->
+    Acc;
+foldl(Fd, [Pos|Rest], Fun, Acc) ->
+    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+    foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
+
+foldl(Fd, PosList, <<>>, Fun, Acc) ->
+    foldl(Fd, PosList, Fun, Acc);
+foldl(Fd, PosList, Md5, Fun, Acc) ->
+    foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).
+
+foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
+    {DecDataFun, DecEndFun} = case Enc of
+    gzip ->
+        ungzip_init();
+    identity ->
+        identity_enc_dec_funs()
+    end,
+    Result = foldl_decode(
+        DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
+    ),
+    DecEndFun(),
+    Result.
+
+foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
+    Md5 = couch_util:md5_final(Md5Acc),
+    Acc;
+foldl(Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) -> % 0110 UPGRADE CODE
+    foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc);
+foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
+    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
+    Fun(Bin, Acc);
+foldl(Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
+    foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
+foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
+    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+    foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
+
+range_foldl(Fd, PosList, From, To, Fun, Acc) ->
+    range_foldl(Fd, PosList, From, To, 0, Fun, Acc).
+
+range_foldl(_Fd, _PosList, _From, To, Off, _Fun, Acc) when Off >= To ->
+    Acc;
+range_foldl(Fd, [Pos|Rest], From, To, Off, Fun, Acc) when is_integer(Pos) -> % old-style attachment
+    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+    range_foldl(Fd, [{Pos, iolist_size(Bin)}] ++ Rest, From, To, Off, Fun, Acc);
+range_foldl(Fd, [{_Pos, Size}|Rest], From, To, Off, Fun, Acc) when From > Off + Size ->
+    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Acc);
+range_foldl(Fd, [{Pos, Size}|Rest], From, To, Off, Fun, Acc) ->
+    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+    Bin1 = if
+        From =< Off andalso To >= Off + Size -> Bin; %% the whole block is covered
+        true ->
+            PrefixLen = clip(From - Off, 0, Size),
+            PostfixLen = clip(Off + Size - To, 0, Size),
+            MatchLen = Size - PrefixLen - PostfixLen,
+            <<_Prefix:PrefixLen/binary,Match:MatchLen/binary,_Postfix:PostfixLen/binary>> = iolist_to_binary(Bin),
+            Match
+    end,
+    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Fun(Bin1, Acc)).
+
+clip(Value, Lo, Hi) ->
+    if
+        Value < Lo -> Lo;
+        Value > Hi -> Hi;
+        true -> Value
+    end.
+
+foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
+    Md5 = couch_util:md5_final(Md5Acc),
+    Acc;
+foldl_decode(DecFun, Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) ->
+    foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc);
+foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
+    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
+    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)),
+    Bin = DecFun(EncBin),
+    Fun(Bin, Acc);
+foldl_decode(DecFun, Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
+    foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
+foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
+    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
+    Bin = DecFun(EncBin),
+    Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin),
+    foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
+
+gzip_init(Options) ->
+    case couch_util:get_value(compression_level, Options, 0) of
+    Lvl when Lvl >= 1 andalso Lvl =< 9 ->
+        Z = zlib:open(),
+        % 15 = ?MAX_WBITS (defined in the zlib module)
+        % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
+        ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
+        {
+            fun(Data) ->
+                zlib:deflate(Z, Data)
+            end,
+            fun() ->
+                Last = zlib:deflate(Z, [], finish),
+                ok = zlib:deflateEnd(Z),
+                ok = zlib:close(Z),
+                Last
+            end
+        };
+    _ ->
+        identity_enc_dec_funs()
+    end.
+
+ungzip_init() ->
+    Z = zlib:open(),
+    zlib:inflateInit(Z, 16 + 15),
+    {
+        fun(Data) ->
+            zlib:inflate(Z, Data)
+        end,
+        fun() ->
+            ok = zlib:inflateEnd(Z),
+            ok = zlib:close(Z)
+        end
+    }.
+
+identity_enc_dec_funs() ->
+    {
+        fun(Data) -> Data end,
+        fun() -> [] end
+    }.
+
+write(_Pid, <<>>) ->
+    ok;
+write(Pid, Bin) ->
+    gen_server:call(Pid, {write, Bin}, infinity).
+
+
+init({Fd, Options}) ->
+    {EncodingFun, EndEncodingFun} =
+    case couch_util:get_value(encoding, Options, identity) of
+    identity ->
+        identity_enc_dec_funs();
+    gzip ->
+        gzip_init(Options)
+    end,
+    {ok, #stream{
+            fd=Fd,
+            md5=couch_util:md5_init(),
+            identity_md5=couch_util:md5_init(),
+            encoding_fun=EncodingFun,
+            end_encoding_fun=EndEncodingFun,
+            max_buffer=couch_util:get_value(
+                buffer_size, Options, ?DEFAULT_BUFFER_SIZE)
+        }
+    }.
+
+terminate(_Reason, _Stream) ->
+    ok.
+
+handle_call({write, Bin}, _From, Stream) ->
+    BinSize = iolist_size(Bin),
+    #stream{
+        fd = Fd,
+        written_len = WrittenLen,
+        written_pointers = Written,
+        buffer_len = BufferLen,
+        buffer_list = Buffer,
+        max_buffer = Max,
+        md5 = Md5,
+        identity_md5 = IdenMd5,
+        identity_len = IdenLen,
+        encoding_fun = EncodingFun} = Stream,
+    if BinSize + BufferLen > Max ->
+        WriteBin = lists:reverse(Buffer, [Bin]),
+        IdenMd5_2 = couch_util:md5_update(IdenMd5, WriteBin),
+        case EncodingFun(WriteBin) of
+        [] ->
+            % case where the encoder did some internal buffering
+            % (zlib does this, for example)
+            WrittenLen2 = WrittenLen,
+            Md5_2 = Md5,
+            Written2 = Written;
+        WriteBin2 ->
+            {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
+            WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
+            Md5_2 = couch_util:md5_update(Md5, WriteBin2),
+            Written2 = [{Pos, iolist_size(WriteBin2)}|Written]
+        end,
+
+        {reply, ok, Stream#stream{
+                        written_len=WrittenLen2,
+                        written_pointers=Written2,
+                        buffer_list=[],
+                        buffer_len=0,
+                        md5=Md5_2,
+                        identity_md5=IdenMd5_2,
+                        identity_len=IdenLen + BinSize}};
+    true ->
+        {reply, ok, Stream#stream{
+                        buffer_list=[Bin|Buffer],
+                        buffer_len=BufferLen + BinSize,
+                        identity_len=IdenLen + BinSize}}
+    end;
+handle_call(close, _From, Stream) ->
+    #stream{
+        fd = Fd,
+        written_len = WrittenLen,
+        written_pointers = Written,
+        buffer_list = Buffer,
+        md5 = Md5,
+        identity_md5 = IdenMd5,
+        identity_len = IdenLen,
+        encoding_fun = EncodingFun,
+        end_encoding_fun = EndEncodingFun} = Stream,
+
+    WriteBin = lists:reverse(Buffer),
+    IdenMd5Final = couch_util:md5_final(couch_util:md5_update(IdenMd5, WriteBin)),
+    WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
+    Md5Final = couch_util:md5_final(couch_util:md5_update(Md5, WriteBin2)),
+    Result = case WriteBin2 of
+    [] ->
+        {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
+    _ ->
+        {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
+        StreamInfo = lists:reverse(Written, [{Pos, iolist_size(WriteBin2)}]),
+        StreamLen = WrittenLen + iolist_size(WriteBin2),
+        {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final}
+    end,
+    {stop, normal, Result, Stream}.
+
+handle_cast(_Msg, State) ->
+    {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_task_status.erl
----------------------------------------------------------------------
diff --git a/src/couch_task_status.erl b/src/couch_task_status.erl
new file mode 100644
index 0000000..e23b560
--- /dev/null
+++ b/src/couch_task_status.erl
@@ -0,0 +1,151 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_task_status).
+-behaviour(gen_server).
+
+% This module is used to track the status of long running tasks.
+% Long running tasks register themselves, via a call to add_task/1, and then
+% update their status properties via update/1. The status of a task is a
+% list of properties. Each property is a tuple whose first element is
+% either an atom or a binary and whose second element is an EJSON value. When
+% a task updates its status, it can override some or all of its properties.
+% The properties {started_on, UnixTimestamp}, {updated_on, UnixTimestamp} and
+% {pid, ErlangPid} are automatically added by this module.
+% When a tracked task dies, its status will be automatically removed from
+% memory. To get the tasks list, call the all/0 function.
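+%
+% A minimal usage sketch (hypothetical caller, for illustration only):
+%
+%   couch_task_status:add_task([{type, replication}, {progress, 0}]),
+%   couch_task_status:set_update_frequency(500),
+%   couch_task_status:update([{progress, 50}]),
+%   couch_task_status:all().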
+
+-export([start_link/0, stop/0]).
+-export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
+-export([is_task_added/0]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-include("couch_db.hrl").
+
+-define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+stop() ->
+    gen_server:cast(?MODULE, stop).
+
+
+all() ->
+    gen_server:call(?MODULE, all).
+
+
+add_task(Props) ->
+    put(task_status_update, {{0, 0, 0}, 0}),
+    Ts = timestamp(),
+    TaskProps = lists:ukeysort(
+        1, [{started_on, Ts}, {updated_on, Ts} | Props]),
+    put(task_status_props, TaskProps),
+    gen_server:call(?MODULE, {add_task, TaskProps}).
+
+
+is_task_added() ->
+    is_list(erlang:get(task_status_props)).
+
+
+set_update_frequency(Msecs) ->
+    put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
+
+
+update(Props) ->
+    MergeProps = lists:ukeysort(1, Props),
+    TaskProps = lists:ukeymerge(1, MergeProps, erlang:get(task_status_props)),
+    put(task_status_props, TaskProps),
+    maybe_persist(TaskProps).
+
+
+get(Props) when is_list(Props) ->
+    TaskProps = erlang:get(task_status_props),
+    [couch_util:get_value(P, TaskProps) || P <- Props];
+get(Prop) ->
+    TaskProps = erlang:get(task_status_props),
+    couch_util:get_value(Prop, TaskProps).
+
+
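+% Rate-limit status updates: the last update time and the configured
+% frequency live in the process dictionary, and an update is only pushed
+% to the gen_server once per interval.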
+maybe_persist(TaskProps0) ->
+    {LastUpdateTime, Frequency} = erlang:get(task_status_update),
+    case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of
+    true ->
+        put(task_status_update, {Now, Frequency}),
+        TaskProps = ?set(TaskProps0, updated_on, timestamp(Now)),
+        gen_server:cast(?MODULE, {update_status, self(), TaskProps});
+    false ->
+        ok
+    end.
+
+
+init([]) ->
+    % read configuration settings and register for configuration changes
+    ets:new(?MODULE, [ordered_set, protected, named_table]),
+    {ok, nil}.
+
+
+terminate(_Reason,_State) ->
+    ok.
+
+
+handle_call({add_task, TaskProps}, {From, _}, Server) ->
+    case ets:lookup(?MODULE, From) of
+    [] ->
+        true = ets:insert(?MODULE, {From, TaskProps}),
+        erlang:monitor(process, From),
+        {reply, ok, Server};
+    [_] ->
+        {reply, {add_task_error, already_registered}, Server}
+    end;
+handle_call(all, _, Server) ->
+    All = [
+        [{pid, ?l2b(pid_to_list(Pid))} | TaskProps]
+        ||
+        {Pid, TaskProps} <- ets:tab2list(?MODULE)
+    ],
+    {reply, All, Server}.
+
+
+handle_cast({update_status, Pid, NewProps}, Server) ->
+    case ets:lookup(?MODULE, Pid) of
+    [{Pid, _CurProps}] ->
+        ?LOG_DEBUG("New task status for ~p: ~p", [Pid, NewProps]),
+        true = ets:insert(?MODULE, {Pid, NewProps});
+    _ ->
+        % Task finished/died in the meantime and we must have received
+        % a monitor message before this call - ignore.
+        ok
+    end,
+    {noreply, Server};
+handle_cast(stop, State) ->
+    {stop, normal, State}.
+
+handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
+    %% should we also call erlang:demonitor(_MonitorRef) here?
+    ets:delete(?MODULE, Pid),
+    {noreply, Server}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+timestamp() ->
+    timestamp(now()).
+
+timestamp({Mega, Secs, _}) ->
+    Mega * 1000000 + Secs.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_users_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_users_db.erl b/src/couch_users_db.erl
new file mode 100644
index 0000000..9b875ba
--- /dev/null
+++ b/src/couch_users_db.erl
@@ -0,0 +1,121 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_users_db).
+
+-export([before_doc_update/2, after_doc_read/2, strip_non_public_fields/1]).
+
+-include("couch_db.hrl").
+
+-define(NAME, <<"name">>).
+-define(PASSWORD, <<"password">>).
+-define(DERIVED_KEY, <<"derived_key">>).
+-define(PASSWORD_SCHEME, <<"password_scheme">>).
+-define(PBKDF2, <<"pbkdf2">>).
+-define(ITERATIONS, <<"iterations">>).
+-define(SALT, <<"salt">>).
+-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
+
+% If the request's userCtx identifies an admin
+%   -> save_doc (see below)
+%
+% If the request's userCtx.name is null:
+%   -> save_doc
+%   // this is an anonymous user registering a new document
+%   // in case a user doc with the same id already exists, the anonymous
+%   // user will get a regular doc update conflict.
+% If the request's userCtx.name doesn't match the doc's name
+%   -> 404 // Not Found
+% Else
+%   -> save_doc
+before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) ->
+    #user_ctx{name=Name} = UserCtx,
+    DocName = get_doc_name(Doc),
+    case (catch couch_db:check_is_admin(Db)) of
+    ok ->
+        save_doc(Doc);
+    _ when Name =:= DocName orelse Name =:= null ->
+        save_doc(Doc);
+    _ ->
+        throw(not_found)
+    end.
+
+% If newDoc.password == null || newDoc.password == undefined:
+%   -> noop
+% Else -> // calculate password hash server side
+%    newDoc.password_scheme = "pbkdf2"
+%    newDoc.iterations = Iterations
+%    newDoc.derived_key = pbkdf2(newDoc.password, salt, Iterations)
+%    newDoc.salt = salt
+%    newDoc.password = null
+save_doc(#doc{body={Body}} = Doc) ->
+    case couch_util:get_value(?PASSWORD, Body) of
+    null -> % server admins don't have a user-db password entry
+        Doc;
+    undefined ->
+        Doc;
+    ClearPassword ->
+        Iterations = list_to_integer(couch_config:get("couch_httpd_auth", "iterations", "1000")),
+        Salt = couch_uuids:random(),
+        DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
+        Body0 = [{?PASSWORD_SCHEME, ?PBKDF2}, {?ITERATIONS, Iterations}|Body],
+        Body1 = ?replace(Body0, ?DERIVED_KEY, DerivedKey),
+        Body2 = ?replace(Body1, ?SALT, Salt),
+        Body3 = proplists:delete(?PASSWORD, Body2),
+        Doc#doc{body={Body3}}
+    end.
+
+% If the doc is a design doc
+%   If the request's userCtx identifies an admin
+%     -> return doc
+%   Else
+%     -> 403 // Forbidden
+% If the request's userCtx identifies an admin
+%   -> return doc
+% If the request's userCtx.name doesn't match the doc's name
+%   -> 404 // Not Found
+% Else
+%   -> return doc
+after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
+    case (catch couch_db:check_is_admin(Db)) of
+    ok ->
+        Doc;
+    _ ->
+        throw({forbidden,
+        <<"Only administrators can view design docs in the users database.">>})
+    end;
+after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) ->
+    #user_ctx{name=Name} = UserCtx,
+    DocName = get_doc_name(Doc),
+    case (catch couch_db:check_is_admin(Db)) of
+    ok ->
+        Doc;
+    _ when Name =:= DocName ->
+        Doc;
+    _ ->
+        Doc1 = strip_non_public_fields(Doc),
+        case Doc1 of
+          #doc{body={[]}} ->
+              throw(not_found);
+          _ ->
+              Doc1
+        end
+    end.
+
+get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
+    Name;
+get_doc_name(_) ->
+    undefined.
+
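+% Strips the doc body down to the fields listed in the
+% [couch_httpd_auth] public_fields config setting.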
+strip_non_public_fields(#doc{body={Props}}=Doc) ->
+    Public = re:split(couch_config:get("couch_httpd_auth", "public_fields", ""),
+                      "\\s*,\\s*", [{return, binary}]),
+    Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_util.erl b/src/couch_util.erl
new file mode 100644
index 0000000..76a9293
--- /dev/null
+++ b/src/couch_util.erl
@@ -0,0 +1,487 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util).
+
+-export([start_app_deps/1, ensure_started/1]).
+-export([priv_dir/0, normpath/1]).
+-export([should_flush/0, should_flush/1, to_existing_atom/1]).
+-export([rand32/0, implode/2]).
+-export([abs_pathname/1,abs_pathname/2, trim/1]).
+-export([encodeBase64Url/1, decodeBase64Url/1]).
+-export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
+-export([get_nested_json_value/2, json_user_ctx/1]).
+-export([proplist_apply_field/2, json_apply_field/2]).
+-export([json_decode/1]).
+-export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
+-export([verify/2,simple_call/2,shutdown_sync/1]).
+-export([get_value/2, get_value/3]).
+-export([md5/1, md5_init/0, md5_update/2, md5_final/1]).
+-export([reorder_results/2]).
+-export([url_strip_password/1]).
+-export([encode_doc_id/1]).
+-export([with_db/2]).
+-export([rfc1123_date/0, rfc1123_date/1]).
+
+-include("couch_db.hrl").
+
+% arbitrarily chosen amount of memory to use before flushing to disk
+-define(FLUSH_MAX_MEM, 10000000).
+
+%% @spec start_app_deps(App :: atom()) -> ok
+%% @doc Start dependent applications of App.
+start_app_deps(App) ->
+    {ok, DepApps} = application:get_key(App, applications),
+    [ensure_started(A) || A <- DepApps],
+    ok.
+
+%% @spec ensure_started(Application :: atom()) -> ok
+%% @doc Start the named application if not already started.
+ensure_started(App) ->
+    case application:start(App) of
+	ok ->
+	    ok;
+	{error, {already_started, App}} ->
+	    ok
+    end.
+
+priv_dir() ->
+    case code:priv_dir(couch) of
+        {error, _} ->
+            %% try to get relative priv dir. useful for tests.
+            EbinDir = filename:dirname(code:which(?MODULE)),
+            AppPath = filename:dirname(EbinDir),
+            filename:join(AppPath, "priv");
+        Dir -> Dir
+    end.
+
+% Normalize a pathname by removing .. and . components.
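+% e.g. normpath("foo/./bar/../baz") returns "foo/baz"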
+normpath(Path) ->
+    normparts(filename:split(Path), []).
+
+normparts([], Acc) ->
+    filename:join(lists:reverse(Acc));
+normparts([".." | RestParts], [_Drop | RestAcc]) ->
+    normparts(RestParts, RestAcc);
+normparts(["." | RestParts], Acc) ->
+    normparts(RestParts, Acc);
+normparts([Part | RestParts], Acc) ->
+    normparts(RestParts, [Part | Acc]).
+
+% Works like list_to_existing_atom/1, except the input may be a list or a
+% binary, and the original value is returned instead of an error if no
+% existing atom matches.
+to_existing_atom(V) when is_list(V) ->
+    try list_to_existing_atom(V) catch _:_ -> V end;
+to_existing_atom(V) when is_binary(V) ->
+    try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
+to_existing_atom(V) when is_atom(V) ->
+    V.
+
+shutdown_sync(Pid) when not is_pid(Pid)->
+    ok;
+shutdown_sync(Pid) ->
+    MRef = erlang:monitor(process, Pid),
+    try
+        catch unlink(Pid),
+        catch exit(Pid, shutdown),
+        receive
+        {'DOWN', MRef, _, _, _} ->
+            ok
+        end
+    after
+        erlang:demonitor(MRef, [flush])
+    end.
+
+
+simple_call(Pid, Message) ->
+    MRef = erlang:monitor(process, Pid),
+    try
+        Pid ! {self(), Message},
+        receive
+        {Pid, Result} ->
+            Result;
+        {'DOWN', MRef, _, _, Reason} ->
+            exit(Reason)
+        end
+    after
+        erlang:demonitor(MRef, [flush])
+    end.
+
+validate_utf8(Data) when is_list(Data) ->
+    validate_utf8(?l2b(Data));
+validate_utf8(Bin) when is_binary(Bin) ->
+    validate_utf8_fast(Bin, 0).
+
+validate_utf8_fast(B, O) ->
+    case B of
+        <<_:O/binary>> ->
+            true;
+        <<_:O/binary, C1, _/binary>> when
+                C1 < 128 ->
+            validate_utf8_fast(B, 1 + O);
+        <<_:O/binary, C1, C2, _/binary>> when
+                C1 >= 194, C1 =< 223,
+                C2 >= 128, C2 =< 191 ->
+            validate_utf8_fast(B, 2 + O);
+        <<_:O/binary, C1, C2, C3, _/binary>> when
+                C1 >= 224, C1 =< 239,
+                C2 >= 128, C2 =< 191,
+                C3 >= 128, C3 =< 191 ->
+            validate_utf8_fast(B, 3 + O);
+        <<_:O/binary, C1, C2, C3, C4, _/binary>> when
+                C1 >= 240, C1 =< 244,
+                C2 >= 128, C2 =< 191,
+                C3 >= 128, C3 =< 191,
+                C4 >= 128, C4 =< 191 ->
+            validate_utf8_fast(B, 4 + O);
+        _ ->
+            false
+    end.
+
+to_hex([]) ->
+    [];
+to_hex(Bin) when is_binary(Bin) ->
+    to_hex(binary_to_list(Bin));
+to_hex([H|T]) ->
+    [to_digit(H div 16), to_digit(H rem 16) | to_hex(T)].
+
+to_digit(N) when N < 10 -> $0 + N;
+to_digit(N)             -> $a + N-10.
+
+
+parse_term(Bin) when is_binary(Bin) ->
+    parse_term(binary_to_list(Bin));
+parse_term(List) ->
+    {ok, Tokens, _} = erl_scan:string(List ++ "."),
+    erl_parse:parse_term(Tokens).
+
+get_value(Key, List) ->
+    get_value(Key, List, undefined).
+
+get_value(Key, List, Default) ->
+    case lists:keysearch(Key, 1, List) of
+    {value, {Key,Value}} ->
+        Value;
+    false ->
+        Default
+    end.
+
+get_nested_json_value({Props}, [Key|Keys]) ->
+    case couch_util:get_value(Key, Props, nil) of
+    nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
+    Value -> get_nested_json_value(Value, Keys)
+    end;
+get_nested_json_value(Value, []) ->
+    Value;
+get_nested_json_value(_NotJSONObj, _) ->
+    throw({not_found, json_mismatch}).
+
+proplist_apply_field(H, L) ->
+    {R} = json_apply_field(H, {L}),
+    R.
+
+json_apply_field(H, {L}) ->
+    json_apply_field(H, L, []).
+json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
+    json_apply_field({Key, NewValue}, Headers, Acc);
+json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
+    json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
+json_apply_field({Key, NewValue}, [], Acc) ->
+    {[{Key, NewValue}|Acc]}.
+
+json_user_ctx(#db{name=DbName, user_ctx=Ctx}) ->
+    {[{<<"db">>, DbName},
+            {<<"name">>,Ctx#user_ctx.name},
+            {<<"roles">>,Ctx#user_ctx.roles}]}.
+
+json_decode(D) ->
+    try
+        jiffy:decode(D)
+    catch
+        throw:Error ->
+            throw({invalid_json, Error})
+    end.
+
+% returns a random 32-bit unsigned integer
+rand32() ->
+    crypto:rand_uniform(0, 16#100000000).
+
+% Given a pathname such as "../foo/bar/", returns the fully qualified
+% absolute pathname.
+abs_pathname(" " ++ Filename) ->
+    % strip leading whitespace
+    abs_pathname(Filename);
+abs_pathname([$/ |_]=Filename) ->
+    Filename;
+abs_pathname(Filename) ->
+    {ok, Cwd} = file:get_cwd(),
+    {Filename2, Args} = separate_cmd_args(Filename, ""),
+    abs_pathname(Filename2, Cwd) ++ Args.
+
+abs_pathname(Filename, Dir) ->
+    Name = filename:absname(Filename, Dir ++ "/"),
+    OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
+    % If the filename is a dir (last char is a slash), put back the end slash
+    case string:right(Filename,1) of
+    "/" ->
+        OutFilename ++ "/";
+    "\\" ->
+        OutFilename ++ "/";
+    _Else->
+        OutFilename
+    end.
+
+% if this is an executable with arguments, separate out the arguments
+% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
+separate_cmd_args("", CmdAcc) ->
+    {lists:reverse(CmdAcc), ""};
+separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
+    separate_cmd_args(Rest, " \\" ++ CmdAcc);
+separate_cmd_args(" " ++ Rest, CmdAcc) ->
+    {lists:reverse(CmdAcc), " " ++ Rest};
+separate_cmd_args([Char|Rest], CmdAcc) ->
+    separate_cmd_args(Rest, [Char | CmdAcc]).
+
+% Is a character whitespace?
+is_whitespace($\s) -> true;
+is_whitespace($\t) -> true;
+is_whitespace($\n) -> true;
+is_whitespace($\r) -> true;
+is_whitespace(_Else) -> false.
+
+
+% removes leading and trailing whitespace from a string
+trim(String) ->
+    String2 = lists:dropwhile(fun is_whitespace/1, String),
+    lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
+
+% takes a hierarchical list of dirs and removes the dots ".", double dots
+% ".." and the corresponding parent dirs.
+fix_path_list([], Acc) ->
+    lists:reverse(Acc);
+fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+    fix_path_list(Rest, RestAcc);
+fix_path_list(["."|Rest], Acc) ->
+    fix_path_list(Rest, Acc);
+fix_path_list([Dir | Rest], Acc) ->
+    fix_path_list(Rest, [Dir | Acc]).
+
+
+implode(List, Sep) ->
+    implode(List, Sep, []).
+
+implode([], _Sep, Acc) ->
+    lists:flatten(lists:reverse(Acc));
+implode([H], Sep, Acc) ->
+    implode([], Sep, [H|Acc]);
+implode([H|T], Sep, Acc) ->
+    implode(T, Sep, [Sep,H|Acc]).
+
+should_flush() ->
+    should_flush(?FLUSH_MAX_MEM).
+
+should_flush(MemThreshHold) ->
+    {memory, ProcMem} = process_info(self(), memory),
+    BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+        0, element(2,process_info(self(), binary))),
+    if ProcMem+BinMem > 2*MemThreshHold ->
+        garbage_collect(),
+        {memory, ProcMem2} = process_info(self(), memory),
+        BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+            0, element(2,process_info(self(), binary))),
+        ProcMem2+BinMem2 > MemThreshHold;
+    true -> false end.
+
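+% base64url variant of base64: "+" becomes "-", "/" becomes "_" and the
+% trailing "=" padding is stripped (and restored on decode).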
+encodeBase64Url(Url) ->
+    Url1 = re:replace(base64:encode(Url), ["=+", $$], ""),
+    Url2 = re:replace(Url1, "/", "_", [global]),
+    re:replace(Url2, "\\+", "-", [global, {return, binary}]).
+
+decodeBase64Url(Url64) ->
+    Url1 = re:replace(Url64, "-", "+", [global]),
+    Url2 = re:replace(Url1, "_", "/", [global]),
+    Padding = lists:duplicate((4 - iolist_size(Url2) rem 4) rem 4, $=),
+    base64:decode(iolist_to_binary([Url2, Padding])).
+
+dict_find(Key, Dict, DefaultValue) ->
+    case dict:find(Key, Dict) of
+    {ok, Value} ->
+        Value;
+    error ->
+        DefaultValue
+    end.
+
+to_binary(V) when is_binary(V) ->
+    V;
+to_binary(V) when is_list(V) ->
+    try
+        list_to_binary(V)
+    catch
+        _:_ ->
+            list_to_binary(io_lib:format("~p", [V]))
+    end;
+to_binary(V) when is_atom(V) ->
+    list_to_binary(atom_to_list(V));
+to_binary(V) ->
+    list_to_binary(io_lib:format("~p", [V])).
+
+to_integer(V) when is_integer(V) ->
+    V;
+to_integer(V) when is_list(V) ->
+    erlang:list_to_integer(V);
+to_integer(V) when is_binary(V) ->
+    erlang:list_to_integer(binary_to_list(V)).
+
+to_list(V) when is_list(V) ->
+    V;
+to_list(V) when is_binary(V) ->
+    binary_to_list(V);
+to_list(V) when is_atom(V) ->
+    atom_to_list(V);
+to_list(V) ->
+    lists:flatten(io_lib:format("~p", [V])).
+
+url_encode(Bin) when is_binary(Bin) ->
+    url_encode(binary_to_list(Bin));
+url_encode([H|T]) ->
+    if
+    H >= $a, $z >= H ->
+        [H|url_encode(T)];
+    H >= $A, $Z >= H ->
+        [H|url_encode(T)];
+    H >= $0, $9 >= H ->
+        [H|url_encode(T)];
+    H == $_; H == $.; H == $-; H == $: ->
+        [H|url_encode(T)];
+    true ->
+        case lists:flatten(io_lib:format("~.16.0B", [H])) of
+        [X, Y] ->
+            [$%, X, Y | url_encode(T)];
+        [X] ->
+            [$%, $0, X | url_encode(T)]
+        end
+    end;
+url_encode([]) ->
+    [].
+
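+% Constant-time comparison: XOR each byte pair and OR the results into an
+% accumulator, so the comparison time does not depend on where the inputs
+% first differ. E.g. verify(<<"abc">>, <<"abc">>) -> true.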
+verify([X|RestX], [Y|RestY], Result) ->
+    verify(RestX, RestY, (X bxor Y) bor Result);
+verify([], [], Result) ->
+    Result == 0.
+
+verify(<<X/binary>>, <<Y/binary>>) ->
+    verify(?b2l(X), ?b2l(Y));
+verify(X, Y) when is_list(X) and is_list(Y) ->
+    case length(X) == length(Y) of
+        true ->
+            verify(X, Y, 0);
+        false ->
+            false
+    end;
+verify(_X, _Y) -> false.
+
+-spec md5(Data::(iolist() | binary())) -> Digest::binary().
+md5(Data) ->
+    try crypto:md5(Data) catch error:_ -> erlang:md5(Data) end.
+
+-spec md5_init() -> Context::binary().
+md5_init() ->
+    try crypto:md5_init() catch error:_ -> erlang:md5_init() end.
+
+-spec md5_update(Context::binary(), Data::(iolist() | binary())) ->
+    NewContext::binary().
+md5_update(Ctx, D) ->
+    try crypto:md5_update(Ctx,D) catch error:_ -> erlang:md5_update(Ctx,D) end.
+
+-spec md5_final(Context::binary()) -> Digest::binary().
+md5_final(Ctx) ->
+    try crypto:md5_final(Ctx) catch error:_ -> erlang:md5_final(Ctx) end.
+
+% linear search is faster for small lists; length/1 takes ~0.5 ms on a 100k-element list
+reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
+    [couch_util:get_value(Key, SortedResults) || Key <- Keys];
+reorder_results(Keys, SortedResults) ->
+    KeyDict = dict:from_list(SortedResults),
+    [dict:fetch(Key, KeyDict) || Key <- Keys].
+
+url_strip_password(Url) ->
+    re:replace(Url,
+        "http(s)?://([^:]+):[^@]+@(.*)$",
+        "http\\1://\\2:*****@\\3",
+        [{return, list}]).
+
+encode_doc_id(#doc{id = Id}) ->
+    encode_doc_id(Id);
+encode_doc_id(Id) when is_list(Id) ->
+    encode_doc_id(?l2b(Id));
+encode_doc_id(<<"_design/", Rest/binary>>) ->
+    "_design/" ++ url_encode(Rest);
+encode_doc_id(<<"_local/", Rest/binary>>) ->
+    "_local/" ++ url_encode(Rest);
+encode_doc_id(Id) ->
+    url_encode(Id).
+
+
+with_db(Db, Fun) when is_record(Db, db) ->
+    Fun(Db);
+with_db(DbName, Fun) ->
+    case couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}]) of
+        {ok, Db} ->
+            try
+                Fun(Db)
+            after
+                catch couch_db:close(Db)
+            end;
+        Else ->
+            throw(Else)
+    end.
+
+rfc1123_date() ->
+    {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
+    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+    lists:flatten(
+      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+
+rfc1123_date(undefined) ->
+    undefined;
+rfc1123_date(UniversalTime) ->
+    {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime,
+    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+    lists:flatten(
+      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+
+%% day
+
+day(1) -> "Mon";
+day(2) -> "Tue";
+day(3) -> "Wed";
+day(4) -> "Thu";
+day(5) -> "Fri";
+day(6) -> "Sat";
+day(7) -> "Sun".
+
+%% month
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_uuids.erl
----------------------------------------------------------------------
diff --git a/src/couch_uuids.erl b/src/couch_uuids.erl
new file mode 100644
index 0000000..6ed75a1
--- /dev/null
+++ b/src/couch_uuids.erl
@@ -0,0 +1,103 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_uuids).
+-include("couch_db.hrl").
+
+-behaviour(gen_server).
+
+-export([start/0, stop/0]).
+-export([new/0, random/0, utc_random/0]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+start() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+stop() ->
+    gen_server:cast(?MODULE, stop).
+
+new() ->
+    gen_server:call(?MODULE, create).
+
+random() ->
+    list_to_binary(couch_util:to_hex(crypto:rand_bytes(16))).
+
+utc_random() ->
+    utc_suffix(couch_util:to_hex(crypto:rand_bytes(9))).
+
+utc_suffix(Suffix) ->
+    Now = {_, _, Micro} = now(),
+    Nowish = calendar:now_to_universal_time(Now),
+    Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
+    Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
+    Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]),
+    list_to_binary(Prefix ++ Suffix).
+
+init([]) ->
+    ok = couch_config:register(
+        fun("uuids", _) -> gen_server:cast(?MODULE, change) end
+    ),
+    {ok, state()}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+handle_call(create, _From, random) ->
+    {reply, random(), random};
+handle_call(create, _From, utc_random) ->
+    {reply, utc_random(), utc_random};
+handle_call(create, _From, {utc_id, UtcIdSuffix}) ->
+    {reply, utc_suffix(UtcIdSuffix), {utc_id, UtcIdSuffix}};
+handle_call(create, _From, {sequential, Pref, Seq}) ->
+    Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
+    case Seq >= 16#fff000 of
+        true ->
+            {reply, Result, {sequential, new_prefix(), inc()}};
+        _ ->
+            {reply, Result, {sequential, Pref, Seq + inc()}}
+    end.
+
+handle_cast(change, _State) ->
+    {noreply, state()};
+handle_cast(stop, State) ->
+    {stop, normal, State};
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+new_prefix() ->
+    couch_util:to_hex((crypto:rand_bytes(13))).
+
+inc() ->
+    crypto:rand_uniform(1, 16#ffe).
+
+state() ->
+    AlgoStr = couch_config:get("uuids", "algorithm", "random"),
+    case couch_util:to_existing_atom(AlgoStr) of
+        random ->
+            random;
+        utc_random ->
+            utc_random;
+        utc_id ->
+            UtcIdSuffix = couch_config:get("uuids", "utc_id_suffix", ""),
+            {utc_id, UtcIdSuffix};
+        sequential ->
+            {sequential, new_prefix(), inc()};
+        Unknown ->
+            throw({unknown_uuid_algorithm, Unknown})
+    end.
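
Because utc_suffix/1 above prefixes 14 hex digits of microseconds since the
Unix epoch, utc_random and utc_id uuids sort in creation order. A sketch of
recovering that timestamp; uuid_to_micros/1 is a hypothetical helper, not
part of the module:

    % Parse the 14-hex-digit microsecond prefix back into an integer.
    uuid_to_micros(Uuid) when is_binary(Uuid) ->
        <<Prefix:14/binary, _Suffix/binary>> = Uuid,
        list_to_integer(binary_to_list(Prefix), 16).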

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_work_queue.erl
----------------------------------------------------------------------
diff --git a/src/couch_work_queue.erl b/src/couch_work_queue.erl
new file mode 100644
index 0000000..22968d7
--- /dev/null
+++ b/src/couch_work_queue.erl
@@ -0,0 +1,187 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_work_queue).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+% public API
+-export([new/1, queue/2, dequeue/1, dequeue/2, close/1, item_count/1, size/1]).
+
+% gen_server callbacks
+-export([init/1, terminate/2]).
+-export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+
+-record(q, {
+    queue = queue:new(),
+    blocked = [],
+    max_size,
+    max_items,
+    items = 0,
+    size = 0,
+    work_waiters = [],
+    close_on_dequeue = false,
+    multi_workers = false
+}).
+
+
+new(Options) ->
+    gen_server:start_link(couch_work_queue, Options, []).
+
+
+queue(Wq, Item) when is_binary(Item) ->
+    gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
+queue(Wq, Item) ->
+    gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
+
+
+dequeue(Wq) ->
+    dequeue(Wq, all).
+
+    
+dequeue(Wq, MaxItems) ->
+    try
+        gen_server:call(Wq, {dequeue, MaxItems}, infinity)
+    catch
+        _:_ -> closed
+    end.
+
+
+item_count(Wq) ->
+    try
+        gen_server:call(Wq, item_count, infinity)
+    catch
+        _:_ -> closed
+    end.
+
+
+size(Wq) ->
+    try
+        gen_server:call(Wq, size, infinity)
+    catch
+        _:_ -> closed
+    end.
+
+
+close(Wq) ->
+    gen_server:cast(Wq, close).
+    
+
+init(Options) ->
+    Q = #q{
+        max_size = couch_util:get_value(max_size, Options, nil),
+        max_items = couch_util:get_value(max_items, Options, nil),
+        multi_workers = couch_util:get_value(multi_workers, Options, false)
+    },
+    {ok, Q}.
+
+
+terminate(_Reason, #q{work_waiters=Workers}) ->
+    lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
+
+    
+handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
+    Q = Q0#q{size = Q0#q.size + Size,
+                items = Q0#q.items + 1,
+                queue = queue:in({Item, Size}, Q0#q.queue)},
+    case (Q#q.size >= Q#q.max_size) orelse
+            (Q#q.items >= Q#q.max_items) of
+    true ->
+        {noreply, Q#q{blocked = [From | Q#q.blocked]}};
+    false ->
+        {reply, ok, Q}
+    end;
+
+handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
+    gen_server:reply(W, {ok, [Item]}),
+    {reply, ok, Q#q{work_waiters = Rest}};
+
+handle_call({dequeue, Max}, From, Q) ->
+    #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
+    case {Workers, Multi} of
+    {[_ | _], false} ->
+        exit("Only one caller allowed to wait for this work at a time");
+    {[_ | _], true} ->
+        {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
+    _ ->
+        case Count of
+        0 ->
+            {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
+        C when C > 0 ->
+            deliver_queue_items(Max, Q)
+        end
+    end;
+
+handle_call(item_count, _From, Q) ->
+    {reply, Q#q.items, Q};
+
+handle_call(size, _From, Q) ->
+    {reply, Q#q.size, Q}.
+
+
+deliver_queue_items(Max, Q) ->
+    #q{
+        queue = Queue,
+        items = Count,
+        size = Size,
+        close_on_dequeue = Close,
+        blocked = Blocked
+    } = Q,
+    case (Max =:= all) orelse (Max >= Count) of
+    false ->
+        {Items, Size2, Queue2, Blocked2} = dequeue_items(
+            Max, Size, Queue, Blocked, []),
+        Q2 = Q#q{
+            items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
+        },
+        {reply, {ok, Items}, Q2};
+    true ->
+        lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
+        Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
+        Items = [Item || {Item, _} <- queue:to_list(Queue)],
+        case Close of
+        false ->
+            {reply, {ok, Items}, Q2};
+        true ->
+            {stop, normal, {ok, Items}, Q2}
+        end
+    end.
+
+
+dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
+    {lists:reverse(DequeuedAcc), Size, Queue, Blocked};
+
+dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
+    {{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
+    case Blocked of
+    [] ->
+        Blocked2 = Blocked;
+    [From | Blocked2] ->
+        gen_server:reply(From, ok)
+    end,
+    dequeue_items(
+        NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
+    
+
+handle_cast(close, #q{items = 0} = Q) ->
+    {stop, normal, Q};
+
+handle_cast(close, Q) ->
+    {noreply, Q#q{close_on_dequeue = true}}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info(X, Q) ->
+    {stop, X, Q}.
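
The queue above blocks producers once max_items or max_size is crossed and
parks consumers while it is empty, so a plain call-and-loop pair is enough
to drive it. A minimal sketch under those assumptions; demo/0 and drain/1
are hypothetical helpers and the option values are illustrative:

    % One producer, one consumer. queue/2 returns ok (blocking while the
    % queue is full); dequeue/1 returns {ok, Items}, or closed once the
    % queue has been closed and drained.
    demo() ->
        {ok, Q} = couch_work_queue:new([{max_items, 100}]),
        spawn_link(fun() ->
            ok = couch_work_queue:queue(Q, <<"item-1">>),
            ok = couch_work_queue:queue(Q, <<"item-2">>),
            couch_work_queue:close(Q)
        end),
        drain(Q).

    drain(Q) ->
        case couch_work_queue:dequeue(Q) of
            closed -> ok;
            {ok, Items} ->
                io:format("dequeued ~p~n", [Items]),
                drain(Q)
        end.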


[38/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
couch_index_sup should be started by the couch application

I had this patch sitting in a customer repository for a while. Since
couch_index always depends on couch, it is better to start it from the
couch application's supervision tree than to run it on its own.


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/731f8404
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/731f8404
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/731f8404

Branch: refs/heads/import-rcouch
Commit: 731f8404fbe81ac6a09683bfa8d4993a7467451e
Parents: cad57e5
Author: benoitc <be...@apache.org>
Authored: Thu Jan 9 23:29:54 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch.app.src.script  | 3 ++-
 src/couch_primary_sup.erl | 8 +++++++-
 2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/731f8404/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index c947ead..c406e02 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -48,6 +48,7 @@ end,
             couch_db_update,
             couch_db_update_notifier_sup,
             couch_external_manager,
+            couch_index_sup,
             couch_httpd,
             couch_log,
             couch_primary_services,
@@ -62,6 +63,6 @@ end,
         {mod, {couch_app, []}},
         {env, [{couch_rel, RelVsn}]},
         {applications, [kernel, stdlib, crypto, sasl, asn1, public_key, ssl,
-                        inets, oauth, ibrowse, mochiweb, os_mon]}
+                        inets, ibrowse, os_mon]}
     ]}
 ].

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/731f8404/src/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_primary_sup.erl b/src/couch_primary_sup.erl
index 7c4fde2..d05cad0 100644
--- a/src/couch_primary_sup.erl
+++ b/src/couch_primary_sup.erl
@@ -42,7 +42,13 @@ init([]) ->
             permanent,
             brutal_kill,
             worker,
-            [couch_log]}
+            [couch_log]},
+        {couch_index_sup,
+             {couch_index_sup, start_link, []},
+             permanent,
+             infinity,
+             supervisor,
+             [couch_index_sup]}
     ],
     {ok, {{one_for_one, 10, 3600}, Children}}.
 
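
With this change couch_index_sup runs as a permanent supervisor child inside
couch's primary services tree. A shell sketch of confirming it after boot,
assuming the supervisor is registered locally as couch_primary_services (the
name listed in the app file above); the pid is illustrative:

    %% Standard supervisor:which_children/1 output shape.
    1> lists:keyfind(couch_index_sup, 1,
                     supervisor:which_children(couch_primary_services)).
    {couch_index_sup,<0.123.0>,supervisor,[couch_index_sup]}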


[09/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/json_stream_parse.erl
----------------------------------------------------------------------
diff --git a/json_stream_parse.erl b/json_stream_parse.erl
deleted file mode 100644
index b63e011..0000000
--- a/json_stream_parse.erl
+++ /dev/null
@@ -1,432 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(json_stream_parse).
-
-
--export([events/2, to_ejson/1, collect_object/2]).
-
--define(IS_WS(X), (X == $\  orelse X == $\t orelse X == $\n orelse X == $\r)).
--define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
--define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
-
-
-
-% Parses the json into events.
-%
-% The DataFun param is a function that produces the data for parsing. When
-% called it must yield a tuple, or the atom done. The first element in the
-% tuple is the data itself, and the second element is a function to be called
-% next to get the next chunk of data in the stream.
-%
-% The EventFun is called everytime a json element is parsed. It must produce
-% a new function to be called for the next event.
-%
-% Events happen each time a new element in the json string is parsed.
-% For simple value types, the data itself is returned:
-% Strings
-% Integers
-% Floats
-% true
-% false
-% null
-%
-% For arrays, the start of the array is signaled by the event array_start
-% atom. The end is signaled by array_end. The events before the end are the
-% values, or nested values.
-%
-% For objects, the start of the object is signaled by the event object_start
-% atom. The end is signaled by object_end. Each key is signaled by
-% {key, KeyString}, and the following event is the value, or start of the
-% value (array_start, object_start).
-%
-events(Data,EventFun) when is_list(Data)->
-    events(list_to_binary(Data),EventFun);
-events(Data,EventFun) when is_binary(Data)->
-    events(fun() -> {Data, fun() -> done end} end,EventFun);
-events(DataFun,EventFun) ->
-    parse_one(DataFun, EventFun, <<>>).
-
-% converts the JSON directly to the erlang represention of Json
-to_ejson(DF) ->
-    {_DF2, EF, _Rest} = events(DF, fun(Ev) -> collect_events(Ev, []) end),
-    [[EJson]] = make_ejson(EF(get_results), [[]]),
-    EJson.
-
-
-% This function is used to return complete objects while parsing streams.
-%
-% Return this function from inside an event function right after getting an
-% object_start event. It then collects the remaining events for that object
-% and converts it to the erlang represention of Json.
-%
-% It then calls your ReturnControl function with the erlang object. Your
-% return control function then should yield another event function.
-%
-% This example stream parses an array of objects, calling
-% fun do_something_with_the_object/1 for each object.
-%
-%    ev_array(array_start) ->
-%        fun(Ev) -> ev_object_loop(Ev) end.
-%
-%    ev_object_loop(object_start) ->
-%        fun(Ev) ->
-%            json_stream_parse:collect_object(Ev,
-%                fun(Obj) ->
-%                    do_something_with_the_object(Obj),
-%                    fun(Ev2) -> ev_object_loop(Ev2) end
-%                end)
-%        end;
-%    ev_object_loop(array_end) ->
-%        ok
-%    end.
-%
-%    % invoke the parse
-%    main() ->
-%        ...
-%        events(Data, fun(Ev) -> ev_array(Ev) end).
-
-collect_object(Ev, ReturnControl) ->
-    collect_object(Ev, 0, ReturnControl, [object_start]).
-
-
-
-% internal methods
-
-parse_one(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-        none;
-    {Token, DF2, Rest} ->
-        case Token of
-        "{" ->
-            EF2 = EF(object_start),
-            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
-            {DF3, EF3(object_end), Rest2};
-        "[" ->
-            EF2 = EF(array_start),
-            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
-            {DF3, EF3(array_end), Rest2};
-        Int when is_integer(Int)->
-            {DF2, EF(Int), Rest};
-        Float when is_float(Float)->
-            {DF2, EF(Float), Rest};
-        Atom when is_atom(Atom)->
-            {DF2, EF(Atom), Rest};
-        String when is_binary(String)->
-            {DF2, EF(String), Rest};
-        _OtherToken ->
-            err(unexpected_token)
-        end
-    end.
-
-must_parse_one(DF,EF,Acc,Error)->
-    case parse_one(DF, EF, Acc) of
-    none ->
-        err(Error);
-    Else ->
-        Else
-    end.
-
-must_toke(DF, Data, Error) ->
-    case toke(DF, Data) of
-    none ->
-        err(Error);
-    Result ->
-        Result
-    end.
-
-toke(DF, <<>>) ->
-    case DF() of
-    done ->
-        none;
-    {Data, DF2} ->
-        toke(DF2, Data)
-    end;
-toke(DF, <<C,Rest/binary>>) when ?IS_WS(C)->
-    toke(DF, Rest);
-toke(DF, <<${,Rest/binary>>) ->
-    {"{", DF, Rest};
-toke(DF, <<$},Rest/binary>>) ->
-    {"}", DF, Rest};
-toke(DF, <<$[,Rest/binary>>) ->
-    {"[", DF, Rest};
-toke(DF, <<$],Rest/binary>>) ->
-    {"]", DF, Rest};
-toke(DF, <<$",Rest/binary>>) ->
-    toke_string(DF,Rest,[]);
-toke(DF, <<$,,Rest/binary>>) ->
-    {",", DF, Rest};
-toke(DF, <<$:,Rest/binary>>) ->
-    {":", DF, Rest};
-toke(DF, <<$-,Rest/binary>>) ->
-    {<<C,_/binary>> = Data, DF2} = must_df(DF,1,Rest,expected_number),
-    case ?IS_DIGIT(C) of
-    true ->
-        toke_number_leading(DF2, Data, "-");
-    false ->
-        err(expected_number)
-    end;
-toke(DF, <<C,_/binary>> = Data) when ?IS_DIGIT(C) ->
-    toke_number_leading(DF, Data, []);
-toke(DF, <<$t,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"rue">>, DF, Rest),
-    {true, DF2, Data};
-toke(DF, <<$f,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"alse">>, DF, Rest),
-    {false, DF2, Data};
-toke(DF, <<$n,Rest/binary>>) ->
-    {Data, DF2} = must_match(<<"ull">>, DF, Rest),
-    {null, DF2, Data};
-toke(_, _) ->
-    err(bad_token).
-
-
-must_match(Pattern, DF, Data) ->
-    Size = size(Pattern),
-    case must_df(DF, Size, Data, bad_token) of
-    {<<Pattern:Size/binary,Data2/binary>>, DF2} ->
-        {Data2, DF2};
-    {_, _} ->
-        err(bad_token)
-    end.
-
-must_df(DF,Error)->
-    case DF() of
-    done ->
-        err(Error);
-    {Data, DF2} ->
-        {Data, DF2}
-    end.
-
-
-must_df(DF,NeedLen,Acc,Error)->
-    if size(Acc) >= NeedLen ->
-        {Acc, DF};
-    true ->
-        case DF() of
-        done ->
-            err(Error);
-        {Data, DF2} ->
-            must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
-        end
-    end.
-
-
-parse_object(DF,EF,Acc) ->
-    case must_toke(DF, Acc, unterminated_object) of
-    {String, DF2, Rest} when is_binary(String)->
-        EF2 = EF({key,String}),
-        case must_toke(DF2,Rest,unterminated_object) of
-        {":", DF3, Rest2} ->
-            {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
-            case must_toke(DF4,Rest3, unterminated_object) of
-            {",", DF5, Rest4} ->
-                parse_object(DF5, EF3, Rest4);
-            {"}", DF5, Rest4} ->
-                {DF5, EF3, Rest4};
-            {_, _, _} ->
-                err(unexpected_token)
-            end;
-        _Else ->
-            err(expected_colon)
-        end;
-    {"}", DF2, Rest} ->
-        {DF2, EF, Rest};
-    {_, _, _} ->
-        err(unexpected_token)
-    end.
-
-parse_array0(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-        err(unterminated_array);
-    {",", DF2, Rest} ->
-        parse_array(DF2,EF,Rest);
-    {"]", DF2, Rest} ->
-        {DF2,EF,Rest};
-    _ ->
-        err(unexpected_token)
-    end.
-
-parse_array(DF,EF,Acc) ->
-    case toke(DF, Acc) of
-    none ->
-         err(unterminated_array);
-    {Token, DF2, Rest} ->
-        case Token of
-        "{" ->
-            EF2 = EF(object_start),
-            {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
-            parse_array0(DF3, EF3(object_end), Rest2);
-        "[" ->
-            EF2 = EF(array_start),
-            {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
-            parse_array0(DF3, EF3(array_end), Rest2);
-        Int when is_integer(Int)->
-            parse_array0(DF2, EF(Int), Rest);
-        Float when is_float(Float)->
-            parse_array0(DF2, EF(Float), Rest);
-        Atom when is_atom(Atom)->
-            parse_array0(DF2, EF(Atom), Rest);
-        String when is_binary(String)->
-            parse_array0(DF2, EF(String), Rest);
-        "]" ->
-            {DF2, EF, Rest};
-        _ ->
-            err(unexpected_token)
-        end
-    end.
-
-
-toke_string(DF, <<>>, Acc) ->
-    {Data, DF2} = must_df(DF, unterminated_string),
-    toke_string(DF2, Data, Acc);
-toke_string(DF, <<$\\,$",Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$" | Acc]);
-toke_string(DF, <<$\\,$\\,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\\ | Acc]);
-toke_string(DF, <<$\\,$/,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$/ | Acc]);
-toke_string(DF, <<$\\,$b,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\b | Acc]);
-toke_string(DF, <<$\\,$f,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\f | Acc]);
-toke_string(DF, <<$\\,$n,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\n | Acc]);
-toke_string(DF, <<$\\,$r,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\r | Acc]);
-toke_string(DF, <<$\\,$t,Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [$\t | Acc]);
-toke_string(DF, <<$\\,$u,Rest/binary>>, Acc) ->
-    {<<A,B,C,D,Data/binary>>, DF2} = must_df(DF,4,Rest,missing_hex),
-    UTFChar = erlang:list_to_integer([A, B, C, D], 16),
-    if UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
-        err(invalid_utf_char);
-    true ->
-        ok
-    end,
-    Chars = xmerl_ucs:to_utf8(UTFChar),
-    toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
-toke_string(DF, <<$\\>>, Acc) ->
-    {Data, DF2} = must_df(DF, unterminated_string),
-    toke_string(DF2, <<$\\,Data/binary>>, Acc);
-toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
-    err(bad_escape);
-toke_string(DF, <<$", Rest/binary>>, Acc) ->
-    {list_to_binary(lists:reverse(Acc)), DF, Rest};
-toke_string(DF, <<C, Rest/binary>>, Acc) ->
-    toke_string(DF, Rest, [C | Acc]).
-
-
-toke_number_leading(DF, <<Digit,Rest/binary>>, Acc)
-        when ?IS_DIGIT(Digit) ->
-    toke_number_leading(DF, Rest, [Digit | Acc]);
-toke_number_leading(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_integer(lists:reverse(Acc)), DF, Rest};
-toke_number_leading(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-         {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_leading(DF2, Data, Acc)
-    end;
-toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
-    toke_number_trailing(DF, Rest, [$.|Acc]);
-toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
-    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
-toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
-    toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
-toke_number_leading(_, _, _) ->
-    err(unexpected_character_in_number).
-
-toke_number_trailing(DF, <<Digit,Rest/binary>>, Acc)
-        when ?IS_DIGIT(Digit) ->
-    toke_number_trailing(DF, Rest, [Digit | Acc]);
-toke_number_trailing(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_trailing(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_trailing(DF2, Data, Acc)
-    end;
-toke_number_trailing(DF, <<"e", Rest/binary>>, [C|_]=Acc) when C /= $. ->
-    toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(DF, <<"E", Rest/binary>>, [C|_]=Acc) when C /= $. ->
-    toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(_, _, _) ->
-    err(unexpected_character_in_number).
-
-
-toke_number_exponent(DF, <<Digit,Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
-    toke_number_exponent(DF, Rest, [Digit | Acc]);
-toke_number_exponent(DF, <<Sign,Rest/binary>>, [$e|_]=Acc)
-        when Sign == $+ orelse Sign == $- ->
-    toke_number_exponent(DF, Rest, [Sign | Acc]);
-toke_number_exponent(DF, <<C,_/binary>>=Rest, Acc)
-        when ?IS_WS(C) orelse ?IS_DELIM(C) ->
-    {list_to_float(lists:reverse(Acc)), DF, Rest};
-toke_number_exponent(DF, <<>>, Acc) ->
-    case DF() of
-    done ->
-        {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
-    {Data, DF2} ->
-        toke_number_exponent(DF2, Data, Acc)
-    end;
-toke_number_exponent(_, _, _) ->
-        err(unexpected_character_in_number).
-
-
-err(Error)->
-    throw({parse_error,Error}).
-
-
-make_ejson([], Stack) ->
-    Stack;
-make_ejson([array_start | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
-    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
-make_ejson([array_end | RevEvs], Stack) ->
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
-    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
-make_ejson([object_end | RevEvs], Stack) ->
-    make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{key, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
-    make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
-make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
-    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
-
-collect_events(get_results, Acc) ->
-    Acc;
-collect_events(Ev, Acc) ->
-    fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
-
-
-collect_object(object_end, 0, ReturnControl, Acc) ->
-    [[Obj]] = make_ejson([object_end | Acc], [[]]),
-    ReturnControl(Obj);
-collect_object(object_end, NestCount, ReturnControl, Acc) ->
-    fun(Ev) ->
-        collect_object(Ev, NestCount - 1, ReturnControl, [object_end | Acc])
-    end;
-collect_object(object_start, NestCount, ReturnControl, Acc) ->
-    fun(Ev) ->
-        collect_object(Ev, NestCount + 1, ReturnControl, [object_start | Acc])
-    end;
-collect_object(Ev, NestCount, ReturnControl, Acc) ->
-    fun(Ev2) ->
-        collect_object(Ev2, NestCount, ReturnControl, [Ev | Acc])
-    end.
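
The module deleted here is the same streaming parser added back under src/
earlier in this series; the file is moving, not going away. For orientation,
a shell sketch of its convenience entry point, using the {Props} object
convention that make_ejson/2 above produces:

    %% to_ejson/1 accepts a plain binary because events/2 wraps it in a
    %% one-shot data fun.
    1> json_stream_parse:to_ejson(<<"{\"a\": [1, true, null]}">>).
    {[{<<"a">>,[1,true,null]}]}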

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/Makefile.am
----------------------------------------------------------------------
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..9fe19bc
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,198 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = priv
+
+# devdocdir = $(localdocdir)/developer/couchdb
+couchlibdir = $(localerlanglibdir)/couch-$(version)
+couchincludedir = $(couchlibdir)/include
+couchebindir = $(couchlibdir)/ebin
+
+couchinclude_DATA = couch_db.hrl couch_js_functions.hrl
+couchebin_DATA = $(compiled_files)
+
+# dist_devdoc_DATA = $(doc_base) $(doc_modules)
+
+CLEANFILES = $(compiled_files) $(doc_base)
+
+# CLEANFILES = $(doc_modules) edoc-info
+
+source_files = \
+    couch.erl \
+    couch_app.erl \
+    couch_auth_cache.erl \
+    couch_btree.erl \
+    couch_changes.erl \
+    couch_compaction_daemon.erl \
+    couch_compress.erl \
+    couch_config.erl \
+    couch_config_writer.erl \
+    couch_db.erl \
+    couch_db_update_notifier.erl \
+    couch_db_update_notifier_sup.erl \
+    couch_doc.erl \
+    couch_drv.erl \
+    couch_ejson_compare.erl \
+    couch_event_sup.erl \
+    couch_external_manager.erl \
+    couch_external_server.erl \
+    couch_file.erl \
+    couch_httpd.erl \
+    couch_httpd_db.erl \
+    couch_httpd_auth.erl \
+    couch_httpd_cors.erl \
+    couch_httpd_oauth.erl \
+    couch_httpd_external.erl \
+    couch_httpd_misc_handlers.erl \
+    couch_httpd_proxy.erl \
+    couch_httpd_rewrite.erl \
+    couch_httpd_stats_handlers.erl \
+    couch_httpd_vhost.erl \
+    couch_key_tree.erl \
+    couch_log.erl \
+    couch_native_process.erl \
+    couch_os_daemons.erl \
+    couch_os_process.erl \
+    couch_passwords.erl \
+    couch_primary_sup.erl \
+    couch_query_servers.erl \
+    couch_ref_counter.erl \
+    couch_secondary_sup.erl \
+    couch_server.erl \
+    couch_server_sup.erl \
+    couch_stats_aggregator.erl \
+    couch_stats_collector.erl \
+    couch_stream.erl \
+    couch_task_status.erl \
+    couch_users_db.erl \
+    couch_util.erl \
+    couch_uuids.erl \
+    couch_db_updater.erl \
+    couch_work_queue.erl \
+    json_stream_parse.erl
+
+EXTRA_DIST = $(source_files) couch_db.hrl couch_js_functions.hrl
+
+compiled_files = \
+    couch.app \
+    couch.beam \
+    couch_app.beam \
+    couch_auth_cache.beam \
+    couch_btree.beam \
+    couch_changes.beam \
+    couch_compaction_daemon.beam \
+    couch_compress.beam \
+    couch_config.beam \
+    couch_config_writer.beam \
+    couch_db.beam \
+    couch_db_update_notifier.beam \
+    couch_db_update_notifier_sup.beam \
+    couch_doc.beam \
+    couch_drv.beam \
+    couch_ejson_compare.beam \
+    couch_event_sup.beam \
+    couch_external_manager.beam \
+    couch_external_server.beam \
+    couch_file.beam \
+    couch_httpd.beam \
+    couch_httpd_db.beam \
+    couch_httpd_auth.beam \
+    couch_httpd_oauth.beam \
+    couch_httpd_cors.beam \
+    couch_httpd_proxy.beam \
+    couch_httpd_external.beam \
+    couch_httpd_misc_handlers.beam \
+    couch_httpd_rewrite.beam \
+    couch_httpd_stats_handlers.beam \
+    couch_httpd_vhost.beam \
+    couch_key_tree.beam \
+    couch_log.beam \
+    couch_native_process.beam \
+    couch_os_daemons.beam \
+    couch_os_process.beam \
+    couch_passwords.beam \
+    couch_primary_sup.beam \
+    couch_query_servers.beam \
+    couch_ref_counter.beam \
+    couch_secondary_sup.beam \
+    couch_server.beam \
+    couch_server_sup.beam \
+    couch_stats_aggregator.beam \
+    couch_stats_collector.beam \
+    couch_stream.beam \
+    couch_task_status.beam \
+    couch_users_db.beam \
+    couch_util.beam \
+    couch_uuids.beam \
+    couch_db_updater.beam \
+    couch_work_queue.beam \
+    json_stream_parse.beam
+
+# doc_base = \
+#     erlang.png \
+#     index.html \
+#     modules-frame.html \
+#     overview-summary.html \
+#     packages-frame.html \
+#     stylesheet.css
+
+# doc_modules = \
+#     couch_btree.html \
+#     couch_config.html \
+#     couch_config_writer.html \
+#     couch_db.html \
+#     couch_db_update_notifier.html \
+#     couch_db_update_notifier_sup.html \
+#     couch_doc.html \
+#     couch_event_sup.html \
+#     couch_file.html \
+#     couch_httpd.html \
+#     couch_key_tree.html \
+#     couch_log.html \
+#     couch_query_servers.html \
+#     couch_rep.html \
+#     couch_rep_sup.html \
+#     couch_server.html \
+#     couch_server_sup.html \
+#     couch_stream.html \
+#     couch_util.html
+
+if WINDOWS
+couch.app: couch.app.tpl
+	modules=`find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ',' | sed "s/,$$//"`; \
+	sed -e "s|%package_name%|@package_name@|g" \
+			-e "s|%version%|@version@|g" \
+			-e "s|@modules@|$$modules|g" \
+			-e "s|%localconfdir%|../etc/couchdb|g" \
+			-e "s|@defaultini@|default.ini|g" \
+			-e "s|@localini@|local.ini|g" > \
+	$@ < $<
+else
+couch.app: couch.app.tpl
+	modules=`{ find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ','; echo ''; } | sed "s/,$$//"`; \
+	sed -e "s|%package_name%|@package_name@|g" \
+			-e "s|%version%|@version@|g" \
+			-e "s|@modules@|$$modules|g" \
+			-e "s|%localconfdir%|@localconfdir@|g" \
+			-e "s|@defaultini@|default.ini|g" \
+			-e "s|@localini@|local.ini|g" > \
+	$@ < $<
+	chmod +x $@
+endif
+
+# $(dist_devdoc_DATA): edoc-info
+
+# $(ERL) -noshell -run edoc_run files [\"$<\"]
+
+%.beam: %.erl couch_db.hrl couch_js_functions.hrl
+	$(ERLC) $(ERLC_FLAGS) ${TEST} $<;
+

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch.app.src
----------------------------------------------------------------------
diff --git a/src/couch.app.src b/src/couch.app.src
new file mode 100644
index 0000000..2d14148
--- /dev/null
+++ b/src/couch.app.src
@@ -0,0 +1,23 @@
+{application, couch, [
+    {description, "@package_name@"},
+    {vsn, "@version@"},
+    {registered, [
+        couch_config,
+        couch_db_update,
+        couch_db_update_notifier_sup,
+        couch_external_manager,
+        couch_httpd,
+        couch_log,
+        couch_primary_services,
+        couch_query_servers,
+        couch_secondary_services,
+        couch_server,
+        couch_server_sup,
+        couch_stats_aggregator,
+        couch_stats_collector,
+        couch_task_status
+    ]},
+    {mod, {couch_app, []}},
+    {applications, [kernel, stdlib, crypto, sasl, public_key, ssl,
+                    inets, oauth, ibrowse, mochiweb, os_mon]}
+]}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch.erl b/src/couch.erl
new file mode 100644
index 0000000..80e3261
--- /dev/null
+++ b/src/couch.erl
@@ -0,0 +1,58 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch).
+
+-export([get_app_env/2,
+         version/0,
+         start/0,
+         stop/0,
+         restart/0,
+         reload/0]).
+
+get_app_env(Env, Default) ->
+    case application:get_env(couch, Env) of
+        {ok, Val} -> Val;
+        undefined -> Default
+    end.
+
+version() ->
+    case application:get_key(couch, vsn) of
+        {ok, FullVersion} ->
+            hd(string:tokens(FullVersion, "-"));
+        _ ->
+            "0.0.0"
+    end.
+
+start() ->
+    application:start(couch).
+
+stop() ->
+    application:stop(couch).
+
+restart() ->
+    case stop() of
+    ok ->
+        start();
+    {error, {not_started,couch}} ->
+        start();
+    {error, Reason} ->
+        {error, Reason}
+    end.
+
+reload() ->
+    case supervisor:terminate_child(couch_server_sup, couch_config) of
+    ok ->
+        supervisor:restart_child(couch_server_sup, couch_config);
+    {error, Reason} ->
+        {error, Reason}
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_app.erl b/src/couch_app.erl
new file mode 100644
index 0000000..a8d215e
--- /dev/null
+++ b/src/couch_app.erl
@@ -0,0 +1,36 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_app).
+
+-behaviour(application).
+
+-include("couch_db.hrl").
+
+-export([start/2, stop/1]).
+
+-define(CONF_FILES, ["couch.ini", "couch_httpd.ini", "local.ini"]).
+
+start(_Type, _Args) ->
+    couch_util:start_app_deps(couch),
+    IniFiles = get_ini_files(),
+    couch_server_sup:start_link(IniFiles).
+
+stop(_) ->
+    ok.
+
+get_ini_files() ->
+    DefaultConfDir = filename:join([code:root_dir(), "./etc"]),
+    Defaults = lists:map(fun(FName) ->
+                    filename:join(DefaultConfDir, FName)
+            end, ?CONF_FILES),
+    couch:get_app_env(config_files, Defaults).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_auth_cache.erl
----------------------------------------------------------------------
diff --git a/src/couch_auth_cache.erl b/src/couch_auth_cache.erl
new file mode 100644
index 0000000..42ccd44
--- /dev/null
+++ b/src/couch_auth_cache.erl
@@ -0,0 +1,425 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_auth_cache).
+-behaviour(gen_server).
+
+% public API
+-export([get_user_creds/1]).
+
+% gen_server API
+-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
+-export([code_change/3, terminate/2]).
+
+-include("couch_db.hrl").
+-include("couch_js_functions.hrl").
+
+-define(STATE, auth_state_ets).
+-define(BY_USER, auth_by_user_ets).
+-define(BY_ATIME, auth_by_atime_ets).
+
+-record(state, {
+    max_cache_size = 0,
+    cache_size = 0,
+    db_notifier = nil,
+    db_mon_ref = nil
+}).
+
+
+-spec get_user_creds(UserName::string() | binary()) ->
+    Credentials::list() | nil.
+
+get_user_creds(UserName) when is_list(UserName) ->
+    get_user_creds(?l2b(UserName));
+
+get_user_creds(UserName) ->
+    UserCreds = case couch_config:get("admins", ?b2l(UserName)) of
+    "-hashed-" ++ HashedPwdAndSalt ->
+        % The name is an admin; now check whether there is a user doc
+        % with a matching name, salt, and password_sha.
+        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+        case get_from_cache(UserName) of
+        nil ->
+            make_admin_doc(HashedPwd, Salt, []);
+        UserProps when is_list(UserProps) ->
+            make_admin_doc(HashedPwd, Salt, couch_util:get_value(<<"roles">>, UserProps))
+        end;
+    "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
+        [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
+        case get_from_cache(UserName) of
+        nil ->
+            make_admin_doc(HashedPwd, Salt, Iterations, []);
+        UserProps when is_list(UserProps) ->
+            make_admin_doc(HashedPwd, Salt, Iterations, couch_util:get_value(<<"roles">>, UserProps))
+        end;
+    _Else ->
+        get_from_cache(UserName)
+    end,
+    validate_user_creds(UserCreds).
+
+make_admin_doc(HashedPwd, Salt, ExtraRoles) ->
+    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
+     {<<"salt">>, ?l2b(Salt)},
+     {<<"password_scheme">>, <<"simple">>},
+     {<<"password_sha">>, ?l2b(HashedPwd)}].
+
+make_admin_doc(DerivedKey, Salt, Iterations, ExtraRoles) ->
+    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
+     {<<"salt">>, ?l2b(Salt)},
+     {<<"iterations">>, list_to_integer(Iterations)},
+     {<<"password_scheme">>, <<"pbkdf2">>},
+     {<<"derived_key">>, ?l2b(DerivedKey)}].
+
+get_from_cache(UserName) ->
+    exec_if_auth_db(
+        fun(_AuthDb) ->
+            maybe_refresh_cache(),
+            case ets:lookup(?BY_USER, UserName) of
+            [] ->
+                gen_server:call(?MODULE, {fetch, UserName}, infinity);
+            [{UserName, {Credentials, _ATime}}] ->
+                couch_stats_collector:increment({couchdb, auth_cache_hits}),
+                gen_server:cast(?MODULE, {cache_hit, UserName}),
+                Credentials
+            end
+        end,
+        nil
+    ).
+
+
+validate_user_creds(nil) ->
+    nil;
+validate_user_creds(UserCreds) ->
+    case couch_util:get_value(<<"_conflicts">>, UserCreds) of
+    undefined ->
+        ok;
+    _ConflictList ->
+        throw({unauthorized,
+            <<"User document conflicts must be resolved before the document",
+              " is used for authentication purposes.">>
+        })
+    end,
+    UserCreds.
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+    ?STATE = ets:new(?STATE, [set, protected, named_table]),
+    ?BY_USER = ets:new(?BY_USER, [set, protected, named_table]),
+    ?BY_ATIME = ets:new(?BY_ATIME, [ordered_set, private, named_table]),
+    process_flag(trap_exit, true),
+    ok = couch_config:register(
+        fun("couch_httpd_auth", "auth_cache_size", SizeList) ->
+            Size = list_to_integer(SizeList),
+            ok = gen_server:call(?MODULE, {new_max_cache_size, Size}, infinity);
+        ("couch_httpd_auth", "authentication_db", _DbName) ->
+            ok = gen_server:call(?MODULE, reinit_cache, infinity)
+        end
+    ),
+    {ok, Notifier} = couch_db_update_notifier:start_link(fun handle_db_event/1),
+    State = #state{
+        db_notifier = Notifier,
+        max_cache_size = list_to_integer(
+            couch_config:get("couch_httpd_auth", "auth_cache_size", "50")
+        )
+    },
+    {ok, reinit_cache(State)}.
+
+
+handle_db_event({Event, DbName}) ->
+    [{auth_db_name, AuthDbName}] = ets:lookup(?STATE, auth_db_name),
+    case DbName =:= AuthDbName of
+    true ->
+        case Event of
+        created -> gen_server:call(?MODULE, reinit_cache, infinity);
+        compacted -> gen_server:call(?MODULE, auth_db_compacted, infinity);
+        _Else   -> ok
+        end;
+    false ->
+        ok
+    end.
+
+
+handle_call(reinit_cache, _From, State) ->
+    catch erlang:demonitor(State#state.db_mon_ref, [flush]),
+    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
+    {reply, ok, reinit_cache(State)};
+
+handle_call(auth_db_compacted, _From, State) ->
+    exec_if_auth_db(
+        fun(AuthDb) ->
+            true = ets:insert(?STATE, {auth_db, reopen_auth_db(AuthDb)})
+        end
+    ),
+    {reply, ok, State};
+
+handle_call({new_max_cache_size, NewSize},
+        _From, #state{cache_size = Size} = State) when NewSize >= Size ->
+    {reply, ok, State#state{max_cache_size = NewSize}};
+
+handle_call({new_max_cache_size, NewSize}, _From, State) ->
+    free_mru_cache_entries(State#state.cache_size - NewSize),
+    {reply, ok, State#state{max_cache_size = NewSize, cache_size = NewSize}};
+
+handle_call({fetch, UserName}, _From, State) ->
+    {Credentials, NewState} = case ets:lookup(?BY_USER, UserName) of
+    [{UserName, {Creds, ATime}}] ->
+        couch_stats_collector:increment({couchdb, auth_cache_hits}),
+        cache_hit(UserName, Creds, ATime),
+        {Creds, State};
+    [] ->
+        couch_stats_collector:increment({couchdb, auth_cache_misses}),
+        Creds = get_user_props_from_db(UserName),
+        State1 = add_cache_entry(UserName, Creds, erlang:now(), State),
+        {Creds, State1}
+    end,
+    {reply, Credentials, NewState};
+
+handle_call(refresh, _From, State) ->
+    exec_if_auth_db(fun refresh_entries/1),
+    {reply, ok, State}.
+
+
+handle_cast({cache_hit, UserName}, State) ->
+    case ets:lookup(?BY_USER, UserName) of
+    [{UserName, {Credentials, ATime}}] ->
+        cache_hit(UserName, Credentials, ATime);
+    _ ->
+        ok
+    end,
+    {noreply, State}.
+
+
+handle_info({'DOWN', Ref, _, _, _Reason}, #state{db_mon_ref = Ref} = State) ->
+    {noreply, reinit_cache(State)}.
+
+
+terminate(_Reason, #state{db_notifier = Notifier}) ->
+    couch_db_update_notifier:stop(Notifier),
+    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
+    true = ets:delete(?BY_USER),
+    true = ets:delete(?BY_ATIME),
+    true = ets:delete(?STATE).
+
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+clear_cache(State) ->
+    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
+    true = ets:delete_all_objects(?BY_USER),
+    true = ets:delete_all_objects(?BY_ATIME),
+    State#state{cache_size = 0}.
+
+
+reinit_cache(State) ->
+    NewState = clear_cache(State),
+    AuthDbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
+    true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
+    AuthDb = open_auth_db(),
+    true = ets:insert(?STATE, {auth_db, AuthDb}),
+    NewState#state{db_mon_ref = couch_db:monitor(AuthDb)}.
+
+
+add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) ->
+    State;
+add_cache_entry(UserName, Credentials, ATime, State) ->
+    case State#state.cache_size >= State#state.max_cache_size of
+    true ->
+        free_mru_cache_entry();
+    false ->
+        ok
+    end,
+    true = ets:insert(?BY_ATIME, {ATime, UserName}),
+    true = ets:insert(?BY_USER, {UserName, {Credentials, ATime}}),
+    State#state{cache_size = couch_util:get_value(size, ets:info(?BY_USER))}.
+
+free_mru_cache_entries(0) ->
+    ok;
+free_mru_cache_entries(N) when N > 0 ->
+    free_mru_cache_entry(),
+    free_mru_cache_entries(N - 1).
+
+free_mru_cache_entry() ->
+    MruTime = ets:last(?BY_ATIME),
+    [{MruTime, UserName}] = ets:lookup(?BY_ATIME, MruTime),
+    true = ets:delete(?BY_ATIME, MruTime),
+    true = ets:delete(?BY_USER, UserName).
+
+
+cache_hit(UserName, Credentials, ATime) ->
+    NewATime = erlang:now(),
+    true = ets:delete(?BY_ATIME, ATime),
+    true = ets:insert(?BY_ATIME, {NewATime, UserName}),
+    true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}).
+
+
+refresh_entries(AuthDb) ->
+    case reopen_auth_db(AuthDb) of
+    nil ->
+        ok;
+    AuthDb2 ->
+        case AuthDb2#db.update_seq > AuthDb#db.update_seq of
+        true ->
+            {ok, _, _} = couch_db:enum_docs_since(
+                AuthDb2,
+                AuthDb#db.update_seq,
+                fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
+                AuthDb#db.update_seq,
+                []
+            ),
+            true = ets:insert(?STATE, {auth_db, AuthDb2});
+        false ->
+            ok
+        end
+    end.
+
+
+refresh_entry(Db, #doc_info{high_seq = DocSeq} = DocInfo) ->
+    case is_user_doc(DocInfo) of
+    {true, UserName} ->
+        case ets:lookup(?BY_USER, UserName) of
+        [] ->
+            ok;
+        [{UserName, {_OldCreds, ATime}}] ->
+            {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted]),
+            NewCreds = user_creds(Doc),
+            true = ets:insert(?BY_USER, {UserName, {NewCreds, ATime}})
+        end;
+    false ->
+        ok
+    end,
+    {ok, DocSeq}.
+
+
+user_creds(#doc{deleted = true}) ->
+    nil;
+user_creds(#doc{} = Doc) ->
+    {Creds} = couch_doc:to_json_obj(Doc, []),
+    Creds.
+
+
+is_user_doc(#doc_info{id = <<"org.couchdb.user:", UserName/binary>>}) ->
+    {true, UserName};
+is_user_doc(_) ->
+    false.
+
+
+maybe_refresh_cache() ->
+    case cache_needs_refresh() of
+    true ->
+        ok = gen_server:call(?MODULE, refresh, infinity);
+    false ->
+        ok
+    end.
+
+
+cache_needs_refresh() ->
+    exec_if_auth_db(
+        fun(AuthDb) ->
+            case reopen_auth_db(AuthDb) of
+            nil ->
+                false;
+            AuthDb2 ->
+                AuthDb2#db.update_seq > AuthDb#db.update_seq
+            end
+        end,
+        false
+    ).
+
+
+reopen_auth_db(AuthDb) ->
+    case (catch couch_db:reopen(AuthDb)) of
+    {ok, AuthDb2} ->
+        AuthDb2;
+    _ ->
+        nil
+    end.
+
+
+exec_if_auth_db(Fun) ->
+    exec_if_auth_db(Fun, ok).
+
+exec_if_auth_db(Fun, DefRes) ->
+    case ets:lookup(?STATE, auth_db) of
+    [{auth_db, #db{} = AuthDb}] ->
+        Fun(AuthDb);
+    _ ->
+        DefRes
+    end.
+
+
+open_auth_db() ->
+    [{auth_db_name, DbName}] = ets:lookup(?STATE, auth_db_name),
+    {ok, AuthDb} = ensure_users_db_exists(DbName, [sys_db]),
+    AuthDb.
+
+
+get_user_props_from_db(UserName) ->
+    exec_if_auth_db(
+        fun(AuthDb) ->
+            Db = reopen_auth_db(AuthDb),
+            DocId = <<"org.couchdb.user:", UserName/binary>>,
+            try
+                {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
+                {DocProps} = couch_doc:to_json_obj(Doc, []),
+                DocProps
+            catch
+            _:_Error ->
+                nil
+            end
+        end,
+        nil
+    ).
+
+ensure_users_db_exists(DbName, Options) ->
+    Options1 = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}, nologifmissing | Options],
+    case couch_db:open(DbName, Options1) of
+    {ok, Db} ->
+        ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+        {ok, Db};
+    _Error ->
+        {ok, Db} = couch_db:create(DbName, Options1),
+        ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+        {ok, Db}
+    end.
+
+ensure_auth_ddoc_exists(Db, DDocId) ->
+    case couch_db:open_doc(Db, DDocId) of
+    {not_found, _Reason} ->
+        {ok, AuthDesign} = auth_design_doc(DDocId),
+        {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
+    {ok, Doc} ->
+        {Props} = couch_doc:to_json_obj(Doc, []),
+        case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
+            ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
+                ok;
+            _ ->
+                Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
+                    {<<"validate_doc_update">>,
+                    ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
+                couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
+        end
+    end,
+    ok.
+
+auth_design_doc(DocId) ->
+    DocProps = [
+        {<<"_id">>, DocId},
+        {<<"language">>,<<"javascript">>},
+        {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
+    ],
+    {ok, couch_doc:from_json_obj({DocProps})}.
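
get_user_creds/1 above distinguishes admin entries by their config prefix: a
"-hashed-" value carries a password hash and salt, while a "-pbkdf2-" value
carries a derived key, salt, and iteration count, all comma-separated. A
sketch of the split it performs; the key and salt here are made up for
illustration:

    %% How a "-pbkdf2-" admin value (prefix already stripped) decomposes
    %% before make_admin_doc/4 builds the user-doc property list.
    1> string:tokens("da39a3ee5e6b4b0d,73616c74,10", ",").
    ["da39a3ee5e6b4b0d","73616c74","10"]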

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_btree.erl
----------------------------------------------------------------------
diff --git a/src/couch_btree.erl b/src/couch_btree.erl
new file mode 100644
index 0000000..789819e
--- /dev/null
+++ b/src/couch_btree.erl
@@ -0,0 +1,714 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree).
+
+-export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
+-export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
+-export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
+-export([less/3]).
+
+-include("couch_db.hrl").
+-define(CHUNK_THRESHOLD, 16#4ff).
+
+extract(#btree{extract_kv=Extract}, Value) ->
+    Extract(Value).
+
+assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
+    Assemble(Key, Value).
+
+less(#btree{less=Less}, A, B) ->
+    Less(A, B).
+
+% Pass in 'nil' for State when opening a new btree.
+open(State, Fd) ->
+    {ok, #btree{root=State, fd=Fd}}.
+
+set_options(Bt, []) ->
+    Bt;
+set_options(Bt, [{split, Extract}|Rest]) ->
+    set_options(Bt#btree{extract_kv=Extract}, Rest);
+set_options(Bt, [{join, Assemble}|Rest]) ->
+    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
+set_options(Bt, [{less, Less}|Rest]) ->
+    set_options(Bt#btree{less=Less}, Rest);
+set_options(Bt, [{reduce, Reduce}|Rest]) ->
+    set_options(Bt#btree{reduce=Reduce}, Rest);
+set_options(Bt, [{compression, Comp}|Rest]) ->
+    set_options(Bt#btree{compression=Comp}, Rest).
+
+open(State, Fd, Options) ->
+    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
+
+get_state(#btree{root=Root}) ->
+    Root.
+
+final_reduce(#btree{reduce=Reduce}, Val) ->
+    final_reduce(Reduce, Val);
+final_reduce(Reduce, {[], []}) ->
+    Reduce(reduce, []);
+final_reduce(_Bt, {[], [Red]}) ->
+    Red;
+final_reduce(Reduce, {[], Reductions}) ->
+    Reduce(rereduce, Reductions);
+final_reduce(Reduce, {KVs, Reductions}) ->
+    Red = Reduce(reduce, KVs),
+    final_reduce(Reduce, {[], [Red | Reductions]}).
+
+fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+    Dir = couch_util:get_value(dir, Options, fwd),
+    StartKey = couch_util:get_value(start_key, Options),
+    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
+    KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end),
+    try
+        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
+            KeyGroupFun, Fun, Acc),
+        if GroupedKey2 == undefined ->
+            {ok, Acc2};
+        true ->
+            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
+            {ok, Acc3} -> {ok, Acc3};
+            {stop, Acc3} -> {ok, Acc3}
+            end
+        end
+    catch
+        throw:{stop, AccDone} -> {ok, AccDone}
+    end.
+
+full_reduce(#btree{root=nil,reduce=Reduce}) ->
+    {ok, Reduce(reduce, [])};
+full_reduce(#btree{root=Root}) ->
+    {ok, element(2, Root)}.
+
+size(#btree{root = nil}) ->
+    0;
+size(#btree{root = {_P, _Red}}) ->
+    % pre 1.2 format
+    nil;
+size(#btree{root = {_P, _Red, Size}}) ->
+    Size.
+
+% Wraps a 2- or 3-arity visitor fun into the full 4-arity fold fun; arity-4 funs pass through as-is.
+convert_fun_arity(Fun) when is_function(Fun, 2) ->
+    fun
+        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
+        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
+    end;
+convert_fun_arity(Fun) when is_function(Fun, 3) ->
+    fun
+        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
+        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
+    end;
+convert_fun_arity(Fun) when is_function(Fun, 4) ->
+    Fun.    % Already arity 4
+
+make_key_in_end_range_function(#btree{less=Less}, fwd, Options) ->
+    case couch_util:get_value(end_key_gt, Options) of
+    undefined ->
+        case couch_util:get_value(end_key, Options) of
+        undefined ->
+            fun(_Key) -> true end;
+        LastKey ->
+            fun(Key) -> not Less(LastKey, Key) end
+        end;
+    EndKey ->
+        fun(Key) -> Less(Key, EndKey) end
+    end;
+make_key_in_end_range_function(#btree{less=Less}, rev, Options) ->
+    case couch_util:get_value(end_key_gt, Options) of
+    undefined ->
+        case couch_util:get_value(end_key, Options) of
+        undefined ->
+            fun(_Key) -> true end;
+        LastKey ->
+            fun(Key) -> not Less(Key, LastKey) end
+        end;
+    EndKey ->
+        fun(Key) -> Less(EndKey, Key) end
+    end.
+
+
+foldl(Bt, Fun, Acc) ->
+    fold(Bt, Fun, Acc, []).
+
+foldl(Bt, Fun, Acc, Options) ->
+    fold(Bt, Fun, Acc, Options).
+
+
+fold(#btree{root=nil}, _Fun, Acc, _Options) ->
+    {ok, {[], []}, Acc};
+fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+    Dir = couch_util:get_value(dir, Options, fwd),
+    InRange = make_key_in_end_range_function(Bt, Dir, Options),
+    Result =
+    case couch_util:get_value(start_key, Options) of
+    undefined ->
+        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
+                convert_fun_arity(Fun), Acc);
+    StartKey ->
+        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
+                convert_fun_arity(Fun), Acc)
+    end,
+    case Result of
+    {ok, Acc2}->
+        FullReduction = element(2, Root),
+        {ok, {[], [FullReduction]}, Acc2};
+    {stop, LastReduction, Acc2} ->
+        {ok, LastReduction, Acc2}
+    end.
+
+add(Bt, InsertKeyValues) ->
+    add_remove(Bt, InsertKeyValues, []).
+
+add_remove(Bt, InsertKeyValues, RemoveKeys) ->
+    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
+    {ok, Bt2}.
+
+query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
+    #btree{root=Root} = Bt,
+    InsertActions = lists:map(
+        fun(KeyValue) ->
+            {Key, Value} = extract(Bt, KeyValue),
+            {insert, Key, Value}
+        end, InsertValues),
+    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
+    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
+    SortFun =
+        fun({OpA, A, _}, {OpB, B, _}) ->
+            case A == B of
+            % A and B are equal, sort by op.
+            true -> op_order(OpA) < op_order(OpB);
+            false ->
+                less(Bt, A, B)
+            end
+        end,
+    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
+    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
+    {ok, NewRoot} = complete_root(Bt, KeyPointers),
+    {ok, QueryResults, Bt#btree{root=NewRoot}}.
+
+% for ordering different operations with the same key.
+% fetch < remove < insert
+op_order(fetch) -> 1;
+op_order(remove) -> 2;
+op_order(insert) -> 3.
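+
+% E.g. for a single key K the sorted actions are
+% [{fetch, K, nil}, {remove, K, nil}, {insert, K, V}], so a fetch in the
+% same batch observes the value as it was before the remove/insert are
+% applied.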
+
+lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
+    SortedKeys = lists:sort(Less, Keys),
+    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
+    % We want to return the results in the same order as the keys were input
+    % but we may have changed the order when we sorted. So we need to put the
+    % order back into the results.
+    couch_util:reorder_results(Keys, SortedResults).
+
+lookup(_Bt, nil, Keys) ->
+    {ok, [{Key, not_found} || Key <- Keys]};
+lookup(Bt, Node, Keys) ->
+    Pointer = element(1, Node),
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
+    kv_node ->
+        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
+    end.
+
+lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
+    {ok, lists:reverse(Output)};
+lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
+    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
+    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
+    {Key, PointerInfo} = element(N, NodeTuple),
+    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
+    case lists:splitwith(SplitFun, LookupKeys) of
+    {[], GreaterQueries} ->
+        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
+    {LessEqQueries, GreaterQueries} ->
+        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
+        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
+    end.
+
+
+lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
+    {ok, lists:reverse(Output)};
+lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
+    % keys not found
+    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
+    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
+    {Key, Value} = element(N, NodeTuple),
+    case less(Bt, LookupKey, Key) of
+    true ->
+        % LookupKey is less than Key
+        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
+    false ->
+        case less(Bt, Key, LookupKey) of
+        true ->
+            % LookupKey is greater than Key
+            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
+        false ->
+            % LookupKey is equal to Key
+            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
+        end
+    end.
+
+
+complete_root(_Bt, []) ->
+    {ok, nil};
+complete_root(_Bt, [{_Key, PointerInfo}])->
+    {ok, PointerInfo};
+complete_root(Bt, KPs) ->
+    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
+    complete_root(Bt, ResultKeyPointers).
+
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
+% It is inaccurate as it does not account for compression when blocks are
+% written. It is also probably quite inefficient, since it computes the
+% serialized size of the whole list up front and of each element again
+% while chunking.
+
+chunkify(InList) ->
+    case ?term_size(InList) of
+    Size when Size > ?CHUNK_THRESHOLD ->
+        NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
+        ChunkThreshold = Size div NumberOfChunksLikely,
+        chunkify(InList, ChunkThreshold, [], 0, []);
+    _Else ->
+        [InList]
+    end.
+
+chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
+    lists:reverse(OutputChunks);
+chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
+    lists:reverse([lists:reverse(OutList) | OutputChunks]);
+chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
+    case ?term_size(InElement) of
+    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
+        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
+    Size ->
+        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
+    end.
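+
+% Rough sketch of the resulting shape (illustrative; real boundaries
+% depend on ?CHUNK_THRESHOLD and the ?term_size of each element):
+%
+%     chunkify([E1, E2, E3, E4]) -> [[E1, E2], [E3, E4]]
+%
+% when the serialized size of the whole list is about twice the threshold.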
+
+modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
+    case RootPointerInfo of
+    nil ->
+        NodeType = kv_node,
+        NodeList = [];
+    _Tuple ->
+        Pointer = element(1, RootPointerInfo),
+        {NodeType, NodeList} = get_node(Bt, Pointer)
+    end,
+    NodeTuple = list_to_tuple(NodeList),
+
+    {ok, NewNodeList, QueryOutput2} =
+    case NodeType of
+    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
+    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
+    end,
+    case NewNodeList of
+    [] ->  % no nodes remain
+        {ok, [], QueryOutput2};
+    NodeList ->  % nothing changed
+        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
+        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
+    _Else2 ->
+        {ok, ResultList} = write_node(Bt, NodeType, NewNodeList),
+        {ok, ResultList, QueryOutput2}
+    end.
+
+reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
+    [];
+reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
+    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
+reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
+    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
+
+reduce_tree_size(kv_node, NodeSize, _KvList) ->
+    NodeSize;
+reduce_tree_size(kp_node, NodeSize, []) ->
+    NodeSize;
+reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
+    % pre 1.2 format
+    nil;
+reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
+    nil;
+reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
+    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
+
+get_node(#btree{fd = Fd}, NodePos) ->
+    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
+    {NodeType, NodeList}.
+
+write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
+    % split up nodes into smaller sizes
+    NodeListList = chunkify(NodeList),
+    % now write out each chunk and return the KeyPointer pairs for those nodes
+    ResultList = [
+        begin
+            {ok, Pointer, Size} = couch_file:append_term(
+                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
+            {LastKey, _} = lists:last(ANodeList),
+            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
+            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
+        end
+    ||
+        ANodeList <- NodeListList
+    ],
+    {ok, ResultList}.
+
+modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
+    modify_node(Bt, nil, Actions, QueryOutput);
+modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
+    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
+            tuple_size(NodeTuple), [])), QueryOutput};
+modify_kpnode(Bt, NodeTuple, LowerBound,
+        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
+    Sz = tuple_size(NodeTuple),
+    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
+    case N =:= Sz of
+    true  ->
+        % perform remaining actions on last node
+        {_, PointerInfo} = element(Sz, NodeTuple),
+        {ok, ChildKPs, QueryOutput2} =
+            modify_node(Bt, PointerInfo, Actions, QueryOutput),
+        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
+            Sz - 1, ChildKPs)),
+        {ok, NodeList, QueryOutput2};
+    false ->
+        {NodeKey, PointerInfo} = element(N, NodeTuple),
+        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
+                not less(Bt, NodeKey, ActionKey)
+            end,
+        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
+        {ok, ChildKPs, QueryOutput2} =
+                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
+        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
+                LowerBound, N - 1, ResultNode)),
+        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
+    end.
+
+bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
+    Tail;
+bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
+    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
+
+bounded_tuple_to_list(Tuple, Start, End, Tail) ->
+    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
+
+bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
+    lists:reverse(Acc, Tail);
+bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
+    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
+
+find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
+    End;
+find_first_gteq(Bt, Tuple, Start, End, Key) ->
+    Mid = Start + ((End - Start) div 2),
+    {TupleKey, _} = element(Mid, Tuple),
+    case less(Bt, TupleKey, Key) of
+    true ->
+        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
+    false ->
+        find_first_gteq(Bt, Tuple, Start, Mid, Key)
+    end.
+
+modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
+    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
+modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
+    case ActionType of
+    insert ->
+        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+    remove ->
+        % just drop the action
+        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
+    fetch ->
+        % the key/value must not exist in the tree
+        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+    end;
+modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
+    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
+    {Key, Value} = element(N, NodeTuple),
+    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
+    case less(Bt, ActionKey, Key) of
+    true ->
+        case ActionType of
+        insert ->
+            % ActionKey is less than the Key, so insert
+            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+        remove ->
+            % ActionKey is less than the Key, just drop the action
+            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
+        fetch ->
+            % ActionKey is less than the Key, the key/value must not exist in the tree
+            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+        end;
+    false ->
+        % ActionKey and Key may be equal.
+        case less(Bt, Key, ActionKey) of
+        false ->
+            case ActionType of
+            insert ->
+                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+            remove ->
+                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
+            fetch ->
+                % ActionKey is equal to Key; insert into the QueryOutput, but re-process
+                % the node since an identical action key can follow it.
+                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
+            end;
+        true ->
+            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
+        end
+    end.
+
+
+reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
+        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
+    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
+        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+    P = element(1, Node),
+    case get_node(Bt, P) of
+    {kp_node, NodeList} ->
+        NodeList2 = adjust_dir(Dir, NodeList),
+        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
+                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
+    {kv_node, KVs} ->
+        KVs2 = adjust_dir(Dir, KVs),
+        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
+                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
+    end.
+
+reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
+                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+                        KeyGroupFun, Fun, Acc) ->
+
+    GTEKeyStartKVs =
+    case KeyStart of
+    undefined ->
+        KVs;
+    _ ->
+        DropFun = case Dir of
+        fwd ->
+            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
+        rev ->
+            fun({Key, _}) -> less(Bt, KeyStart, Key) end
+        end,
+        lists:dropwhile(DropFun, KVs)
+    end,
+    KVs2 = lists:takewhile(
+        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
+    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+                        KeyGroupFun, Fun, Acc).
+
+
+reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+        _KeyGroupFun, _Fun, Acc) ->
+    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
+        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+    case GroupedKey of
+    undefined ->
+        reduce_stream_kv_node2(Bt, RestKVs, Key,
+                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
+    _ ->
+
+        case KeyGroupFun(GroupedKey, Key) of
+        true ->
+            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
+                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
+                Fun, Acc);
+        false ->
+            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
+            {ok, Acc2} ->
+                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
+                    [], KeyGroupFun, Fun, Acc2);
+            {stop, Acc2} ->
+                throw({stop, Acc2})
+            end
+        end
+    end.
+
+reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
+                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+                        KeyGroupFun, Fun, Acc) ->
+    Nodes =
+    case KeyStart of
+    undefined ->
+        NodeList;
+    _ ->
+        case Dir of
+        fwd ->
+            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
+        rev ->
+            RevKPs = lists:reverse(NodeList),
+            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
+            {_Before, []} ->
+                NodeList;
+            {Before, [FirstAfter | _]} ->
+                [FirstAfter | lists:reverse(Before)]
+            end
+        end
+    end,
+    {InRange, MaybeInRange} = lists:splitwith(
+        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
+    NodesInRange = case MaybeInRange of
+    [FirstMaybeInRange | _] when Dir =:= fwd ->
+        InRange ++ [FirstMaybeInRange];
+    _ ->
+        InRange
+    end,
+    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
+        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
+
+
+reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
+                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
+    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
+                [], [], KeyGroupFun, Fun, Acc),
+    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
+            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
+reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
+        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
+        KeyGroupFun(GroupedKey, Key) end, NodeList),
+    {GroupedNodes, UngroupedNodes} =
+    case Grouped0 of
+    [] ->
+        {Grouped0, Ungrouped0};
+    _ ->
+        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
+        {RestGrouped, [FirstGrouped | Ungrouped0]}
+    end,
+    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
+    case UngroupedNodes of
+    [{_Key, NodeInfo}|RestNodes] ->
+        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
+                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
+        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
+                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
+    [] ->
+        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
+    end.
+
+adjust_dir(fwd, List) ->
+    List;
+adjust_dir(rev, List) ->
+    lists:reverse(List).
+
+stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
+    Pointer = element(1, Node),
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
+    kv_node ->
+        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
+    end.
+
+stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
+    Pointer = element(1, Node),
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
+    kv_node ->
+        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
+    end.
+
+stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
+    Red = element(2, Node),
+    case Fun(traverse, Key, Red, Acc) of
+    {ok, Acc2} ->
+        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
+        {ok, Acc3} ->
+            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
+        {stop, LastReds, Acc3} ->
+            {stop, LastReds, Acc3}
+        end;
+    {skip, Acc2} ->
+        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2)
+    end.
+
+drop_nodes(_Bt, Reds, _StartKey, []) ->
+    {Reds, []};
+drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
+    case less(Bt, NodeKey, StartKey) of
+    true ->
+        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
+    false ->
+        {Reds, [{NodeKey, Node} | RestKPs]}
+    end.
+
+stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
+    {NewReds, NodesToStream} =
+    case Dir of
+    fwd ->
+        % drop all nodes sorting before the key
+        drop_nodes(Bt, Reds, StartKey, KPs);
+    rev ->
+        % keep all nodes sorting before the key, AND the first node to sort after
+        RevKPs = lists:reverse(KPs),
+        case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
+        {_RevsBefore, []} ->
+            % everything sorts before it
+            {Reds, KPs};
+        {RevBefore, [FirstAfter | Drop]} ->
+            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
+                 [FirstAfter | lists:reverse(RevBefore)]}
+        end
+    end,
+    case NodesToStream of
+    [] ->
+        {ok, Acc};
+    [{_Key, Node} | Rest] ->
+        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
+        {ok, Acc2} ->
+            Red = element(2, Node),
+            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
+        {stop, LastReds, Acc2} ->
+            {stop, LastReds, Acc2}
+        end
+    end.
+
+stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
+    DropFun =
+    case Dir of
+    fwd ->
+        fun({Key, _}) -> less(Bt, Key, StartKey) end;
+    rev ->
+        fun({Key, _}) -> less(Bt, StartKey, Key) end
+    end,
+    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
+    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
+    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
+
+stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
+    case InRange(K) of
+    false ->
+        {stop, {PrevKVs, Reds}, Acc};
+    true ->
+        AssembledKV = assemble(Bt, K, V),
+        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
+        {ok, Acc2} ->
+            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
+        {stop, Acc2} ->
+            {stop, {PrevKVs, Reds}, Acc2}
+        end
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_changes.erl
----------------------------------------------------------------------
diff --git a/src/couch_changes.erl b/src/couch_changes.erl
new file mode 100644
index 0000000..6edde32
--- /dev/null
+++ b/src/couch_changes.erl
@@ -0,0 +1,577 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_changes).
+-include("couch_db.hrl").
+
+-export([handle_changes/3]).
+
+% For the builtin filter _doc_ids, this is the maximum number
+% of documents for which we trigger the optimized code path.
+-define(MAX_DOC_IDS, 100).
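+
+% E.g. a `_doc_ids` filter with at most ?MAX_DOC_IDS ids takes the direct
+% btree-lookup path in send_changes/3; longer id lists fall back to a
+% full changes_since scan.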
+
+-record(changes_acc, {
+    db,
+    seq,
+    prepend,
+    filter,
+    callback,
+    user_acc,
+    resp_type,
+    limit,
+    include_docs,
+    doc_options,
+    conflicts,
+    timeout,
+    timeout_fun
+}).
+
+%% @type Req -> #httpd{} | {json_req, JsonObj()}
+handle_changes(Args1, Req, Db0) ->
+    #changes_args{
+        style = Style,
+        filter = FilterName,
+        feed = Feed,
+        dir = Dir,
+        since = Since
+    } = Args1,
+    {FilterFun, FilterArgs} = make_filter_fun(FilterName, Style, Req, Db0),
+    Args = Args1#changes_args{filter_fun = FilterFun, filter_args = FilterArgs},
+    Start = fun() ->
+        {ok, Db} = couch_db:reopen(Db0),
+        StartSeq = case Dir of
+        rev ->
+            couch_db:get_update_seq(Db);
+        fwd ->
+            Since
+        end,
+        {Db, StartSeq}
+    end,
+    % begin timer to deal with heartbeat when filter function fails
+    case Args#changes_args.heartbeat of
+    undefined ->
+        erlang:erase(last_changes_heartbeat);
+    Val when is_integer(Val); Val =:= true ->
+        put(last_changes_heartbeat, now())
+    end,
+
+    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
+    true ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            Self = self(),
+            {ok, Notify} = couch_db_update_notifier:start_link(
+                fun({_, DbName}) when Db0#db.name == DbName ->
+                    Self ! db_updated;
+                (_) ->
+                    ok
+                end
+            ),
+            {Db, StartSeq} = Start(),
+            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
+                             <<"">>, Timeout, TimeoutFun),
+            try
+                keep_sending_changes(
+                    Args#changes_args{dir=fwd},
+                    Acc0,
+                    true)
+            after
+                couch_db_update_notifier:stop(Notify),
+                get_rest_db_updated(ok) % clean out any remaining update messages
+            end
+        end;
+    false ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            {Db, StartSeq} = Start(),
+            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
+                             UserAcc2, Db, StartSeq, <<>>, Timeout, TimeoutFun),
+            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+                send_changes(
+                    Args#changes_args{feed="normal"},
+                    Acc0,
+                    true),
+            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
+        end
+    end.
+
+get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
+    Pair;
+get_callback_acc(Callback) when is_function(Callback, 2) ->
+    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
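+
+% A minimal calling sketch (illustrative; assumes #changes_args{}
+% defaults to a "normal" feed and that `Db` is an open #db{} record).
+% handle_changes/3 returns a fun that drives the callback/accumulator
+% pair accepted by get_callback_acc/1:
+%
+%     ChangesFun = handle_changes(#changes_args{}, {json_req, null}, Db),
+%     ChangesFun({fun(_Event, _RespType, Acc) -> Acc end, []}).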
+
+%% @type Req -> #httpd{} | {json_req, JsonObj()}
+make_filter_fun([$_ | _] = FilterName, Style, Req, Db) ->
+    builtin_filter_fun(FilterName, Style, Req, Db);
+make_filter_fun(FilterName, Style, Req, Db) ->
+    {os_filter_fun(FilterName, Style, Req, Db), []}.
+
+os_filter_fun(FilterName, Style, Req, Db) ->
+    case [list_to_binary(couch_httpd:unquote(Part))
+            || Part <- string:tokens(FilterName, "/")] of
+    [] ->
+        fun(_Db2, #doc_info{revs=Revs}) ->
+                builtin_results(Style, Revs)
+        end;
+    [DName, FName] ->
+        DesignId = <<"_design/", DName/binary>>,
+        DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
+        % validate that the ddoc has the filter fun
+        #doc{body={Props}} = DDoc,
+        couch_util:get_nested_json_value({Props}, [<<"filters">>, FName]),
+        fun(Db2, DocInfo) ->
+            DocInfos =
+            case Style of
+            main_only ->
+                [DocInfo];
+            all_docs ->
+                [DocInfo#doc_info{revs=[Rev]}|| Rev <- DocInfo#doc_info.revs]
+            end,
+            Docs = [Doc || {ok, Doc} <- [
+                    couch_db:open_doc(Db2, DocInfo2, [deleted, conflicts])
+                        || DocInfo2 <- DocInfos]],
+            {ok, Passes} = couch_query_servers:filter_docs(
+                Req, Db2, DDoc, FName, Docs
+            ),
+            [{[{<<"rev">>, couch_doc:rev_to_str({RevPos,RevId})}]}
+                || {Pass, #doc{revs={RevPos,[RevId|_]}}}
+                <- lists:zip(Passes, Docs), Pass == true]
+        end;
+    _Else ->
+        throw({bad_request,
+            "filter parameter must be of the form `designname/filtername`"})
+    end.
+
+builtin_filter_fun("_doc_ids", Style, {json_req, {Props}}, _Db) ->
+    DocIds = couch_util:get_value(<<"doc_ids">>, Props),
+    {filter_docids(DocIds, Style), DocIds};
+builtin_filter_fun("_doc_ids", Style, #httpd{method='POST'}=Req, _Db) ->
+    {Props} = couch_httpd:json_body_obj(Req),
+    DocIds =  couch_util:get_value(<<"doc_ids">>, Props, nil),
+    {filter_docids(DocIds, Style), DocIds};
+builtin_filter_fun("_doc_ids", Style, #httpd{method='GET'}=Req, _Db) ->
+    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
+    {filter_docids(DocIds, Style), DocIds};
+builtin_filter_fun("_design", Style, _Req, _Db) ->
+    {filter_designdoc(Style), []};
+builtin_filter_fun("_view", Style, Req, Db) ->
+    ViewName = couch_httpd:qs_value(Req, "view", ""),
+    {filter_view(ViewName, Style, Db), []};
+builtin_filter_fun(_FilterName, _Style, _Req, _Db) ->
+    throw({bad_request, "unknown builtin filter name"}).
+
+filter_docids(DocIds, Style) when is_list(DocIds) ->
+    fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
+            case lists:member(DocId, DocIds) of
+                true ->
+                    builtin_results(Style, Revs);
+                _ -> []
+            end
+    end;
+filter_docids(_, _) ->
+    throw({bad_request, "`doc_ids` filter parameter is not a list."}).
+
+filter_designdoc(Style) ->
+    fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
+            case DocId of
+            <<"_design", _/binary>> ->
+                    builtin_results(Style, Revs);
+                _ -> []
+            end
+    end.
+
+filter_view("", _Style, _Db) ->
+    throw({bad_request, "`view` filter parameter is not provided."});
+filter_view(ViewName, Style, Db) ->
+    case [list_to_binary(couch_httpd:unquote(Part))
+            || Part <- string:tokens(ViewName, "/")] of
+        [] ->
+            throw({bad_request, "Invalid `view` parameter."});
+        [DName, VName] ->
+            DesignId = <<"_design/", DName/binary>>,
+            DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
+            % validate that the ddoc has the filter fun
+            #doc{body={Props}} = DDoc,
+            couch_util:get_nested_json_value({Props}, [<<"views">>, VName]),
+            fun(Db2, DocInfo) ->
+                DocInfos =
+                case Style of
+                main_only ->
+                    [DocInfo];
+                all_docs ->
+                    [DocInfo#doc_info{revs=[Rev]}|| Rev <- DocInfo#doc_info.revs]
+                end,
+                Docs = [Doc || {ok, Doc} <- [
+                        couch_db:open_doc(Db2, DocInfo2, [deleted, conflicts])
+                            || DocInfo2 <- DocInfos]],
+                {ok, Passes} = couch_query_servers:filter_view(
+                    DDoc, VName, Docs
+                ),
+                [{[{<<"rev">>, couch_doc:rev_to_str({RevPos,RevId})}]}
+                    || {Pass, #doc{revs={RevPos,[RevId|_]}}}
+                    <- lists:zip(Passes, Docs), Pass == true]
+            end
+        end.
+
+builtin_results(Style, [#rev_info{rev=Rev}|_]=Revs) ->
+    case Style of
+        main_only ->
+            [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
+        all_docs ->
+            [{[{<<"rev">>, couch_doc:rev_to_str(R)}]}
+                || #rev_info{rev=R} <- Revs]
+    end.
+
+get_changes_timeout(Args, Callback) ->
+    #changes_args{
+        heartbeat = Heartbeat,
+        timeout = Timeout,
+        feed = ResponseType
+    } = Args,
+    DefaultTimeout = list_to_integer(
+        couch_config:get("httpd", "changes_timeout", "60000")
+    ),
+    case Heartbeat of
+    undefined ->
+        case Timeout of
+        undefined ->
+            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+        infinity ->
+            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+        _ ->
+            {lists:min([DefaultTimeout, Timeout]),
+                fun(UserAcc) -> {stop, UserAcc} end}
+        end;
+    true ->
+        {DefaultTimeout,
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
+    _ ->
+        {lists:min([DefaultTimeout, Heartbeat]),
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+    end.
+
+start_sending_changes(_Callback, UserAcc, ResponseType)
+        when ResponseType =:= "continuous"
+        orelse ResponseType =:= "eventsource" ->
+    UserAcc;
+start_sending_changes(Callback, UserAcc, ResponseType) ->
+    Callback(start, ResponseType, UserAcc).
+
+build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
+    #changes_args{
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        limit = Limit,
+        feed = ResponseType,
+        filter_fun = FilterFun
+    } = Args,
+    #changes_acc{
+        db = Db,
+        seq = StartSeq,
+        prepend = Prepend,
+        filter = FilterFun,
+        callback = Callback,
+        user_acc = UserAcc,
+        resp_type = ResponseType,
+        limit = Limit,
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        timeout = Timeout,
+        timeout_fun = TimeoutFun
+    }.
+
+send_changes(Args, Acc0, FirstRound) ->
+    #changes_args{
+        dir = Dir,
+        filter = FilterName,
+        filter_args = FilterArgs
+    } = Args,
+    #changes_acc{
+        db = Db,
+        seq = StartSeq
+    } = Acc0,
+    case FirstRound of
+    true ->
+        case FilterName of
+        "_doc_ids" when length(FilterArgs) =< ?MAX_DOC_IDS ->
+            send_changes_doc_ids(
+                FilterArgs, Db, StartSeq, Dir, fun changes_enumerator/2, Acc0);
+        "_design" ->
+            send_changes_design_docs(
+                Db, StartSeq, Dir, fun changes_enumerator/2, Acc0);
+        _ ->
+            couch_db:changes_since(
+                Db, StartSeq, fun changes_enumerator/2, [{dir, Dir}], Acc0)
+        end;
+    false ->
+        couch_db:changes_since(
+            Db, StartSeq, fun changes_enumerator/2, [{dir, Dir}], Acc0)
+    end.
+
+
+send_changes_doc_ids(DocIds, Db, StartSeq, Dir, Fun, Acc0) ->
+    Lookups = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, DocIds),
+    FullDocInfos = lists:foldl(
+        fun({ok, FDI}, Acc) ->
+            [FDI | Acc];
+        (not_found, Acc) ->
+            Acc
+        end,
+        [], Lookups),
+    send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0) ->
+    FoldFun = fun(FullDocInfo, _, Acc) ->
+        {ok, [FullDocInfo | Acc]}
+    end,
+    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
+    {ok, _, FullDocInfos} = couch_btree:fold(
+        Db#db.fulldocinfo_by_id_btree, FoldFun, [], KeyOpts),
+    send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
+    FoldFun = case Dir of
+    fwd ->
+        fun lists:foldl/3;
+    rev ->
+        fun lists:foldr/3
+    end,
+    GreaterFun = case Dir of
+    fwd ->
+        fun(A, B) -> A > B end;
+    rev ->
+        fun(A, B) -> A =< B end
+    end,
+    DocInfos = lists:foldl(
+        fun(FDI, Acc) ->
+            DI = couch_doc:to_doc_info(FDI),
+            case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+            true ->
+                [DI | Acc];
+            false ->
+                Acc
+            end
+        end,
+        [], FullDocInfos),
+    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+    FinalAcc = try
+        FoldFun(
+            fun(DocInfo, Acc) ->
+                case Fun(DocInfo, Acc) of
+                {ok, NewAcc} ->
+                    NewAcc;
+                {stop, NewAcc} ->
+                    throw({stop, NewAcc})
+                end
+            end,
+            Acc0, SortedDocInfos)
+    catch
+    throw:{stop, Acc} ->
+        Acc
+    end,
+    case Dir of
+    fwd ->
+        {ok, FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)}};
+    rev ->
+        {ok, FinalAcc}
+    end.
+
+
+keep_sending_changes(Args, Acc0, FirstRound) ->
+    #changes_args{
+        feed = ResponseType,
+        limit = Limit,
+        db_open_options = DbOptions
+    } = Args,
+
+    {ok, ChangesAcc} = send_changes(
+        Args#changes_args{dir=fwd},
+        Acc0,
+        FirstRound),
+    #changes_acc{
+        db = Db, callback = Callback, timeout = Timeout, timeout_fun = TimeoutFun,
+        seq = EndSeq, prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
+    } = ChangesAcc,
+
+    couch_db:close(Db),
+    if Limit > NewLimit, ResponseType == "longpoll" ->
+        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
+    true ->
+        case wait_db_updated(Timeout, TimeoutFun, UserAcc2) of
+        {updated, UserAcc4} ->
+            DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
+            case couch_db:open(Db#db.name, DbOptions1) of
+            {ok, Db2} ->
+                keep_sending_changes(
+                  Args#changes_args{limit=NewLimit},
+                  ChangesAcc#changes_acc{
+                    db = Db2,
+                    user_acc = UserAcc4,
+                    seq = EndSeq,
+                    prepend = Prepend2,
+                    timeout = Timeout,
+                    timeout_fun = TimeoutFun},
+                  false);
+            _Else ->
+                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
+            end;
+        {stop, UserAcc4} ->
+            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
+        end
+    end.
+
+end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
+    Callback({stop, EndSeq}, ResponseType, UserAcc).
+
+changes_enumerator(DocInfo, #changes_acc{resp_type = ResponseType} = Acc)
+        when ResponseType =:= "continuous"
+        orelse ResponseType =:= "eventsource" ->
+    #changes_acc{
+        filter = FilterFun, callback = Callback,
+        user_acc = UserAcc, limit = Limit, db = Db,
+        timeout = Timeout, timeout_fun = TimeoutFun
+    } = Acc,
+    #doc_info{high_seq = Seq} = DocInfo,
+    Results0 = FilterFun(Db, DocInfo),
+    Results = [Result || Result <- Results0, Result /= null],
+    %% TODO: I'm thinking this should be < 1 and not =< 1
+    Go = if Limit =< 1 -> stop; true -> ok end,
+    case Results of
+    [] ->
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        case Done of
+        stop ->
+            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+        ok ->
+            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+        end;
+    _ ->
+        ChangesRow = changes_row(Results, DocInfo, Acc),
+        UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
+        reset_heartbeat(),
+        {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}}
+    end;
+changes_enumerator(DocInfo, Acc) ->
+    #changes_acc{
+        filter = FilterFun, callback = Callback, prepend = Prepend,
+        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
+        timeout = Timeout, timeout_fun = TimeoutFun
+    } = Acc,
+    #doc_info{high_seq = Seq} = DocInfo,
+    Results0 = FilterFun(Db, DocInfo),
+    Results = [Result || Result <- Results0, Result /= null],
+    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+    case Results of
+    [] ->
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        case Done of
+        stop ->
+            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+        ok ->
+            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+        end;
+    _ ->
+        ChangesRow = changes_row(Results, DocInfo, Acc),
+        UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+        reset_heartbeat(),
+        {Go, Acc#changes_acc{
+            seq = Seq, prepend = <<",\n">>,
+            user_acc = UserAcc2, limit = Limit - 1}}
+    end.
+
+
+changes_row(Results, DocInfo, Acc) ->
+    #doc_info{
+        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
+    } = DocInfo,
+    #changes_acc{
+        db = Db,
+        include_docs = IncDoc,
+        doc_options = DocOpts,
+        conflicts = Conflicts
+    } = Acc,
+    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
+        deleted_item(Del) ++ case IncDoc of
+            true ->
+                Opts = case Conflicts of
+                    true -> [deleted, conflicts];
+                    false -> [deleted]
+                end,
+                Doc = couch_index_util:load_doc(Db, DocInfo, Opts),
+                case Doc of
+                    null ->
+                        [{doc, null}];
+                    _ ->
+                        [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
+                end;
+            false ->
+                []
+        end}.
+
+deleted_item(true) -> [{<<"deleted">>, true}];
+deleted_item(_) -> [].
+
+% waits for a db_updated msg; if there are multiple msgs, collects them all.
+wait_db_updated(Timeout, TimeoutFun, UserAcc) ->
+    receive
+    db_updated ->
+        get_rest_db_updated(UserAcc)
+    after Timeout ->
+        {Go, UserAcc2} = TimeoutFun(UserAcc),
+        case Go of
+        ok ->
+            wait_db_updated(Timeout, TimeoutFun, UserAcc2);
+        stop ->
+            {stop, UserAcc2}
+        end
+    end.
+
+get_rest_db_updated(UserAcc) ->
+    receive
+    db_updated ->
+        get_rest_db_updated(UserAcc)
+    after 0 ->
+        {updated, UserAcc}
+    end.
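+
+% E.g. a rapid burst of db_updated messages collapses into a single
+% {updated, UserAcc} return, so one burst triggers only one re-scan of
+% the changes feed.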
+
+reset_heartbeat() ->
+    case get(last_changes_heartbeat) of
+    undefined ->
+        ok;
+    _ ->
+        put(last_changes_heartbeat, now())
+    end.
+
+maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
+    Before = get(last_changes_heartbeat),
+    case Before of
+    undefined ->
+        {ok, Acc};
+    _ ->
+        Now = now(),
+        case timer:now_diff(Now, Before) div 1000 >= Timeout of
+        true ->
+            Acc2 = TimeoutFun(Acc),
+            put(last_changes_heartbeat, Now),
+            Acc2;
+        false ->
+            {ok, Acc}
+        end
+    end.


[33/41] make couch_httpd a full couch application

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_proxy.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_proxy.erl b/src/couch_httpd_proxy.erl
deleted file mode 100644
index 6a4557c..0000000
--- a/src/couch_httpd_proxy.erl
+++ /dev/null
@@ -1,426 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_httpd_proxy).
-
--export([handle_proxy_req/2]).
-
--include("couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
-
--define(TIMEOUT, infinity).
--define(PKT_SIZE, 4096).
-
-
-handle_proxy_req(Req, ProxyDest) ->
-    Method = get_method(Req),
-    Url = get_url(Req, ProxyDest),
-    Version = get_version(Req),
-    Headers = get_headers(Req),
-    Body = get_body(Req),
-    Options = [
-        {http_vsn, Version},
-        {headers_as_is, true},
-        {response_format, binary},
-        {stream_to, {self(), once}}
-    ],
-    case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
-        {ibrowse_req_id, ReqId} ->
-            stream_response(Req, ProxyDest, ReqId);
-        {error, Reason} ->
-            throw({error, Reason})
-    end.
-
-
-get_method(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(method) of
-        Method when is_atom(Method) ->
-            list_to_atom(string:to_lower(atom_to_list(Method)));
-        Method when is_list(Method) ->
-            list_to_atom(string:to_lower(Method));
-        Method when is_binary(Method) ->
-            list_to_atom(string:to_lower(?b2l(Method)))
-    end.
-
-
-get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
-    get_url(Req, ?b2l(ProxyDest));
-get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
-    BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
-        {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
-        _ -> ProxyDest
-    end,
-    ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
-    RequestedPath = MochiReq:get(raw_path),
-    case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
-        {[], ProxyPrefix, []} ->
-            BaseUrl;
-        {[], ProxyPrefix, [$/ | DestPath]} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        {[], ProxyPrefix, DestPath} ->
-            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
-        _Else ->
-            throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
-    end.
-
-get_version(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(version).
-
-
-get_headers(#httpd{mochi_req=MochiReq}) ->
-    to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
-
-to_ibrowse_headers([], Acc) ->
-    lists:reverse(Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
-    to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
-        % This appears to make ibrowse too smart.
-        %"transfer-encoding" ->
-        %    to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
-        _ ->
-            to_ibrowse_headers(Rest, [{K, V} | Acc])
-    end.
-
-get_body(#httpd{method='GET'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='HEAD'}) ->
-    fun() -> eof end;
-get_body(#httpd{method='DELETE'}) ->
-    fun() -> eof end;
-get_body(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:get(body_length) of
-        undefined ->
-            <<>>;
-        {unknown_transfer_encoding, Unknown} ->
-            exit({unknown_transfer_encoding, Unknown});
-        chunked ->
-            {fun stream_chunked_body/1, {init, MochiReq, 0}};
-        0 ->
-            <<>>;
-        Length when is_integer(Length) andalso Length > 0 ->
-            {fun stream_length_body/1, {init, MochiReq, Length}};
-        Length ->
-            exit({invalid_body_length, Length})
-    end.
-
-
-remove_trailing_slash(Url) ->
-    rem_slash(lists:reverse(Url)).
-
-rem_slash([]) ->
-    [];
-rem_slash([$\s | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\t | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\r | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$\n | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash([$/ | RevUrl]) ->
-    rem_slash(RevUrl);
-rem_slash(RevUrl) ->
-    lists:reverse(RevUrl).
-
-
-stream_chunked_body({init, MReq, 0}) ->
-    % First chunk, do expect-continue dance.
-    init_body_stream(MReq),
-    stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
-stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
-    % Finished a chunk, get next length. If next length
-    % is 0, it's time to try to read trailers.
-    {CRem, Data} = read_chunk_length(MReq),
-    case CRem of
-        0 ->
-            BodyData = lists:reverse(Buf, Data),
-            {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-        _ ->
-            stream_chunked_body(
-                {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
-            )
-    end;
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
-    % Time to empty our buffers to the upstream socket.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
-    % Buffer some more data from the client.
-    Length = lists:min([CRem, BRem]),
-    Socket = MReq:get(socket),
-    NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
-        {ok, Data} when size(Data) == CRem ->
-            case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
-                {ok, <<"\r\n">>} ->
-                    {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
-                _ ->
-                    exit(normal)
-            end;
-        {ok, Data} ->
-            {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
-        _ ->
-            exit(normal)
-    end,
-    stream_chunked_body(NewState);
-stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
-    % Empty our buffers and send data upstream.
-    BodyData = lists:reverse(Buf),
-    {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-stream_chunked_body({trailers, MReq, Buf, BRem}) ->
-    % Read another trailer into the buffer or stop on an
-    % empty line.
-    Socket = MReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, <<"\r\n">>} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            BodyData = lists:reverse(Buf, <<"\r\n">>),
-            {ok, BodyData, eof};
-        {ok, Footer} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
-            stream_chunked_body(NewState);
-        _ ->
-            exit(normal)
-    end;
-stream_chunked_body(eof) ->
-    % Tell ibrowse we're done sending data.
-    eof.
-
-
-stream_length_body({init, MochiReq, Length}) ->
-    % Do the expect-continue dance
-    init_body_stream(MochiReq),
-    stream_length_body({stream, MochiReq, Length});
-stream_length_body({stream, _MochiReq, 0}) ->
-    % Finished streaming.
-    eof;
-stream_length_body({stream, MochiReq, Length}) ->
-    BufLen = lists:min([Length, ?PKT_SIZE]),
-    case MochiReq:recv(BufLen) of
-        <<>> -> eof;
-        Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
-    end.
-
-
-init_body_stream(MochiReq) ->
-    Expect = case MochiReq:get_header_value("expect") of
-        undefined ->
-            undefined;
-        Value when is_list(Value) ->
-            string:to_lower(Value)
-    end,
-    case Expect of
-        "100-continue" ->
-            MochiReq:start_raw_response({100, gb_trees:empty()});
-        _Else ->
-            ok
-    end.
-
-
-read_chunk_length(MochiReq) ->
-    Socket = MochiReq:get(socket),
-    mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
-        {ok, Header} ->
-            mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            Splitter = fun(C) ->
-                C =/= $\r andalso C =/= $\n andalso C =/= $\s
-            end,
-            {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
-            {mochihex:to_int(Hex), Header};
-        _ ->
-            exit(normal)
-    end.
-
-
-stream_response(Req, ProxyDest, ReqId) ->
-    receive
-        {ibrowse_async_headers, ReqId, "100", _} ->
-            % ibrowse doesn't handle 100 Continue responses which
-            % means we have to discard them so the proxy client
-            % doesn't get confused.
-            ibrowse:stream_next(ReqId),
-            stream_response(Req, ProxyDest, ReqId);
-        {ibrowse_async_headers, ReqId, Status, Headers} ->
-            {Source, Dest} = get_urls(Req, ProxyDest),
-            FixedHeaders = fix_headers(Source, Dest, Headers, []),
-            case body_length(FixedHeaders) of
-                chunked ->
-                    {ok, Resp} = couch_httpd:start_chunked_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_chunked_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                Length when is_integer(Length) ->
-                    {ok, Resp} = couch_httpd:start_response_length(
-                        Req, list_to_integer(Status), FixedHeaders, Length
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    {ok, Resp};
-                _ ->
-                    {ok, Resp} = couch_httpd:start_response(
-                        Req, list_to_integer(Status), FixedHeaders
-                    ),
-                    ibrowse:stream_next(ReqId),
-                    stream_length_response(Req, ReqId, Resp),
-                    % XXX: MochiWeb apparently doesn't look at the
-                    % response to see if it must force close the
-                    % connection. So we help it out here.
-                    erlang:put(mochiweb_request_force_close, true),
-                    {ok, Resp}
-            end
-    end.
-
-
-stream_chunked_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send_chunk(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_chunked_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            couch_httpd:last_chunk(Resp)
-    end.
-
-
-stream_length_response(Req, ReqId, Resp) ->
-    receive
-        {ibrowse_async_response, ReqId, {error, Reason}} ->
-            throw({error, Reason});
-        {ibrowse_async_response, ReqId, Chunk} ->
-            couch_httpd:send(Resp, Chunk),
-            ibrowse:stream_next(ReqId),
-            stream_length_response(Req, ReqId, Resp);
-        {ibrowse_async_response_end, ReqId} ->
-            ok
-    end.
-
-
-get_urls(Req, ProxyDest) ->
-    SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
-    Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
-    case (catch parse_url(ProxyDest)) of
-        Dest when is_record(Dest, url) ->
-            {Source, Dest};
-        _ ->
-            DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
-            {Source, parse_url(DestUrl)}
-    end.
-
-
-fix_headers(_, _, [], Acc) ->
-    lists:reverse(Acc);
-fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
-    Fixed = case string:to_lower(K) of
-        "location" -> rewrite_location(Source, Dest, V);
-        "content-location" -> rewrite_location(Source, Dest, V);
-        "uri" -> rewrite_location(Source, Dest, V);
-        "destination" -> rewrite_location(Source, Dest, V);
-        "set-cookie" -> rewrite_cookie(Source, Dest, V);
-        _ -> V
-    end,
-    fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
-
-
-rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
-    case (catch parse_url(Url)) of
-        #url{host=Host, port=Port, protocol=Proto} = Location ->
-            DestLoc = #url{
-                protocol=Source#url.protocol,
-                host=Source#url.host,
-                port=Source#url.port,
-                path=join_url_path(Source#url.path, Location#url.path)
-            },
-            url_to_url(DestLoc);
-        #url{} ->
-            Url;
-        _ ->
-            url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
-    end.
-
-
-rewrite_cookie(_Source, _Dest, Cookie) ->
-    Cookie.
-
-
-parse_url(Url) when is_binary(Url) ->
-    ibrowse_lib:parse_url(?b2l(Url));
-parse_url(Url) when is_list(Url) ->
-    ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
-
-
-join_url_path(Src, Dst) ->
-    Src2 = case lists:reverse(Src) of
-        "/" ++ RestSrc -> lists:reverse(RestSrc);
-        _ -> Src
-    end,
-    Dst2 = case Dst of
-        "/" ++ RestDst -> RestDst;
-        _ -> Dst
-    end,
-    Src2 ++ "/" ++ Dst2.
-
-
-url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
-    LPort = case {Proto, Port} of
-        {http, 80} -> "";
-        {https, 443} -> "";
-        _ -> ":" ++ integer_to_list(Port)
-    end,
-    LPath = case Path of
-        "/" ++ _RestPath -> Path;
-        _ -> "/" ++ Path
-    end,
-    HostPart = case Url#url.host_type of
-        ipv6_address ->
-            "[" ++ Host ++ "]";
-        _ ->
-            Host
-    end,
-    atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
-
-
-body_length(Headers) ->
-    case is_chunked(Headers) of
-        true -> chunked;
-        _ -> content_length(Headers)
-    end.
-
-
-is_chunked([]) ->
-    false;
-is_chunked([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "transfer-encoding" ->
-            string:to_lower(V) == "chunked";
-        _ ->
-            is_chunked(Rest)
-    end.
-
-content_length([]) ->
-    undefined;
-content_length([{K, V} | Rest]) ->
-    case string:to_lower(K) of
-        "content-length" ->
-            list_to_integer(V);
-        _ ->
-            content_length(Rest)
-    end.
-

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_rewrite.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_rewrite.erl b/src/couch_httpd_rewrite.erl
deleted file mode 100644
index 1187397..0000000
--- a/src/couch_httpd_rewrite.erl
+++ /dev/null
@@ -1,484 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(couch_httpd_rewrite).
--export([handle_rewrite_req/3]).
--include("couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, <<"*">>}).
-
-
-%% doc The http rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the rewrites member of the design doc.
-%% Example of a complete rule:
-%%
-%%  {
-%%      ....
-%%      "rewrites": [
-%%      {
-%%          "from": "",
-%%          "to": "index.html",
-%%          "method": "GET",
-%%          "query": {}
-%%      }
-%%      ]
-%%  }
-%%
-%%  from: the path rule used to bind the current URI to the rule. It
-%% uses pattern matching for that.
-%%
-%%  to: the rule used to rewrite the URL. It can contain variables that
-%% depend on binding variables discovered during pattern matching and on
-%% query args (URL args and the query member).
-%%
-%%  method: binds the request method to the rule. "*" by default.
-%%  query: query args you want to define; they can contain dynamic
-%% variables by binding the key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting
-%% with ":" or "*". Example:
-%% /somepath/:var/*
-%%
-%% Such a path is converted to an Erlang list by splitting on "/". Each var
-%% is converted to an atom and "*" is converted to the '*' atom. The pattern
-%% matching is done by splitting the request URL on "/" into a list of
-%% tokens. A string pattern matches an equal token. The star atom ('*' in
-%% single quotes) matches any number of tokens, but may only be present as
-%% the last pathterm in a pathspec. If all tokens are matched and all
-%% pathterms are used, then the pathspec matches. It works like Webmachine.
-%% Each identified token is reused in the to rule and in the query.
-%%
-%% The pattern matching first matches the request method to a rule. By
-%% default all methods match a rule (method is equal to "*" by default).
-%% Then it tries to match the path to one rule. If no rule matches, a 404
-%% error is returned.
-%%
-%% Once a rule is found, the request URL is rewritten using the "to" and
-%% "query" members. The identified tokens are matched to the rule and
-%% replace the vars. If '*' is found in the rule, it will contain the
-%% remaining part, if any.
-%%
-%% Examples:
-%%
-%% Dispatch rule            URL             TO                  Tokens
-%%
-%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
-%% "to": "/some/"}                                              k = v
-%%
-%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*",         /a/b/c          /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a",           /a              /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo",     /a/b             /some/?k=b&foo=b    foo =:= b
-%% "to": "/some",
-%%  "query": {
-%%      "k": ":foo"
-%%  }}
-%%
-%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
-%% "to": "/some/:foo",
-%%  }}
-
-
-
-handle_rewrite_req(#httpd{
-        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
-        method=Method,
-        mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
-    % we are in a design handler
-    DesignId = <<"_design/", DesignName/binary>>,
-    Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
-    QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
-
-    RewritesSoFar = erlang:get(?REWRITE_COUNT),
-    MaxRewrites = list_to_integer(couch_config:get("httpd", "rewrite_limit", "100")),
-    case RewritesSoFar >= MaxRewrites of
-        true ->
-            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
-        false ->
-            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
-    end,
-
-    #doc{body={Props}} = DDoc,
-
-    % get rules from ddoc
-    case couch_util:get_value(<<"rewrites">>, Props) of
-        undefined ->
-            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
-                <<"Invalid path.">>);
-        Bin when is_binary(Bin) ->
-            couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
-                <<"Rewrite rules are a String. They must be a JSON Array.">>);
-        Rules ->
-            % create dispatch list from rules
-            DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
-            Method1 = couch_util:to_binary(Method),
-
-            %% get raw path by matching url to a rule.
-            RawPath = case try_bind_path(DispatchList, Method1, 
-                    PathParts, QueryList) of
-                no_dispatch_path ->
-                    throw(not_found);
-                {NewPathParts, Bindings} ->
-                    Parts = [quote_plus(X) || X <- NewPathParts],
-
-                    % build the new path, re-encode query args and convert
-                    % them to JSON where needed
-                    Bindings1 = maybe_encode_bindings(Bindings),
-                    Path = binary_to_list(
-                        iolist_to_binary([
-                                string:join(Parts, [?SEPARATOR]),
-                                [["?", mochiweb_util:urlencode(Bindings1)] 
-                                    || Bindings1 =/= [] ]
-                            ])),
-                    
-                    % if the path is relative, detect it and rewrite the path
-                    case mochiweb_util:safe_relative_path(Path) of
-                        undefined ->
-                            ?b2l(Prefix) ++ "/" ++ Path;
-                        P1 ->
-                            ?b2l(Prefix) ++ "/" ++ P1
-                    end
-
-                end,
-
-            % normalize final path (fix levels "." and "..")
-            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
-
-            % In order to do OAuth correctly, we have to save the
-            % requested path. We use default so chained rewriting
-            % won't replace the original header.
-            Headers = mochiweb_headers:default("x-couchdb-requested-path",
-                                             MochiReq:get(raw_path),
-                                             MochiReq:get(headers)),
-
-            ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
-
-            % build a new mochiweb request
-            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                             MochiReq:get(method),
-                                             RawPath1,
-                                             MochiReq:get(version),
-                                             Headers),
-
-            % cleanup; this forces mochiweb to reparse the raw URI.
-            MochiReq1:cleanup(),
-
-            #httpd{
-                db_url_handlers = DbUrlHandlers,
-                design_url_handlers = DesignUrlHandlers,
-                default_fun = DefaultFun,
-                url_handlers = UrlHandlers,
-                user_ctx = UserCtx,
-                auth = Auth
-            } = Req,
-
-            erlang:put(pre_rewrite_auth, Auth),
-            erlang:put(pre_rewrite_user_ctx, UserCtx),
-            couch_httpd:handle_request_int(MochiReq1, DefaultFun,
-                    UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
-        end.
-
-quote_plus({bind, X}) ->
-    mochiweb_util:quote_plus(X);
-quote_plus(X) ->
-    mochiweb_util:quote_plus(X).
-
-%% @doc Try to find a rule matching the current URL. If none is found,
-%% a 404 not_found error is raised.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
-    no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
-    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
-    case bind_method(Method1, Method) of
-        true ->
-            case bind_path(PathParts1, PathParts, []) of
-                {ok, Remaining, Bindings} ->
-                    Bindings1 = Bindings ++ QueryList,
-                    % parse query args from the rule and fill
-                    % them in with binding vars where needed
-                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
-                        Formats, []),
-                    % remove params in Bindings1 that are already in
-                    % QueryArgs1
-                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
-                        K1 = to_binding(K),
-                        KV = case couch_util:get_value(K1, QueryArgs1) of
-                            undefined -> [{K1, V}];
-                            _V1 -> []
-                        end,
-                        Acc ++ KV
-                    end, [], Bindings1),
-
-                    FinalBindings = Bindings2 ++ QueryArgs1,
-                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
-                                    Remaining, []),
-                    {NewPathParts, FinalBindings};
-                fail ->
-                    try_bind_path(Rest, Method, PathParts, QueryList)
-            end;
-        false ->
-            try_bind_path(Rest, Method, PathParts, QueryList)
-    end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by a binding or an argument
-%% passed in the URL.
-make_query_list([], _Bindings, _Formats, Acc) ->
-    Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
-    Value1 = {Value},
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
-    Value1 = replace_var(Value, Bindings, Formats),
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
-    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
-
-replace_var(<<"*">>=Value, Bindings, Formats) ->
-    get_var(Value, Bindings, Value, Formats);
-replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
-    get_var(Var, Bindings, Value, Formats);
-replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
-    Value;
-replace_var(Value, Bindings, Formats) when is_list(Value) ->
-    lists:reverse(lists:foldl(fun
-                (<<":", Var/binary>>=Value1, Acc) ->
-                    [get_var(Var, Bindings, Value1, Formats)|Acc];
-                (Value1, Acc) ->
-                    [Value1|Acc]
-            end, [], Value));
-replace_var(Value, _Bindings, _Formats) ->
-    Value.
-                    
-maybe_json(Key, Value) ->
-    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
-                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
-        true ->
-            ?JSON_ENCODE(Value);
-        false ->
-            Value
-    end.
-
-get_var(VarName, Props, Default, Formats) ->
-    VarName1 = to_binding(VarName),
-    Val = couch_util:get_value(VarName1, Props, Default),
-    maybe_format(VarName, Val, Formats).
-
-maybe_format(VarName, Value, Formats) ->
-    case couch_util:get_value(VarName, Formats) of
-        undefined ->
-             Value;
-        Format ->
-            format(Format, Value)
-    end.
-
-format(<<"int">>, Value) when is_integer(Value) ->
-    Value;
-format(<<"int">>, Value) when is_binary(Value) ->
-    format(<<"int">>, ?b2l(Value));
-format(<<"int">>, Value) when is_list(Value) ->
-    case (catch list_to_integer(Value)) of
-        IntVal when is_integer(IntVal) ->
-            IntVal;
-        _ ->
-            Value
-    end;
-format(<<"bool">>, Value) when is_binary(Value) ->
-    format(<<"bool">>, ?b2l(Value));
-format(<<"bool">>, Value) when is_list(Value) ->
-    case string:to_lower(Value) of
-        "true" -> true;
-        "false" -> false;
-        _ -> Value
-    end;
-format(_Format, Value) ->
-    Value.
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined -> << "undefined">>;
-        P1 -> 
-            iolist_to_binary(P1)
-    end,
-    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
-    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check whether the request method fits the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% depending on the HTTP method.
-bind_method(?MATCH_ALL, _Method ) ->
-    true;
-bind_method({bind, Method}, Method) ->
-    true;
-bind_method(_, _) ->
-    false.
-
-
-%% @doc Bind a path. Using the from rule we try to bind variables
-%% to the current URL by pattern matching.
-bind_path([], [], Bindings) ->
-    {ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
-    {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
-bind_path(_, [], _) ->
-    fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
-    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
-    bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
-    fail.
-
-
-%% normalize path.
-normalize_path(Path)  ->
-    "/" ++ string:join(normalize_path1(string:tokens(Path,
-                "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
-    lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
-    Acc1 = case Acc of
-        [] -> [".."|Acc];
-        [T|_] when T =:= ".." -> [".."|Acc];
-        [_|R] -> R
-    end,
-    normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
-    normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
-    normalize_path1(Rest, [Path|Acc]).
-
-
-%% @doc Transform a JSON rule into Erlang terms for pattern matching.
-make_rule(Rule) ->
-    Method = case couch_util:get_value(<<"method">>, Rule) of
-        undefined -> ?MATCH_ALL;
-        M -> to_binding(M)
-    end,
-    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
-        undefined -> [];
-        {Args} -> Args
-        end,
-    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
-        undefined -> [?MATCH_ALL];
-        From ->
-            parse_path(From)
-        end,
-    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
-        undefined ->
-            throw({error, invalid_rewrite_target});
-        To ->
-            parse_path(To)
-        end,
-    Formats = case couch_util:get_value(<<"formats">>, Rule) of
-        undefined -> [];
-        {Fmts} -> Fmts
-    end,
-    [{FromParts, Method}, ToParts, QueryArgs, Formats].
-
-parse_path(Path) ->
-    {ok, SlashRE} = re:compile(<<"\\/">>),
-    path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc Convert a path rule (from or to) to an Erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to binding terms.
-path_to_list([], Acc, _DotDotCount) ->
-    lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
-    path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
-    case couch_config:get("httpd", "secure_rewrites", "true") of
-    "false" ->
-        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-    _Else ->
-        ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
-        throw({insecure_rewrite_rule, "too many ../.. segments"})
-    end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
-    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
-    P1 = case P of
-        <<":", Var/binary>> ->
-            to_binding(Var);
-        _ -> P
-    end,
-    path_to_list(R, [P1|Acc], DotDotCount).
-
-maybe_encode_bindings([]) ->
-    [];
-maybe_encode_bindings(Props) -> 
-    lists:foldl(fun 
-            ({{bind, <<"*">>}, _V}, Acc) ->
-                Acc;
-            ({{bind, K}, V}, Acc) ->
-                V1 = iolist_to_binary(maybe_json(K, V)),
-                [{K, V1}|Acc]
-        end, [], Props).
-                
-decode_query_value({K,V}) ->
-    case lists:member(K, ["key", "startkey", "start_key",
-                "endkey", "end_key", "keys"]) of
-        true ->
-            {to_binding(K), ?JSON_DECODE(V)};
-        false ->
-            {to_binding(K), ?l2b(V)}
-    end.
-
-to_binding({bind, V}) ->
-    {bind, V};
-to_binding(V) when is_list(V) ->
-    to_binding(?l2b(V));
-to_binding(V) ->
-    {bind, V}.

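The heart of the rewriter above is bind_path/3, which walks rule tokens and
request segments in lock-step. Below is a minimal, self-contained sketch of
that token-binding idea; the module name (demo_bind) is hypothetical and the
'*' handling is slightly simplified relative to the deleted code:

    -module(demo_bind).
    -export([bind/2]).

    %% Bind rule tokens against request path segments, collecting variables.
    %% {bind, Name} matches any single segment; '*' swallows the remainder.
    bind(Rule, Path) ->
        bind(Rule, Path, []).

    bind([], [], Bindings) ->
        {ok, [], lists:reverse(Bindings)};
    bind(['*'], Rest, Bindings) ->
        {ok, Rest, lists:reverse(Bindings)};
    bind([{bind, Name} | RuleRest], [Seg | PathRest], Bindings) ->
        bind(RuleRest, PathRest, [{Name, Seg} | Bindings]);
    bind([Token | RuleRest], [Token | PathRest], Bindings) ->
        bind(RuleRest, PathRest, Bindings);
    bind(_, _, _) ->
        fail.

For example, demo_bind:bind([<<"a">>, {bind, foo}, '*'],
[<<"a">>, <<"b">>, <<"c">>]) returns {ok, [<<"c">>], [{foo, <<"b">>}]}.
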
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_stats_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_stats_handlers.erl b/src/couch_httpd_stats_handlers.erl
deleted file mode 100644
index d6973f6..0000000
--- a/src/couch_httpd_stats_handlers.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_stats_handlers).
--include("couch_db.hrl").
-
--export([handle_stats_req/1]).
--import(couch_httpd, [
-    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
-    start_json_response/2, send_chunk/2, end_json_response/1,
-    start_chunked_response/3, send_error/4
-]).
-
-handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    flush(Req),
-    send_json(Req, couch_stats_aggregator:all(range(Req)));
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
-    throw({bad_request, <<"Stat names must have exactly two parts.">>});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
-    flush(Req),
-    Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
-        list_to_atom(binary_to_list(Key))}, range(Req)),
-    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
-
-handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
-    throw({bad_request, <<"Stat names must have exactly two parts.">>});
-
-handle_stats_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-range(Req) ->
-    case couch_util:get_value("range", couch_httpd:qs(Req)) of
-        undefined ->
-            0;
-        Value ->
-            list_to_integer(Value)
-    end.
-
-flush(Req) ->
-    case couch_util:get_value("flush", couch_httpd:qs(Req)) of
-        "true" ->
-            couch_stats_aggregator:collect_sample();
-        _Else ->
-            ok
-    end.

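The handler above serves stats as /_stats/<module>/<key> and understands the
range and flush query parameters. A hypothetical client call using OTP's
httpc; the host, port and stat name here are assumptions for illustration:

    %% Fetch one stat, forcing a fresh sample over the last 60 seconds.
    ok = application:ensure_started(inets),
    {ok, {{_Version, 200, _Reason}, _Headers, Body}} =
        httpc:request(get,
            {"http://127.0.0.1:5984/_stats/couchdb/request_time?range=60&flush=true",
             []},
            [], []),
    io:format("~s~n", [Body]).
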
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_vhost.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_vhost.erl b/src/couch_httpd_vhost.erl
deleted file mode 100644
index 4c3ebfe..0000000
--- a/src/couch_httpd_vhost.erl
+++ /dev/null
@@ -1,383 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_vhost).
--behaviour(gen_server).
-
--export([start_link/0, config_change/2, reload/0, get_state/0, dispatch_host/1]).
--export([urlsplit_netloc/2, redirect_to_vhost/2]).
--export([host/1, split_host_port/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-
--include("couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, {bind, '*'}).
-
--record(vhosts_state, {
-        vhosts,
-        vhost_globals,
-        vhosts_fun}).
-
-%% doc The vhost manager.
-%% This gen_server keeps the state of vhosts added to the ini file and
-%% tries to match the Host header (or forwarded host) against rules built
-%% from the vhost list.
-%%
-%% Declaration of vhosts takes place in the configuration file:
-%%
-%% [vhosts]
-%% example.com = /example
-%% *.example.com = /example
-%%
-%% The first line will rewrite the request to display the content of the
-%% example database. This rule works only if the Host header is
-%% 'example.com' and won't work for CNAMEs. The second rule, on the other
-%% hand, matches all CNAMEs to the example db, so www.example.com or
-%% db.example.com will work.
-%%
-%% The wildcard ('*') should always be the last in the CNAMEs:
-%%
-%%      "*.db.example.com = /"  will match all CNAMEs on top of
-%% db.example.com to the root of the machine.
-%%
-%%
-%% Rewriting Hosts to path
-%% -----------------------
-%%
-%% Like in the _rewrite handler you can match some variables and use
-%% them to create the target path. Some examples:
-%%
-%%    [vhosts]
-%%    *.example.com = /*
-%%    :dbname.example.com = /:dbname
-%%    :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
-%%
-%% The first rule passes the wildcard as dbname, the second does the same
-%% but uses a variable name, and the third one allows you to use any app
-%% with :ddocname in any db with :dbname.
-%%
-%% You can also change the default function used to handle requests by
-%% changing the setting `redirect_vhost_handler` in the `httpd` section of
-%% the ini:
-%%
-%%    [httpd]
-%%    redirect_vhost_handler = {Module, Fun}
-%%
-%% The function takes 2 args: the mochiweb request object and the target
-%% path.
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-%% @doc reload vhosts rules
-reload() ->
-    gen_server:call(?MODULE, reload).
-
-get_state() ->
-    gen_server:call(?MODULE, get_state).
-
-%% @doc Try to find a rule matching the current Host header. If a rule
-%% is found, it rewrites the MochiWeb request; otherwise it returns the
-%% current request.
-dispatch_host(MochiReq) ->
-    #vhosts_state{
-        vhost_globals = VHostGlobals,
-        vhosts = VHosts,
-        vhosts_fun=Fun} = get_state(),
-
-    {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
-    VPathParts =  string:tokens(VPath, "/"),
-
-    VHost = host(MochiReq),
-    {VHostParts, VhostPort} = split_host_port(VHost),
-    FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
-            VhostPort, VPathParts) of
-        no_vhost_matched -> MochiReq;
-        {VhostTarget, NewPath} ->
-            case vhost_global(VHostGlobals, MochiReq) of
-                true ->
-                    MochiReq;
-                _Else ->
-                    NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
-                                          Fragment}),
-                    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      NewPath1,
-                                      MochiReq:get(version),
-                                      MochiReq:get(headers)),
-                    Fun(MochiReq1, VhostTarget)
-            end
-    end,
-    FinalMochiReq.
-
-append_path("/"=_Target, "/"=_Path) ->
-    "/";
-append_path(Target, Path) ->
-    Target ++ Path.
-
-% default redirect vhost handler
-redirect_to_vhost(MochiReq, VhostTarget) ->
-    Path = MochiReq:get(raw_path),
-    Target = append_path(VhostTarget, Path),
-
-    ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]),
-
-    Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
-        MochiReq:get(headers)),
-
-    % build a new mochiweb request
-    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
-                                      MochiReq:get(method),
-                                      Target,
-                                      MochiReq:get(version),
-                                      Headers),
-    % cleanup; this forces mochiweb to reparse the raw URI.
-    MochiReq1:cleanup(),
-    MochiReq1.
-
-%% Check whether the request path is in the vhost globals list; if so, it
-%% will not be rewritten, but will run as a normal couchdb request.
-%% Normally you'd use this for _uuids, _utils and a few of the others you
-%% want to keep available on vhosts. You can also use it to make databases
-%% 'global'.
-vhost_global( VhostGlobals, MochiReq) ->
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Front = case couch_httpd:partition(Path) of
-    {"", "", ""} ->
-        "/"; % Special case the root url handler
-    {FirstPart, _, _} ->
-        FirstPart
-    end,
-    [true] == [true||V <- VhostGlobals, V == Front].
-
-%% bind host
-%% first it tries to bind the port, then the hostname.
-try_bind_vhost([], _HostParts, _Port, _PathParts) ->
-    no_vhost_matched;
-try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
-    {{VHostParts, VPort, VPath}, Path} = VhostSpec,
-    case bind_port(VPort, Port) of
-        ok ->
-            case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
-                {ok, Bindings, Remainings} ->
-                    case bind_path(VPath, PathParts) of
-                        {ok, PathParts1} ->
-                            Path1 = make_target(Path, Bindings, Remainings, []),
-                            {make_path(Path1), make_path(PathParts1)};
-                        fail ->
-                            try_bind_vhost(Rest, HostParts, Port,
-                                PathParts)
-                    end;
-                fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
-            end;
-        fail ->  try_bind_vhost(Rest, HostParts, Port, PathParts)
-    end.
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-%% TODO: merge code with rewrite. But we need to make sure we are
-%% working with strings here.
-make_target([], _Bindings, _Remaining, Acc) ->
-    lists:reverse(Acc);
-make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
-    Acc1 = lists:reverse(Acc) ++ Remaining,
-    Acc1;
-make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
-    P2 = case couch_util:get_value({bind, P}, Bindings) of
-        undefined ->  "undefined";
-        P1 -> P1
-    end,
-    make_target(Rest, Bindings, Remaining, [P2|Acc]);
-make_target([P|Rest], Bindings, Remaining, Acc) ->
-    make_target(Rest, Bindings, Remaining, [P|Acc]).
-
-%% bind port
-bind_port(Port, Port) -> ok;
-bind_port('*', _) -> ok;
-bind_port(_,_) -> fail.
-
-%% bind vhost
-bind_vhost([],[], Bindings) -> {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) -> fail;
-bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
-bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
-    bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) -> fail.
-
-%% bind path
-bind_path([], PathParts) ->
-    {ok, PathParts};
-bind_path(_VPathParts, []) ->
-    fail;
-bind_path([Path|VRest],[Path|Rest]) ->
-   bind_path(VRest, Rest);
-bind_path(_, _) ->
-    fail.
-
-% utilities
-
-
-%% create vhost list from ini
-
-host(MochiReq) ->
-    XHost = couch_config:get("httpd", "x_forwarded_host",
-                             "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined -> [];
-                Value1 -> Value1
-            end;
-        Value -> Value
-    end.
-
-make_vhosts() ->
-    Vhosts = lists:foldl(fun
-                ({_, ""}, Acc) ->
-                    Acc;
-                ({Vhost, Path}, Acc) ->
-                    [{parse_vhost(Vhost), split_path(Path)}|Acc]
-            end, [], couch_config:get("vhosts")),
-
-    lists:reverse(lists:usort(Vhosts)).
-
-
-parse_vhost(Vhost) ->
-    case urlsplit_netloc(Vhost, []) of
-        {[], Path} ->
-            {make_spec("*", []), '*', Path};
-        {HostPort, []} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, []};
-        {HostPort, Path} ->
-            {H, P} = split_host_port(HostPort),
-            H1 = make_spec(H, []),
-            {H1, P, string:tokens(Path, "/")}
-    end.
-
-
-split_host_port(HostAsString) ->
-    case string:rchr(HostAsString, $:) of
-        0 ->
-            {split_host(HostAsString), '*'};
-        N ->
-            HostPart = string:substr(HostAsString, 1, N-1),
-            case (catch erlang:list_to_integer(string:substr(HostAsString,
-                            N+1, length(HostAsString)))) of
-                {'EXIT', _} ->
-                    {split_host(HostAsString), '*'};
-                Port ->
-                    {split_host(HostPart), Port}
-            end
-    end.
-
-split_host(HostAsString) ->
-    string:tokens(HostAsString, "\.").
-
-split_path(Path) ->
-    make_spec(string:tokens(Path, "/"), []).
-
-
-make_spec([], Acc) ->
-    lists:reverse(Acc);
-make_spec([""|R], Acc) ->
-    make_spec(R, Acc);
-make_spec(["*"|R], Acc) ->
-    make_spec(R, [?MATCH_ALL|Acc]);
-make_spec([P|R], Acc) ->
-    P1 = parse_var(P),
-    make_spec(R, [P1|Acc]).
-
-
-parse_var(P) ->
-    case P of
-        ":" ++ Var ->
-            {bind, Var};
-        _ -> P
-    end.
-
-
-% mochiweb doesn't export it.
-urlsplit_netloc("", Acc) ->
-    {lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
-    {lists:reverse(Acc), Rest};
-urlsplit_netloc([C | Rest], Acc) ->
-    urlsplit_netloc(Rest, [C | Acc]).
-
-make_path(Parts) ->
-     "/" ++ string:join(Parts,[?SEPARATOR]).
-
-init(_) ->
-    ok = couch_config:register(fun ?MODULE:config_change/2),
-
-    %% load configuration
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    State = #vhosts_state{
-        vhost_globals=VHostGlobals,
-        vhosts=VHosts,
-        vhosts_fun=Fun},
-    {ok, State}.
-
-handle_call(reload, _From, _State) ->
-    {VHostGlobals, VHosts, Fun} = load_conf(),
-    {reply, ok, #vhosts_state{
-            vhost_globals=VHostGlobals,
-            vhosts=VHosts,
-            vhosts_fun=Fun}};
-handle_call(get_state, _From, State) ->
-    {reply, State, State};
-handle_call(_Msg, _From, State) ->
-    {noreply, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-config_change("httpd", "vhost_global_handlers") ->
-    ?MODULE:reload();
-config_change("httpd", "redirect_vhost_handler") ->
-    ?MODULE:reload();
-config_change("vhosts", _) ->
-    ?MODULE:reload().
-
-load_conf() ->
-    %% get vhost globals
-    VHostGlobals = re:split(couch_config:get("httpd",
-            "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
-
-    %% build vhosts matching rules
-    VHosts = make_vhosts(),
-
-    %% build vhosts handler fun
-    DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
-    Fun = couch_httpd:make_arity_2_fun(couch_config:get("httpd",
-            "redirect_vhost_handler", DefaultVHostFun)),
-
-    {VHostGlobals, VHosts, Fun}.

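The vhost matching above compares host labels right to left, binding ":name"
labels and letting "*" swallow the rest. Here is a minimal sketch of that
idea; the module name (demo_vhost) is hypothetical, and it simplifies
bind_vhost/3 by ignoring ports and target paths:

    -module(demo_vhost).
    -export([match/2]).

    %% Match a vhost spec such as ":dbname.example.com" against a Host
    %% header, comparing labels right to left and binding ":name" labels.
    match(Spec, Host) ->
        bind(lists:reverse(string:tokens(Spec, ".")),
             lists:reverse(string:tokens(Host, ".")),
             []).

    bind([], [], Bindings) ->
        {ok, Bindings};
    bind(["*" | _], _Rest, Bindings) ->
        {ok, Bindings};
    bind([":" ++ Name | SpecRest], [Label | HostRest], Bindings) ->
        bind(SpecRest, HostRest, [{Name, Label} | Bindings]);
    bind([Label | SpecRest], [Label | HostRest], Bindings) ->
        bind(SpecRest, HostRest, Bindings);
    bind(_, _, _) ->
        fail.

For example, demo_vhost:match(":dbname.example.com", "mydb.example.com")
returns {ok, [{"dbname", "mydb"}]}.
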
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/src/couch_js_functions.hrl b/src/couch_js_functions.hrl
deleted file mode 100644
index a48feae..0000000
--- a/src/couch_js_functions.hrl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
-    function(newDoc, oldDoc, userCtx, secObj) {
-        if (newDoc._deleted === true) {
-            // allow deletes by admins and matching users
-            // without checking the other fields
-            if ((userCtx.roles.indexOf('_admin') !== -1) ||
-                (userCtx.name == oldDoc.name)) {
-                return;
-            } else {
-                throw({forbidden: 'Only admins may delete other user docs.'});
-            }
-        }
-
-        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
-            throw({forbidden : 'doc.type must be user'});
-        } // we only allow user docs for now
-
-        if (!newDoc.name) {
-            throw({forbidden: 'doc.name is required'});
-        }
-
-        if (!newDoc.roles) {
-            throw({forbidden: 'doc.roles must exist'});
-        }
-
-        if (!isArray(newDoc.roles)) {
-            throw({forbidden: 'doc.roles must be an array'});
-        }
-
-        for (var idx = 0; idx < newDoc.roles.length; idx++) {
-            if (typeof newDoc.roles[idx] !== 'string') {
-                throw({forbidden: 'doc.roles can only contain strings'});
-            }
-        }
-
-        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
-            throw({
-                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
-            });
-        }
-
-        if (oldDoc) { // validate all updates
-            if (oldDoc.name !== newDoc.name) {
-                throw({forbidden: 'Usernames can not be changed.'});
-            }
-        }
-
-        if (newDoc.password_sha && !newDoc.salt) {
-            throw({
-                forbidden: 'Users with password_sha must have a salt.' +
-                    'See /_utils/script/couch.js for example code.'
-            });
-        }
-
-        if (newDoc.password_scheme === \"pbkdf2\") {
-            if (typeof(newDoc.iterations) !== \"number\") {
-               throw({forbidden: \"iterations must be a number.\"});
-            }
-            if (typeof(newDoc.derived_key) !== \"string\") {
-               throw({forbidden: \"derived_key must be a string.\"});
-            }
-        }
-
-        var is_server_or_database_admin = function(userCtx, secObj) {
-            // see if the user is a server admin
-            if(userCtx.roles.indexOf('_admin') !== -1) {
-                return true; // a server admin
-            }
-
-            // see if the user a database admin specified by name
-            if(secObj && secObj.admins && secObj.admins.names) {
-                if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
-                    return true; // database admin
-                }
-            }
-
-            // see if the user a database admin specified by role
-            if(secObj && secObj.admins && secObj.admins.roles) {
-                var db_roles = secObj.admins.roles;
-                for(var idx = 0; idx < userCtx.roles.length; idx++) {
-                    var user_role = userCtx.roles[idx];
-                    if(db_roles.indexOf(user_role) !== -1) {
-                        return true; // role matches!
-                    }
-                }
-            }
-
-            return false; // default to no admin
-        }
-
-        if (!is_server_or_database_admin(userCtx, secObj)) {
-            if (oldDoc) { // validate non-admin updates
-                if (userCtx.name !== newDoc.name) {
-                    throw({
-                        forbidden: 'You may only update your own user document.'
-                    });
-                }
-                // validate role updates
-                var oldRoles = oldDoc.roles.sort();
-                var newRoles = newDoc.roles.sort();
-
-                if (oldRoles.length !== newRoles.length) {
-                    throw({forbidden: 'Only _admin may edit roles'});
-                }
-
-                for (var i = 0; i < oldRoles.length; i++) {
-                    if (oldRoles[i] !== newRoles[i]) {
-                        throw({forbidden: 'Only _admin may edit roles'});
-                    }
-                }
-            } else if (newDoc.roles.length > 0) {
-                throw({forbidden: 'Only _admin may set roles'});
-            }
-        }
-
-        // no system roles in users db
-        for (var i = 0; i < newDoc.roles.length; i++) {
-            if (newDoc.roles[i][0] === '_') {
-                throw({
-                    forbidden:
-                    'No system roles (starting with underscore) in users db.'
-                });
-            }
-        }
-
-        // no system names as names
-        if (newDoc.name[0] === '_') {
-            throw({forbidden: 'Username may not start with underscore.'});
-        }
-
-        var badUserNameChars = [':'];
-
-        for (var i = 0; i < badUserNameChars.length; i++) {
-            if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
-                throw({forbidden: 'Character `' + badUserNameChars[i] +
-                        '` is not allowed in usernames.'});
-            }
-        }
-    }
-">>).
-
-
--define(OAUTH_MAP_FUN, <<"
-    function(doc) {
-        if (doc.type === 'user' && doc.oauth && doc.oauth.consumer_keys) {
-            for (var consumer_key in doc.oauth.consumer_keys) {
-                for (var token in doc.oauth.tokens) {
-                    var obj = {
-                        'consumer_secret': doc.oauth.consumer_keys[consumer_key],
-                        'token_secret': doc.oauth.tokens[token],
-                        'username': doc.name
-                    };
-                    emit([consumer_key, token], obj);
-                }
-            }
-        }
-    }
-">>).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_server_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_server_sup.erl b/src/couch_server_sup.erl
index 379b384..7f37677 100644
--- a/src/couch_server_sup.erl
+++ b/src/couch_server_sup.erl
@@ -104,32 +104,6 @@ start_server(IniFiles) ->
 
     unlink(ConfigPid),
 
-    Ip = couch_config:get("httpd", "bind_address"),
-    io:format("Apache CouchDB has started. Time to relax.~n"),
-    Uris = [get_uri(Name, Ip) || Name <- [couch_httpd, https]],
-    [begin
-        case Uri of
-            undefined -> ok;
-            Uri -> ?LOG_INFO("Apache CouchDB has started on ~s", [Uri])
-        end
-    end
-    || Uri <- Uris],
-    case couch_config:get("couchdb", "uri_file", null) of
-    null -> ok;
-    UriFile ->
-        Lines = [begin case Uri of
-            undefined -> [];
-            Uri -> io_lib:format("~s~n", [Uri])
-            end end || Uri <- Uris],
-        case file:write_file(UriFile, Lines) of
-        ok -> ok;
-        {error, Reason2} = Error ->
-            ?LOG_ERROR("Failed to write to URI file ~s: ~s",
-                [UriFile, file:format_error(Reason2)]),
-            throw(Error)
-        end
-    end,
-
     {ok, Pid}.
 
 stop() ->
@@ -137,28 +111,7 @@ stop() ->
 
 config_change("daemons", _) ->
     supervisor:terminate_child(couch_server_sup, couch_secondary_services),
-    supervisor:restart_child(couch_server_sup, couch_secondary_services);
-config_change("couchdb", "util_driver_dir") ->
-    init:restart().
+    supervisor:restart_child(couch_server_sup, couch_secondary_services).
 
 init(ChildSpecs) ->
     {ok, ChildSpecs}.
-
-get_uri(Name, Ip) ->
-    case get_port(Name) of
-        undefined ->
-            undefined;
-        Port ->
-            io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
-    end.
-
-get_scheme(couch_httpd) -> "http";
-get_scheme(https) -> "https".
-
-get_port(Name) ->
-    try
-        mochiweb_socket_server:get(Name, port)
-    catch
-        exit:{noproc, _}->
-            undefined
-    end.


[35/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
make couch_httpd a full couch application

With this change, the HTTP API is now handled by its own Erlang
application and supervision tree. It also improves the way you can
reload the configuration and the modules: upgrading a listener or its
configuration now first removes it from the supervision tree, then
starts a new process with the new configuration. This behaviour is
similar to the one you have in nginx.
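
A minimal sketch of that remove-then-restart pattern with a plain OTP
supervisor; the function name, supervisor and child spec here are
assumptions for illustration, not code from this commit:

    %% Replace a running listener child with one built from new config.
    reload_listener(Sup, Id, NewChildSpec) ->
        ok = supervisor:terminate_child(Sup, Id),   % stop the old listener
        ok = supervisor:delete_child(Sup, Id),      % drop its old child spec
        {ok, _Pid} = supervisor:start_child(Sup, NewChildSpec),
        ok.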


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/a278e0db
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/a278e0db
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/a278e0db

Branch: refs/heads/import-rcouch
Commit: a278e0db5c761878d54f5e1433dfc3aa9ee62d72
Parents: fd93bf9
Author: benoitc <be...@apache.org>
Authored: Sat Jan 11 11:20:27 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 include/couch_js_functions.hrl     |  170 +++++
 src/couch.app.src.script           |    5 +-
 src/couch_app.erl                  |    2 -
 src/couch_httpd.erl                | 1114 ----------------------------
 src/couch_httpd_auth.erl           |  380 ----------
 src/couch_httpd_cors.erl           |  351 ---------
 src/couch_httpd_db.erl             | 1226 -------------------------------
 src/couch_httpd_external.erl       |  177 -----
 src/couch_httpd_misc_handlers.erl  |  318 --------
 src/couch_httpd_oauth.erl          |  387 ----------
 src/couch_httpd_proxy.erl          |  426 -----------
 src/couch_httpd_rewrite.erl        |  484 ------------
 src/couch_httpd_stats_handlers.erl |   56 --
 src/couch_httpd_vhost.erl          |  383 ----------
 src/couch_js_functions.hrl         |  170 -----
 src/couch_server_sup.erl           |   49 +-
 16 files changed, 173 insertions(+), 5525 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/include/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/include/couch_js_functions.hrl b/include/couch_js_functions.hrl
new file mode 100644
index 0000000..a48feae
--- /dev/null
+++ b/include/couch_js_functions.hrl
@@ -0,0 +1,170 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
+    function(newDoc, oldDoc, userCtx, secObj) {
+        if (newDoc._deleted === true) {
+            // allow deletes by admins and matching users
+            // without checking the other fields
+            if ((userCtx.roles.indexOf('_admin') !== -1) ||
+                (userCtx.name == oldDoc.name)) {
+                return;
+            } else {
+                throw({forbidden: 'Only admins may delete other user docs.'});
+            }
+        }
+
+        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
+            throw({forbidden : 'doc.type must be user'});
+        } // we only allow user docs for now
+
+        if (!newDoc.name) {
+            throw({forbidden: 'doc.name is required'});
+        }
+
+        if (!newDoc.roles) {
+            throw({forbidden: 'doc.roles must exist'});
+        }
+
+        if (!isArray(newDoc.roles)) {
+            throw({forbidden: 'doc.roles must be an array'});
+        }
+
+        for (var idx = 0; idx < newDoc.roles.length; idx++) {
+            if (typeof newDoc.roles[idx] !== 'string') {
+                throw({forbidden: 'doc.roles can only contain strings'});
+            }
+        }
+
+        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
+            throw({
+                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
+            });
+        }
+
+        if (oldDoc) { // validate all updates
+            if (oldDoc.name !== newDoc.name) {
+                throw({forbidden: 'Usernames can not be changed.'});
+            }
+        }
+
+        if (newDoc.password_sha && !newDoc.salt) {
+            throw({
+                forbidden: 'Users with password_sha must have a salt.' +
+                    'See /_utils/script/couch.js for example code.'
+            });
+        }
+
+        if (newDoc.password_scheme === \"pbkdf2\") {
+            if (typeof(newDoc.iterations) !== \"number\") {
+               throw({forbidden: \"iterations must be a number.\"});
+            }
+            if (typeof(newDoc.derived_key) !== \"string\") {
+               throw({forbidden: \"derived_key must be a string.\"});
+            }
+        }
+
+        var is_server_or_database_admin = function(userCtx, secObj) {
+            // see if the user is a server admin
+            if(userCtx.roles.indexOf('_admin') !== -1) {
+                return true; // a server admin
+            }
+
+            // see if the user a database admin specified by name
+            if(secObj && secObj.admins && secObj.admins.names) {
+                if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
+                    return true; // database admin
+                }
+            }
+
+            // see if the user a database admin specified by role
+            if(secObj && secObj.admins && secObj.admins.roles) {
+                var db_roles = secObj.admins.roles;
+                for(var idx = 0; idx < userCtx.roles.length; idx++) {
+                    var user_role = userCtx.roles[idx];
+                    if(db_roles.indexOf(user_role) !== -1) {
+                        return true; // role matches!
+                    }
+                }
+            }
+
+            return false; // default to no admin
+        }
+
+        if (!is_server_or_database_admin(userCtx, secObj)) {
+            if (oldDoc) { // validate non-admin updates
+                if (userCtx.name !== newDoc.name) {
+                    throw({
+                        forbidden: 'You may only update your own user document.'
+                    });
+                }
+                // validate role updates
+                var oldRoles = oldDoc.roles.sort();
+                var newRoles = newDoc.roles.sort();
+
+                if (oldRoles.length !== newRoles.length) {
+                    throw({forbidden: 'Only _admin may edit roles'});
+                }
+
+                for (var i = 0; i < oldRoles.length; i++) {
+                    if (oldRoles[i] !== newRoles[i]) {
+                        throw({forbidden: 'Only _admin may edit roles'});
+                    }
+                }
+            } else if (newDoc.roles.length > 0) {
+                throw({forbidden: 'Only _admin may set roles'});
+            }
+        }
+
+        // no system roles in users db
+        for (var i = 0; i < newDoc.roles.length; i++) {
+            if (newDoc.roles[i][0] === '_') {
+                throw({
+                    forbidden:
+                    'No system roles (starting with underscore) in users db.'
+                });
+            }
+        }
+
+        // no system names as names
+        if (newDoc.name[0] === '_') {
+            throw({forbidden: 'Username may not start with underscore.'});
+        }
+
+        var badUserNameChars = [':'];
+
+        for (var i = 0; i < badUserNameChars.length; i++) {
+            if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
+                throw({forbidden: 'Character `' + badUserNameChars[i] +
+                        '` is not allowed in usernames.'});
+            }
+        }
+    }
+">>).
+
+
+-define(OAUTH_MAP_FUN, <<"
+    function(doc) {
+        if (doc.type === 'user' && doc.oauth && doc.oauth.consumer_keys) {
+            for (var consumer_key in doc.oauth.consumer_keys) {
+                for (var token in doc.oauth.tokens) {
+                    var obj = {
+                        'consumer_secret': doc.oauth.consumer_keys[consumer_key],
+                        'token_secret': doc.oauth.tokens[token],
+                        'username': doc.name
+                    };
+                    emit([consumer_key, token], obj);
+                }
+            }
+        }
+    }
+">>).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index c406e02..d8962fa 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -49,7 +49,6 @@ end,
             couch_db_update_notifier_sup,
             couch_external_manager,
             couch_index_sup,
-            couch_httpd,
             couch_log,
             couch_primary_services,
             couch_query_servers,
@@ -62,7 +61,7 @@ end,
         ]},
         {mod, {couch_app, []}},
         {env, [{couch_rel, RelVsn}]},
-        {applications, [kernel, stdlib, crypto, sasl, asn1, public_key, ssl,
-                        inets, ibrowse, os_mon]}
+        {applications, [kernel, stdlib, crypto, sasl, asn1, public_key,
+                        ssl, os_mon, inets]}
     ]}
 ].

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_app.erl b/src/couch_app.erl
index 2e1e5bd..414a5c9 100644
--- a/src/couch_app.erl
+++ b/src/couch_app.erl
@@ -33,6 +33,4 @@ get_ini_files() ->
     Defaults = lists:map(fun(FName) ->
                     filename:join(DefaultConfDir, FName)
             end, ?CONF_FILES),
-    io:format("default files ~p~n", [couch:get_app_env(config_files,
-                                                       Defaults)]),
     couch:get_app_env(config_files, Defaults).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd.erl b/src/couch_httpd.erl
deleted file mode 100644
index 28932ba..0000000
--- a/src/couch_httpd.erl
+++ /dev/null
@@ -1,1114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd).
--include("couch_db.hrl").
-
--export([start_link/0, start_link/1, stop/0, config_change/2,
-        handle_request/5]).
-
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
--export([path/1,absolute_uri/2,body_length/1]).
--export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
--export([make_fun_spec_strs/1]).
--export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
--export([doc_etag/1, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
--export([start_chunked_response/3,send_chunk/2,log_request/2]).
--export([start_response_length/4, start_response/3, send/2]).
--export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]).
--export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
--export([http_1_0_keep_alive/2]).
-
-start_link() ->
-    start_link(http).
-start_link(http) ->
-    Port = couch_config:get("httpd", "port", "5984"),
-    start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
-    Port = couch_config:get("ssl", "port", "6984"),
-    CertFile = couch_config:get("ssl", "cert_file", nil),
-    KeyFile = couch_config:get("ssl", "key_file", nil),
-    Options = case CertFile /= nil andalso KeyFile /= nil of
-        true ->
-            SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
-
-            %% set password if one is needed for the cert
-            SslOpts1 = case couch_config:get("ssl", "password", nil) of
-                nil -> SslOpts;
-                Password ->
-                    SslOpts ++ [{password, Password}]
-            end,
-            % do we verify certificates?
-            FinalSslOpts = case couch_config:get("ssl",
-                    "verify_ssl_certificates", "false") of
-                "false" -> SslOpts1;
-                "true" ->
-                    case couch_config:get("ssl",
-                            "cacert_file", nil) of
-                        nil ->
-                            io:format("Verify SSL certificate "
-                                ++"enabled but file containing "
-                                ++"PEM encoded CA certificates is "
-                                ++"missing", []),
-                            throw({error, missing_cacerts});
-                        CaCertFile ->
-                            Depth = list_to_integer(couch_config:get("ssl",
-                                    "ssl_certificate_max_depth",
-                                    "1")),
-                            FinalOpts = [
-                                {cacertfile, CaCertFile},
-                                {depth, Depth},
-                                {verify, verify_peer}],
-                            % allows custom verify fun.
-                            case couch_config:get("ssl",
-                                    "verify_fun", nil) of
-                                nil -> FinalOpts;
-                                SpecStr ->
-                                    FinalOpts
-                                    ++ [{verify_fun, make_arity_3_fun(SpecStr)}]
-                            end
-                    end
-            end,
-
-            [{port, Port},
-                {ssl, true},
-                {ssl_opts, FinalSslOpts}];
-        false ->
-            io:format("SSL enabled but PEM certificates are missing.", []),
-            throw({error, missing_certs})
-    end,
-    start_link(https, Options).
-start_link(Name, Options) ->
-    % read config and register for configuration changes
-
-    % just stop if one of the config settings changes. couch_server_sup
-    % will restart us and then we will pick up the new settings.
-
-    BindAddress = couch_config:get("httpd", "bind_address", any),
-    validate_bind_address(BindAddress),
-    DefaultSpec = "{couch_httpd_db, handle_request}",
-    DefaultFun = make_arity_1_fun(
-        couch_config:get("httpd", "default_handler", DefaultSpec)
-    ),
-
-    UrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
-        end, couch_config:get("httpd_global_handlers")),
-
-    DbUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
-        end, couch_config:get("httpd_db_handlers")),
-
-    DesignUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
-        end, couch_config:get("httpd_design_handlers")),
-
-    UrlHandlers = dict:from_list(UrlHandlersList),
-    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
-    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
-    {ok, ServerOptions} = couch_util:parse_term(
-        couch_config:get("httpd", "server_options", "[]")),
-    {ok, SocketOptions} = couch_util:parse_term(
-        couch_config:get("httpd", "socket_options", "[]")),
-
-    set_auth_handlers(),
-
-    % ensure uuid is set so that concurrent replications
-    % get the same value.
-    couch_server:get_uuid(),
-
-    Loop = fun(Req)->
-        case SocketOptions of
-        [] ->
-            ok;
-        _ ->
-            ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
-        end,
-        apply(?MODULE, handle_request, [
-            Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
-        ])
-    end,
-
-    % set mochiweb options
-    FinalOptions = lists:append([Options, ServerOptions, [
-            {loop, Loop},
-            {name, Name},
-            {ip, BindAddress}]]),
-
-    % launch mochiweb
-    {ok, Pid} = case mochiweb_http:start(FinalOptions) of
-        {ok, MochiPid} ->
-            {ok, MochiPid};
-        {error, Reason} ->
-            io:format("Failure to start Mochiweb: ~s~n",[Reason]),
-            throw({error, Reason})
-    end,
-
-    ok = couch_config:register(fun ?MODULE:config_change/2, Pid),
-    {ok, Pid}.
-
-
-stop() ->
-    mochiweb_http:stop(couch_httpd),
-    mochiweb_http:stop(https).
-
-config_change("httpd", "bind_address") ->
-    ?MODULE:stop();
-config_change("httpd", "port") ->
-    ?MODULE:stop();
-config_change("httpd", "default_handler") ->
-    ?MODULE:stop();
-config_change("httpd", "server_options") ->
-    ?MODULE:stop();
-config_change("httpd", "socket_options") ->
-    ?MODULE:stop();
-config_change("httpd", "authentication_handlers") ->
-    set_auth_handlers();
-config_change("httpd_global_handlers", _) ->
-    ?MODULE:stop();
-config_change("httpd_db_handlers", _) ->
-    ?MODULE:stop();
-config_change("ssl", _) ->
-    ?MODULE:stop().
-
-set_auth_handlers() ->
-    AuthenticationSrcs = make_fun_spec_strs(
-        couch_config:get("httpd", "authentication_handlers", "")),
-    AuthHandlers = lists:map(
-        fun(A) -> {make_arity_1_fun(A), ?l2b(A)} end, AuthenticationSrcs),
-    ok = application:set_env(couch, auth_handlers, AuthHandlers).
-
-% SpecStr is a string like "{my_module, my_fun}"
-%  or "{my_module, my_fun, <<"my_arg">>}"
-make_arity_1_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg) -> Mod:Fun(Arg) end
-    end.
-
-make_arity_2_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
-    end.
-
-make_arity_3_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
-    end.
-
-% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
-make_fun_spec_strs(SpecStr) ->
-    re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
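-
-% A brief illustration (editor's sketch, not part of the original module):
-% assuming couch_util:parse_term/1 returns {ok, Term} for a well-formed spec
-% string, the helpers above behave roughly like this in an Erlang shell:
-%
-%   1> F = couch_httpd:make_arity_1_fun("{lists, reverse}").
-%   2> F([1, 2, 3]).
-%   [3,2,1]
-%   3> couch_httpd:make_fun_spec_strs("{mod_a, fun_a}, {mod_b, fun_b}").
-%   ["{mod_a, fun_a}","{mod_b, fun_b}"]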
-
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
-    DesignUrlHandlers) ->
-    %% reset rewrite count for new request
-    erlang:put(?REWRITE_COUNT, 0),
-
-    MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
-    handle_request_int(MochiReq1, DefaultFun,
-                UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
-
-handle_request_int(MochiReq, DefaultFun,
-            UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
-    Begin = now(),
-    % for the path, use the raw path with the query string and fragment
-    % removed, but URL quoting left intact
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Headers = MochiReq:get(headers),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-requested-path") of
-                undefined -> RawUri;
-                R -> R
-            end;
-        P -> P
-    end,
-
-    HandlerKey =
-    case mochiweb_util:partition(Path, "/") of
-    {"", "", ""} ->
-        <<"/">>; % Special case the root url handler
-    {FirstPart, _, _} ->
-        list_to_binary(FirstPart)
-    end,
-    ?LOG_DEBUG("~p ~s ~p from ~p~nHeaders: ~p", [
-        MochiReq:get(method),
-        RawUri,
-        MochiReq:get(version),
-        MochiReq:get(peer),
-        mochiweb_headers:to_list(MochiReq:get(headers))
-    ]),
-
-    Method1 =
-    case MochiReq:get(method) of
-        % already an atom
-        Meth when is_atom(Meth) -> Meth;
-
-        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.), so convert
-        % when possible (if any module references the atom, it already exists).
-        Meth -> couch_util:to_existing_atom(Meth)
-    end,
-    increment_method_stats(Method1),
-
-    % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
-    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
-    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
-                                                 "PUT", "DELETE",
-                                                 "TRACE", "CONNECT",
-                                                 "COPY"]) of
-    true ->
-        ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
-        case Method1 of
-        'POST' -> couch_util:to_existing_atom(MethodOverride);
-        _ ->
-            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
-            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
-            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
-            Method1
-        end;
-    _ -> Method1
-    end,
-
-    % alias HEAD to GET as mochiweb takes care of stripping the body
-    Method = case Method2 of
-        'HEAD' -> 'GET';
-        Other -> Other
-    end,
-
-    HttpReq = #httpd{
-        mochi_req = MochiReq,
-        peer = MochiReq:get(peer),
-        method = Method,
-        requested_path_parts =
-            [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
-        path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
-        db_url_handlers = DbUrlHandlers,
-        design_url_handlers = DesignUrlHandlers,
-        default_fun = DefaultFun,
-        url_handlers = UrlHandlers,
-        user_ctx = erlang:erase(pre_rewrite_user_ctx),
-        auth = erlang:erase(pre_rewrite_auth)
-    },
-
-    HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-    {ok, AuthHandlers} = application:get_env(couch, auth_handlers),
-
-    {ok, Resp} =
-    try
-        case couch_httpd_cors:is_preflight_request(HttpReq) of
-        #httpd{} ->
-            case authenticate_request(HttpReq, AuthHandlers) of
-            #httpd{} = Req ->
-                HandlerFun(Req);
-            Response ->
-                Response
-            end;
-        Response ->
-            Response
-        end
-    catch
-        throw:{http_head_abort, Resp0} ->
-            {ok, Resp0};
-        throw:{invalid_json, S} ->
-            ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []),
-            ?LOG_DEBUG("Invalid JSON: ~p",[S]),
-            send_error(HttpReq, {bad_request, invalid_json});
-        throw:unacceptable_encoding ->
-            ?LOG_ERROR("unsupported encoding method for the response", []),
-            send_error(HttpReq, {not_acceptable, "unsupported encoding"});
-        throw:bad_accept_encoding_value ->
-            ?LOG_ERROR("received invalid Accept-Encoding header", []),
-            send_error(HttpReq, bad_request);
-        exit:normal ->
-            exit(normal);
-        exit:snappy_nif_not_loaded ->
-            ErrorReason = "To access the database or view index, Apache CouchDB"
-                " must be built with Erlang OTP R13B04 or higher.",
-            ?LOG_ERROR("~s", [ErrorReason]),
-            send_error(HttpReq, {bad_otp_release, ErrorReason});
-        exit:{body_too_large, _} ->
-            send_error(HttpReq, request_entity_too_large);
-        throw:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]),
-            ?LOG_DEBUG("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error);
-        error:badarg ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Badarg error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, badarg);
-        error:function_clause ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("function_clause error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, function_clause);
-        Tag:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error)
-    end,
-    RequestTime = round(timer:now_diff(now(), Begin)/1000),
-    couch_stats_collector:record({couchdb, request_time}, RequestTime),
-    couch_stats_collector:increment({httpd, requests}),
-    {ok, Resp}.
-
-% Try the authentication handlers in order until one sets a user_ctx.
-% The auth funs also have the option of returning a response.
-% Move this to couch_httpd_auth?
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthHandlers) ->
-    Req;
-authenticate_request(#httpd{} = Req, []) ->
-    case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-    "true" ->
-        throw({unauthorized, <<"Authentication required.">>});
-    "false" ->
-        Req#httpd{user_ctx=#user_ctx{}}
-    end;
-authenticate_request(#httpd{} = Req, [{AuthFun, AuthSrc} | RestAuthHandlers]) ->
-    R = case AuthFun(Req) of
-        #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 ->
-            Req2#httpd{user_ctx=UserCtx#user_ctx{handler=AuthSrc}};
-        Else -> Else
-    end,
-    authenticate_request(R, RestAuthHandlers);
-authenticate_request(Response, _AuthSrcs) ->
-    Response.
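-
-% For reference, a minimal ini sketch (hypothetical values; the key is the
-% one read by set_auth_handlers/0, and the spec strings are parsed by
-% make_fun_spec_strs/1):
-%
-%   [httpd]
-%   authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}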
-
-increment_method_stats(Method) ->
-    couch_stats_collector:increment({httpd_request_methods, Method}).
-
-validate_referer(Req) ->
-    Host = host_for_request(Req),
-    Referer = header_value(Req, "Referer", fail),
-    case Referer of
-    fail ->
-        throw({bad_request, <<"Referer header required.">>});
-    Referer ->
-        {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
-        if
-            RefererHost =:= Host -> ok;
-            true -> throw({bad_request, <<"Referer header must match host.">>})
-        end
-    end.
-
-validate_ctype(Req, Ctype) ->
-    case header_value(Req, "Content-Type") of
-    undefined ->
-        throw({bad_ctype, "Content-Type must be "++Ctype});
-    ReqCtype ->
-        case string:tokens(ReqCtype, ";") of
-        [Ctype] -> ok;
-        [Ctype, _Rest] -> ok;
-        _Else ->
-            throw({bad_ctype, "Content-Type must be "++Ctype})
-        end
-    end.
-
-% Utilities
-
-partition(Path) ->
-    mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
-    case MochiReq:get_header_value(Key) of
-    undefined -> Default;
-    Value -> Value
-    end.
-
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_primary_header_value(Key).
-
-accepted_encodings(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:accepted_encodings(["gzip", "identity"]) of
-    bad_accept_encoding_value ->
-        throw(bad_accept_encoding_value);
-    [] ->
-        throw(unacceptable_encoding);
-    EncList ->
-        EncList
-    end.
-
-serve_file(Req, RelativePath, DocumentRoot) ->
-    serve_file(Req, RelativePath, DocumentRoot, []).
-
-serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot,
-           ExtraHeaders) ->
-    log_request(Req, 200),
-    ResponseHeaders = server_header()
-        ++ couch_httpd_auth:cookie_auth_header(Req, [])
-        ++ ExtraHeaders,
-    {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
-            couch_httpd_cors:cors_headers(Req, ResponseHeaders))}.
-
-qs_value(Req, Key) ->
-    qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
-    couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
-    case qs_value(Req, Key, Default) of
-    Default ->
-        Default;
-    Result ->
-        ?JSON_DECODE(Result)
-    end.
-
-qs(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:parse_qs().
-
-path(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(path).
-
-host_for_request(#httpd{mochi_req=MochiReq}) ->
-    XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined ->
-                    {ok, {Address, Port}} = case MochiReq:get(socket) of
-                        {ssl, SslSocket} -> ssl:sockname(SslSocket);
-                        Socket -> inet:sockname(Socket)
-                    end,
-                    inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
-                Value1 ->
-                    Value1
-            end;
-        Value -> Value
-    end.
-
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
-    Host = host_for_request(Req),
-    XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
-    Scheme = case MochiReq:get_header_value(XSsl) of
-                 "on" -> "https";
-                 _ ->
-                     XProto = couch_config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"),
-                     case MochiReq:get_header_value(XProto) of
-                         %% Restrict to "https" and "http" schemes only
-                         "https" -> "https";
-                         _ -> case MochiReq:get(scheme) of
-                                  https -> "https";
-                                  http -> "http"
-                              end
-                     end
-             end,
-    Scheme ++ "://" ++ Host ++ Path.
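-
-% Example (hypothetical request): with a "Host: db.example.com" header and an
-% "X-Forwarded-Ssl: on" header, absolute_uri(Req, "/mydb") returns
-% "https://db.example.com/mydb".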
-
-unquote(UrlEncodedString) ->
-    mochiweb_util:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
-    mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req=MochiReq}) ->
-    mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req=MochiReq}, Len) ->
-    MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
-    % ChunkFun is called once with each chunk:
-    %     ChunkFun({Length, Binary}, State)
-    % It is called with Length == 0 for the final chunk.
-    MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-
-body_length(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(body_length).
-
-body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
-    MaxSize = list_to_integer(
-        couch_config:get("couchdb", "max_document_size", "4294967296")),
-    MochiReq:recv_body(MaxSize);
-body(#httpd{req_body=ReqBody}) ->
-    ReqBody.
-
-json_body(Httpd) ->
-    ?JSON_DECODE(body(Httpd)).
-
-json_body_obj(Httpd) ->
-    case json_body(Httpd) of
-        {Props} -> {Props};
-        _Else ->
-            throw({bad_request, "Request body must be a JSON object"})
-    end.
-
-
-
-doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
-    "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
-
-make_etag(Term) ->
-    <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)),
-    iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
-    etag_match(Req, binary_to_list(CurrentEtag));
-
-etag_match(Req, CurrentEtag) ->
-    EtagsToMatch = string:tokens(
-        header_value(Req, "If-None-Match", ""), ", "),
-    lists:member(CurrentEtag, EtagsToMatch).
-
-etag_respond(Req, CurrentEtag, RespFun) ->
-    case etag_match(Req, CurrentEtag) of
-    true ->
-        % the client has this in their cache.
-        send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
-    false ->
-        % Run the function.
-        RespFun()
-    end.
-
-etag_maybe(Req, RespFun) ->
-    try
-        RespFun()
-    catch
-        throw:{etag_match, ETag} ->
-            send_response(Req, 304, [{"ETag", ETag}], <<>>)
-    end.
-
-verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
-    verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles=Roles}) ->
-    case lists:member(<<"_admin">>, Roles) of
-    true -> ok;
-    false -> throw({unauthorized, <<"You are not a server admin.">>})
-    end.
-
-log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) ->
-    ?LOG_INFO("~s - - ~s ~s ~B", [
-        Peer,
-        MochiReq:get(method),
-        MochiReq:get(raw_path),
-        Code
-    ]),
-    gen_event:notify(couch_plugin, {log_request, Req, Code}).
-
-
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = Headers ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response_length({Code, Headers2, Length}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    CookieHeader = couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers1 = Headers ++ server_header() ++ CookieHeader,
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response({Code, Headers2}),
-    case MochiReq:get(method) of
-        'HEAD' -> throw({http_head_abort, Resp});
-        _ -> ok
-    end,
-    {ok, Resp}.
-
-send(Resp, Data) ->
-    Resp:send(Data),
-    {ok, Resp}.
-
-no_resp_conn_header([]) ->
-    true;
-no_resp_conn_header([{Hdr, _}|Rest]) ->
-    case string:to_lower(Hdr) of
-        "connection" -> false;
-        _ -> no_resp_conn_header(Rest)
-    end.
-
-http_1_0_keep_alive(Req, Headers) ->
-    KeepOpen = Req:should_close() == false,
-    IsHttp10 = Req:get(version) == {1, 0},
-    NoRespHeader = no_resp_conn_header(Headers),
-    case KeepOpen andalso IsHttp10 andalso NoRespHeader of
-        true -> [{"Connection", "Keep-Alive"} | Headers];
-        false -> Headers
-    end.
-
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-    Resp = MochiReq:respond({Code, Headers3, chunked}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-send_chunk(Resp, Data) ->
-    case iolist_size(Data) of
-    0 -> ok; % do nothing
-    _ -> Resp:write_chunk(Data)
-    end,
-    {ok, Resp}.
-
-last_chunk(Resp) ->
-    Resp:write_chunk([]),
-    {ok, Resp}.
-
-send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    if Code >= 500 ->
-        ?LOG_ERROR("httpd ~p error response:~n ~s", [Code, Body]);
-    Code >= 400 ->
-        ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
-    true -> ok
-    end,
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-
-    {ok, MochiReq:respond({Code, Headers3, Body})}.
-
-send_method_not_allowed(Req, Methods) ->
-    send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
-
-send_json(Req, Value) ->
-    send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
-    send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    Body = [start_jsonp(), ?JSON_ENCODE(Value), end_jsonp(), $\n],
-    send_response(Req, Code, DefaultHeaders ++ Headers, Body).
-
-start_json_response(Req, Code) ->
-    start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
-    case start_jsonp() of
-        [] -> ok;
-        Start -> send_chunk(Resp, Start)
-    end,
-    {ok, Resp}.
-
-end_json_response(Resp) ->
-    send_chunk(Resp, end_jsonp() ++ [$\n]),
-    last_chunk(Resp).
-
-initialize_jsonp(Req) ->
-    case get(jsonp) of
-        undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
-        _ -> ok
-    end,
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack ->
-            try
-                % make sure JSONP is enabled in the config (off by default)
-                case couch_config:get("httpd", "allow_jsonp", "false") of
-                "true" ->
-                    validate_callback(CallBack);
-                _Else ->
-                    put(jsonp, no_jsonp)
-                end
-            catch
-                Error ->
-                    put(jsonp, no_jsonp),
-                    throw(Error)
-            end
-    end.
-
-start_jsonp() ->
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack -> ["/* CouchDB */", CallBack, "("]
-    end.
-
-end_jsonp() ->
-    case erlang:erase(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        _ -> ");"
-    end.
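-
-% Example (hypothetical request), assuming the ini sets
-% [httpd] allow_jsonp = true: a request carrying ?callback=handle gets its
-% JSON body wrapped as
-%
-%   /* CouchDB */handle({...});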
-
-validate_callback(CallBack) when is_binary(CallBack) ->
-    validate_callback(binary_to_list(CallBack));
-validate_callback([]) ->
-    ok;
-validate_callback([Char | Rest]) ->
-    case Char of
-        _ when Char >= $a andalso Char =< $z -> ok;
-        _ when Char >= $A andalso Char =< $Z -> ok;
-        _ when Char >= $0 andalso Char =< $9 -> ok;
-        _ when Char == $. -> ok;
-        _ when Char == $_ -> ok;
-        _ when Char == $[ -> ok;
-        _ when Char == $] -> ok;
-        _ ->
-            throw({bad_request, invalid_callback})
-    end,
-    validate_callback(Rest).
-
-
-error_info({Error, Reason}) when is_list(Reason) ->
-    error_info({Error, ?l2b(Reason)});
-error_info(bad_request) ->
-    {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
-    {400, <<"bad_request">>, Reason};
-error_info({query_parse_error, Reason}) ->
-    {400, <<"query_parse_error">>, Reason};
-% Prior art for md5 mismatch resulting in a 400 is from AWS S3
-error_info(md5_mismatch) ->
-    {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
-error_info(not_found) ->
-    {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
-    {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
-    {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
-    {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Msg}) ->
-    {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
-    {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
-    {412, <<"file_exists">>, <<"The database could not be "
-        "created, the file already exists.">>};
-error_info(request_entity_too_large) ->
-    {413, <<"too_large">>, <<"the request entity is too large">>};
-error_info({bad_ctype, Reason}) ->
-    {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
-    {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, illegal_database_name, Name}) ->
-    Message = "Name: '" ++ Name ++ "'. Only lowercase characters (a-z), "
-        ++ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
-        ++ "are allowed. Must begin with a letter.",
-    {400, <<"illegal_database_name">>, couch_util:to_binary(Message)};
-error_info({missing_stub, Reason}) ->
-    {412, <<"missing_stub">>, Reason};
-error_info({Error, Reason}) ->
-    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info(Error) ->
-    {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
-    if Code == 401 ->
-        % this is where the basic auth popup is triggered
-        case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
-        undefined ->
-            case couch_config:get("httpd", "WWW-Authenticate", nil) of
-            nil ->
-                % If the client is a browser and the basic auth popup isn't
-                % turned on, redirect to the session page.
-                case ErrorStr of
-                <<"unauthorized">> ->
-                    case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of
-                    nil -> {Code, []};
-                    AuthRedirect ->
-                        case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-                        "true" ->
-                            % always send the browser popup header if require_valid_user is enabled
-                            {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
-                        _False ->
-                            case MochiReq:accepts_content_type("application/json") of
-                            true ->
-                                {Code, []};
-                            false ->
-                                case MochiReq:accepts_content_type("text/html") of
-                                true ->
-                                    % Redirect to the path the user requested, not
-                                    % the one that is used internally.
-                                    UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                                    undefined ->
-                                        MochiReq:get(path);
-                                    VHostPath ->
-                                        VHostPath
-                                    end,
-                                    RedirectLocation = lists:flatten([
-                                        AuthRedirect,
-                                        "?return=", couch_util:url_encode(UrlReturnRaw),
-                                        "&reason=", couch_util:url_encode(ReasonStr)
-                                    ]),
-                                    {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
-                                false ->
-                                    {Code, []}
-                                end
-                            end
-                        end
-                    end;
-                _Else ->
-                    {Code, []}
-                end;
-            Type ->
-                {Code, [{"WWW-Authenticate", Type}]}
-            end;
-        Type ->
-           {Code, [{"WWW-Authenticate", Type}]}
-        end;
-    true ->
-        {Code, []}
-    end.
-
-send_error(_Req, {already_sent, Resp, _Error}) ->
-    {ok, Resp};
-
-send_error(Req, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
-    send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
-    send_error(Req, Code, [], ErrorStr, ReasonStr).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
-    send_json(Req, Code, Headers,
-        {[{<<"error">>,  ErrorStr},
-         {<<"reason">>, ReasonStr}]}).
-
-% give list functions the option to output HTML or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
-    send_chunk(Resp, Reason),
-    last_chunk(Resp);
-
-send_chunked_error(Resp, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    JsonError = {[{<<"code">>, Code},
-        {<<"error">>,  ErrorStr},
-        {<<"reason">>, ReasonStr}]},
-    send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
-    last_chunk(Resp).
-
-send_redirect(Req, Path) ->
-     send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
-
-negotiate_content_type(Req) ->
-    case get(jsonp) of
-        no_jsonp -> negotiate_content_type1(Req);
-        [] -> negotiate_content_type1(Req);
-        _Callback -> "text/javascript"
-    end.
-
-negotiate_content_type1(#httpd{mochi_req=MochiReq}) ->
-    %% Determine the appropriate Content-Type header for a JSON response
-    %% depending on the Accept header in the request. A request that explicitly
-    %% lists the correct JSON MIME type will get that type, otherwise the
-    %% response will have the generic MIME type "text/plain"
-    AcceptedTypes = case MochiReq:get_header_value("Accept") of
-        undefined       -> [];
-        AcceptHeader    -> string:tokens(AcceptHeader, ", ")
-    end,
-    case lists:member("application/json", AcceptedTypes) of
-        true  -> "application/json";
-        false -> "text/plain; charset=utf-8"
-    end.
-
-server_header() ->
-    [{"Server", "CouchDB/" ++ couch_server:get_version() ++
-                " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
-
-
--record(mp, {boundary, buffer, data_fun, callback}).
-
-
-parse_multipart_request(ContentType, DataFun, Callback) ->
-    Boundary0 = iolist_to_binary(get_boundary(ContentType)),
-    Boundary = <<"\r\n--", Boundary0/binary>>,
-    Mp = #mp{boundary= Boundary,
-            buffer= <<>>,
-            data_fun=DataFun,
-            callback=Callback},
-    {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
-        fun nil_callback/1),
-    #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
-            parse_part_header(Mp2),
-    {Buffer, DataFun2, Callback2}.
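-
-% Sketch of the callback protocol (as inferred from parse_part_header/1 and
-% parse_part_body/1 below): Callback is a fun that is invoked with an event
-% and returns the fun to call for the next event, receiving, per part,
-% {headers, Headers}, then zero or more {body, Data} chunks, then body_end,
-% and finally eof after the closing "--" boundary.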
-
-nil_callback(_Data)->
-    fun nil_callback/1.
-
-get_boundary({"multipart/" ++ _, Opts}) ->
-    case couch_util:get_value("boundary", Opts) of
-        S when is_list(S) ->
-            S
-    end;
-get_boundary(ContentType) ->
-    {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
-    get_boundary({"multipart/", Opts}).
-
-
-
-split_header(<<>>) ->
-    [];
-split_header(Line) ->
-    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
-                                           binary_to_list(Line)),
-    [{string:to_lower(string:strip(Name)),
-     mochiweb_util:parse_header(Value)}].
-
-read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
-    case find_in_binary(Pattern, Buffer) of
-    not_found ->
-        Callback2 = Callback(Buffer),
-        {Buffer2, DataFun2} = DataFun(),
-        Buffer3 = iolist_to_binary(Buffer2),
-        read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
-    {partial, 0} ->
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Buffer,NewData])},
-                Pattern, Callback);
-    {partial, Skip} ->
-        <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Rest | NewData])},
-                Pattern, Callback2);
-    {exact, 0} ->
-        PatternLen = size(Pattern),
-        <<_:PatternLen/binary, Rest/binary>> = Buffer,
-        {Mp#mp{buffer= Rest}, Callback};
-    {exact, Skip} ->
-        PatternLen = size(Pattern),
-        <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {Mp#mp{buffer= Rest}, Callback2}
-    end.
-
-
-parse_part_header(#mp{callback=UserCallBack}=Mp) ->
-    {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
-            fun(Next) -> acc_callback(Next, []) end),
-    HeaderData = AccCallback(get_data),
-
-    Headers =
-    lists:foldl(fun(Line, Acc) ->
-            split_header(Line) ++ Acc
-        end, [], re:split(HeaderData,<<"\r\n">>, [])),
-    NextCallback = UserCallBack({headers, Headers}),
-    parse_part_body(Mp2#mp{callback=NextCallback}).
-
-parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
-    {Mp2, WrappedCallback} = read_until(Mp, Prefix,
-            fun(Data) -> body_callback_wrapper(Data, Callback) end),
-    Callback2 = WrappedCallback(get_callback),
-    Callback3 = Callback2(body_end),
-    case check_for_last(Mp2#mp{callback=Callback3}) of
-    {last, #mp{callback=Callback3}=Mp3} ->
-        Mp3#mp{callback=Callback3(eof)};
-    {more, Mp3} ->
-        parse_part_header(Mp3)
-    end.
-
-acc_callback(get_data, Acc)->
-    iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc)->
-    fun(Next) -> acc_callback(Next, [Data | Acc]) end.
-
-body_callback_wrapper(get_callback, Callback) ->
-    Callback;
-body_callback_wrapper(Data, Callback) ->
-    Callback2 = Callback({body, Data}),
-    fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-
-check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
-    case Buffer of
-    <<"--",_/binary>> -> {last, Mp};
-    <<_, _, _/binary>> -> {more, Mp};
-    _ -> % not long enough
-        {Data, DataFun2} = DataFun(),
-        check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
-                data_fun = DataFun2})
-    end.
-
-find_in_binary(_B, <<>>) ->
-    not_found;
-
-find_in_binary(B, Data) ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), 1);
-    {Pos, _Len} ->
-        {exact, Pos}
-    end.
-
-partial_find(<<>>, _Data, _Pos) ->
-    not_found;
-
-partial_find(B, Data, N) when byte_size(Data) > 0 ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), N + 1);
-    {Pos, _Len} ->
-        {partial, N + Pos}
-    end;
-
-partial_find(_B, _Data, _N) ->
-    not_found.
-
-
-validate_bind_address(Address) ->
-    case inet_parse:address(Address) of
-        {ok, _} -> ok;
-        _ -> throw({error, invalid_bind_address})
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_auth.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_auth.erl b/src/couch_httpd_auth.erl
deleted file mode 100644
index b8c4e26..0000000
--- a/src/couch_httpd_auth.erl
+++ /dev/null
@@ -1,380 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_auth).
--include("couch_db.hrl").
-
--export([default_authentication_handler/1,special_test_authentication_handler/1]).
--export([cookie_authentication_handler/1]).
--export([null_authentication_handler/1]).
--export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
--export([cookie_auth_header/2]).
--export([handle_session_req/1]).
-
--import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
-
-special_test_authentication_handler(Req) ->
-    case header_value(Req, "WWW-Authenticate") of
-    "X-Couch-Test-Auth " ++ NamePass ->
-        % NamePass is a colon-separated string: "joe schmoe:a password".
-        [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
-        case {Name, Pass} of
-        {"Jan Lehnardt", "apple"} -> ok;
-        {"Christopher Lenz", "dog food"} -> ok;
-        {"Noah Slater", "biggiesmalls endian"} -> ok;
-        {"Chris Anderson", "mp3"} -> ok;
-        {"Damien Katz", "pecan pie"} -> ok;
-        {_, _} ->
-            throw({unauthorized, <<"Name or password is incorrect.">>})
-        end,
-        Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
-    _ ->
-        % No X-Couch-Test-Auth credentials sent, give admin access so the
-        % previous authentication can be restored after the test
-        Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-    end.
-
-basic_name_pw(Req) ->
-    AuthorizationHeader = header_value(Req, "Authorization"),
-    case AuthorizationHeader of
-    "Basic " ++ Base64Value ->
-        case re:split(base64:decode(Base64Value), ":",
-                      [{return, list}, {parts, 2}]) of
-        ["_", "_"] ->
-            % special name and pass used to log out
-            nil;
-        [User, Pass] ->
-            {User, Pass};
-        _ ->
-            nil
-        end;
-    _ ->
-        nil
-    end.
-
-default_authentication_handler(Req) ->
-    case basic_name_pw(Req) of
-    {User, Pass} ->
-        case couch_auth_cache:get_user_creds(User) of
-            nil ->
-                throw({unauthorized, <<"Name or password is incorrect.">>});
-            UserProps ->
-                case authenticate(?l2b(Pass), UserProps) of
-                    true ->
-                        Req#httpd{user_ctx=#user_ctx{
-                            name=?l2b(User),
-                            roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                        }};
-                    _Else ->
-                        throw({unauthorized, <<"Name or password is incorrect.">>})
-                end
-        end;
-    nil ->
-        case couch_server:has_admins() of
-        true ->
-            Req;
-        false ->
-            case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-                "true" -> Req;
-                % If no admins, and no user required, then everyone is admin!
-                % Yay, admin party!
-                _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-            end
-        end
-    end.
-
-null_authentication_handler(Req) ->
-    Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
-
-%% @doc proxy auth handler.
-%
-% This handler allows creation of a userCtx object for a user authenticated
-% remotely. The client simply passes specific headers to CouchDB and the
-% handler creates the userCtx. Header names can be defined in local.ini.
-% By default they are:
-%
-%   * X-Auth-CouchDB-UserName : contains the username (x_auth_username in
-%   the couch_httpd_auth section)
-%   * X-Auth-CouchDB-Roles : contains the user roles, a comma-separated
-%   list of roles (x_auth_roles in the couch_httpd_auth section)
-%   * X-Auth-CouchDB-Token : token to authenticate the authorization
-%   (x_auth_token in the couch_httpd_auth section). This token is an
-%   HMAC-SHA1 built from the secret key and the username. The secret key
-%   must be the same on the client and the CouchDB node; it is the secret
-%   key in the couch_httpd_auth section of the ini file. The token is
-%   optional unless the proxy_use_secret key in the couch_httpd_auth
-%   section of the ini file is set to true.
-%
-proxy_authentication_handler(Req) ->
-    case proxy_auth_user(Req) of
-        nil -> Req;
-        Req2 -> Req2
-    end.
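-
-% Client-side illustration (editor's sketch with hypothetical values): the
-% token must match the ExpectedToken computed in proxy_auth_user/1 below,
-% i.e. the hex-encoded HMAC-SHA1 of the username keyed with the shared
-% secret:
-%
-%   Secret = couch_config:get("couch_httpd_auth", "secret", nil),
-%   Token = couch_util:to_hex(crypto:sha_mac(Secret, "jan")),
-%   % then send:
-%   %   X-Auth-CouchDB-UserName: jan
-%   %   X-Auth-CouchDB-Roles: admins,readers
-%   %   X-Auth-CouchDB-Token: <Token>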
-
-%% @deprecated
-proxy_authentification_handler(Req) ->
-    proxy_authentication_handler(Req).
-    
-proxy_auth_user(Req) ->
-    XHeaderUserName = couch_config:get("couch_httpd_auth", "x_auth_username",
-                                "X-Auth-CouchDB-UserName"),
-    XHeaderRoles = couch_config:get("couch_httpd_auth", "x_auth_roles",
-                                "X-Auth-CouchDB-Roles"),
-    XHeaderToken = couch_config:get("couch_httpd_auth", "x_auth_token",
-                                "X-Auth-CouchDB-Token"),
-    case header_value(Req, XHeaderUserName) of
-        undefined -> nil;
-        UserName ->
-            Roles = case header_value(Req, XHeaderRoles) of
-                undefined -> [];
-                Else ->
-                    [?l2b(R) || R <- string:tokens(Else, ",")]
-            end,
-            case couch_config:get("couch_httpd_auth", "proxy_use_secret", "false") of
-                "true" ->
-                    case couch_config:get("couch_httpd_auth", "secret", nil) of
-                        nil ->
-                            Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
-                        Secret ->
-                            ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
-                            case header_value(Req, XHeaderToken) of
-                                Token when Token == ExpectedToken ->
-                                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
-                                                            roles=Roles}};
-                                _ -> nil
-                            end
-                    end;
-                _ ->
-                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
-            end
-    end.
-
-
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
-    case MochiReq:get_cookie_value("AuthSession") of
-    undefined -> Req;
-    [] -> Req;
-    Cookie ->
-        [User, TimeStr, HashStr] = try
-            AuthSession = couch_util:decodeBase64Url(Cookie),
-            [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
-                                     [{return, list}, {parts, 3}])
-        catch
-            _:_Error ->
-                Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
-                throw({bad_request, Reason})
-        end,
-        % Verify expiry and hash
-        CurrentTime = make_cookie_time(),
-        case couch_config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            ?LOG_DEBUG("cookie auth secret is not set",[]),
-            Req;
-        SecretStr ->
-            Secret = ?l2b(SecretStr),
-            case couch_auth_cache:get_user_creds(User) of
-            nil -> Req;
-            UserProps ->
-                UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
-                FullSecret = <<Secret/binary, UserSalt/binary>>,
-                ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
-                Hash = ?l2b(HashStr),
-                Timeout = list_to_integer(
-                    couch_config:get("couch_httpd_auth", "timeout", "600")),
-                ?LOG_DEBUG("timeout ~p", [Timeout]),
-                case (catch erlang:list_to_integer(TimeStr, 16)) of
-                    TimeStamp when CurrentTime < TimeStamp + Timeout ->
-                        case couch_passwords:verify(ExpectedHash, Hash) of
-                            true ->
-                                TimeLeft = TimeStamp + Timeout - CurrentTime,
-                                ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
-                                Req#httpd{user_ctx=#user_ctx{
-                                    name=?l2b(User),
-                                    roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                                }, auth={FullSecret, TimeLeft < Timeout*0.9}};
-                            _Else ->
-                                Req
-                        end;
-                    _Else ->
-                        Req
-                end
-            end
-        end
-    end.
-
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
-    % Note: we only set the AuthSession cookie if:
-    %  * a valid AuthSession cookie has been received
-    %  * we are outside a 10% timeout window
-    %  * and if an AuthSession cookie hasn't already been set e.g. by a login
-    %    or logout handler.
-    % The login and logout handlers need to set the AuthSession cookie
-    % themselves.
-    CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
-    Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
-    AuthSession = couch_util:get_value("AuthSession", Cookies),
-    if AuthSession == undefined ->
-        TimeStamp = make_cookie_time(),
-        [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
-    true ->
-        []
-    end;
-cookie_auth_header(_Req, _Headers) -> [].
-
-cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
-    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
-    Hash = crypto:sha_mac(Secret, SessionData),
-    mochiweb_cookies:cookie("AuthSession",
-        couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
-        [{path, "/"}] ++ cookie_scheme(Req) ++ max_age()).
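-
-% Note (editor's summary): the AuthSession cookie value is therefore
-% base64url(User ++ ":" ++ TimeStampHex ++ ":" ++ HmacSha1), which
-% cookie_authentication_handler/1 above splits back into
-% [User, TimeStr, HashStr] on ":".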
-
-ensure_cookie_auth_secret() ->
-    case couch_config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            NewSecret = ?b2l(couch_uuids:random()),
-            couch_config:set("couch_httpd_auth", "secret", NewSecret),
-            NewSecret;
-        Secret -> Secret
-    end.
-
-% session handlers
-% Login handler with user db
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
-    ReqBody = MochiReq:recv_body(),
-    Form = case MochiReq:get_primary_header_value("content-type") of
-        % the body may be form-encoded or JSON
-        "application/x-www-form-urlencoded" ++ _ ->
-            mochiweb_util:parse_qs(ReqBody);
-        "application/json" ++ _ ->
-            {Pairs} = ?JSON_DECODE(ReqBody),
-            lists:map(fun({Key, Value}) ->
-              {?b2l(Key), ?b2l(Value)}
-            end, Pairs);
-        _ ->
-            []
-    end,
-    UserName = ?l2b(couch_util:get_value("name", Form, "")),
-    Password = ?l2b(couch_util:get_value("password", Form, "")),
-    ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
-    User = case couch_auth_cache:get_user_creds(UserName) of
-        nil -> [];
-        Result -> Result
-    end,
-    UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
-    case authenticate(Password, User) of
-        true ->
-            % setup the session cookie
-            Secret = ?l2b(ensure_cookie_auth_secret()),
-            CurrentTime = make_cookie_time(),
-            Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
-            % TODO document the "next" feature in Futon
-            {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-                nil ->
-                    {200, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
-                {[
-                    {ok, true},
-                    {name, couch_util:get_value(<<"name">>, User, null)},
-                    {roles, couch_util:get_value(<<"roles">>, User, [])}
-                ]});
-        _Else ->
-            % clear the session
-            Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-            {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
-                nil ->
-                    {401, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
-    end;
-% get user info
-% GET /_session
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
-    Name = UserCtx#user_ctx.name,
-    ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
-    case {Name, ForceLogin} of
-        {null, "true"} ->
-            throw({unauthorized, <<"Please login.">>});
-        {Name, _} ->
-            send_json(Req, {[
-                % remove this ok
-                {ok, true},
-                {<<"userCtx">>, {[
-                    {name, Name},
-                    {roles, UserCtx#user_ctx.roles}
-                ]}},
-                {info, {[
-                    {authentication_db, ?l2b(couch_config:get("couch_httpd_auth", "authentication_db"))},
-                    {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
-                            couch_config:get("httpd", "authentication_handlers"))]}
-                ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
-                        auth_name(?b2l(Handler))
-                    end)}}
-            ]})
-    end;
-% logout by deleting the session
-handle_session_req(#httpd{method='DELETE'}=Req) ->
-    Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-    {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-        nil ->
-            {200, [Cookie]};
-        Redirect ->
-            {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-    end,
-    send_json(Req, Code, Headers, {[{ok, true}]});
-handle_session_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
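-
-% Example login round-trip (hypothetical credentials), assuming a default
-% configuration:
-%
-%   curl -X POST http://127.0.0.1:5984/_session \
-%        -H 'Content-Type: application/x-www-form-urlencoded' \
-%        -d 'name=jan&password=apple'
-%
-% On success the response carries a "Set-Cookie: AuthSession=..." header and
-% a JSON body such as {"ok":true,"name":"jan","roles":[]}.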
-
-maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) ->
-    [{Key, Fun(Else)}].
-
-authenticate(Pass, UserProps) ->
-    UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
-    {PasswordHash, ExpectedHash} =
-        case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
-        <<"simple">> ->
-            {couch_passwords:simple(Pass, UserSalt),
-            couch_util:get_value(<<"password_sha">>, UserProps, nil)};
-        <<"pbkdf2">> ->
-            Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
-            {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
-             couch_util:get_value(<<"derived_key">>, UserProps, nil)}
-    end,
-    couch_passwords:verify(PasswordHash, ExpectedHash).
-
-auth_name(String) when is_list(String) ->
-    [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
-    ?l2b(Name).
-
-make_cookie_time() ->
-    {NowMS, NowS, _} = erlang:now(),
-    NowMS * 1000000 + NowS.
-
-cookie_scheme(#httpd{mochi_req=MochiReq}) ->
-    [{http_only, true}] ++
-    case MochiReq:get(scheme) of
-        http -> [];
-        https -> [{secure, true}]
-    end.
-
-max_age() ->
-    case couch_config:get("couch_httpd_auth", "allow_persistent_cookies", "false") of
-        "false" ->
-            [];
-        "true" ->
-            Timeout = list_to_integer(
-                couch_config:get("couch_httpd_auth", "timeout", "600")),
-            [{max_age, Timeout}]
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_cors.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_cors.erl b/src/couch_httpd_cors.erl
deleted file mode 100644
index d9462d1..0000000
--- a/src/couch_httpd_cors.erl
+++ /dev/null
@@ -1,351 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc module to handle Cross-Origin Resource Sharing
-%%
-%% This module handles CORS requests and preflight requests for
-%% CouchDB. The configuration is done in the ini file.
-%%
-%% This implements http://www.w3.org/TR/cors/
-
-
--module(couch_httpd_cors).
-
--include("couch_db.hrl").
-
--export([is_preflight_request/1, cors_headers/2]).
-
--define(SUPPORTED_HEADERS, "Accept, Accept-Language, Content-Type," ++
-        "Expires, Last-Modified, Pragma, Origin, Content-Length," ++
-        "If-Match, Destination, X-Requested-With, " ++
-        "X-Http-Method-Override, Content-Range").
-
--define(SUPPORTED_METHODS, "GET, HEAD, POST, PUT, DELETE," ++
-        "TRACE, CONNECT, COPY, OPTIONS").
-
-% as defined in http://www.w3.org/TR/cors/#terminology
--define(SIMPLE_HEADERS, ["Cache-Control", "Content-Language",
-        "Content-Type", "Expires", "Last-Modified", "Pragma"]).
--define(ALLOWED_HEADERS, lists:sort(["Server", "Etag",
-        "Accept-Ranges" | ?SIMPLE_HEADERS])).
--define(SIMPLE_CONTENT_TYPE_VALUES, ["application/x-www-form-urlencoded",
-        "multipart/form-data", "text/plain"]).
-
-% TODO: - pick a sane default
--define(CORS_DEFAULT_MAX_AGE, 12345).
-
-%% is_preflight_request/1
-
-% http://www.w3.org/TR/cors/#resource-preflight-requests
-
-is_preflight_request(#httpd{method=Method}=Req) when Method /= 'OPTIONS' ->
-    Req;
-is_preflight_request(Req) ->
-    EnableCors = enable_cors(),
-    is_preflight_request(Req, EnableCors).
-
-is_preflight_request(Req, false) ->
-    Req;
-is_preflight_request(#httpd{mochi_req=MochiReq}=Req, true) ->
-    case preflight_request(MochiReq) of
-    {ok, PreflightHeaders} ->
-        send_preflight_response(Req, PreflightHeaders);
-    _ ->
-        Req
-    end.
-
-
-preflight_request(MochiReq) ->
-    Origin = MochiReq:get_header_value("Origin"),
-    preflight_request(MochiReq, Origin).
-
-preflight_request(MochiReq, undefined) ->
-    % If the Origin header is not present terminate this set of
-    % steps. The request is outside the scope of this specification.
-    % http://www.w3.org/TR/cors/#resource-preflight-requests
-    MochiReq;
-preflight_request(MochiReq, Origin) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    AcceptAll = lists:member("*", AcceptedOrigins),
-
-    HandlerFun = fun() ->
-        OriginList = couch_util:to_list(Origin),
-        handle_preflight_request(OriginList, Host, MochiReq)
-    end,
-
-    case AcceptAll of
-    true ->
-        % Always matching is acceptable since the list of
-        % origins can be unbounded.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        HandlerFun();
-    false ->
-        case lists:member(Origin, AcceptedOrigins) of
-        % The Origin header can only contain a single origin as
-        % the user agent will not follow redirects.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        % TODO: Square against multi origin thinger in Security Considerations
-        true ->
-            HandlerFun();
-        false ->
-            % If the value of the Origin header is not a
-            % case-sensitive match for any of the values
-            % in list of origins do not set any additional
-            % headers and terminate this set of steps.
-            % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-handle_preflight_request(Origin, Host, MochiReq) ->
-    %% get supported methods
-    SupportedMethods = split_list(cors_config(Host, "methods",
-                                              ?SUPPORTED_METHODS)),
-
-    % get supported headers
-    AllSupportedHeaders = split_list(cors_config(Host, "headers",
-                                                 ?SUPPORTED_HEADERS)),
-
-    SupportedHeaders = [string:to_lower(H) || H <- AllSupportedHeaders],
-
-    % get max age
-    MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
-
-    PreflightHeaders0 = maybe_add_credentials(Origin, Host, [
-        {"Access-Control-Allow-Origin", Origin},
-        {"Access-Control-Max-Age", MaxAge},
-        {"Access-Control-Allow-Methods",
-            string:join(SupportedMethods, ", ")}]),
-
-    case MochiReq:get_header_value("Access-Control-Request-Method") of
-    undefined ->
-        % If there is no Access-Control-Request-Method header
-        % or if parsing failed, do not set any additional headers
-        % and terminate this set of steps. The request is outside
-        % the scope of this specification.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        {ok, PreflightHeaders0};
-    Method ->
-        case lists:member(Method, SupportedMethods) of
-        true ->
-            % method ok, check headers
-            AccessHeaders = MochiReq:get_header_value(
-                    "Access-Control-Request-Headers"),
-            {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
-                undefined -> {"", []};
-                Headers ->
-                    % transform the header list into something we
-                    % can check; make sure everything is a
-                    % list
-                    RH = [string:to_lower(H)
-                          || H <- split_headers(Headers)],
-                    {Headers, RH}
-            end,
-            % check if headers are supported
-            case ReqHeaders -- SupportedHeaders of
-            [] ->
-                PreflightHeaders = PreflightHeaders0 ++
-                                   [{"Access-Control-Allow-Headers",
-                                     FinalReqHeaders}],
-                {ok, PreflightHeaders};
-            _ ->
-                false
-            end;
-        false ->
-            % If method is not a case-sensitive match for any of
-            % the values in list of methods do not set any additional
-            % headers and terminate this set of steps.
-            % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-send_preflight_response(#httpd{mochi_req=MochiReq}=Req, Headers) ->
-    couch_httpd:log_request(Req, 204),
-    couch_stats_collector:increment({httpd_status_codes, 204}),
-    Headers1 = couch_httpd:http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ couch_httpd:server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    {ok, MochiReq:respond({204, Headers2, <<>>})}.
-
-
-% cors_headers/1
-
-cors_headers(MochiReq, RequestHeaders) ->
-    EnableCors = enable_cors(),
-    CorsHeaders = do_cors_headers(MochiReq, EnableCors),
-    maybe_apply_cors_headers(CorsHeaders, RequestHeaders).
-
-do_cors_headers(#httpd{mochi_req=MochiReq}, true) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    case MochiReq:get_header_value("Origin") of
-    undefined ->
-        % If the Origin header is not present terminate
-        % this set of steps. The request is outside the scope
-        % of this specification.
-        % http://www.w3.org/TR/cors/#resource-processing-model
-        [];
-    Origin ->
-        handle_cors_headers(couch_util:to_list(Origin),
-                            Host, AcceptedOrigins)
-    end;
-do_cors_headers(_MochiReq, false) ->
-    [].
-
-maybe_apply_cors_headers([], RequestHeaders) ->
-    RequestHeaders;
-maybe_apply_cors_headers(CorsHeaders, RequestHeaders0) ->
-    % for each RequestHeader that isn't in SimpleHeaders,
-    % (or Content-Type with SIMPLE_CONTENT_TYPE_VALUES)
-    % append to Access-Control-Expose-Headers
-    % return: RequestHeaders ++ CorsHeaders ++ ACEH
-
-    RequestHeaders = [K || {K,_V} <- RequestHeaders0],
-    ExposedHeaders0 = reduce_headers(RequestHeaders, ?ALLOWED_HEADERS),
-
-    % we may not have moved Content-Type into ExposedHeaders above,
-    % so we need to check whether the Content-Type value is
-    % in ?SIMPLE_CONTENT_TYPE_VALUES and, if it isn't, add
-    % Content-Type to ExposedHeaders
-    ContentType =  proplists:get_value("Content-Type", RequestHeaders0),
-    IncludeContentType = case ContentType of
-    undefined ->
-        false;
-    _ ->
-        ContentType_ = string:to_lower(ContentType),
-        lists:member(ContentType_, ?SIMPLE_CONTENT_TYPE_VALUES)
-    end,
-    ExposedHeaders = case IncludeContentType of
-    false ->
-        lists:umerge(ExposedHeaders0, ["Content-Type"]);
-    true ->
-        ExposedHeaders0
-    end,
-    CorsHeaders
-    ++ RequestHeaders0
-    ++ [{"Access-Control-Expose-Headers",
-            string:join(ExposedHeaders, ", ")}].
-
-
-reduce_headers(A, B) ->
-    reduce_headers0(A, B, []).
-
-reduce_headers0([], _B, Result) ->
-    lists:sort(Result);
-reduce_headers0([ElmA|RestA], B, Result) ->
-    R = case member_nocase(ElmA, B) of
-    false -> Result;
-    _Else -> [ElmA | Result]
-    end,
-    reduce_headers0(RestA, B, R).
-
-member_nocase(ElmA, List) ->
-    lists:any(fun(ElmB) ->
-        string:to_lower(ElmA) =:= string:to_lower(ElmB)
-    end, List).
-
-handle_cors_headers(_Origin, _Host, []) ->
-    [];
-handle_cors_headers(Origin, Host, AcceptedOrigins) ->
-    AcceptAll = lists:member("*", AcceptedOrigins),
-    case {AcceptAll, lists:member(Origin, AcceptedOrigins)} of
-    {true, _} ->
-        make_cors_header(Origin, Host);
-    {false, true}  ->
-        make_cors_header(Origin, Host);
-    _ ->
-        % If the value of the Origin header is not a
-        % case-sensitive match for any of the values
-        % in list of origins, do not set any additional
-        % headers and terminate this set of steps.
-        % http://www.w3.org/TR/cors/#resource-requests
-        []
-    end.
-
-
-make_cors_header(Origin, Host) ->
-    Headers = [{"Access-Control-Allow-Origin", Origin}],
-    maybe_add_credentials(Origin, Host, Headers).
-
-
-%% util
-
-maybe_add_credentials(Origin, Host, Headers) ->
-    maybe_add_credentials(Headers, allow_credentials(Origin, Host)).
-
-maybe_add_credentials(Headers, false) ->
-    Headers;
-maybe_add_credentials(Headers, true) ->
-    Headers ++ [{"Access-Control-Allow-Credentials", "true"}].
-
-
-allow_credentials("*", _Host) ->
-    false;
-allow_credentials(_Origin, Host) ->
-    Default = get_bool_config("cors", "credentials", false),
-    get_bool_config(cors_section(Host), "credentials", Default).
-
-
-
-cors_config(Host, Key, Default) ->
-    couch_config:get(cors_section(Host), Key,
-                     couch_config:get("cors", Key, Default)).
-
-cors_section(Host0) ->
-    {Host, _Port} = split_host_port(Host0),
-    "cors:" ++ Host.
-
-enable_cors() ->
-    get_bool_config("httpd", "enable_cors", false).
-
-get_bool_config(Section, Key, Default) ->
-    case couch_config:get(Section, Key) of
-    undefined ->
-        Default;
-    "true" ->
-        true;
-    "false" ->
-        false
-    end.
-
-get_accepted_origins(Host) ->
-    split_list(cors_config(Host, "origins", [])).
-
-split_list(S) ->
-    re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-split_headers(H) ->
-    re:split(H, ",\\s*", [{return,list}, trim]).
-
-split_host_port(HostAsString) ->
-    % split at colon ":"
-    Split = string:rchr(HostAsString, $:),
-    split_host_port(HostAsString, Split).
-
-split_host_port(HostAsString, 0) ->
-    % no colon
-    {HostAsString, '*'};
-split_host_port(HostAsString, N) ->
-    HostPart = string:substr(HostAsString, 1, N-1),
-    % parse out port
-    % is there a nicer way?
-    case (catch erlang:list_to_integer(string:substr(HostAsString,
-                    N+1, length(HostAsString)))) of
-    {'EXIT', _} ->
-        {HostAsString, '*'};
-    Port ->
-        {HostPart, Port}
-    end.
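
A minimal sketch of the ini configuration this module reads, assembled from the couch_config:get calls above; the host and values are illustrative, not shipped defaults:

    [httpd]
    enable_cors = true

    [cors]
    origins = http://example.com
    credentials = true
    methods = GET, PUT, POST, HEAD, DELETE
    headers = Accept, Content-Type, Origin
    max_age = 3600

    ; per-host overrides are looked up in a "cors:" ++ Host section
    [cors:example.com]
    credentials = false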


[04/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_proxy.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_proxy.erl b/src/couch_httpd_proxy.erl
new file mode 100644
index 0000000..6a4557c
--- /dev/null
+++ b/src/couch_httpd_proxy.erl
@@ -0,0 +1,426 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_httpd_proxy).
+
+-export([handle_proxy_req/2]).
+
+-include("couch_db.hrl").
+-include_lib("ibrowse/include/ibrowse.hrl").
+
+-define(TIMEOUT, infinity).
+-define(PKT_SIZE, 4096).
+
+
+handle_proxy_req(Req, ProxyDest) ->
+    Method = get_method(Req),
+    Url = get_url(Req, ProxyDest),
+    Version = get_version(Req),
+    Headers = get_headers(Req),
+    Body = get_body(Req),
+    Options = [
+        {http_vsn, Version},
+        {headers_as_is, true},
+        {response_format, binary},
+        {stream_to, {self(), once}}
+    ],
+    case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
+        {ibrowse_req_id, ReqId} ->
+            stream_response(Req, ProxyDest, ReqId);
+        {error, Reason} ->
+            throw({error, Reason})
+    end.
+
+
+get_method(#httpd{mochi_req=MochiReq}) ->
+    case MochiReq:get(method) of
+        Method when is_atom(Method) ->
+            list_to_atom(string:to_lower(atom_to_list(Method)));
+        Method when is_list(Method) ->
+            list_to_atom(string:to_lower(Method));
+        Method when is_binary(Method) ->
+            list_to_atom(string:to_lower(?b2l(Method)))
+    end.
+
+
+get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
+    get_url(Req, ?b2l(ProxyDest));
+get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
+    BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
+        {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
+        _ -> ProxyDest
+    end,
+    ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
+    RequestedPath = MochiReq:get(raw_path),
+    case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
+        {[], ProxyPrefix, []} ->
+            BaseUrl;
+        {[], ProxyPrefix, [$/ | DestPath]} ->
+            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
+        {[], ProxyPrefix, DestPath} ->
+            remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
+        _Else ->
+            throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
+    end.
+
+get_version(#httpd{mochi_req=MochiReq}) ->
+    MochiReq:get(version).
+
+
+get_headers(#httpd{mochi_req=MochiReq}) ->
+    to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
+
+to_ibrowse_headers([], Acc) ->
+    lists:reverse(Acc);
+to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
+    to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
+to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
+    case string:to_lower(K) of
+        "content-length" ->
+            to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
+        % This appears to make ibrowse too smart.
+        %"transfer-encoding" ->
+        %    to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
+        _ ->
+            to_ibrowse_headers(Rest, [{K, V} | Acc])
+    end.
+
+get_body(#httpd{method='GET'}) ->
+    fun() -> eof end;
+get_body(#httpd{method='HEAD'}) ->
+    fun() -> eof end;
+get_body(#httpd{method='DELETE'}) ->
+    fun() -> eof end;
+get_body(#httpd{mochi_req=MochiReq}) ->
+    case MochiReq:get(body_length) of
+        undefined ->
+            <<>>;
+        {unknown_transfer_encoding, Unknown} ->
+            exit({unknown_transfer_encoding, Unknown});
+        chunked ->
+            {fun stream_chunked_body/1, {init, MochiReq, 0}};
+        0 ->
+            <<>>;
+        Length when is_integer(Length) andalso Length > 0 ->
+            {fun stream_length_body/1, {init, MochiReq, Length}};
+        Length ->
+            exit({invalid_body_length, Length})
+    end.
+
+
+remove_trailing_slash(Url) ->
+    rem_slash(lists:reverse(Url)).
+
+rem_slash([]) ->
+    [];
+rem_slash([$\s | RevUrl]) ->
+    rem_slash(RevUrl);
+rem_slash([$\t | RevUrl]) ->
+    rem_slash(RevUrl);
+rem_slash([$\r | RevUrl]) ->
+    rem_slash(RevUrl);
+rem_slash([$\n | RevUrl]) ->
+    rem_slash(RevUrl);
+rem_slash([$/ | RevUrl]) ->
+    rem_slash(RevUrl);
+rem_slash(RevUrl) ->
+    lists:reverse(RevUrl).
+
+
+stream_chunked_body({init, MReq, 0}) ->
+    % First chunk, do expect-continue dance.
+    init_body_stream(MReq),
+    stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
+stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
+    % Finished a chunk, get next length. If next length
+    % is 0, it's time to try and read trailers.
+    {CRem, Data} = read_chunk_length(MReq),
+    case CRem of
+        0 ->
+            BodyData = lists:reverse(Buf, Data),
+            {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
+        _ ->
+            stream_chunked_body(
+                {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
+            )
+    end;
+stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
+    % Time to empty our buffers to the upstream socket.
+    BodyData = lists:reverse(Buf),
+    {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
+stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
+    % Buffer some more data from the client.
+    Length = lists:min([CRem, BRem]),
+    Socket = MReq:get(socket),
+    NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
+        {ok, Data} when size(Data) == CRem ->
+            case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
+                {ok, <<"\r\n">>} ->
+                    {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
+                _ ->
+                    exit(normal)
+            end;
+        {ok, Data} ->
+            {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
+        _ ->
+            exit(normal)
+    end,
+    stream_chunked_body(NewState);
+stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
+    % Empty our buffers and send data upstream.
+    BodyData = lists:reverse(Buf),
+    {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
+stream_chunked_body({trailers, MReq, Buf, BRem}) ->
+    % Read another trailer into the buffer or stop on an
+    % empty line.
+    Socket = MReq:get(socket),
+    mochiweb_socket:setopts(Socket, [{packet, line}]),
+    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
+        {ok, <<"\r\n">>} ->
+            mochiweb_socket:setopts(Socket, [{packet, raw}]),
+            BodyData = lists:reverse(Buf, <<"\r\n">>),
+            {ok, BodyData, eof};
+        {ok, Footer} ->
+            mochiweb_socket:setopts(Socket, [{packet, raw}]),
+            NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
+            stream_chunked_body(NewState);
+        _ ->
+            exit(normal)
+    end;
+stream_chunked_body(eof) ->
+    % Tell ibrowse we're done sending data.
+    eof.
+
+
+stream_length_body({init, MochiReq, Length}) ->
+    % Do the expect-continue dance
+    init_body_stream(MochiReq),
+    stream_length_body({stream, MochiReq, Length});
+stream_length_body({stream, _MochiReq, 0}) ->
+    % Finished streaming.
+    eof;
+stream_length_body({stream, MochiReq, Length}) ->
+    BufLen = lists:min([Length, ?PKT_SIZE]),
+    case MochiReq:recv(BufLen) of
+        <<>> -> eof;
+        Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
+    end.
+
+
+init_body_stream(MochiReq) ->
+    Expect = case MochiReq:get_header_value("expect") of
+        undefined ->
+            undefined;
+        Value when is_list(Value) ->
+            string:to_lower(Value)
+    end,
+    case Expect of
+        "100-continue" ->
+            MochiReq:start_raw_response({100, gb_trees:empty()});
+        _Else ->
+            ok
+    end.
+
+
+read_chunk_length(MochiReq) ->
+    Socket = MochiReq:get(socket),
+    mochiweb_socket:setopts(Socket, [{packet, line}]),
+    case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
+        {ok, Header} ->
+            mochiweb_socket:setopts(Socket, [{packet, raw}]),
+            Splitter = fun(C) ->
+                C =/= $\r andalso C =/= $\n andalso C =/= $\s
+            end,
+            {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
+            {mochihex:to_int(Hex), Header};
+        _ ->
+            exit(normal)
+    end.
+
+
+stream_response(Req, ProxyDest, ReqId) ->
+    receive
+        {ibrowse_async_headers, ReqId, "100", _} ->
+            % ibrowse doesn't handle 100 Continue responses which
+            % means we have to discard them so the proxy client
+            % doesn't get confused.
+            ibrowse:stream_next(ReqId),
+            stream_response(Req, ProxyDest, ReqId);
+        {ibrowse_async_headers, ReqId, Status, Headers} ->
+            {Source, Dest} = get_urls(Req, ProxyDest),
+            FixedHeaders = fix_headers(Source, Dest, Headers, []),
+            case body_length(FixedHeaders) of
+                chunked ->
+                    {ok, Resp} = couch_httpd:start_chunked_response(
+                        Req, list_to_integer(Status), FixedHeaders
+                    ),
+                    ibrowse:stream_next(ReqId),
+                    stream_chunked_response(Req, ReqId, Resp),
+                    {ok, Resp};
+                Length when is_integer(Length) ->
+                    {ok, Resp} = couch_httpd:start_response_length(
+                        Req, list_to_integer(Status), FixedHeaders, Length
+                    ),
+                    ibrowse:stream_next(ReqId),
+                    stream_length_response(Req, ReqId, Resp),
+                    {ok, Resp};
+                _ ->
+                    {ok, Resp} = couch_httpd:start_response(
+                        Req, list_to_integer(Status), FixedHeaders
+                    ),
+                    ibrowse:stream_next(ReqId),
+                    stream_length_response(Req, ReqId, Resp),
+                    % XXX: MochiWeb apparently doesn't look at the
+                    % response to see if it must force close the
+                    % connection. So we help it out here.
+                    erlang:put(mochiweb_request_force_close, true),
+                    {ok, Resp}
+            end
+    end.
+
+
+stream_chunked_response(Req, ReqId, Resp) ->
+    receive
+        {ibrowse_async_response, ReqId, {error, Reason}} ->
+            throw({error, Reason});
+        {ibrowse_async_response, ReqId, Chunk} ->
+            couch_httpd:send_chunk(Resp, Chunk),
+            ibrowse:stream_next(ReqId),
+            stream_chunked_response(Req, ReqId, Resp);
+        {ibrowse_async_response_end, ReqId} ->
+            couch_httpd:last_chunk(Resp)
+    end.
+
+
+stream_length_response(Req, ReqId, Resp) ->
+    receive
+        {ibrowse_async_response, ReqId, {error, Reason}} ->
+            throw({error, Reason});
+        {ibrowse_async_response, ReqId, Chunk} ->
+            couch_httpd:send(Resp, Chunk),
+            ibrowse:stream_next(ReqId),
+            stream_length_response(Req, ReqId, Resp);
+        {ibrowse_async_response_end, ReqId} ->
+            ok
+    end.
+
+
+get_urls(Req, ProxyDest) ->
+    SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
+    Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
+    case (catch parse_url(ProxyDest)) of
+        Dest when is_record(Dest, url) ->
+            {Source, Dest};
+        _ ->
+            DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
+            {Source, parse_url(DestUrl)}
+    end.
+
+
+fix_headers(_, _, [], Acc) ->
+    lists:reverse(Acc);
+fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
+    Fixed = case string:to_lower(K) of
+        "location" -> rewrite_location(Source, Dest, V);
+        "content-location" -> rewrite_location(Source, Dest, V);
+        "uri" -> rewrite_location(Source, Dest, V);
+        "destination" -> rewrite_location(Source, Dest, V);
+        "set-cookie" -> rewrite_cookie(Source, Dest, V);
+        _ -> V
+    end,
+    fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
+
+
+rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
+    case (catch parse_url(Url)) of
+        #url{host=Host, port=Port, protocol=Proto} = Location ->
+            DestLoc = #url{
+                protocol=Source#url.protocol,
+                host=Source#url.host,
+                port=Source#url.port,
+                path=join_url_path(Source#url.path, Location#url.path)
+            },
+            url_to_url(DestLoc);
+        #url{} ->
+            Url;
+        _ ->
+            url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
+    end.
+
+
+rewrite_cookie(_Source, _Dest, Cookie) ->
+    Cookie.
+
+
+parse_url(Url) when is_binary(Url) ->
+    ibrowse_lib:parse_url(?b2l(Url));
+parse_url(Url) when is_list(Url) ->
+    ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
+
+
+join_url_path(Src, Dst) ->
+    Src2 = case lists:reverse(Src) of
+        "/" ++ RestSrc -> lists:reverse(RestSrc);
+        _ -> Src
+    end,
+    Dst2 = case Dst of
+        "/" ++ RestDst -> RestDst;
+        _ -> Dst
+    end,
+    Src2 ++ "/" ++ Dst2.
+
+
+url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
+    LPort = case {Proto, Port} of
+        {http, 80} -> "";
+        {https, 443} -> "";
+        _ -> ":" ++ integer_to_list(Port)
+    end,
+    LPath = case Path of
+        "/" ++ _RestPath -> Path;
+        _ -> "/" ++ Path
+    end,
+    HostPart = case Url#url.host_type of
+        ipv6_address ->
+            "[" ++ Host ++ "]";
+        _ ->
+            Host
+    end,
+    atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
+
+
+body_length(Headers) ->
+    case is_chunked(Headers) of
+        true -> chunked;
+        _ -> content_length(Headers)
+    end.
+
+
+is_chunked([]) ->
+    false;
+is_chunked([{K, V} | Rest]) ->
+    case string:to_lower(K) of
+        "transfer-encoding" ->
+            string:to_lower(V) == "chunked";
+        _ ->
+            is_chunked(Rest)
+    end.
+
+content_length([]) ->
+    undefined;
+content_length([{K, V} | Rest]) ->
+    case string:to_lower(K) of
+        "content-length" ->
+            list_to_integer(V);
+        _ ->
+            content_length(Rest)
+    end.
+
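
For context, a sketch of how this handler is commonly wired up, assuming the httpd_global_handlers mechanism; the _fti name and target URL are illustrative:

    [httpd_global_handlers]
    _fti = {couch_httpd_proxy, handle_proxy_req, <<"http://127.0.0.1:5985">>}

With such a rule, a request to /_fti/foo is forwarded to http://127.0.0.1:5985/foo; get_url/2 strips the proxy prefix and fix_headers/4 rewrites Location-style response headers back to the source host.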

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_rewrite.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_rewrite.erl b/src/couch_httpd_rewrite.erl
new file mode 100644
index 0000000..1187397
--- /dev/null
+++ b/src/couch_httpd_rewrite.erl
@@ -0,0 +1,484 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+%
+% bind_path is based on bind method from Webmachine
+
+
+%% @doc Module for URL rewriting by pattern matching.
+
+-module(couch_httpd_rewrite).
+-export([handle_rewrite_req/3]).
+-include("couch_db.hrl").
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, {bind, <<"*">>}).
+
+
+%% doc The http rewrite handler. All rewriting is done from
+%% /dbname/_design/ddocname/_rewrite by default.
+%%
+%% Each rule should be in the rewrites member of the design doc.
+%% Example of a complete rule:
+%%
+%%  {
+%%      ....
+%%      "rewrites": [
+%%      {
+%%          "from": "",
+%%          "to": "index.html",
+%%          "method": "GET",
+%%          "query": {}
+%%      }
+%%      ]
+%%  }
+%%
+%%  from: the path rule used to bind the current uri to the rule. It
+%% uses pattern matching for that.
+%%
+%%  to: the rule used to rewrite the url. It can contain variables depending on
+%% binding variables discovered during pattern matching and query args (url
+%% args and the query member.)
+%%
+%%  method: the method used to bind the request method to the rule. "*" by
+%% default.
+%%  query: query args you want to define; they can contain dynamic variables
+%% by binding the key to the bindings.
+%%
+%%
+%% to and from are paths with patterns. A pattern can be a string starting
+%% with ":" or "*". ex:
+%% /somepath/:var/*
+%%
+%% This path is converted to an erlang list by splitting on "/". Each var is
+%% converted to an atom. "*" is converted to the '*' atom. The pattern matching
+%% is done by splitting the request url on "/" into a list of tokens. A string
+%% pattern will match an equal token. The star atom ('*' in single quotes) will
+%% match any number of tokens, but may only be present as the last pathterm in
+%% a pathspec. If all tokens are matched and all pathterms are used, then the
+%% pathspec matches. It works like webmachine. Each identified token will be
+%% reused in the to rule and in the query.
+%%
+%% The pattern matching is done by first matching the request method to a rule.
+%% By default all methods match a rule (method is equal to "*" by default).
+%% Then it will try to match the path to one rule. If no rule matches, a 404
+%% error is returned.
+%%
+%% Once a rule is found we rewrite the request url using the "to" and
+%% "query" members. The identified tokens are matched to the rule and
+%% will replace each var. If '*' is found in the rule it will contain the
+%% remaining part, if it exists.
+%%
+%% Examples:
+%%
+%% Dispatch rule            URL             TO                  Tokens
+%%
+%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
+%% "to": "/some/"}                                              k = v
+%%
+%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
+%% "to": "/some/:var"}
+%%
+%% {"from": "/a",           /a              /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/*",         /a/b/c          /some/b/c
+%% "to": "/some/*"}
+%%
+%% {"from": "/a",           /a              /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
+%% "to": "/some/:foo/*"}
+%%
+%% {"from": "/a/:foo",     /a/b             /some/?k=b&foo=b    foo =:= b
+%% "to": "/some",
+%%  "query": {
+%%      "k": ":foo"
+%%  }}
+%%
+%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
+%% "to": "/some/:foo",
+%%  }}
+
+
+
+handle_rewrite_req(#httpd{
+        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
+        method=Method,
+        mochi_req=MochiReq}=Req, _Db, DDoc) ->
+
+    % we are in a design handler
+    DesignId = <<"_design/", DesignName/binary>>,
+    Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
+    QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)),
+
+    RewritesSoFar = erlang:get(?REWRITE_COUNT),
+    MaxRewrites = list_to_integer(couch_config:get("httpd", "rewrite_limit", "100")),
+    case RewritesSoFar >= MaxRewrites of
+        true ->
+            throw({bad_request, <<"Exceeded rewrite recursion limit">>});
+        false ->
+            erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
+    end,
+
+    #doc{body={Props}} = DDoc,
+
+    % get rules from ddoc
+    case couch_util:get_value(<<"rewrites">>, Props) of
+        undefined ->
+            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
+                <<"Invalid path.">>);
+        Bin when is_binary(Bin) ->
+            couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
+                <<"Rewrite rules are a String. They must be a JSON Array.">>);
+        Rules ->
+            % create dispatch list from rules
+            DispatchList =  [make_rule(Rule) || {Rule} <- Rules],
+            Method1 = couch_util:to_binary(Method),
+
+            %% get raw path by matching url to a rule.
+            RawPath = case try_bind_path(DispatchList, Method1, 
+                    PathParts, QueryList) of
+                no_dispatch_path ->
+                    throw(not_found);
+                {NewPathParts, Bindings} ->
+                    Parts = [quote_plus(X) || X <- NewPathParts],
+
+                    % build new path, reencode query args, eventually convert
+                    % them to json
+                    Bindings1 = maybe_encode_bindings(Bindings),
+                    Path = binary_to_list(
+                        iolist_to_binary([
+                                string:join(Parts, [?SEPARATOR]),
+                                [["?", mochiweb_util:urlencode(Bindings1)] 
+                                    || Bindings1 =/= [] ]
+                            ])),
+                    
+                    % if path is relative detect it and rewrite path
+                    case mochiweb_util:safe_relative_path(Path) of
+                        undefined ->
+                            ?b2l(Prefix) ++ "/" ++ Path;
+                        P1 ->
+                            ?b2l(Prefix) ++ "/" ++ P1
+                    end
+
+                end,
+
+            % normalize final path (fix levels "." and "..")
+            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
+
+            % In order to do OAuth correctly, we have to save the
+            % requested path. We use default so chained rewriting
+            % won't replace the original header.
+            Headers = mochiweb_headers:default("x-couchdb-requested-path",
+                                             MochiReq:get(raw_path),
+                                             MochiReq:get(headers)),
+
+            ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
+
+            % build a new mochiweb request
+            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+                                             MochiReq:get(method),
+                                             RawPath1,
+                                             MochiReq:get(version),
+                                             Headers),
+
+            % cleanup; this forces mochiweb to reparse the raw uri.
+            MochiReq1:cleanup(),
+
+            #httpd{
+                db_url_handlers = DbUrlHandlers,
+                design_url_handlers = DesignUrlHandlers,
+                default_fun = DefaultFun,
+                url_handlers = UrlHandlers,
+                user_ctx = UserCtx,
+                auth = Auth
+            } = Req,
+
+            erlang:put(pre_rewrite_auth, Auth),
+            erlang:put(pre_rewrite_user_ctx, UserCtx),
+            couch_httpd:handle_request_int(MochiReq1, DefaultFun,
+                    UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
+        end.
+
+quote_plus({bind, X}) ->
+    mochiweb_util:quote_plus(X);
+quote_plus(X) ->
+    mochiweb_util:quote_plus(X).
+
+%% @doc Try to find a rule matching the current url. If none is found,
+%% a 404 not_found error is raised.
+try_bind_path([], _Method, _PathParts, _QueryList) ->
+    no_dispatch_path;
+try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
+    [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
+    case bind_method(Method1, Method) of
+        true ->
+            case bind_path(PathParts1, PathParts, []) of
+                {ok, Remaining, Bindings} ->
+                    Bindings1 = Bindings ++ QueryList,
+                    % we parse query args from the rule and fill
+                    % it eventually with bindings vars
+                    QueryArgs1 = make_query_list(QueryArgs, Bindings1,
+                        Formats, []),
+                    % remove params in QueryLists1 that are already in
+                    % QueryArgs1
+                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
+                        K1 = to_binding(K),
+                        KV = case couch_util:get_value(K1, QueryArgs1) of
+                            undefined -> [{K1, V}];
+                            _V1 -> []
+                        end,
+                        Acc ++ KV
+                    end, [], Bindings1),
+
+                    FinalBindings = Bindings2 ++ QueryArgs1,
+                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
+                                    Remaining, []),
+                    {NewPathParts, FinalBindings};
+                fail ->
+                    try_bind_path(Rest, Method, PathParts, QueryList)
+            end;
+        false ->
+            try_bind_path(Rest, Method, PathParts, QueryList)
+    end.
+
+%% Dynamically rewrite the query list given as the query member in
+%% rewrites. Each value is replaced by one binding or an argument
+%% passed in the url.
+make_query_list([], _Bindings, _Formats, Acc) ->
+    Acc;
+make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
+    Value1 = {Value},
+    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
+    Value1 = replace_var(Value, Bindings, Formats),
+    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
+    Value1 = replace_var(Value, Bindings, Formats),
+    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
+    make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
+
+replace_var(<<"*">>=Value, Bindings, Formats) ->
+    get_var(Value, Bindings, Value, Formats);
+replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
+    get_var(Var, Bindings, Value, Formats);
+replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
+    Value;
+replace_var(Value, Bindings, Formats) when is_list(Value) ->
+    lists:reverse(lists:foldl(fun
+                (<<":", Var/binary>>=Value1, Acc) ->
+                    [get_var(Var, Bindings, Value1, Formats)|Acc];
+                (Value1, Acc) ->
+                    [Value1|Acc]
+            end, [], Value));
+replace_var(Value, _Bindings, _Formats) ->
+    Value.
+                    
+maybe_json(Key, Value) ->
+    case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
+                <<"endkey">>, <<"end_key">>, <<"keys">>]) of
+        true ->
+            ?JSON_ENCODE(Value);
+        false ->
+            Value
+    end.
+
+get_var(VarName, Props, Default, Formats) ->
+    VarName1 = to_binding(VarName),
+    Val = couch_util:get_value(VarName1, Props, Default),
+    maybe_format(VarName, Val, Formats).
+
+maybe_format(VarName, Value, Formats) ->
+    case couch_util:get_value(VarName, Formats) of
+        undefined ->
+             Value;
+        Format ->
+            format(Format, Value)
+    end.
+
+format(<<"int">>, Value) when is_integer(Value) ->
+    Value;
+format(<<"int">>, Value) when is_binary(Value) ->
+    format(<<"int">>, ?b2l(Value));
+format(<<"int">>, Value) when is_list(Value) ->
+    case (catch list_to_integer(Value)) of
+        IntVal when is_integer(IntVal) ->
+            IntVal;
+        _ ->
+            Value
+    end;
+format(<<"bool">>, Value) when is_binary(Value) ->
+    format(<<"bool">>, ?b2l(Value));
+format(<<"bool">>, Value) when is_list(Value) ->
+    case string:to_lower(Value) of
+        "true" -> true;
+        "false" -> false;
+        _ -> Value
+    end;
+format(_Format, Value) ->
+    Value.
+
+%% doc: build new path from bindings. Bindings are query args
+%% (+ dynamic query rewritten if needed) and bindings found in
+%% the bind_path step.
+make_new_path([], _Bindings, _Remaining, Acc) ->
+    lists:reverse(Acc);
+make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
+    Acc1 = lists:reverse(Acc) ++ Remaining,
+    Acc1;
+make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+    Acc1 = lists:reverse(Acc) ++ Remaining,
+    Acc1;
+make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
+    P2 = case couch_util:get_value({bind, P}, Bindings) of
+        undefined -> << "undefined">>;
+        P1 -> 
+            iolist_to_binary(P1)
+    end,
+    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
+make_new_path([P|Rest], Bindings, Remaining, Acc) ->
+    make_new_path(Rest, Bindings, Remaining, [P|Acc]).
+
+
+%% @doc Check if the request method matches the rule method. If the
+%% method rule is '*', which is the default, all
+%% request methods will bind. It allows us to make rules
+%% depending on the HTTP method.
+bind_method(?MATCH_ALL, _Method ) ->
+    true;
+bind_method({bind, Method}, Method) ->
+    true;
+bind_method(_, _) ->
+    false.
+
+
+%% @doc bind path. Using the from rule we try to bind variables
+%% to the current url by pattern matching.
+bind_path([], [], Bindings) ->
+    {ok, [], Bindings};
+bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
+    {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
+bind_path(_, [], _) ->
+    fail;
+bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
+    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
+bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
+    bind_path(RestToken, RestMatch, Bindings);
+bind_path(_, _, _) ->
+    fail.
+
+
+%% normalize path.
+normalize_path(Path)  ->
+    "/" ++ string:join(normalize_path1(string:tokens(Path,
+                "/"), []), [?SEPARATOR]).
+
+
+normalize_path1([], Acc) ->
+    lists:reverse(Acc);
+normalize_path1([".."|Rest], Acc) ->
+    Acc1 = case Acc of
+        [] -> [".."|Acc];
+        [T|_] when T =:= ".." -> [".."|Acc];
+        [_|R] -> R
+    end,
+    normalize_path1(Rest, Acc1);
+normalize_path1(["."|Rest], Acc) ->
+    normalize_path1(Rest, Acc);
+normalize_path1([Path|Rest], Acc) ->
+    normalize_path1(Rest, [Path|Acc]).
+
+
+%% @doc transform a json rule into erlang terms for pattern matching
+make_rule(Rule) ->
+    Method = case couch_util:get_value(<<"method">>, Rule) of
+        undefined -> ?MATCH_ALL;
+        M -> to_binding(M)
+    end,
+    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
+        undefined -> [];
+        {Args} -> Args
+        end,
+    FromParts  = case couch_util:get_value(<<"from">>, Rule) of
+        undefined -> [?MATCH_ALL];
+        From ->
+            parse_path(From)
+        end,
+    ToParts  = case couch_util:get_value(<<"to">>, Rule) of
+        undefined ->
+            throw({error, invalid_rewrite_target});
+        To ->
+            parse_path(To)
+        end,
+    Formats = case couch_util:get_value(<<"formats">>, Rule) of
+        undefined -> [];
+        {Fmts} -> Fmts
+    end,
+    [{FromParts, Method}, ToParts, QueryArgs, Formats].
+
+parse_path(Path) ->
+    {ok, SlashRE} = re:compile(<<"\\/">>),
+    path_to_list(re:split(Path, SlashRE), [], 0).
+
+%% @doc convert a path rule (from or to) to an erlang list.
+%% "*" and path variables starting with ":" are converted
+%% to erlang atoms.
+path_to_list([], Acc, _DotDotCount) ->
+    lists:reverse(Acc);
+path_to_list([<<>>|R], Acc, DotDotCount) ->
+    path_to_list(R, Acc, DotDotCount);
+path_to_list([<<"*">>|R], Acc, DotDotCount) ->
+    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
+path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
+    case couch_config:get("httpd", "secure_rewrites", "true") of
+    "false" ->
+        path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+    _Else ->
+        ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
+        throw({insecure_rewrite_rule, "too many ../.. segments"})
+    end;
+path_to_list([<<"..">>|R], Acc, DotDotCount) ->
+    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+path_to_list([P|R], Acc, DotDotCount) ->
+    P1 = case P of
+        <<":", Var/binary>> ->
+            to_binding(Var);
+        _ -> P
+    end,
+    path_to_list(R, [P1|Acc], DotDotCount).
+
+maybe_encode_bindings([]) ->
+    [];
+maybe_encode_bindings(Props) -> 
+    lists:foldl(fun 
+            ({{bind, <<"*">>}, _V}, Acc) ->
+                Acc;
+            ({{bind, K}, V}, Acc) ->
+                V1 = iolist_to_binary(maybe_json(K, V)),
+                [{K, V1}|Acc]
+        end, [], Props).
+                
+decode_query_value({K,V}) ->
+    case lists:member(K, ["key", "startkey", "start_key",
+                "endkey", "end_key", "keys"]) of
+        true ->
+            {to_binding(K), ?JSON_DECODE(V)};
+        false ->
+            {to_binding(K), ?l2b(V)}
+    end.
+
+to_binding({bind, V}) ->
+    {bind, V};
+to_binding(V) when is_list(V) ->
+    to_binding(?l2b(V));
+to_binding(V) ->
+    {bind, V}.
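
A hypothetical design doc exercising the query and formats members handled by make_rule/1 above; the paths and names are illustrative:

    {
        "_id": "_design/app",
        "rewrites": [
            {
                "from": "/page/:id",
                "to": "/some/:id",
                "query": {"key": ":id"},
                "formats": {"id": "int"}
            }
        ]
    }

A GET of /db/_design/app/_rewrite/page/5 would then be rewritten to /db/_design/app/some/5?key=5: the :id binding is cast by format(<<"int">>, ...) and, because "key" is in the maybe_json/2 list, JSON-encoded into the query string.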

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_stats_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_stats_handlers.erl b/src/couch_httpd_stats_handlers.erl
new file mode 100644
index 0000000..d6973f6
--- /dev/null
+++ b/src/couch_httpd_stats_handlers.erl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_stats_handlers).
+-include("couch_db.hrl").
+
+-export([handle_stats_req/1]).
+-import(couch_httpd, [
+    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
+    start_json_response/2, send_chunk/2, end_json_response/1,
+    start_chunked_response/3, send_error/4
+]).
+
+handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+    flush(Req),
+    send_json(Req, couch_stats_aggregator:all(range(Req)));
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
+    flush(Req),
+    Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
+        list_to_atom(binary_to_list(Key))}, range(Req)),
+    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+handle_stats_req(Req) ->
+    send_method_not_allowed(Req, "GET").
+
+range(Req) ->
+    case couch_util:get_value("range", couch_httpd:qs(Req)) of
+        undefined ->
+            0;
+        Value ->
+            list_to_integer(Value)
+    end.
+
+flush(Req) ->
+    case couch_util:get_value("flush", couch_httpd:qs(Req)) of
+        "true" ->
+            couch_stats_aggregator:collect_sample();
+        _Else ->
+            ok
+    end.
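
The request shapes this handler accepts, per the path_parts clauses and the range/flush query parameters above; host, port, and the stat name are illustrative:

    curl 'http://127.0.0.1:5984/_stats'
    curl 'http://127.0.0.1:5984/_stats/httpd/requests?range=60&flush=true'

flush=true forces couch_stats_aggregator:collect_sample() before answering; range is parsed with list_to_integer and defaults to 0.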

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_vhost.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_vhost.erl b/src/couch_httpd_vhost.erl
new file mode 100644
index 0000000..4c3ebfe
--- /dev/null
+++ b/src/couch_httpd_vhost.erl
@@ -0,0 +1,383 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_vhost).
+-behaviour(gen_server).
+
+-export([start_link/0, config_change/2, reload/0, get_state/0, dispatch_host/1]).
+-export([urlsplit_netloc/2, redirect_to_vhost/2]).
+-export([host/1, split_host_port/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, {bind, '*'}).
+
+-record(vhosts_state, {
+        vhosts,
+        vhost_globals,
+        vhosts_fun}).
+
+%% doc the vhost manager.
+%% This gen_server keeps the state of vhosts added to the ini and tries to
+%% match the Host header (or forwarded host) against rules built from the
+%% vhost list.
+%%
+%% Declaration of vhosts takes place in the configuration file:
+%%
+%% [vhosts]
+%% example.com = /example
+%% *.example.com = /example
+%%
+%% The first line will rewrite the request to display the content of the
+%% example database. This rule works only if the Host header is
+%% 'example.com' and won't work for CNAMEs. The second rule, on the other
+%% hand, matches all CNAMEs to the example db, so www.example.com or
+%% db.example.com will work.
+%%
+%% The wildcard ('*') should always be the last in the cnames:
+%%
+%%      "*.db.example.com = /"  will match all cname on top of db
+%% examples to the root of the machine.
+%%
+%%
+%% Rewriting Hosts to path
+%% -----------------------
+%%
+%% Like in the _rewrite handler you can match some variables and use
+%% them to create the target path. Some examples:
+%%
+%%    [vhosts]
+%%    *.example.com = /*
+%%    :dbname.example.com = /:dbname
+%%    :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
+%%
+%% The first rule passes the wildcard as dbname. The second does the same
+%% but uses a variable name, and the third one allows you to use any app
+%% with :ddocname in any db with :dbname.
+%%
+%% You can also change the default function used to handle requests by
+%% changing the setting `redirect_vhost_handler` in the `httpd` section of
+%% the ini:
+%%
+%%    [httpd]
+%%    redirect_vhost_handler = {Module, Fun}
+%%
+%% The function takes 2 args: the mochiweb request object and the target
+%% path.
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @doc reload vhosts rules
+reload() ->
+    gen_server:call(?MODULE, reload).
+
+get_state() ->
+    gen_server:call(?MODULE, get_state).
+
+%% @doc Try to find a rule matching the current Host header. If a rule is
+%% found it rewrites the Mochiweb Request, else it returns the current Request.
+dispatch_host(MochiReq) ->
+    #vhosts_state{
+        vhost_globals = VHostGlobals,
+        vhosts = VHosts,
+        vhosts_fun=Fun} = get_state(),
+
+    {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
+    VPathParts =  string:tokens(VPath, "/"),
+
+    VHost = host(MochiReq),
+    {VHostParts, VhostPort} = split_host_port(VHost),
+    FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
+            VhostPort, VPathParts) of
+        no_vhost_matched -> MochiReq;
+        {VhostTarget, NewPath} ->
+            case vhost_global(VHostGlobals, MochiReq) of
+                true ->
+                    MochiReq;
+                _Else ->
+                    NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
+                                          Fragment}),
+                    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+                                      MochiReq:get(method),
+                                      NewPath1,
+                                      MochiReq:get(version),
+                                      MochiReq:get(headers)),
+                    Fun(MochiReq1, VhostTarget)
+            end
+    end,
+    FinalMochiReq.
+
+append_path("/"=_Target, "/"=_Path) ->
+    "/";
+append_path(Target, Path) ->
+    Target ++ Path.
+
+% default redirect vhost handler
+redirect_to_vhost(MochiReq, VhostTarget) ->
+    Path = MochiReq:get(raw_path),
+    Target = append_path(VhostTarget, Path),
+
+    ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]),
+
+    Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
+        MochiReq:get(headers)),
+
+    % build a new mochiweb request
+    MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+                                      MochiReq:get(method),
+                                      Target,
+                                      MochiReq:get(version),
+                                      Headers),
+    % cleanup; this forces mochiweb to reparse the raw uri.
+    MochiReq1:cleanup(),
+    MochiReq1.
+
+%% Check if the requested path is in the vhost globals list; if so, it will
+%% not be rewritten, but will run as a normal couchdb request. Normally
+%% you'd use this for _uuids, _utils and a few of the others you want to
+%% keep available on vhosts. You can also use it to make databases 'global'.
+vhost_global( VhostGlobals, MochiReq) ->
+    RawUri = MochiReq:get(raw_path),
+    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+
+    Front = case couch_httpd:partition(Path) of
+    {"", "", ""} ->
+        "/"; % Special case the root url handler
+    {FirstPart, _, _} ->
+        FirstPart
+    end,
+    [true] == [true||V <- VhostGlobals, V == Front].
+
+%% bind host
+%% first it tries to bind the port, then the hostname.
+try_bind_vhost([], _HostParts, _Port, _PathParts) ->
+    no_vhost_matched;
+try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
+    {{VHostParts, VPort, VPath}, Path} = VhostSpec,
+    case bind_port(VPort, Port) of
+        ok ->
+            case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
+                {ok, Bindings, Remainings} ->
+                    case bind_path(VPath, PathParts) of
+                        {ok, PathParts1} ->
+                            Path1 = make_target(Path, Bindings, Remainings, []),
+                            {make_path(Path1), make_path(PathParts1)};
+                        fail ->
+                            try_bind_vhost(Rest, HostParts, Port,
+                                PathParts)
+                    end;
+                fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+            end;
+        fail ->  try_bind_vhost(Rest, HostParts, Port, PathParts)
+    end.
+
+%% doc: build new path from bindings. Bindings are query args
+%% (+ dynamic query rewritten if needed) and bindings found in
+%% the bind_path step.
+%% TODO: merge code with rewrite. But we need to make sure we are
+%% working with strings here.
+make_target([], _Bindings, _Remaining, Acc) ->
+    lists:reverse(Acc);
+make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
+    Acc1 = lists:reverse(Acc) ++ Remaining,
+    Acc1;
+make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+    Acc1 = lists:reverse(Acc) ++ Remaining,
+    Acc1;
+make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
+    P2 = case couch_util:get_value({bind, P}, Bindings) of
+        undefined ->  "undefined";
+        P1 -> P1
+    end,
+    make_target(Rest, Bindings, Remaining, [P2|Acc]);
+make_target([P|Rest], Bindings, Remaining, Acc) ->
+    make_target(Rest, Bindings, Remaining, [P|Acc]).
+
+%% bind port
+bind_port(Port, Port) -> ok;
+bind_port('*', _) -> ok;
+bind_port(_,_) -> fail.
+
+%% bind vhost
+bind_vhost([],[], Bindings) -> {ok, Bindings, []};
+bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
+bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
+bind_vhost([], _HostParts, _Bindings) -> fail;
+bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
+    bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
+bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
+    bind_vhost(Rest, RestHost, Bindings);
+bind_vhost(_, _, _) -> fail.
+
+%% bind path
+bind_path([], PathParts) ->
+    {ok, PathParts};
+bind_path(_VPathParts, []) ->
+    fail;
+bind_path([Path|VRest],[Path|Rest]) ->
+   bind_path(VRest, Rest);
+bind_path(_, _) ->
+    fail.
+
+% utilities
+
+
+%% create vhost list from ini
+
+host(MochiReq) ->
+    XHost = couch_config:get("httpd", "x_forwarded_host",
+                             "X-Forwarded-Host"),
+    case MochiReq:get_header_value(XHost) of
+        undefined ->
+            case MochiReq:get_header_value("Host") of
+                undefined -> [];
+                Value1 -> Value1
+            end;
+        Value -> Value
+    end.
+
+make_vhosts() ->
+    Vhosts = lists:foldl(fun
+                ({_, ""}, Acc) ->
+                    Acc;
+                ({Vhost, Path}, Acc) ->
+                    [{parse_vhost(Vhost), split_path(Path)}|Acc]
+            end, [], couch_config:get("vhosts")),
+
+    lists:reverse(lists:usort(Vhosts)).
+
+
+parse_vhost(Vhost) ->
+    case urlsplit_netloc(Vhost, []) of
+        {[], Path} ->
+            {make_spec("*", []), '*', Path};
+        {HostPort, []} ->
+            {H, P} = split_host_port(HostPort),
+            H1 = make_spec(H, []),
+            {H1, P, []};
+        {HostPort, Path} ->
+            {H, P} = split_host_port(HostPort),
+            H1 = make_spec(H, []),
+            {H1, P, string:tokens(Path, "/")}
+    end.
+
+
+split_host_port(HostAsString) ->
+    case string:rchr(HostAsString, $:) of
+        0 ->
+            {split_host(HostAsString), '*'};
+        N ->
+            HostPart = string:substr(HostAsString, 1, N-1),
+            case (catch erlang:list_to_integer(string:substr(HostAsString,
+                            N+1, length(HostAsString)))) of
+                {'EXIT', _} ->
+                    {split_host(HostAsString), '*'};
+                Port ->
+                    {split_host(HostPart), Port}
+            end
+    end.
+
+split_host(HostAsString) ->
+    string:tokens(HostAsString, "\.").
+
+split_path(Path) ->
+    make_spec(string:tokens(Path, "/"), []).
+
+
+make_spec([], Acc) ->
+    lists:reverse(Acc);
+make_spec([""|R], Acc) ->
+    make_spec(R, Acc);
+make_spec(["*"|R], Acc) ->
+    make_spec(R, [?MATCH_ALL|Acc]);
+make_spec([P|R], Acc) ->
+    P1 = parse_var(P),
+    make_spec(R, [P1|Acc]).
+
+
+parse_var(P) ->
+    case P of
+        ":" ++ Var ->
+            {bind, Var};
+        _ -> P
+    end.
+
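+%% e.g. (editor's sketch):
+%%   parse_vhost("example.com:5984/db")
+%%     -> {["example", "com"], 5984, ["db"]}
+%%   parse_vhost(":db.example.com")
+%%     -> {[{bind, "db"}, "example", "com"], '*', []}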
+
+% mochiweb doesn't export this function, so it is duplicated here.
+urlsplit_netloc("", Acc) ->
+    {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+    {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+    urlsplit_netloc(Rest, [C | Acc]).
+
+make_path(Parts) ->
+     "/" ++ string:join(Parts,[?SEPARATOR]).
+
+init(_) ->
+    ok = couch_config:register(fun ?MODULE:config_change/2),
+
+    %% load configuration
+    {VHostGlobals, VHosts, Fun} = load_conf(),
+    State = #vhosts_state{
+        vhost_globals=VHostGlobals,
+        vhosts=VHosts,
+        vhosts_fun=Fun},
+    {ok, State}.
+
+handle_call(reload, _From, _State) ->
+    {VHostGlobals, VHosts, Fun} = load_conf(),
+    {reply, ok, #vhosts_state{
+            vhost_globals=VHostGlobals,
+            vhosts=VHosts,
+            vhosts_fun=Fun}};
+handle_call(get_state, _From, State) ->
+    {reply, State, State};
+handle_call(_Msg, _From, State) ->
+    {noreply, State}.
+
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+config_change("httpd", "vhost_global_handlers") ->
+    ?MODULE:reload();
+config_change("httpd", "redirect_vhost_handler") ->
+    ?MODULE:reload();
+config_change("vhosts", _) ->
+    ?MODULE:reload().
+
+load_conf() ->
+    %% get vhost globals
+    VHostGlobals = re:split(couch_config:get("httpd",
+            "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
+
+    %% build vhosts matching rules
+    VHosts = make_vhosts(),
+
+    %% build vhosts handler fun
+    DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
+    Fun = couch_httpd:make_arity_2_fun(couch_config:get("httpd",
+            "redirect_vhost_handler", DefaultVHostFun)),
+
+    {VHostGlobals, VHosts, Fun}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/src/couch_js_functions.hrl b/src/couch_js_functions.hrl
new file mode 100644
index 0000000..a48feae
--- /dev/null
+++ b/src/couch_js_functions.hrl
@@ -0,0 +1,170 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
+    function(newDoc, oldDoc, userCtx, secObj) {
+        if (newDoc._deleted === true) {
+            // allow deletes by admins and matching users
+            // without checking the other fields
+            if ((userCtx.roles.indexOf('_admin') !== -1) ||
+                (userCtx.name == oldDoc.name)) {
+                return;
+            } else {
+                throw({forbidden: 'Only admins may delete other user docs.'});
+            }
+        }
+
+        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
+            throw({forbidden : 'doc.type must be user'});
+        } // we only allow user docs for now
+
+        if (!newDoc.name) {
+            throw({forbidden: 'doc.name is required'});
+        }
+
+        if (!newDoc.roles) {
+            throw({forbidden: 'doc.roles must exist'});
+        }
+
+        if (!isArray(newDoc.roles)) {
+            throw({forbidden: 'doc.roles must be an array'});
+        }
+
+        for (var idx = 0; idx < newDoc.roles.length; idx++) {
+            if (typeof newDoc.roles[idx] !== 'string') {
+                throw({forbidden: 'doc.roles can only contain strings'});
+            }
+        }
+
+        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
+            throw({
+                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
+            });
+        }
+
+        if (oldDoc) { // validate all updates
+            if (oldDoc.name !== newDoc.name) {
+                throw({forbidden: 'Usernames can not be changed.'});
+            }
+        }
+
+        if (newDoc.password_sha && !newDoc.salt) {
+            throw({
+                forbidden: 'Users with password_sha must have a salt. ' +
+                    'See /_utils/script/couch.js for example code.'
+            });
+        }
+
+        if (newDoc.password_scheme === \"pbkdf2\") {
+            if (typeof(newDoc.iterations) !== \"number\") {
+               throw({forbidden: \"iterations must be a number.\"});
+            }
+            if (typeof(newDoc.derived_key) !== \"string\") {
+               throw({forbidden: \"derived_key must be a string.\"});
+            }
+        }
+
+        var is_server_or_database_admin = function(userCtx, secObj) {
+            // see if the user is a server admin
+            if(userCtx.roles.indexOf('_admin') !== -1) {
+                return true; // a server admin
+            }
+
+            // see if the user is a database admin specified by name
+            if(secObj && secObj.admins && secObj.admins.names) {
+                if(secObj.admins.names.indexOf(userCtx.name) !== -1) {
+                    return true; // database admin
+                }
+            }
+
+            // see if the user is a database admin specified by role
+            if(secObj && secObj.admins && secObj.admins.roles) {
+                var db_roles = secObj.admins.roles;
+                for(var idx = 0; idx < userCtx.roles.length; idx++) {
+                    var user_role = userCtx.roles[idx];
+                    if(db_roles.indexOf(user_role) !== -1) {
+                        return true; // role matches!
+                    }
+                }
+            }
+
+            return false; // default to no admin
+        };
+
+        if (!is_server_or_database_admin(userCtx, secObj)) {
+            if (oldDoc) { // validate non-admin updates
+                if (userCtx.name !== newDoc.name) {
+                    throw({
+                        forbidden: 'You may only update your own user document.'
+                    });
+                }
+                // validate role updates
+                var oldRoles = oldDoc.roles.sort();
+                var newRoles = newDoc.roles.sort();
+
+                if (oldRoles.length !== newRoles.length) {
+                    throw({forbidden: 'Only _admin may edit roles'});
+                }
+
+                for (var i = 0; i < oldRoles.length; i++) {
+                    if (oldRoles[i] !== newRoles[i]) {
+                        throw({forbidden: 'Only _admin may edit roles'});
+                    }
+                }
+            } else if (newDoc.roles.length > 0) {
+                throw({forbidden: 'Only _admin may set roles'});
+            }
+        }
+
+        // no system roles in users db
+        for (var i = 0; i < newDoc.roles.length; i++) {
+            if (newDoc.roles[i][0] === '_') {
+                throw({
+                    forbidden:
+                    'No system roles (starting with underscore) in users db.'
+                });
+            }
+        }
+
+        // no system names as usernames
+        if (newDoc.name[0] === '_') {
+            throw({forbidden: 'Username may not start with underscore.'});
+        }
+
+        var badUserNameChars = [':'];
+
+        for (var i = 0; i < badUserNameChars.length; i++) {
+            if (newDoc.name.indexOf(badUserNameChars[i]) >= 0) {
+                throw({forbidden: 'Character `' + badUserNameChars[i] +
+                        '` is not allowed in usernames.'});
+            }
+        }
+    }
+">>).
+
+
+-define(OAUTH_MAP_FUN, <<"
+    function(doc) {
+        if (doc.type === 'user' && doc.oauth && doc.oauth.consumer_keys) {
+            for (var consumer_key in doc.oauth.consumer_keys) {
+                for (var token in doc.oauth.tokens) {
+                    var obj = {
+                        'consumer_secret': doc.oauth.consumer_keys[consumer_key],
+                        'token_secret': doc.oauth.tokens[token],
+                        'username': doc.name
+                    };
+                    emit([consumer_key, token], obj);
+                }
+            }
+        }
+    }
+">>).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_key_tree.erl
----------------------------------------------------------------------
diff --git a/src/couch_key_tree.erl b/src/couch_key_tree.erl
new file mode 100644
index 0000000..ce45ab8
--- /dev/null
+++ b/src/couch_key_tree.erl
@@ -0,0 +1,422 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% @doc Data structure used to represent document edit histories.
+
+%% A key tree is used to represent the edit history of a document. Each node of
+%% the tree represents a particular version. Relations between nodes represent
+%% the order that these edits were applied. For instance, a set of three edits
+%% would produce a tree of versions A->B->C indicating that edit C was based on
+%% version B which was in turn based on A. In a world without replication (and
+%% no ability to disable MVCC checks), all histories would be forced to be
+%% linear lists of edits due to constraints imposed by MVCC (i.e., new edits
+%% must be based on the current version). However, we have replication, so we
+%% must deal with less straightforward cases, which lead to trees.
+%%
+%% Consider a document in state A. This doc is replicated to a second node. We
+%% then edit the document on each node leaving it in two different states, B
+%% and C. We now have two key trees, A->B and A->C. When we go to replicate a
+%% second time, the key tree must combine these two trees which gives us
+%% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we
+%% say that we have two leaves (B and C) that are not deleted. The presence of
+%% multiple leaves indicates a conflict. To remove a conflict, one of the
+%% edits (B or C) can be deleted, which results in A->(B|C->D) where D is an
+%% edit that is specially marked with a deleted=true flag.
+%%
+%% What makes this a bit more complicated is that there is a limit to the
+%% number of revisions kept, specified in couch_db.hrl (default is 1000). When
+%% this limit is exceeded, only the last 1000 are kept. This comes into play
+%% when branches are merged. The comparison has to begin at the same place in
+%% the branches. A revision id is of the form N-XXXXXXX where N is the current
+%% revision. So each path will have a start number, calculated in
+%% couch_doc:to_path using the formula N - length(RevIds) + 1. So, e.g., if a
+%% doc was edited 1003 times, this start number would be 4, indicating that 3
+%% revisions were truncated.
+%%
+%% This comes into play in @see merge_at/3 which recursively walks down one
+%% tree or the other until they begin at the same revision.
+
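+%% As a concrete sketch (editor's note; V stands in for whatever value is
+%% stored at a node), the history A->(B|C) described above is represented as
+%%
+%%   [{1, {"A", V, [{"B", V, []},
+%%                  {"C", V, []}]}}]
+%%
+%% i.e. a list of {StartPos, Tree} pairs, where each node is a
+%% {Key, Value, ChildTrees} tuple and a leaf has [] children.
+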
+-module(couch_key_tree).
+
+-export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
+-export([get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1, stem/2]).
+-export([map/2, mapfold/3, map_leafs/2, fold/3]).
+
+-include("couch_db.hrl").
+
+%% @doc Merge a path with a list of paths and stem to the given length.
+-spec merge([path()], path(), pos_integer()) -> {[path()],
+    conflicts | no_conflicts}.
+merge(Paths, Path, Depth) ->
+    {Merged, Conflicts} = merge(Paths, Path),
+    {stem(Merged, Depth), Conflicts}.
+
+%% @doc Merge a path with an existing list of paths, returning a new list of
+%% paths. A return of conflicts indicates a new conflict was discovered in this
+%% merge. Conflicts may already exist in the original list of paths.
+-spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}.
+merge(Paths, Path) ->
+    {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false),
+    if HasConflicts ->
+        Conflicts = conflicts;
+    (length(Merged) =/= length(Paths)) and (length(Merged) =/= 1) ->
+        Conflicts = conflicts;
+    true ->
+        Conflicts = no_conflicts
+    end,
+    {lists:sort(Merged), Conflicts}.
+
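+% For example (editor's sketch, V a placeholder value), merging the two
+% single-edit histories A->B and A->C from the module doc:
+%
+%   merge([{1, {"A", V, [{"B", V, []}]}}],
+%         {1, {"A", V, [{"C", V, []}]}}, 10)
+%
+% returns {[{1, {"A", V, [{"B", V, []}, {"C", V, []}]}}], conflicts}.
+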
+-spec merge_one(Original::[path()], Inserted::path(), [path()], boolean()) ->
+    {ok, Merged::[path()], NewConflicts::boolean()}.
+merge_one([], Insert, OutAcc, ConflictsAcc) ->
+    {ok, [Insert | OutAcc], ConflictsAcc};
+merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) ->
+    case merge_at([Tree], StartInsert - Start, [TreeInsert]) of
+    {ok, [Merged], Conflicts} ->
+        MergedStart = lists:min([Start, StartInsert]),
+        {ok, Rest ++ [{MergedStart, Merged} | Acc], Conflicts or HasConflicts};
+    no ->
+        AccOut = [{Start, Tree} | Acc],
+        merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts)
+    end.
+
+-spec merge_at(tree(), Place::integer(), tree()) ->
+    {ok, Merged::tree(), HasConflicts::boolean()} | no.
+merge_at(_Ours, _Place, []) ->
+    no;
+merge_at([], _Place, _Insert) ->
+    no;
+merge_at([{Key, Value, SubTree}|Sibs], Place, InsertTree) when Place > 0 ->
+    % inserted starts later than committed, need to drill into committed subtree
+    case merge_at(SubTree, Place - 1, InsertTree) of
+    {ok, Merged, Conflicts} ->
+        {ok, [{Key, Value, Merged} | Sibs], Conflicts};
+    no ->
+        % first branch didn't merge, move to next branch
+        case merge_at(Sibs, Place, InsertTree) of
+        {ok, Merged, Conflicts} ->
+            {ok, [{Key, Value, SubTree} | Merged], Conflicts};
+        no ->
+            no
+        end
+    end;
+merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 ->
+    % inserted starts earlier than committed, need to drill into insert subtree
+    case merge_at(OurTree, Place + 1, SubTree) of
+    {ok, Merged, Conflicts} ->
+        {ok, [{Key, Value, Merged}], Conflicts};
+    no ->
+        no
+    end;
+merge_at([{Key, V1, SubTree}|Sibs], 0, [{Key, V2, InsertSubTree}]) ->
+    {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree),
+    {ok, [{Key, value_pref(V1, V2), Merged} | Sibs], Conflicts};
+merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key ->
+    % sibling keys are ordered, no point in continuing
+    no;
+merge_at([Tree | Sibs], 0, InsertTree) ->
+    case merge_at(Sibs, 0, InsertTree) of
+    {ok, Merged, Conflicts} ->
+        {ok, [Tree | Merged], Conflicts};
+    no ->
+        no
+    end.
+
+% key tree functions
+
+-spec merge_simple(tree(), tree()) -> {Merged::tree(), NewConflicts::boolean()}.
+merge_simple([], B) ->
+    {B, false};
+merge_simple(A, []) ->
+    {A, false};
+merge_simple([{Key, V1, SubA} | NextA], [{Key, V2, SubB} | NextB]) ->
+    {MergedSubTree, Conflict1} = merge_simple(SubA, SubB),
+    {MergedNextTree, Conflict2} = merge_simple(NextA, NextB),
+    Value = value_pref(V1, V2),
+    {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2};
+merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B ->
+    {Merged, Conflict} = merge_simple(Next, Insert),
+    % if Merged has more branches than the input we added a new conflict
+    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))};
+merge_simple(Ours, [Tree | Next]) ->
+    {Merged, Conflict} = merge_simple(Ours, Next),
+    {[Tree | Merged], Conflict orelse (length(Merged) > length(Next))}.
+
+find_missing(_Tree, []) ->
+    [];
+find_missing([], SearchKeys) ->
+    SearchKeys;
+find_missing([{Start, {Key, Value, SubTree}} | RestTree], SearchKeys) ->
+    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Start],
+    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Start],
+    Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
+    find_missing(RestTree, ImpossibleKeys ++ Missing).
+
+find_missing_simple(_Pos, _Tree, []) ->
+    [];
+find_missing_simple(_Pos, [], SearchKeys) ->
+    SearchKeys;
+find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SearchKeys) ->
+    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Pos],
+    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Pos],
+
+    SrcKeys2 = PossibleKeys -- [{Pos, Key}],
+    SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
+    ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
+
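+% e.g. (editor's sketch, V a placeholder value): with the tree
+%   [{1, {"A", V, [{"B", V, []}]}}]
+% find_missing(Tree, [{2, "B"}, {2, "X"}]) returns [{2, "X"}], since only
+% revision {2, "B"} exists in the tree.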
+
+filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
+    {FilteredAcc, RemovedKeysAcc};
+filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
+    FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
+    if FilteredKeys == Keys ->
+        % this leaf is not a key we are looking to remove
+        filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
+    true ->
+        % this did match a key, remove both the node and the input key
+        filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
+    end.
+
+% Removes any branches from the tree whose leaf node(s) are in the Keys list
+remove_leafs(Trees, Keys) ->
+    % flatten each branch in a tree into a tree path
+    Paths = get_all_leafs_full(Trees),
+
+    % filter out any that are in the keys list.
+    {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
+
+    SortedPaths = lists:sort(
+        [{Pos + 1 - length(Path), Path} || {Pos, Path} <- FilteredPaths]
+    ),
+
+    % convert paths back to trees
+    NewTree = lists:foldl(
+        fun({StartPos, Path},TreeAcc) ->
+            [SingleTree] = lists:foldl(
+                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
+            NewTrees
+        end, [], SortedPaths),
+    {NewTree, RemovedKeys}.
+
+
+% get the leafs in the tree matching the keys. The matching key nodes can be
+% leafs or inner nodes. If an inner node, then the leafs under that node
+% are returned.
+get_key_leafs(Tree, Keys) ->
+    get_key_leafs(Tree, Keys, []).
+
+get_key_leafs(_, [], Acc) ->
+    {Acc, []};
+get_key_leafs([], Keys, Acc) ->
+    {Acc, Keys};
+get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
+    {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
+    get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
+
+get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
+    {[], []};
+get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
+    {[], KeysToGet};
+get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
+    case lists:delete({Pos, Key}, KeysToGet) of
+    KeysToGet -> % same list, key not found
+        {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
+        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
+        {LeafsFound ++ RestLeafsFound, KeysRemaining};
+    KeysToGet2 ->
+        LeafsFound = get_all_leafs_simple(Pos, [Tree], KeyPathAcc),
+        LeafKeysFound = [{LeafPos, LeafRev} || {_, {LeafPos, [LeafRev|_]}}
+            <- LeafsFound],
+        KeysToGet3 = KeysToGet2 -- LeafKeysFound,
+        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet3, KeyPathAcc),
+        {LeafsFound ++ RestLeafsFound, KeysRemaining}
+    end.
+
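+% e.g. (editor's sketch, V a placeholder value):
+%   get_key_leafs([{1, {"A", V, [{"B", V, []}]}}], [{2, "B"}])
+%   returns {[{V, {2, ["B", "A"]}}], []}
+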
+get(Tree, KeysToGet) ->
+    {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
+    FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
+    {FixedResults, KeysNotFound}.
+
+get_full_key_paths(Tree, Keys) ->
+    get_full_key_paths(Tree, Keys, []).
+
+get_full_key_paths(_, [], Acc) ->
+    {Acc, []};
+get_full_key_paths([], Keys, Acc) ->
+    {Acc, Keys};
+get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
+    {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
+    get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
+
+
+get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
+    {[], []};
+get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
+    {[], KeysToGet};
+get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
+    KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
+    CurrentNodeResult =
+    case length(KeysToGet2) =:= length(KeysToGet) of
+    true -> % not in the key list.
+        [];
+    false -> % this node is in the key list; return it
+        [{Pos, [{KeyId, Value} | KeyPathAcc]}]
+    end,
+    {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
+    {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
+    {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
+
+get_all_leafs_full(Tree) ->
+    get_all_leafs_full(Tree, []).
+
+get_all_leafs_full([], Acc) ->
+    Acc;
+get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
+    get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
+
+get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
+    [];
+get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
+    [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
+get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
+    get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
+
+get_all_leafs(Trees) ->
+    get_all_leafs(Trees, []).
+
+get_all_leafs([], Acc) ->
+    Acc;
+get_all_leafs([{Pos, Tree}|Rest], Acc) ->
+    get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
+
+get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
+    [];
+get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
+    [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
+get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
+    get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
+
+
+count_leafs([]) ->
+    0;
+count_leafs([{_Pos,Tree}|Rest]) ->
+    count_leafs_simple([Tree]) + count_leafs(Rest).
+
+count_leafs_simple([]) ->
+    0;
+count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
+    1 + count_leafs_simple(RestTree);
+count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
+    count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
+
+
+fold(_Fun, Acc, []) ->
+    Acc;
+fold(Fun, Acc0, [{Pos, Tree}|Rest]) ->
+    Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
+    fold(Fun, Acc1, Rest).
+
+fold_simple(_Fun, Acc, _Pos, []) ->
+    Acc;
+fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
+    Type = if SubTree == [] -> leaf; true -> branch end,
+    Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
+    Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree),
+    fold_simple(Fun, Acc2, Pos, RestTree).
+
+
+map(_Fun, []) ->
+    [];
+map(Fun, [{Pos, Tree}|Rest]) ->
+    case erlang:fun_info(Fun, arity) of
+    {arity, 2} ->
+        [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
+        [{Pos, NewTree} | map(Fun, Rest)];
+    {arity, 3} ->
+        [NewTree] = map_simple(Fun, Pos, [Tree]),
+        [{Pos, NewTree} | map(Fun, Rest)]
+    end.
+
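+% e.g. (editor's sketch): an arity-2 fun ignores the node type:
+%   map(fun({_Pos, _Key}, Value) -> Value end, Tree)
+% an arity-3 fun additionally receives the atom leaf or branch.
+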
+map_simple(_Fun, _Pos, []) ->
+    [];
+map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
+    Value2 = Fun({Pos, Key}, Value,
+            if SubTree == [] -> leaf; true -> branch end),
+    [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
+
+
+mapfold(_Fun, Acc, []) ->
+    {[], Acc};
+mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
+    {[NewTree], Acc2} = mapfold_simple(Fun, Acc, Pos, [Tree]),
+    {Rest2, Acc3} = mapfold(Fun, Acc2, Rest),
+    {[{Pos, NewTree} | Rest2], Acc3}.
+
+mapfold_simple(_Fun, Acc, _Pos, []) ->
+    {[], Acc};
+mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
+    {Value2, Acc2} = Fun({Pos, Key}, Value,
+            if SubTree == [] -> leaf; true -> branch end, Acc),
+    {SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
+    {RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
+    {[{Key, Value2, SubTree2} | RestTree2], Acc4}.
+
+
+map_leafs(_Fun, []) ->
+    [];
+map_leafs(Fun, [{Pos, Tree}|Rest]) ->
+    [NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
+    [{Pos, NewTree} | map_leafs(Fun, Rest)].
+
+map_leafs_simple(_Fun, _Pos, []) ->
+    [];
+map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
+    Value2 = Fun({Pos, Key}, Value),
+    [{Key, Value2, []} | map_leafs_simple(Fun, Pos, RestTree)];
+map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
+    [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
+
+
+stem(Trees, Limit) ->
+    % flatten each branch in a tree into a tree path, sort by starting rev #
+    Paths = lists:sort(lists:map(fun({Pos, Path}) ->
+        StemmedPath = lists:sublist(Path, Limit),
+        {Pos + 1 - length(StemmedPath), StemmedPath}
+    end, get_all_leafs_full(Trees))),
+
+    % convert paths back to trees
+    lists:foldl(
+        fun({StartPos, Path},TreeAcc) ->
+            [SingleTree] = lists:foldl(
+                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+            {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
+            NewTrees
+        end, [], Paths).
+
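+% e.g. (editor's sketch, V a placeholder value): stemming A->B->C to the two
+% most recent revisions drops A and bumps the start position:
+%   stem([{1, {"A", V, [{"B", V, [{"C", V, []}]}]}}], 2)
+%   returns [{2, {"B", V, [{"C", V, []}]}}]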
+
+value_pref(Tuple, _) when is_tuple(Tuple),
+        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+    Tuple;
+value_pref(_, Tuple) when is_tuple(Tuple),
+        (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+    Tuple;
+value_pref(?REV_MISSING, Other) ->
+    Other;
+value_pref(Other, ?REV_MISSING) ->
+    Other;
+value_pref(Last, _) ->
+    Last.
+
+
+% Tests moved to test/etap/06?-*.t
+

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_log.erl
----------------------------------------------------------------------
diff --git a/src/couch_log.erl b/src/couch_log.erl
new file mode 100644
index 0000000..cd4bbbb
--- /dev/null
+++ b/src/couch_log.erl
@@ -0,0 +1,254 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log).
+-behaviour(gen_event).
+
+% public API
+-export([start_link/0, stop/0]).
+-export([debug/2, info/2, warn/2, error/2]).
+-export([debug_on/0, info_on/0, warn_on/0, get_level/0, get_level_integer/0, set_level/1]).
+-export([debug_on/1, info_on/1, warn_on/1, get_level/1, get_level_integer/1, set_level/2]).
+-export([read/2]).
+
+% gen_event callbacks
+-export([init/1, handle_event/2, terminate/2, code_change/3]).
+-export([handle_info/2, handle_call/2]).
+
+-define(LEVEL_ERROR, 4).
+-define(LEVEL_WARN, 3).
+-define(LEVEL_INFO, 2).
+-define(LEVEL_DEBUG, 1).
+
+-record(state, {
+    fd,
+    level,
+    sasl
+}).
+
+debug(Format, Args) ->
+    {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args),
+    gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg}).
+
+info(Format, Args) ->
+    {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args),
+    gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg}).
+
+warn(Format, Args) ->
+    {ConsoleMsg, FileMsg} = get_log_messages(self(), warn, Format, Args),
+    gen_event:sync_notify(error_logger, {couch_warn, ConsoleMsg, FileMsg}).
+
+error(Format, Args) ->
+    {ConsoleMsg, FileMsg} = get_log_messages(self(), error, Format, Args),
+    gen_event:sync_notify(error_logger, {couch_error, ConsoleMsg, FileMsg}).
+
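+% Example usage (editor's sketch; the messages and level are hypothetical):
+%
+%   couch_log:set_level(debug),
+%   couch_log:debug("opening ~s", [Filename]),
+%   case couch_log:info_on() of
+%       true -> couch_log:info("compaction complete", []);
+%       false -> ok
+%   end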
+
+level_integer(error)    -> ?LEVEL_ERROR;
+level_integer(warn)     -> ?LEVEL_WARN;
+level_integer(info)     -> ?LEVEL_INFO;
+level_integer(debug)    -> ?LEVEL_DEBUG;
+level_integer(_Else)    -> ?LEVEL_ERROR. % anything else defaults to the ERROR level
+
+level_atom(?LEVEL_ERROR) -> error;
+level_atom(?LEVEL_WARN) -> warn;
+level_atom(?LEVEL_INFO) -> info;
+level_atom(?LEVEL_DEBUG) -> debug.
+
+
+start_link() ->
+    couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, []).
+
+stop() ->
+    couch_event_sup:stop(couch_log).
+
+init([]) ->
+    % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+    % will restart us and then we will pick up the new settings.
+    ok = couch_config:register(
+        fun("log", "file") ->
+            ?MODULE:stop();
+        ("log", "level") ->
+            ?MODULE:stop();
+        ("log", "include_sasl") ->
+            ?MODULE:stop();
+        ("log_level_by_module", _) ->
+            ?MODULE:stop()
+        end),
+
+    Filename = couch_config:get("log", "file", "couchdb.log"),
+    Level = level_integer(list_to_atom(couch_config:get("log", "level", "info"))),
+    Sasl = couch_config:get("log", "include_sasl", "true") =:= "true",
+    LevelByModule = couch_config:get("log_level_by_module"),
+
+    case ets:info(?MODULE) of
+    undefined -> ets:new(?MODULE, [named_table]);
+    _ -> ok
+    end,
+    ets:insert(?MODULE, {level, Level}),
+    lists:foreach(fun({Module, ModuleLevel}) ->
+        ModuleLevelInteger = level_integer(list_to_atom(ModuleLevel)),
+        ets:insert(?MODULE, {Module, ModuleLevelInteger})
+    end, LevelByModule),
+
+
+    case file:open(Filename, [append]) of
+    {ok, Fd} ->
+        {ok, #state{fd = Fd, level = Level, sasl = Sasl}};
+    {error, Reason} ->
+        ReasonStr = file:format_error(Reason),
+        io:format("Error opening log file ~s: ~s", [Filename, ReasonStr]),
+        {stop, {error, ReasonStr, Filename}}
+    end.
+
+debug_on() ->
+    get_level_integer() =< ?LEVEL_DEBUG.
+
+info_on() ->
+    get_level_integer() =< ?LEVEL_INFO.
+
+warn_on() ->
+    get_level_integer() =< ?LEVEL_WARN.
+
+debug_on(Module) ->
+    get_level_integer(Module) =< ?LEVEL_DEBUG.
+
+info_on(Module) ->
+    get_level_integer(Module) =< ?LEVEL_INFO.
+
+warn_on(Module) ->
+    get_level_integer(Module) =< ?LEVEL_WARN.
+
+set_level(LevelAtom) ->
+    set_level_integer(level_integer(LevelAtom)).
+
+set_level(Module, LevelAtom) ->
+    set_level_integer(Module, level_integer(LevelAtom)).
+
+get_level() ->
+    level_atom(get_level_integer()).
+
+get_level(Module) ->
+    level_atom(get_level_integer(Module)).
+
+get_level_integer() ->
+    try
+        ets:lookup_element(?MODULE, level, 2)
+    catch error:badarg ->
+        ?LEVEL_ERROR
+    end.
+
+get_level_integer(Module0) ->
+    Module = atom_to_list(Module0),
+    try
+        [{_Module, Level}] = ets:lookup(?MODULE, Module),
+        Level
+    catch error:_ ->
+        get_level_integer()
+    end.
+
+set_level_integer(Int) ->
+    gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
+
+set_level_integer(Module, Int) ->
+    gen_event:call(error_logger, couch_log, {set_level_integer, Module, Int}).
+
+handle_event({couch_error, ConMsg, FileMsg}, State) ->
+    log(State, ConMsg, FileMsg),
+    {ok, State};
+handle_event({couch_warn, ConMsg, FileMsg}, State) ->
+    log(State, ConMsg, FileMsg),
+    {ok, State};
+handle_event({couch_info, ConMsg, FileMsg}, State) ->
+    log(State, ConMsg, FileMsg),
+    {ok, State};
+handle_event({couch_debug, ConMsg, FileMsg}, State) ->
+    log(State, ConMsg, FileMsg),
+    {ok, State};
+handle_event({error_report, _, {Pid, _, _}}=Event, #state{sasl = true} = St) ->
+    {ConMsg, FileMsg} = get_log_messages(Pid, error, "~p", [Event]),
+    log(St, ConMsg, FileMsg),
+    {ok, St};
+handle_event({error, _, {Pid, Format, Args}}, #state{sasl = true} = State) ->
+    {ConMsg, FileMsg} = get_log_messages(Pid, error, Format, Args),
+    log(State, ConMsg, FileMsg),
+    {ok, State};
+handle_event(_Event, State) ->
+    {ok, State}.
+
+handle_call({set_level_integer, NewLevel}, State) ->
+    ets:insert(?MODULE, {level, NewLevel}),
+    {ok, ok, State#state{level = NewLevel}};
+
+handle_call({set_level_integer, Module, NewLevel}, State) ->
+    ets:insert(?MODULE, {Module, NewLevel}),
+    {ok, ok, State#state{level = NewLevel}}.
+
+handle_info(_Info, State) ->
+    {ok, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+terminate(_Arg, #state{fd = Fd}) ->
+    file:close(Fd).
+
+log(#state{fd = Fd}, ConsoleMsg, FileMsg) ->
+    ok = io:put_chars(ConsoleMsg),
+    ok = io:put_chars(Fd, FileMsg).
+
+get_log_messages(Pid, Level, Format, Args) ->
+    ConsoleMsg = unicode:characters_to_binary(io_lib:format(
+        "[~s] [~p] " ++ Format ++ "~n", [Level, Pid | Args])),
+    FileMsg = ["[", couch_util:rfc1123_date(), "] ", ConsoleMsg],
+    {ConsoleMsg, iolist_to_binary(FileMsg)}.
+
+
+% Read Bytes bytes from the end of the log file, jumping Offset bytes towards
+% the beginning of the file first.
+%
+%  Log File    FilePos
+%  ----------
+% |          |  10
+% |          |  20
+% |          |  30
+% |          |  40
+% |          |  50
+% |          |  60
+% |          |  70 -- Bytes = 20  --
+% |          |  80                 | Chunk
+% |          |  90 -- Offset = 10 --
+% |__________| 100
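+%
+% e.g. with the 100-byte file sketched above, read(20, 10) computes
+% Start = max(100 - 20 - 10, 0) = 70 and returns the 20 bytes covering
+% positions 70..90 (the "Chunk" region).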
+
+read(Bytes, Offset) ->
+    LogFileName = couch_config:get("log", "file"),
+    LogFileSize = filelib:file_size(LogFileName),
+    MaxChunkSize = list_to_integer(
+        couch_config:get("httpd", "log_max_chunk_size", "1000000")),
+    case Bytes > MaxChunkSize of
+    true ->
+        throw({bad_request, "'bytes' cannot exceed " ++
+            integer_to_list(MaxChunkSize)});
+    false ->
+        ok
+    end,
+
+    {ok, Fd} = file:open(LogFileName, [read]),
+    Start = lists:max([LogFileSize - Bytes - Offset, 0]),
+
+    % TODO: truncate chopped first line
+    % TODO: make streaming
+
+    {ok, Chunk} = file:pread(Fd, Start, Bytes),
+    ok = file:close(Fd),
+    Chunk.


[23/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
remove Makefile.am from apps/ and fix couchspawnkillable


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/4c23323e
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/4c23323e
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/4c23323e

Branch: refs/heads/import-rcouch
Commit: 4c23323ee1fb7456368c02785618cca8b0a61f7c
Parents: 70ce400
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 19:15:43 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/spawnkillable/couchspawnkillable_win.c | 145 ++++++++++++++++
 priv/Makefile.am                             | 151 -----------------
 priv/couchspawnkillable.sh                   |  20 +++
 priv/spawnkillable/couchspawnkillable.sh     |  20 ---
 priv/spawnkillable/couchspawnkillable_win.c  | 145 ----------------
 rebar.config.script                          |  13 +-
 src/Makefile.am                              | 198 ----------------------
 7 files changed, 177 insertions(+), 515 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/c_src/spawnkillable/couchspawnkillable_win.c
----------------------------------------------------------------------
diff --git a/c_src/spawnkillable/couchspawnkillable_win.c b/c_src/spawnkillable/couchspawnkillable_win.c
new file mode 100644
index 0000000..0678231
--- /dev/null
+++ b/c_src/spawnkillable/couchspawnkillable_win.c
@@ -0,0 +1,145 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License.  You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do what the 2 lines of shell script in couchspawnkillable do...
+// * Create a new suspended process with the same (duplicated) standard 
+//   handles as us.
+// * Write a line to stdout, consisting of the path to ourselves, plus
+//   '--kill {pid}' where {pid} is the PID of the newly created process.
+// * Un-suspend the new process.
+// * Wait for the process to terminate.
+// * Terminate with the child's exit-code.
+
+// Later, couch will call us with --kill and the PID, so we dutifully
+// terminate the specified PID.
+
+#include <stdlib.h>
+#include "windows.h"
+
+char *get_child_cmdline(int argc, char **argv)
+{
+    // make a new command-line, but skipping me.
+    // XXX - todo - spaces etc in args???
+    int i;
+    char *p, *cmdline;
+    int nchars = 0;
+    int nthis = 1;
+    for (i=1;i<argc;i++)
+        nchars += strlen(argv[i])+1;
+    cmdline = p = malloc(nchars+1);
+    if (!cmdline)
+        return NULL;
+    for (i=1;i<argc;i++) {
+        nthis = strlen(argv[i]);
+        strncpy(p, argv[i], nthis);
+        p[nthis] = ' ';
+        p += nthis+1;
+    }
+    // Replace the last space we added above with a '\0'
+    cmdline[nchars-1] = '\0';
+    return cmdline;
+}
+
+// create the child process, returning 0, or the exit-code we will
+// terminate with.
+int create_child(int argc, char **argv, PROCESS_INFORMATION *pi)
+{
+    char buf[1024];
+    DWORD dwcreate;
+    STARTUPINFO si;
+    char *cmdline;
+    if (argc < 2)
+        return 1;
+    cmdline = get_child_cmdline(argc, argv);
+    if (!cmdline)
+        return 2;
+
+    memset(&si, 0, sizeof(si));
+    si.cb = sizeof(si);
+    // depending on how *our* parent is started, we may or may not have
+    // a valid stderr stream - so although we try and duplicate it, only
+    // failing to duplicate stdin and stdout are considered fatal.
+    if (!DuplicateHandle(GetCurrentProcess(),
+                       GetStdHandle(STD_INPUT_HANDLE),
+                       GetCurrentProcess(),
+                       &si.hStdInput,
+                       0,
+                       TRUE, // inheritable
+                       DUPLICATE_SAME_ACCESS) ||
+       !DuplicateHandle(GetCurrentProcess(),
+                       GetStdHandle(STD_OUTPUT_HANDLE),
+                       GetCurrentProcess(),
+                       &si.hStdOutput,
+                       0,
+                       TRUE, // inheritable
+                       DUPLICATE_SAME_ACCESS)) {
+        return 3;
+    }
+    DuplicateHandle(GetCurrentProcess(),
+                   GetStdHandle(STD_ERROR_HANDLE),
+                   GetCurrentProcess(),
+                   &si.hStdError,
+                   0,
+                   TRUE, // inheritable
+                   DUPLICATE_SAME_ACCESS);
+
+    si.dwFlags = STARTF_USESTDHANDLES;
+    dwcreate = CREATE_SUSPENDED;
+    if (!CreateProcess( NULL, cmdline,
+                        NULL,
+                        NULL,
+                        TRUE, // inherit handles
+                        dwcreate,
+                        NULL, // environ
+                        NULL, // cwd
+                        &si,
+                        pi))
+        return 4;
+    return 0;
+}
+
+// and here we go...
+int main(int argc, char **argv)
+{
+    char out_buf[1024];
+    int rc;
+    DWORD cbwritten;
+    DWORD exitcode;
+    PROCESS_INFORMATION pi;
+    if (argc==3 && strcmp(argv[1], "--kill")==0) {
+        HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, atoi(argv[2]));
+        if (!h)
+            return 1;
+        if (!TerminateProcess(h, 0))
+            return 2;
+        CloseHandle(h);
+        return 0;
+    }
+    // spawn the new suspended process
+    rc = create_child(argc, argv, &pi);
+    if (rc)
+        return rc;
+    // Write the 'terminate' command, which includes this PID, back to couch.
+    // *sob* - what about spaces etc?
+    sprintf_s(out_buf, sizeof(out_buf), "%s --kill %d\n", 
+              argv[0], pi.dwProcessId);
+    WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), out_buf, strlen(out_buf), 
+              &cbwritten, NULL);
+    // Let the child process go...
+    ResumeThread(pi.hThread);
+    // Wait for the process to terminate so we can reflect the exit code
+    // back to couch.
+    WaitForSingleObject(pi.hProcess, INFINITE);
+    if (!GetExitCodeProcess(pi.hProcess, &exitcode))
+        return 6;
+    return exitcode;
+}

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/priv/Makefile.am
----------------------------------------------------------------------
diff --git a/priv/Makefile.am b/priv/Makefile.am
deleted file mode 100644
index 9a24222..0000000
--- a/priv/Makefile.am
+++ /dev/null
@@ -1,151 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-##   http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-MAKE_SAFE = $(MAKE)
-
-couchlibdir = $(localerlanglibdir)/couch-$(version)
-couchprivdir = $(couchlibdir)/priv
-couchprivlibdir = $(couchlibdir)/priv/lib
-man1dir = $(mandir)/man1
-
-man_file = couchjs.1
-
-if BUILD_MAN
-man_file_build = $(man_file)
-else
-man_file_build =
-endif
-
-BUILT_SOURCES = $(man_file_build)
-
-EXTRA_DIST = \
-	spawnkillable/couchspawnkillable.sh \
-	stat_descriptions.cfg.in \
-	couch_ejson_compare/erl_nif_compat.h \
-	couch_js/sm170.c \
-	couch_js/sm180.c \
-	couch_js/sm185.c \
-	$(man_file_build)
-
-CLEANFILES = $(man_file_build) stat_descriptions.cfg
-
-couchprivlib_LTLIBRARIES = couch_icu_driver.la
-if USE_EJSON_COMPARE_NIF
-couchprivlib_LTLIBRARIES += couch_ejson_compare.la
-couch_ejson_compare_la_SOURCES = couch_ejson_compare/couch_ejson_compare.c
-couch_ejson_compare_la_CPPFLAGS = -D_BSD_SOURCE $(ICU_CPPFLAGS) $(ERLANG_FLAGS)
-couch_ejson_compare_la_LDFLAGS = -module -avoid-version
-couch_ejson_compare_la_LIBADD = $(ICU_LIBS)
-if WINDOWS
-couch_ejson_compare_la_LDFLAGS += -no-undefined
-endif
-endif
-couch_icu_driver_la_SOURCES = icu_driver/couch_icu_driver.c
-couch_icu_driver_la_LDFLAGS = -module -avoid-version
-couch_icu_driver_la_CPPFLAGS = $(ICU_CPPFLAGS) $(ERLANG_FLAGS)
-couch_icu_driver_la_LIBADD = $(ICU_LIBS)
-
-if WINDOWS
-couch_icu_driver_la_LDFLAGS += -no-undefined
-endif
-
-COUCHJS_SRCS = \
-	couch_js/help.h \
-	couch_js/http.c \
-	couch_js/http.h \
-	couch_js/main.c \
-	couch_js/utf8.c \
-	couch_js/utf8.h \
-	couch_js/util.h \
-	couch_js/util.c
-
-locallibbin_PROGRAMS = couchjs
-couchjs_SOURCES = $(COUCHJS_SRCS)
-couchjs_CFLAGS = -g -Wall -Werror -D_BSD_SOURCE $(CURL_CFLAGS) $(JS_CFLAGS)
-couchjs_LDADD = $(CURL_LIBS) $(JS_LIBS)
-
-couchpriv_DATA = stat_descriptions.cfg
-couchpriv_PROGRAMS = couchspawnkillable
-
-# Depend on source files so distributed man pages are not rebuilt for end user.
-
-$(man_file): $(COUCHJS_SRCS)
-	$(MAKE_SAFE) -f Makefile couchjs; \
-	$(top_srcdir)/build-aux/missing --run \
-	    help2man \
-	        --no-info \
-	        --help-option="-h" \
-	        --version-option="-V" \
-	        --name="$(package_name) JavaScript interpreter" \
-	        ./couchjs --output $@
-
-install-data-local:
-	if test -s $(man_file); then \
-	    if test `cat $(man_file) | wc -l` -gt 1; then \
-	        $(INSTALL) -d $(DESTDIR)$(man1dir); \
-	        $(INSTALL_DATA) $(man_file) $(DESTDIR)$(man1dir)/$(man_file); \
-	    fi \
-	fi
-
-%.cfg: %.cfg.in
-	cp $< $@
-
-if WINDOWS
-couchspawnkillable_SOURCES = spawnkillable/couchspawnkillable_win.c
-endif
-
-if !WINDOWS
-couchspawnkillable: spawnkillable/couchspawnkillable.sh
-	cp $< $@
-	chmod +x $@
-endif
-
-# libtool and automake have defeated markh.  For each of our executables
-# we end up with 2 copies - one directly in the 'target' folder (eg, 'priv')
-# and another - the correct one - in .libs.  The former doesn't work but is
-# what gets installed for 'couchspawnkillable' - but the correct one for
-# couchjs.exe *does* get copied.  *shrug*  So just clobber it with the
-# correct one as the last step. See bug COUCHDB-439
-install-data-hook:
-	if test -f "$(DESTDIR)$(couchprivlibdir)/couch_icu_driver"; then \
-	    rm -f "$(DESTDIR)$(couchprivlibdir)/couch_icu_driver.so"; \
-	    cd "$(DESTDIR)$(couchprivlibdir)" && \
-	        $(LN_S) couch_icu_driver couch_icu_driver.so; \
-	fi
-	if test -f "$(DESTDIR)$(couchprivlibdir)/couch_ejson_compare_nif"; then \
-	    rm -f "$(DESTDIR)$(couchprivlibdir)/couch_ejson_compare_nif.so"; \
-	    cd "$(DESTDIR)$(couchprivlibdir)" && \
-	        $(LN_S) couch_ejson_compare_nif couch_ejson_compare_nif.so; \
-	fi
-if WINDOWS
-	$(INSTALL) $(ICU_BIN)/icuuc*.dll $(bindir)
-	$(INSTALL) $(ICU_BIN)/icudt*.dll $(bindir)
-	$(INSTALL) $(ICU_BIN)/icuin*.dll $(bindir)
-	$(INSTALL) $(JS_LIB_BINARY) $(bindir)
-	$(INSTALL) .libs/couchspawnkillable.exe \
-		"$(DESTDIR)$(couchprivdir)/couchspawnkillable.exe"
-endif
-
-uninstall-local:
-	rm -f $(DESTDIR)$(man1dir)/$(man_file)
-	if test -f "$(DESTDIR)$(couchprivlibdir)/couch_erl_driver"; then \
-	    rm -f "$(DESTDIR)$(couchprivlibdir)/couch_erl_driver.so"; \
-	fi
-
-distcheck-hook:
-	if test ! -s $(man_file); then \
-	    $(top_srcdir)/build-aux/dist-error $(man_file); \
-	else \
-	    if test ! `cat $(man_file) | wc -l` -gt 1; then \
-	        $(top_srcdir)/build-aux/dist-error $(man_file); \
-	    fi \
-	fi

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/priv/couchspawnkillable.sh
----------------------------------------------------------------------
diff --git a/priv/couchspawnkillable.sh b/priv/couchspawnkillable.sh
new file mode 100755
index 0000000..f8d042e
--- /dev/null
+++ b/priv/couchspawnkillable.sh
@@ -0,0 +1,20 @@
+#! /bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# The purpose of this script is to echo an OS-specific kill command before
+# launching the actual process. This provides a way for Erlang to hard-kill
+# its external processes.
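+#
+# For example, when this wrapper is started as PID 12345 it first prints
+# "kill -9 12345"; because exec replaces the shell without changing the
+# PID, running that command later kills the wrapped process.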
+
+echo "kill -9 $$"
+exec $*

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/priv/spawnkillable/couchspawnkillable.sh
----------------------------------------------------------------------
diff --git a/priv/spawnkillable/couchspawnkillable.sh b/priv/spawnkillable/couchspawnkillable.sh
deleted file mode 100644
index f8d042e..0000000
--- a/priv/spawnkillable/couchspawnkillable.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#! /bin/sh -e
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# The purpose of this script is to echo an OS specific command before launching
-# the actual process. This provides a way for Erlang to hard-kill its external
-# processes.
-
-echo "kill -9 $$"
-exec $*

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/priv/spawnkillable/couchspawnkillable_win.c
----------------------------------------------------------------------
diff --git a/priv/spawnkillable/couchspawnkillable_win.c b/priv/spawnkillable/couchspawnkillable_win.c
deleted file mode 100644
index 0678231..0000000
--- a/priv/spawnkillable/couchspawnkillable_win.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License.  You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Do what 2 lines of shell script in couchspawnkillable does...
-// * Create a new suspended process with the same (duplicated) standard 
-//   handles as us.
-// * Write a line to stdout, consisting of the path to ourselves, plus
-//   '--kill {pid}' where {pid} is the PID of the newly created process.
-// * Un-suspend the new process.
-// * Wait for the process to terminate.
-// * Terminate with the child's exit-code.
-
-// Later, couch will call us with --kill and the PID, so we dutifully
-// terminate the specified PID.
-
-#include <stdlib.h>
-#include "windows.h"
-
-char *get_child_cmdline(int argc, char **argv)
-{
-    // make a new command-line, but skipping me.
-    // XXX - todo - spaces etc in args???
-    int i;
-    char *p, *cmdline;
-    int nchars = 0;
-    int nthis = 1;
-    for (i=1;i<argc;i++)
-        nchars += strlen(argv[i])+1;
-    cmdline = p = malloc(nchars+1);
-    if (!cmdline)
-        return NULL;
-    for (i=1;i<argc;i++) {
-        nthis = strlen(argv[i]);
-        strncpy(p, argv[i], nthis);
-        p[nthis] = ' ';
-        p += nthis+1;
-    }
-    // Replace the last space we added above with a '\0'
-    cmdline[nchars-1] = '\0';
-    return cmdline;
-}
-
-// create the child process, returning 0, or the exit-code we will
-// terminate with.
-int create_child(int argc, char **argv, PROCESS_INFORMATION *pi)
-{
-    char buf[1024];
-    DWORD dwcreate;
-    STARTUPINFO si;
-    char *cmdline;
-    if (argc < 2)
-        return 1;
-    cmdline = get_child_cmdline(argc, argv);
-    if (!cmdline)
-        return 2;
-
-    memset(&si, 0, sizeof(si));
-    si.cb = sizeof(si);
-    // depending on how *our* parent is started, we may or may not have
-    // a valid stderr stream - so although we try and duplicate it, only
-    // failing to duplicate stdin and stdout are considered fatal.
-    if (!DuplicateHandle(GetCurrentProcess(),
-                       GetStdHandle(STD_INPUT_HANDLE),
-                       GetCurrentProcess(),
-                       &si.hStdInput,
-                       0,
-                       TRUE, // inheritable
-                       DUPLICATE_SAME_ACCESS) ||
-       !DuplicateHandle(GetCurrentProcess(),
-                       GetStdHandle(STD_OUTPUT_HANDLE),
-                       GetCurrentProcess(),
-                       &si.hStdOutput,
-                       0,
-                       TRUE, // inheritable
-                       DUPLICATE_SAME_ACCESS)) {
-        return 3;
-    }
-    DuplicateHandle(GetCurrentProcess(),
-                   GetStdHandle(STD_ERROR_HANDLE),
-                   GetCurrentProcess(),
-                   &si.hStdError,
-                   0,
-                   TRUE, // inheritable
-                   DUPLICATE_SAME_ACCESS);
-
-    si.dwFlags = STARTF_USESTDHANDLES;
-    dwcreate = CREATE_SUSPENDED;
-    if (!CreateProcess( NULL, cmdline,
-                        NULL,
-                        NULL,
-                        TRUE, // inherit handles
-                        dwcreate,
-                        NULL, // environ
-                        NULL, // cwd
-                        &si,
-                        pi))
-        return 4;
-    return 0;
-}
-
-// and here we go...
-int main(int argc, char **argv)
-{
-    char out_buf[1024];
-    int rc;
-    DWORD cbwritten;
-    DWORD exitcode;
-    PROCESS_INFORMATION pi;
-    if (argc==3 && strcmp(argv[1], "--kill")==0) {
-        HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, atoi(argv[2]));
-        if (!h)
-            return 1;
-        if (!TerminateProcess(h, 0))
-            return 2;
-        CloseHandle(h);
-        return 0;
-    }
-    // spawn the new suspended process
-    rc = create_child(argc, argv, &pi);
-    if (rc)
-        return rc;
-    // Write the 'terminate' command, which includes this PID, back to couch.
-    // *sob* - what about spaces etc?
-    sprintf_s(out_buf, sizeof(out_buf), "%s --kill %d\n", 
-              argv[0], pi.dwProcessId);
-    WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), out_buf, strlen(out_buf), 
-              &cbwritten, NULL);
-    // Let the child process go...
-    ResumeThread(pi.hThread);
-    // Wait for the process to terminate so we can reflect the exit code
-    // back to couch.
-    WaitForSingleObject(pi.hProcess, INFINITE);
-    if (!GetExitCodeProcess(pi.hProcess, &exitcode))
-        return 6;
-    return exitcode;
-}

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index fb93f97..ca79b39 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -85,13 +85,24 @@ end,
     _ -> {CFLAGS, LDFLAGS ++ " -lcurl"}
 end,
 
+PortSpecs0 = case os:type() of
+    {win32, _} ->
+        [{filename:join(["priv", "couchspawnkillable"]),
+            ["c_src/spawnkillable/*.c"]}];
+    _ ->
+        {ok, _} = file:copy("priv/couchspawnkillable.sh",
+                            "priv/couchspawnkillable"),
+        os:cmd("chmod +x priv/couchspawnkillable"),
+        []
+    end,
+
 PortEnv = [{port_env, [
             {"CFLAGS",  "$CFLAGS -Wall -c -g -O2 " ++ CFLAGS1},
             {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1}]},
 
            {port_specs, [
             {filename:join(["priv", CouchJSName]),
-            ["c_src/couch_js/*.c"]}]}
+            ["c_src/couch_js/*.c"]}] ++ PortSpecs0}
 ],
 
 lists:keymerge(1,lists:keysort(1, PortEnv), lists:keysort(1, CONFIG)).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/4c23323e/src/Makefile.am
----------------------------------------------------------------------
diff --git a/src/Makefile.am b/src/Makefile.am
deleted file mode 100644
index 9fe19bc..0000000
--- a/src/Makefile.am
+++ /dev/null
@@ -1,198 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-##   http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-SUBDIRS = priv
-
-# devdocdir = $(localdocdir)/developer/couchdb
-couchlibdir = $(localerlanglibdir)/couch-$(version)
-couchincludedir = $(couchlibdir)/include
-couchebindir = $(couchlibdir)/ebin
-
-couchinclude_DATA = couch_db.hrl couch_js_functions.hrl
-couchebin_DATA = $(compiled_files)
-
-# dist_devdoc_DATA = $(doc_base) $(doc_modules)
-
-CLEANFILES = $(compiled_files) $(doc_base)
-
-# CLEANFILES = $(doc_modules) edoc-info
-
-source_files = \
-    couch.erl \
-    couch_app.erl \
-    couch_auth_cache.erl \
-    couch_btree.erl \
-    couch_changes.erl \
-    couch_compaction_daemon.erl \
-    couch_compress.erl \
-    couch_config.erl \
-    couch_config_writer.erl \
-    couch_db.erl \
-    couch_db_update_notifier.erl \
-    couch_db_update_notifier_sup.erl \
-    couch_doc.erl \
-    couch_drv.erl \
-    couch_ejson_compare.erl \
-    couch_event_sup.erl \
-    couch_external_manager.erl \
-    couch_external_server.erl \
-    couch_file.erl \
-    couch_httpd.erl \
-    couch_httpd_db.erl \
-    couch_httpd_auth.erl \
-    couch_httpd_cors.erl \
-    couch_httpd_oauth.erl \
-    couch_httpd_external.erl \
-    couch_httpd_misc_handlers.erl \
-    couch_httpd_proxy.erl \
-    couch_httpd_rewrite.erl \
-    couch_httpd_stats_handlers.erl \
-    couch_httpd_vhost.erl \
-    couch_key_tree.erl \
-    couch_log.erl \
-    couch_native_process.erl \
-    couch_os_daemons.erl \
-    couch_os_process.erl \
-    couch_passwords.erl \
-    couch_primary_sup.erl \
-    couch_query_servers.erl \
-    couch_ref_counter.erl \
-    couch_secondary_sup.erl \
-    couch_server.erl \
-    couch_server_sup.erl \
-    couch_stats_aggregator.erl \
-    couch_stats_collector.erl \
-    couch_stream.erl \
-    couch_task_status.erl \
-    couch_users_db.erl \
-    couch_util.erl \
-    couch_uuids.erl \
-    couch_db_updater.erl \
-    couch_work_queue.erl \
-    json_stream_parse.erl
-
-EXTRA_DIST = $(source_files) couch_db.hrl couch_js_functions.hrl
-
-compiled_files = \
-    couch.app \
-    couch.beam \
-    couch_app.beam \
-    couch_auth_cache.beam \
-    couch_btree.beam \
-    couch_changes.beam \
-    couch_compaction_daemon.beam \
-    couch_compress.beam \
-    couch_config.beam \
-    couch_config_writer.beam \
-    couch_db.beam \
-    couch_db_update_notifier.beam \
-    couch_db_update_notifier_sup.beam \
-    couch_doc.beam \
-    couch_drv.beam \
-    couch_ejson_compare.beam \
-    couch_event_sup.beam \
-    couch_external_manager.beam \
-    couch_external_server.beam \
-    couch_file.beam \
-    couch_httpd.beam \
-    couch_httpd_db.beam \
-    couch_httpd_auth.beam \
-    couch_httpd_oauth.beam \
-    couch_httpd_cors.beam \
-    couch_httpd_proxy.beam \
-    couch_httpd_external.beam \
-    couch_httpd_misc_handlers.beam \
-    couch_httpd_rewrite.beam \
-    couch_httpd_stats_handlers.beam \
-    couch_httpd_vhost.beam \
-    couch_key_tree.beam \
-    couch_log.beam \
-    couch_native_process.beam \
-    couch_os_daemons.beam \
-    couch_os_process.beam \
-    couch_passwords.beam \
-    couch_primary_sup.beam \
-    couch_query_servers.beam \
-    couch_ref_counter.beam \
-    couch_secondary_sup.beam \
-    couch_server.beam \
-    couch_server_sup.beam \
-    couch_stats_aggregator.beam \
-    couch_stats_collector.beam \
-    couch_stream.beam \
-    couch_task_status.beam \
-    couch_users_db.beam \
-    couch_util.beam \
-    couch_uuids.beam \
-    couch_db_updater.beam \
-    couch_work_queue.beam \
-    json_stream_parse.beam
-
-# doc_base = \
-#     erlang.png \
-#     index.html \
-#     modules-frame.html \
-#     overview-summary.html \
-#     packages-frame.html \
-#     stylesheet.css
-
-# doc_modules = \
-#     couch_btree.html \
-#     couch_config.html \
-#     couch_config_writer.html \
-#     couch_db.html \
-#     couch_db_update_notifier.html \
-#     couch_db_update_notifier_sup.html \
-#     couch_doc.html \
-#     couch_event_sup.html \
-#     couch_file.html \
-#     couch_httpd.html \
-#     couch_key_tree.html \
-#     couch_log.html \
-#     couch_query_servers.html \
-#     couch_rep.html \
-#     couch_rep_sup.html \
-#     couch_server.html \
-#     couch_server_sup.html \
-#     couch_stream.html \
-#     couch_util.html
-
-if WINDOWS
-couch.app: couch.app.tpl
-	modules=`find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ',' | sed "s/,$$//"`; \
-	sed -e "s|%package_name%|@package_name@|g" \
-			-e "s|%version%|@version@|g" \
-			-e "s|@modules@|$$modules|g" \
-			-e "s|%localconfdir%|../etc/couchdb|g" \
-			-e "s|@defaultini@|default.ini|g" \
-			-e "s|@localini@|local.ini|g" > \
-	$@ < $<
-else
-couch.app: couch.app.tpl
-	modules=`{ find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ','; echo ''; } | sed "s/,$$//"`; \
-	sed -e "s|%package_name%|@package_name@|g" \
-			-e "s|%version%|@version@|g" \
-			-e "s|@modules@|$$modules|g" \
-			-e "s|%localconfdir%|@localconfdir@|g" \
-			-e "s|@defaultini@|default.ini|g" \
-			-e "s|@localini@|local.ini|g" > \
-	$@ < $<
-	chmod +x $@
-endif
-
-# $(dist_devdoc_DATA): edoc-info
-
-# $(ERL) -noshell -run edoc_run files [\"$<\"]
-
-%.beam: %.erl couch_db.hrl couch_js_functions.hrl
-	$(ERLC) $(ERLC_FLAGS) ${TEST} $<;
-
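
With the move to rebar, the find/sed pipeline above that generated the couch.app modules list is no longer needed; the same list can be produced in Erlang from within couch.app.src.script. A rough sketch of the idea (illustrative, not the exact script contents):

    %% Equivalent of `find . -name "*.erl" ... | tr '\n' ','` in Erlang:
    Modules = [list_to_atom(filename:basename(F, ".erl"))
               || F <- filelib:wildcard("src/*.erl")].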


[41/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
couch_server_sup -> couch_sup


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/7f9c06d9
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/7f9c06d9
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/7f9c06d9

Branch: refs/heads/import-rcouch
Commit: 7f9c06d97715fef7897d7bd23ecdaa85a595b9cf
Parents: a3f9478
Author: benoitc <be...@apache.org>
Authored: Mon Jan 13 23:06:26 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch.app.src.script |   2 +-
 src/couch.erl            |   4 +-
 src/couch_app.erl        |   2 +-
 src/couch_log.erl        |   2 +-
 src/couch_server.erl     |   2 +-
 src/couch_server_sup.erl | 117 ------------------------------------------
 src/couch_sup.erl        | 117 ++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 123 insertions(+), 123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index 1e14e3d..ac8156d 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -54,7 +54,7 @@ end,
             couch_query_servers,
             couch_secondary_services,
             couch_server,
-            couch_server_sup,
+            couch_sup,
             couch_stats_aggregator,
             couch_stats_collector,
             couch_task_status

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch.erl b/src/couch.erl
index 4b2032a..fe4f1a8 100644
--- a/src/couch.erl
+++ b/src/couch.erl
@@ -60,9 +60,9 @@ restart() ->
     end.
 
 reload() ->
-    case supervisor:terminate_child(couch_server_sup, couch_config) of
+    case supervisor:terminate_child(couch_sup, couch_config) of
     ok ->
-        supervisor:restart_child(couch_server_sup, couch_config);
+        supervisor:restart_child(couch_sup, couch_config);
     {error, Reason} ->
         {error, Reason}
     end.
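
For reference, terminate_child/2 followed by restart_child/2 is the standard OTP idiom for bouncing a single supervised child; a successful reload looks like this in a shell (the pid shown is illustrative):

    1> supervisor:terminate_child(couch_sup, couch_config).
    ok
    2> supervisor:restart_child(couch_sup, couch_config).
    {ok,<0.123.0>}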

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_app.erl b/src/couch_app.erl
index 414a5c9..1d537e6 100644
--- a/src/couch_app.erl
+++ b/src/couch_app.erl
@@ -23,7 +23,7 @@
 start(_Type, _Args) ->
     couch_util:start_app_deps(couch),
     IniFiles = get_ini_files(),
-    couch_server_sup:start_link(IniFiles).
+    couch_sup:start_link(IniFiles).
 
 stop(_) ->
     ok.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch_log.erl
----------------------------------------------------------------------
diff --git a/src/couch_log.erl b/src/couch_log.erl
index cd4bbbb..7cfd47b 100644
--- a/src/couch_log.erl
+++ b/src/couch_log.erl
@@ -73,7 +73,7 @@ stop() ->
 init([]) ->
     % read config and register for configuration changes
 
-    % just stop if one of the config settings change. couch_server_sup
+    % just stop if one of the config settings change. couch_sup
     % will restart us and then we will pick up the new settings.
     ok = couch_config:register(
         fun("log", "file") ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_server.erl b/src/couch_server.erl
index 4b80dfb..91b4bc7 100644
--- a/src/couch_server.erl
+++ b/src/couch_server.erl
@@ -146,7 +146,7 @@ hash_admin_passwords(Persist) ->
 init([]) ->
     % read config and register for configuration changes
 
-    % just stop if one of the config settings change. couch_server_sup
+    % just stop if one of the config settings change. couch_sup
     % will restart us and then we will pick up the new settings.
 
     RootDir = couch_config:get("couchdb", "database_dir", "."),

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch_server_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_server_sup.erl b/src/couch_server_sup.erl
deleted file mode 100644
index 7f37677..0000000
--- a/src/couch_server_sup.erl
+++ /dev/null
@@ -1,117 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server_sup).
--behaviour(supervisor).
-
-
--export([start_link/1,stop/0, couch_config_start_link_wrapper/2,
-        restart_core_server/0, config_change/2]).
-
--include("couch_db.hrl").
-
-%% supervisor callbacks
--export([init/1]).
-
-start_link(IniFiles) ->
-    case whereis(couch_server_sup) of
-    undefined ->
-        start_server(IniFiles);
-    _Else ->
-        {error, already_started}
-    end.
-
-restart_core_server() ->
-    init:restart().
-
-couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
-    case is_process_alive(FirstConfigPid) of
-        true ->
-            link(FirstConfigPid),
-            {ok, FirstConfigPid};
-        false -> couch_config:start_link(IniFiles)
-    end.
-
-start_server(IniFiles) ->
-    case init:get_argument(pidfile) of
-    {ok, [PidFile]} ->
-        case file:write_file(PidFile, os:getpid()) of
-        ok -> ok;
-        {error, Reason} ->
-            io:format("Failed to write PID file ~s: ~s",
-                [PidFile, file:format_error(Reason)])
-        end;
-    _ -> ok
-    end,
-
-    {ok, ConfigPid} = couch_config:start_link(IniFiles),
-
-    LogLevel = couch_config:get("log", "level", "info"),
-    % announce startup
-    io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
-        couch_server:get_version(),
-        LogLevel
-    ]),
-    case LogLevel of
-    "debug" ->
-        io:format("Configuration Settings ~p:~n", [IniFiles]),
-        [io:format("  [~s] ~s=~p~n", [Module, Variable, Value])
-            || {{Module, Variable}, Value} <- couch_config:all()];
-    _ -> ok
-    end,
-
-    BaseChildSpecs =
-    {{one_for_all, 10, 3600},
-        [{couch_config,
-            {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_config]},
-        {couch_primary_services,
-            {couch_primary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_primary_sup]},
-        {couch_secondary_services,
-            {couch_secondary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_secondary_sup]}
-        ]},
-
-    % ensure these applications are running
-    application:start(ibrowse),
-    application:start(crypto),
-
-    {ok, Pid} = supervisor:start_link(
-        {local, couch_server_sup}, couch_server_sup, BaseChildSpecs),
-
-    % launch the icu bridge
-    % just restart if one of the config settings change.
-    couch_config:register(fun ?MODULE:config_change/2, Pid),
-
-    unlink(ConfigPid),
-
-    {ok, Pid}.
-
-stop() ->
-    catch exit(whereis(couch_server_sup), normal).
-
-config_change("daemons", _) ->
-    supervisor:terminate_child(couch_server_sup, couch_secondary_services),
-    supervisor:restart_child(couch_server_sup, couch_secondary_services).
-
-init(ChildSpecs) ->
-    {ok, ChildSpecs}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7f9c06d9/src/couch_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_sup.erl b/src/couch_sup.erl
new file mode 100644
index 0000000..1f04aea
--- /dev/null
+++ b/src/couch_sup.erl
@@ -0,0 +1,117 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_sup).
+-behaviour(supervisor).
+
+
+-export([start_link/1,stop/0, couch_config_start_link_wrapper/2,
+        restart_core_server/0, config_change/2]).
+
+-include("couch_db.hrl").
+
+%% supervisor callbacks
+-export([init/1]).
+
+start_link(IniFiles) ->
+    case whereis(couch_sup) of
+    undefined ->
+        start_server(IniFiles);
+    _Else ->
+        {error, already_started}
+    end.
+
+restart_core_server() ->
+    init:restart().
+
+couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
+    case is_process_alive(FirstConfigPid) of
+        true ->
+            link(FirstConfigPid),
+            {ok, FirstConfigPid};
+        false -> couch_config:start_link(IniFiles)
+    end.
+
+start_server(IniFiles) ->
+    case init:get_argument(pidfile) of
+    {ok, [PidFile]} ->
+        case file:write_file(PidFile, os:getpid()) of
+        ok -> ok;
+        {error, Reason} ->
+            io:format("Failed to write PID file ~s: ~s",
+                [PidFile, file:format_error(Reason)])
+        end;
+    _ -> ok
+    end,
+
+    {ok, ConfigPid} = couch_config:start_link(IniFiles),
+
+    LogLevel = couch_config:get("log", "level", "info"),
+    % announce startup
+    io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
+        couch_server:get_version(),
+        LogLevel
+    ]),
+    case LogLevel of
+    "debug" ->
+        io:format("Configuration Settings ~p:~n", [IniFiles]),
+        [io:format("  [~s] ~s=~p~n", [Module, Variable, Value])
+            || {{Module, Variable}, Value} <- couch_config:all()];
+    _ -> ok
+    end,
+
+    BaseChildSpecs =
+    {{one_for_all, 10, 3600},
+        [{couch_config,
+            {couch_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
+            permanent,
+            brutal_kill,
+            worker,
+            [couch_config]},
+        {couch_primary_services,
+            {couch_primary_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_primary_sup]},
+        {couch_secondary_services,
+            {couch_secondary_sup, start_link, []},
+            permanent,
+            infinity,
+            supervisor,
+            [couch_secondary_sup]}
+        ]},
+
+    % ensure these applications are running
+    application:start(ibrowse),
+    application:start(crypto),
+
+    {ok, Pid} = supervisor:start_link(
+        {local, couch_sup}, couch_sup, BaseChildSpecs),
+
+    % launch the icu bridge
+    % just restart if one of the config settings change.
+    couch_config:register(fun ?MODULE:config_change/2, Pid),
+
+    unlink(ConfigPid),
+
+    {ok, Pid}.
+
+stop() ->
+    catch exit(whereis(couch_sup), normal).
+
+config_change("daemons", _) ->
+    supervisor:terminate_child(couch_sup, couch_secondary_services),
+    supervisor:restart_child(couch_sup, couch_secondary_services).
+
+init(ChildSpecs) ->
+    {ok, ChildSpecs}.
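
The child specs above describe a small one_for_all tree. For orientation, a sketch derived from BaseChildSpecs (not part of the commit):

    %% couch_sup (one_for_all, max 10 restarts per 3600 seconds)
    %% |-- couch_config              worker     (shutdown: brutal_kill)
    %% |-- couch_primary_services    supervisor (couch_primary_sup)
    %% `-- couch_secondary_services  supervisor (couch_secondary_sup)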


[05/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_db.erl b/src/couch_httpd_db.erl
new file mode 100644
index 0000000..0a7c17c
--- /dev/null
+++ b/src/couch_httpd_db.erl
@@ -0,0 +1,1226 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_db).
+-include("couch_db.hrl").
+
+-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
+    db_req/2, couch_doc_open/4,handle_changes_req/2,
+    update_doc_result_to_json/1, update_doc_result_to_json/2,
+    handle_design_info_req/3]).
+
+-import(couch_httpd,
+    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
+    start_chunked_response/3, absolute_uri/2, send/2,
+    start_response_length/4, send_error/4]).
+
+-record(doc_query_args, {
+    options = [],
+    rev = nil,
+    open_revs = [],
+    update_type = interactive_edit,
+    atts_since = nil
+}).
+
+% Database request handlers
+handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
+        db_url_handlers=DbUrlHandlers}=Req)->
+    case {Method, RestParts} of
+    {'PUT', []} ->
+        create_db_req(Req, DbName);
+    {'DELETE', []} ->
+        % if we get ?rev=... the user is using a faulty script where the
+        % document id is empty by accident. Let them recover safely.
+        case couch_httpd:qs_value(Req, "rev", false) of
+            false -> delete_db_req(Req, DbName);
+            _Rev -> throw({bad_request,
+                "You tried to DELETE a database with a ?rev= parameter. "
+                ++ "Did you mean to DELETE a document instead?"})
+        end;
+    {_, []} ->
+        do_db_req(Req, fun db_req/2);
+    {_, [SecondPart|_]} ->
+        Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
+        do_db_req(Req, Handler)
+    end.
+
+handle_changes_req(#httpd{method='POST'}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    handle_changes_req1(Req, Db);
+handle_changes_req(#httpd{method='GET'}=Req, Db) ->
+    handle_changes_req1(Req, Db);
+handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "GET,HEAD,POST").
+
+handle_changes_req1(Req, #db{name=DbName}=Db) ->
+    AuthDbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
+    case AuthDbName of
+    DbName ->
+        % in the authentication database, _changes is admin-only.
+        ok = couch_db:check_is_admin(Db);
+    _Else ->
+        % on other databases, _changes is free for all.
+        ok
+    end,
+    handle_changes_req2(Req, Db).
+
+handle_changes_req2(Req, Db) ->
+    MakeCallback = fun(Resp) ->
+        fun({change, {ChangeProp}=Change, _}, "eventsource") ->
+            Seq = proplists:get_value(<<"seq">>, ChangeProp),
+            send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
+                              "\n", "id: ", ?JSON_ENCODE(Seq),
+                              "\n\n"]);
+        ({change, Change, _}, "continuous") ->
+            send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
+        ({change, Change, Prepend}, _) ->
+            send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
+        (start, "eventsource") ->
+            ok;
+        (start, "continuous") ->
+            ok;
+        (start, _) ->
+            send_chunk(Resp, "{\"results\":[\n");
+        ({stop, _EndSeq}, "eventsource") ->
+            end_json_response(Resp);
+        ({stop, EndSeq}, "continuous") ->
+            send_chunk(
+                Resp,
+                [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
+            ),
+            end_json_response(Resp);
+        ({stop, EndSeq}, _) ->
+            send_chunk(
+                Resp,
+                io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
+            ),
+            end_json_response(Resp);
+        (timeout, _) ->
+            send_chunk(Resp, "\n")
+        end
+    end,
+    ChangesArgs = parse_changes_query(Req, Db),
+    ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
+    WrapperFun = case ChangesArgs#changes_args.feed of
+    "normal" ->
+        {ok, Info} = couch_db:get_db_info(Db),
+        CurrentEtag = couch_httpd:make_etag(Info),
+        fun(FeedChangesFun) ->
+            couch_httpd:etag_respond(
+                Req,
+                CurrentEtag,
+                fun() ->
+                    {ok, Resp} = couch_httpd:start_json_response(
+                         Req, 200, [{"ETag", CurrentEtag}]
+                    ),
+                    FeedChangesFun(MakeCallback(Resp))
+                end
+            )
+        end;
+    "eventsource" ->
+        Headers = [
+            {"Content-Type", "text/event-stream"},
+            {"Cache-Control", "no-cache"}
+        ],
+        {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
+        fun(FeedChangesFun) ->
+            FeedChangesFun(MakeCallback(Resp))
+        end;
+    _ ->
+        % "longpoll" or "continuous"
+        {ok, Resp} = couch_httpd:start_json_response(Req, 200),
+        fun(FeedChangesFun) ->
+            FeedChangesFun(MakeCallback(Resp))
+        end
+    end,
+    couch_stats_collector:increment(
+        {httpd, clients_requesting_changes}
+    ),
+    try
+        WrapperFun(ChangesFun)
+    after
+    couch_stats_collector:decrement(
+        {httpd, clients_requesting_changes}
+    )
+    end.
+
+handle_compact_req(#httpd{method='POST'}=Req, Db) ->
+    case Req#httpd.path_parts of
+        [_DbName, <<"_compact">>] ->
+            ok = couch_db:check_is_admin(Db),
+            couch_httpd:validate_ctype(Req, "application/json"),
+            {ok, _} = couch_db:start_compact(Db),
+            send_json(Req, 202, {[{ok, true}]});
+        [_DbName, <<"_compact">>, DesignName | _] ->
+            DesignId = <<"_design/", DesignName/binary>>,
+            DDoc = couch_httpd_db:couch_doc_open(
+                Db, DesignId, nil, [ejson_body]
+            ),
+            couch_mrview_http:handle_compact_req(Req, Db, DDoc)
+    end;
+
+handle_compact_req(Req, _Db) ->
+    send_method_not_allowed(Req, "POST").
+
+
+handle_design_req(#httpd{
+        path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
+        design_url_handlers = DesignUrlHandlers
+    }=Req, Db) ->
+    case couch_db:is_system_db(Db) of
+    true ->
+        case (catch couch_db:check_is_admin(Db)) of
+        ok -> ok;
+        _ ->
+            throw({forbidden, <<"Only admins can access design document",
+                " actions for system databases.">>})
+        end;
+    false -> ok
+    end,
+
+    % load ddoc
+    DesignId = <<"_design/", DesignName/binary>>,
+    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
+    Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
+        throw({not_found, <<"missing handler: ", Action/binary>>})
+    end),
+    Handler(Req, Db, DDoc);
+
+handle_design_req(Req, Db) ->
+    db_req(Req, Db).
+
+handle_design_info_req(#httpd{
+            method='GET',
+            path_parts=[_DbName, _Design, DesignName, _]
+        }=Req, Db, _DDoc) ->
+    DesignId = <<"_design/", DesignName/binary>>,
+    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
+    couch_mrview_http:handle_info_req(Req, Db, DDoc).
+
+create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
+    {ok, Db} ->
+        couch_db:close(Db),
+        DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
+        send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
+    Error ->
+        throw(Error)
+    end.
+
+delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    case couch_server:delete(DbName, [{user_ctx, UserCtx}]) of
+    ok ->
+        send_json(Req, 200, {[{ok, true}]});
+    Error ->
+        throw(Error)
+    end.
+
+do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
+    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
+    {ok, Db} ->
+        try
+            Fun(Req, Db)
+        after
+            catch couch_db:close(Db)
+        end;
+    Error ->
+        throw(Error)
+    end.
+
+db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
+    {ok, DbInfo} = couch_db:get_db_info(Db),
+    send_json(Req, {DbInfo});
+
+db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
+    validate_attachment_names(Doc),
+    Doc2 = case Doc#doc.id of
+        <<"">> ->
+            Doc#doc{id=couch_uuids:new(), revs={0, []}};
+        _ ->
+            Doc
+    end,
+    DocId = Doc2#doc.id,
+    update_doc(Req, Db, DocId, Doc2);
+
+db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    UpdateSeq = couch_db:get_update_seq(Db),
+    CommittedSeq = couch_db:get_committed_update_seq(Db),
+    {ok, StartTime} =
+    case couch_httpd:qs_value(Req, "seq") of
+    undefined ->
+        couch_db:ensure_full_commit(Db);
+    RequiredStr ->
+        RequiredSeq = list_to_integer(RequiredStr),
+        if RequiredSeq > UpdateSeq ->
+            throw({bad_request,
+                "can't do a full commit ahead of current update_seq"});
+        RequiredSeq > CommittedSeq ->
+            couch_db:ensure_full_commit(Db);
+        true ->
+            {ok, Db#db.instance_start_time}
+        end
+    end,
+    send_json(Req, 201, {[
+        {ok, true},
+        {instance_start_time, StartTime}
+    ]});
+
+db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
+    couch_stats_collector:increment({httpd, bulk_requests}),
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {JsonProps} = couch_httpd:json_body_obj(Req),
+    case couch_util:get_value(<<"docs">>, JsonProps) of
+    undefined ->
+        send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
+    DocsArray ->
+        case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+        "true" ->
+            Options = [full_commit];
+        "false" ->
+            Options = [delay_commit];
+        _ ->
+            Options = []
+        end,
+        case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
+        true ->
+            Docs = lists:map(
+                fun({ObjProps} = JsonObj) ->
+                    Doc = couch_doc:from_json_obj(JsonObj),
+                    validate_attachment_names(Doc),
+                    Id = case Doc#doc.id of
+                        <<>> -> couch_uuids:new();
+                        Id0 -> Id0
+                    end,
+                    case couch_util:get_value(<<"_rev">>, ObjProps) of
+                    undefined ->
+                        Revs = {0, []};
+                    Rev ->
+                        {Pos, RevId} = couch_doc:parse_rev(Rev),
+                        Revs = {Pos, [RevId]}
+                    end,
+                    Doc#doc{id=Id,revs=Revs}
+                end,
+                DocsArray),
+            Options2 =
+            case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+            true  -> [all_or_nothing|Options];
+            _ -> Options
+            end,
+            case couch_db:update_docs(Db, Docs, Options2) of
+            {ok, Results} ->
+                % output the results
+                DocResults = lists:zipwith(fun update_doc_result_to_json/2,
+                    Docs, Results),
+                send_json(Req, 201, DocResults);
+            {aborted, Errors} ->
+                ErrorsJson =
+                    lists:map(fun update_doc_result_to_json/1, Errors),
+                send_json(Req, 417, ErrorsJson)
+            end;
+        false ->
+            Docs = lists:map(fun(JsonObj) ->
+                    Doc = couch_doc:from_json_obj(JsonObj),
+                    validate_attachment_names(Doc),
+                    Doc
+                end, DocsArray),
+            {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
+            ErrorsJson =
+                lists:map(fun update_doc_result_to_json/1, Errors),
+            send_json(Req, 201, ErrorsJson)
+        end
+    end;
+db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {IdsRevs} = couch_httpd:json_body_obj(Req),
+    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
+
+    case couch_db:purge_docs(Db, IdsRevs2) of
+    {ok, PurgeSeq, PurgedIdsRevs} ->
+        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
+        send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
+    Error ->
+        throw(Error)
+    end;
+
+db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+    JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
+    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
+    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
+    send_json(Req, {[
+        {missing_revs, {Results2}}
+    ]});
+
+db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+    JsonDocIdRevs2 =
+        [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
+    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
+    Results2 =
+    lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
+        {Id,
+            {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+                if PossibleAncestors == [] ->
+                    [];
+                true ->
+                    [{possible_ancestors,
+                        couch_doc:revs_to_strs(PossibleAncestors)}]
+                end}}
+    end, Results),
+    send_json(Req, {Results2});
+
+db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
+    SecObj = couch_httpd:json_body(Req),
+    ok = couch_db:set_security(Db, SecObj),
+    send_json(Req, {[{<<"ok">>, true}]});
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
+    send_json(Req, couch_db:get_security(Db));
+
+db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "PUT,GET");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
+        Db) ->
+    Limit = couch_httpd:json_body(Req),
+    case is_integer(Limit) of
+    true ->
+        ok = couch_db:set_revs_limit(Db, Limit),
+        send_json(Req, {[{<<"ok">>, true}]});
+    false ->
+        throw({bad_request, <<"Rev limit has to be an integer">>})
+    end;
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+    send_json(Req, couch_db:get_revs_limit(Db));
+
+db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+    send_method_not_allowed(Req, "PUT,GET");
+
+% Special case to enable using an unencoded slash in the URL of design docs,
+% as slashes in document IDs must otherwise be URL encoded.
+db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
+    PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
+    [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
+        [{return, list}]),
+    couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
+        mochiweb_util:join(PathTail, "_design%2F"));
+
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
+    db_doc_req(Req, Db, <<"_design/",Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
+    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
+
+
+% Special case to allow for accessing local documents without %2F
+% encoding the docid. Throws out requests that don't have the second
+% path part or that specify an attachment name.
+db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+    throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+    throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+    db_doc_req(Req, Db, <<"_local/", Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+    throw({bad_request, <<"_local documents do not accept attachments.">>});
+
+db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+    db_doc_req(Req, Db, DocId);
+
+db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+    db_attachment_req(Req, Db, DocId, FileNameParts).
+
+db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+    % check for the existence of the doc to handle the 404 case.
+    couch_doc_open(Db, DocId, nil, []),
+    case couch_httpd:qs_value(Req, "rev") of
+    undefined ->
+        update_doc(Req, Db, DocId,
+                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
+    Rev ->
+        update_doc(Req, Db, DocId,
+                couch_doc_from_req(Req, DocId,
+                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
+    end;
+
+db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
+    #doc_query_args{
+        rev = Rev,
+        open_revs = Revs,
+        options = Options1,
+        atts_since = AttsSince
+    } = parse_doc_query(Req),
+    Options = case AttsSince of
+    nil ->
+        Options1;
+    RevList when is_list(RevList) ->
+        [{atts_since, RevList}, attachments | Options1]
+    end,
+    case Revs of
+    [] ->
+        Doc = couch_doc_open(Db, DocId, Rev, Options),
+        send_doc(Req, Doc, Options);
+    _ ->
+        {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
+        case MochiReq:accepts_content_type("multipart/mixed") of
+        false ->
+            {ok, Resp} = start_json_response(Req, 200),
+            send_chunk(Resp, "["),
+            % We loop through the docs. The separator is the empty string the
+            % first time through, then a comma on subsequent iterations.
+            lists:foldl(
+                fun(Result, AccSeparator) ->
+                    case Result of
+                    {ok, Doc} ->
+                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
+                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+                        send_chunk(Resp, AccSeparator ++ Json);
+                    {{not_found, missing}, RevId} ->
+                        RevStr = couch_doc:rev_to_str(RevId),
+                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+                        send_chunk(Resp, AccSeparator ++ Json)
+                    end,
+                    "," % AccSeparator now has a comma
+                end,
+                "", Results),
+            send_chunk(Resp, "]"),
+            end_json_response(Resp);
+        true ->
+            send_docs_multipart(Req, Results, Options)
+        end
+    end;
+
+
+db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
+    couch_httpd:validate_referer(Req),
+    couch_doc:validate_docid(DocId),
+    couch_httpd:validate_ctype(Req, "multipart/form-data"),
+    Form = couch_httpd:parse_form(Req),
+    case couch_util:get_value("_doc", Form) of
+    undefined ->
+        Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
+        {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
+    Json ->
+        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
+    end,
+    UpdatedAtts = [
+        #att{name=validate_attachment_name(Name),
+            type=list_to_binary(ContentType),
+            data=Content} ||
+        {Name, {ContentType, _}, Content} <-
+        proplists:get_all_values("_attachments", Form)
+    ],
+    #doc{atts=OldAtts} = Doc,
+    OldAtts2 = lists:flatmap(
+        fun(#att{name=OldName}=Att) ->
+            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
+            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
+            _ -> [] % the attachment was in the UpdatedAtts, drop it
+            end
+        end, OldAtts),
+    NewDoc = Doc#doc{
+        atts = UpdatedAtts ++ OldAtts2
+    },
+    update_doc(Req, Db, DocId, NewDoc);
+
+db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
+    couch_doc:validate_docid(DocId),
+
+    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
+    ("multipart/related;" ++ _) = ContentType ->
+        {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
+            ContentType, fun() -> receive_request_data(Req) end),
+        Doc = couch_doc_from_req(Req, DocId, Doc0),
+        try
+            Result = update_doc(Req, Db, DocId, Doc),
+            WaitFun(),
+            Result
+        catch throw:Err ->
+            % Document rejected by a validate_doc_update function.
+            couch_doc:abort_multi_part_stream(Parser),
+            throw(Err)
+        end;
+    _Else ->
+        Body = couch_httpd:json_body(Req),
+        Doc = couch_doc_from_req(Req, DocId, Body),
+        update_doc(Req, Db, DocId, Doc)
+    end;
+
+db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
+    SourceRev =
+    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+        missing_rev -> nil;
+        Rev -> Rev
+    end,
+    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
+    % open old doc
+    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
+    % save new doc
+    update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
+
+db_doc_req(Req, _Db, _DocId) ->
+    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
+
+
+send_doc(Req, Doc, Options) ->
+    case Doc#doc.meta of
+    [] ->
+        DiskEtag = couch_httpd:doc_etag(Doc),
+        % output etag only when we have no meta
+        couch_httpd:etag_respond(Req, DiskEtag, fun() ->
+            send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
+        end);
+    _ ->
+        send_doc_efficiently(Req, Doc, [], Options)
+    end.
+
+
+send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
+        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
+    #doc{atts = Atts} = Doc, Headers, Options) ->
+    case lists:member(attachments, Options) of
+    true ->
+        case MochiReq:accepts_content_type("multipart/related") of
+        false ->
+            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+        true ->
+            Boundary = couch_uuids:random(),
+            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
+                    [attachments, follows, att_encoding_info | Options])),
+            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+                    Boundary,JsonBytes, Atts, true),
+            CType = {"Content-Type", ?b2l(ContentType)},
+            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
+            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
+                    fun(Data) -> couch_httpd:send(Resp, Data) end, true)
+        end;
+    false ->
+        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+    end.
+
+send_docs_multipart(Req, Results, Options1) ->
+    OuterBoundary = couch_uuids:random(),
+    InnerBoundary = couch_uuids:random(),
+    Options = [attachments, follows, att_encoding_info | Options1],
+    CType = {"Content-Type",
+        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
+    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+    lists:foreach(
+        fun({ok, #doc{atts=Atts}=Doc}) ->
+            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+                    InnerBoundary, JsonBytes, Atts, true),
+            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+                    ContentType/binary, "\r\n\r\n">>),
+            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
+                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+                    end, true),
+             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
+        ({{not_found, missing}, RevId}) ->
+             RevStr = couch_doc:rev_to_str(RevId),
+             Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+             couch_httpd:send_chunk(Resp,
+                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+                Json,
+                <<"\r\n--", OuterBoundary/binary>>])
+         end, Results),
+    couch_httpd:send_chunk(Resp, <<"--">>),
+    couch_httpd:last_chunk(Resp).
+
+send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
+    Boundary = couch_uuids:random(),
+    CType = {"Content-Type",
+        "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+    {ok, Resp} = start_chunked_response(Req, 206, [CType]),
+    couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
+    lists:foreach(fun({From, To}) ->
+        ContentRange = ?l2b(make_content_range(From, To, Len)),
+        couch_httpd:send_chunk(Resp,
+            <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
+            "Content-Range: ", ContentRange/binary, "\r\n",
+            "\r\n">>),
+        couch_doc:range_att_foldl(Att, From, To + 1,
+            fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+        couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+    end, Ranges),
+    couch_httpd:send_chunk(Resp, <<"--">>),
+    couch_httpd:last_chunk(Resp),
+    {ok, Resp}.
+
+receive_request_data(Req) ->
+    receive_request_data(Req, couch_httpd:body_length(Req)).
+
+receive_request_data(Req, LenLeft) when LenLeft > 0 ->
+    Len = erlang:min(4096, LenLeft),
+    Data = couch_httpd:recv(Req, Len),
+    {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
+receive_request_data(_Req, _) ->
+    throw(<<"expected more data">>).
+
+make_content_range(From, To, Len) ->
+    io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
+
+update_doc_result_to_json({{Id, Rev}, Error}) ->
+        {_Code, Err, Msg} = couch_httpd:error_info(Error),
+        {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
+            {error, Err}, {reason, Msg}]}.
+
+update_doc_result_to_json(#doc{id=DocId}, Result) ->
+    update_doc_result_to_json(DocId, Result);
+update_doc_result_to_json(DocId, {ok, NewRev}) ->
+    {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
+update_doc_result_to_json(DocId, Error) ->
+    {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
+    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
+
+
+update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
+    Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
+    update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
+update_doc(Req, Db, DocId, Doc) ->
+    update_doc(Req, Db, DocId, Doc, []).
+
+update_doc(Req, Db, DocId, Doc, Headers) ->
+    #doc_query_args{
+        update_type = UpdateType
+    } = parse_doc_query(Req),
+    update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
+
+update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
+    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+    "true" ->
+        Options = [full_commit];
+    "false" ->
+        Options = [delay_commit];
+    _ ->
+        Options = []
+    end,
+    case couch_httpd:qs_value(Req, "batch") of
+    "ok" ->
+        % async batching
+        spawn(fun() ->
+                case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
+                {ok, _} -> ok;
+                Error ->
+                    ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
+                end
+            end),
+        send_json(Req, 202, Headers, {[
+            {ok, true},
+            {id, DocId}
+        ]});
+    _Normal ->
+        % normal
+        {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
+        NewRevStr = couch_doc:rev_to_str(NewRev),
+        ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
+        send_json(Req,
+            if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
+            true -> 201 end,
+            ResponseHeaders, {[
+                {ok, true},
+                {id, DocId},
+                {rev, NewRevStr}]})
+    end.
+
+couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
+    validate_attachment_names(Doc),
+    Rev = case couch_httpd:qs_value(Req, "rev") of
+    undefined ->
+        undefined;
+    QSRev ->
+        couch_doc:parse_rev(QSRev)
+    end,
+    Revs2 =
+    case Revs of
+    {Start, [RevId|_]} ->
+        if Rev /= undefined andalso Rev /= {Start, RevId} ->
+            throw({bad_request, "Document rev from request body and query "
+                   "string have different values"});
+        true ->
+            case extract_header_rev(Req, {Start, RevId}) of
+            missing_rev -> {0, []};
+            _ -> Revs
+            end
+        end;
+    _ ->
+        case extract_header_rev(Req, Rev) of
+        missing_rev -> {0, []};
+        {Pos, RevId2} -> {Pos, [RevId2]}
+        end
+    end,
+    Doc#doc{id=DocId, revs=Revs2};
+couch_doc_from_req(Req, DocId, Json) ->
+    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
+
+% Useful for debugging
+% couch_doc_open(Db, DocId) ->
+%   couch_doc_open(Db, DocId, nil, []).
+
+couch_doc_open(Db, DocId, Rev, Options) ->
+    case Rev of
+    nil -> % open most recent rev
+        case couch_db:open_doc(Db, DocId, Options) of
+        {ok, Doc} ->
+            Doc;
+        Error ->
+            throw(Error)
+        end;
+    _ -> % open a specific rev (deletions come back as stubs)
+        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+        {ok, [{ok, Doc}]} ->
+            Doc;
+        {ok, [{{not_found, missing}, Rev}]} ->
+            throw(not_found);
+        {ok, [Else]} ->
+            throw(Else)
+        end
+    end.
+
+% Attachment request handlers
+
+db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
+    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
+    #doc_query_args{
+        rev=Rev,
+        options=Options
+    } = parse_doc_query(Req),
+    #doc{
+        atts=Atts
+    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
+    case [A || A <- Atts, A#att.name == FileName] of
+    [] ->
+        throw({not_found, "Document is missing attachment"});
+    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
+        Etag = case Att#att.md5 of
+            <<>> -> couch_httpd:doc_etag(Doc);
+            Md5 -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
+        end,
+        ReqAcceptsAttEnc = lists:member(
+           atom_to_list(Enc),
+           couch_httpd:accepted_encodings(Req)
+        ),
+        Len = case {Enc, ReqAcceptsAttEnc} of
+        {identity, _} ->
+            % stored and served in identity form
+            DiskLen;
+        {_, false} when DiskLen =/= AttLen ->
+            % Stored encoded, but client doesn't accept the encoding we used,
+            % so we need to decode on the fly.  DiskLen is the identity length
+            % of the attachment.
+            DiskLen;
+        {_, true} ->
+            % Stored and served encoded.  AttLen is the encoded length.
+            AttLen;
+        _ ->
+            % We received an encoded attachment and stored it as such, so we
+            % don't know the identity length.  The client doesn't accept the
+            % encoding, and since we cannot serve a correct Content-Length
+            % header we'll fall back to a chunked response.
+            undefined
+        end,
+        Headers = [
+            {"ETag", Etag},
+            {"Cache-Control", "must-revalidate"},
+            {"Content-Type", binary_to_list(Type)}
+        ] ++ case ReqAcceptsAttEnc of
+        true when Enc =/= identity ->
+            % RFC 2616 says that the 'identity' encoding should not be used in
+            % the Content-Encoding header
+            [{"Content-Encoding", atom_to_list(Enc)}];
+        _ ->
+            []
+        end ++ case Enc of
+            identity ->
+                [{"Accept-Ranges", "bytes"}];
+            _ ->
+                [{"Accept-Ranges", "none"}]
+        end,
+        AttFun = case ReqAcceptsAttEnc of
+        false ->
+            fun couch_doc:att_foldl_decode/3;
+        true ->
+            fun couch_doc:att_foldl/3
+        end,
+        couch_httpd:etag_respond(
+            Req,
+            Etag,
+            fun() ->
+                case Len of
+                undefined ->
+                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
+                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+                    last_chunk(Resp);
+                _ ->
+                    Ranges = parse_ranges(MochiReq:get(range), Len),
+                    case {Enc, Ranges} of
+                        {identity, [{From, To}]} ->
+                            Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
+                                ++ Headers,
+                            {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
+                            couch_doc:range_att_foldl(Att, From, To + 1,
+                                fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
+                        {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
+                            send_ranges_multipart(Req, Type, Len, Att, Ranges);
+                        _ ->
+                            Headers1 = Headers ++
+                                if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
+                                    [{"Content-MD5", base64:encode(Att#att.md5)}];
+                                true ->
+                                    []
+                            end,
+                            {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
+                            AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+                    end
+                end
+            end
+        )
+    end;
+
+
+db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
+        when (Method == 'PUT') or (Method == 'DELETE') ->
+    FileName = validate_attachment_name(
+                    mochiweb_util:join(
+                        lists:map(fun binary_to_list/1,
+                            FileNameParts),"/")),
+
+    NewAtt = case Method of
+        'DELETE' ->
+            [];
+        _ ->
+            [#att{
+                name = FileName,
+                type = case couch_httpd:header_value(Req,"Content-Type") of
+                    undefined ->
+                        % We could throw an error here or guess by the FileName.
+                        % Currently, just giving it a default.
+                        <<"application/octet-stream">>;
+                    CType ->
+                        list_to_binary(CType)
+                    end,
+                data = case couch_httpd:body_length(Req) of
+                    undefined ->
+                        <<"">>;
+                    {unknown_transfer_encoding, Unknown} ->
+                        exit({unknown_transfer_encoding, Unknown});
+                    chunked ->
+                        fun(MaxChunkSize, ChunkFun, InitState) ->
+                            couch_httpd:recv_chunked(Req, MaxChunkSize,
+                                ChunkFun, InitState)
+                        end;
+                    0 ->
+                        <<"">>;
+                    Length when is_integer(Length) ->
+                        Expect = case couch_httpd:header_value(Req, "expect") of
+                                     undefined ->
+                                         undefined;
+                                     Value when is_list(Value) ->
+                                         string:to_lower(Value)
+                                 end,
+                        case Expect of
+                            "100-continue" ->
+                                MochiReq:start_raw_response({100, gb_trees:empty()});
+                            _Else ->
+                                ok
+                        end,
+
+
+                        fun(Size) -> couch_httpd:recv(Req, Size) end
+                    end,
+                att_len = case couch_httpd:header_value(Req,"Content-Length") of
+                    undefined ->
+                        undefined;
+                    Length ->
+                        list_to_integer(Length)
+                    end,
+                md5 = get_md5_header(Req),
+                encoding = case string:to_lower(string:strip(
+                    couch_httpd:header_value(Req,"Content-Encoding","identity")
+                )) of
+                "identity" ->
+                   identity;
+                "gzip" ->
+                   gzip;
+                _ ->
+                   throw({
+                       bad_ctype,
+                       "Only gzip and identity content-encodings are supported"
+                   })
+                end
+            }]
+    end,
+
+    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+        missing_rev -> % make the new doc
+            couch_doc:validate_docid(DocId),
+            #doc{id=DocId};
+        Rev ->
+            case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
+                {ok, [{ok, Doc0}]} -> Doc0;
+                {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
+                {ok, [Error]} -> throw(Error)
+            end
+    end,
+
+    #doc{atts=Atts} = Doc,
+    DocEdited = Doc#doc{
+        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
+    },
+
+    Headers = case Method of
+    'DELETE' ->
+        [];
+    _ ->
+        [{"Location", absolute_uri(Req, "/" ++
+            ?b2l(Db#db.name) ++ "/" ++
+            ?b2l(DocId) ++ "/" ++
+            ?b2l(FileName)
+        )}]
+    end,
+    update_doc(Req, Db, DocId, DocEdited, Headers);
+
+db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
+    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
+
+parse_ranges(undefined, _Len) ->
+    undefined;
+parse_ranges(fail, _Len) ->
+    undefined;
+parse_ranges(Ranges, Len) ->
+    parse_ranges(Ranges, Len, []).
+
+parse_ranges([], _Len, Acc) ->
+    lists:reverse(Acc);
+parse_ranges([{0, none}|_], _Len, _Acc) ->
+    undefined;
+parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
+    throw(requested_range_not_satisfiable);
+parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
+    parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To}|Rest], Len, Acc) ->
+    parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From, none}|Rest], Len, Acc) ->
+    parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From,To}|Rest], Len, Acc) ->
+    parse_ranges(Rest, Len, [{From, To}] ++ Acc).
+
+get_md5_header(Req) ->
+    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
+    Length = couch_httpd:body_length(Req),
+    Trailer = couch_httpd:header_value(Req, "Trailer"),
+    case {ContentMD5, Length, Trailer} of
+        _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
+            base64:decode(ContentMD5);
+        {_, chunked, undefined} ->
+            <<>>;
+        {_, chunked, _} ->
+            case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
+                {match, _} ->
+                    md5_in_footer;
+                _ ->
+                    <<>>
+            end;
+        _ ->
+            <<>>
+    end.
+
+parse_doc_query(Req) ->
+    lists:foldl(fun({Key,Value}, Args) ->
+        case {Key, Value} of
+        {"attachments", "true"} ->
+            Options = [attachments | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"meta", "true"} ->
+            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"revs", "true"} ->
+            Options = [revs | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"local_seq", "true"} ->
+            Options = [local_seq | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"revs_info", "true"} ->
+            Options = [revs_info | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"conflicts", "true"} ->
+            Options = [conflicts | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"deleted_conflicts", "true"} ->
+            Options = [deleted_conflicts | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"rev", Rev} ->
+            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
+        {"open_revs", "all"} ->
+            Args#doc_query_args{open_revs=all};
+        {"open_revs", RevsJsonStr} ->
+            JsonArray = ?JSON_DECODE(RevsJsonStr),
+            Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
+        {"latest", "true"} ->
+            Options = [latest | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        {"atts_since", RevsJsonStr} ->
+            JsonArray = ?JSON_DECODE(RevsJsonStr),
+            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
+        {"new_edits", "false"} ->
+            Args#doc_query_args{update_type=replicated_changes};
+        {"new_edits", "true"} ->
+            Args#doc_query_args{update_type=interactive_edit};
+        {"att_encoding_info", "true"} ->
+            Options = [att_encoding_info | Args#doc_query_args.options],
+            Args#doc_query_args{options=Options};
+        _Else -> % unknown key-value pair; ignore.
+            Args
+        end
+    end, #doc_query_args{}, couch_httpd:qs(Req)).
+
+parse_changes_query(Req, Db) ->
+    ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
+        case {string:to_lower(Key), Value} of
+        {"feed", _} ->
+            Args#changes_args{feed=Value};
+        {"descending", "true"} ->
+            Args#changes_args{dir=rev};
+        {"since", "now"} ->
+            UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
+                couch_db:get_update_seq(WDb)
+            end),
+            Args#changes_args{since=UpdateSeq};
+        {"since", _} ->
+            Args#changes_args{since=list_to_integer(Value)};
+        {"last-event-id", _} ->
+            Args#changes_args{since=list_to_integer(Value)};
+        {"limit", _} ->
+            Args#changes_args{limit=list_to_integer(Value)};
+        {"style", _} ->
+            Args#changes_args{style=list_to_existing_atom(Value)};
+        {"heartbeat", "true"} ->
+            Args#changes_args{heartbeat=true};
+        {"heartbeat", _} ->
+            Args#changes_args{heartbeat=list_to_integer(Value)};
+        {"timeout", _} ->
+            Args#changes_args{timeout=list_to_integer(Value)};
+        {"include_docs", "true"} ->
+            Args#changes_args{include_docs=true};
+        {"attachments", "true"} ->
+            Opts = Args#changes_args.doc_options,
+            Args#changes_args{doc_options=[attachments|Opts]};
+        {"att_encoding_info", "true"} ->
+            Opts = Args#changes_args.doc_options,
+            Args#changes_args{doc_options=[att_encoding_info|Opts]};
+        {"conflicts", "true"} ->
+            Args#changes_args{conflicts=true};
+        {"filter", _} ->
+            Args#changes_args{filter=Value};
+        _Else -> % unknown key-value pair; ignore.
+            Args
+        end
+    end, #changes_args{}, couch_httpd:qs(Req)),
+    %% If it's an EventSource request with a Last-Event-ID header,
+    %% that header should override the `since` query string, since
+    %% it's probably the browser reconnecting.
+    case ChangesArgs#changes_args.feed of
+        "eventsource" ->
+            case couch_httpd:header_value(Req, "last-event-id") of
+                undefined ->
+                    ChangesArgs;
+                Value ->
+                    ChangesArgs#changes_args{since=list_to_integer(Value)}
+            end;
+        _ ->
+            ChangesArgs
+    end.
+
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
+extract_header_rev(Req, ExplicitRev) ->
+    Etag = case couch_httpd:header_value(Req, "If-Match") of
+        undefined -> undefined;
+        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+    end,
+    case {ExplicitRev, Etag} of
+    {undefined, undefined} -> missing_rev;
+    {_, undefined} -> ExplicitRev;
+    {undefined, _} -> Etag;
+    _ when ExplicitRev == Etag -> Etag;
+    _ ->
+        throw({bad_request, "Document rev and etag have different values"})
+    end.
+
+
+parse_copy_destination_header(Req) ->
+    case couch_httpd:header_value(Req, "Destination") of
+    undefined ->
+        throw({bad_request, "Destination header is mandatory for COPY."});
+    Destination ->
+        case re:run(Destination, "^https?://", [{capture, none}]) of
+        match ->
+            throw({bad_request, "Destination URL must be relative."});
+        nomatch ->
+            % see if ?rev=revid got appended to the Destination header
+            case re:run(Destination, "\\?", [{capture, none}]) of
+            nomatch ->
+                {list_to_binary(Destination), {0, []}};
+            match ->
+                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+                {Pos, RevId} = couch_doc:parse_rev(Rev),
+                {list_to_binary(DocId), {Pos, [RevId]}}
+            end
+        end
+    end.
+
+validate_attachment_names(Doc) ->
+    lists:foreach(fun(#att{name=Name}) ->
+        validate_attachment_name(Name)
+    end, Doc#doc.atts).
+
+validate_attachment_name(Name) when is_list(Name) ->
+    validate_attachment_name(list_to_binary(Name));
+validate_attachment_name(<<"_",_/binary>>) ->
+    throw({bad_request, <<"Attachment name can't start with '_'">>});
+validate_attachment_name(Name) ->
+    case couch_util:validate_utf8(Name) of
+        true -> Name;
+        false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
+    end.
+

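A note on the attachment range handling above: parse_ranges/2 normalizes
each HTTP Range spec against the attachment length before any bytes are
streamed. The sketch below shows the rewrites it performs for a
hypothetical 1000-byte attachment (the function is not exported from
couch_httpd_db, so these calls are illustrative only):

    %% {none, To} is a suffix range, {From, none} runs to end-of-file,
    %% and an end offset past EOF is clamped to Len - 1.
    parse_ranges([{none, 500}], 1000)  %=> [{500,999}]  (last 500 bytes)
    parse_ranges([{500, none}], 1000)  %=> [{500,999}]  (byte 500 to EOF)
    parse_ranges([{500, 4999}], 1000)  %=> [{500,999}]  (To clamped)
    parse_ranges([{0, none}], 1000)    %=> undefined    (whole entity, plain 200)
    parse_ranges([{900, 100}], 1000)   %=> throws requested_range_not_satisfiable
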
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_external.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_external.erl b/src/couch_httpd_external.erl
new file mode 100644
index 0000000..2036d25
--- /dev/null
+++ b/src/couch_httpd_external.erl
@@ -0,0 +1,177 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_external).
+
+-export([handle_external_req/2, handle_external_req/3]).
+-export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
+-export([default_or_content_type/2, parse_external_response/1]).
+
+-import(couch_httpd,[send_error/4]).
+
+-include("couch_db.hrl").
+
+% handle_external_req/2
+% for the old type of config usage:
+% _external = {couch_httpd_external, handle_external_req}
+% with urls like
+% /db/_external/action/design/name
+handle_external_req(#httpd{
+                        path_parts=[_DbName, _External, UrlName | _Path]
+                    }=HttpReq, Db) ->
+    process_external_req(HttpReq, Db, UrlName);
+handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
+    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
+handle_external_req(Req, _) ->
+    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
+
+% handle_external_req/3
+% for this type of config usage:
+% _action = {couch_httpd_external, handle_external_req, <<"action">>}
+% with urls like
+% /db/_action/design/name
+handle_external_req(HttpReq, Db, Name) ->
+    process_external_req(HttpReq, Db, Name).
+
+process_external_req(HttpReq, Db, Name) ->
+
+    Response = couch_external_manager:execute(binary_to_list(Name),
+        json_req_obj(HttpReq, Db)),
+
+    case Response of
+    {unknown_external_server, Msg} ->
+        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
+    _ ->
+        send_external_response(HttpReq, Response)
+    end.
+json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
+json_req_obj(#httpd{mochi_req=Req,
+               method=Method,
+               requested_path_parts=RequestedPath,
+               path_parts=Path,
+               req_body=ReqBody
+            }, Db, DocId) ->
+    Body = case ReqBody of
+        undefined ->
+            MaxSize = list_to_integer(
+                couch_config:get("couchdb", "max_document_size", "4294967296")),
+            Req:recv_body(MaxSize);
+        Else -> Else
+    end,
+    ParsedForm = case Req:get_primary_header_value("content-type") of
+        "application/x-www-form-urlencoded" ++ _ ->
+            case Body of
+            undefined -> [];
+            _ -> mochiweb_util:parse_qs(Body)
+            end;
+        _ ->
+            []
+    end,
+    Headers = Req:get(headers),
+    Hlist = mochiweb_headers:to_list(Headers),
+    {ok, Info} = couch_db:get_db_info(Db),
+
+    % add headers...
+    {[{<<"info">>, {Info}},
+        {<<"id">>, DocId},
+        {<<"uuid">>, couch_uuids:new()},
+        {<<"method">>, Method},
+        {<<"requested_path">>, RequestedPath},
+        {<<"path">>, Path},
+        {<<"raw_path">>, ?l2b(Req:get(raw_path))},
+        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
+        {<<"headers">>, to_json_terms(Hlist)},
+        {<<"body">>, Body},
+        {<<"peer">>, ?l2b(Req:get(peer))},
+        {<<"form">>, to_json_terms(ParsedForm)},
+        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
+        {<<"userCtx">>, couch_util:json_user_ctx(Db)},
+        {<<"secObj">>, couch_db:get_security(Db)}]}.
+
+to_json_terms(Data) ->
+    to_json_terms(Data, []).
+
+to_json_terms([], Acc) ->
+    {lists:reverse(Acc)};
+to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
+    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
+to_json_terms([{Key, Value} | Rest], Acc) ->
+    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
+
+json_query_keys({Json}) ->
+    json_query_keys(Json, []).
+json_query_keys([], Acc) ->
+    {lists:reverse(Acc)};
+json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
+    json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
+    json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
+    json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([Term | Rest], Acc) ->
+    json_query_keys(Rest, [Term|Acc]).
+
+send_external_response(Req, Response) ->
+    #extern_resp_args{
+        code = Code,
+        data = Data,
+        ctype = CType,
+        headers = Headers,
+        json = Json
+    } = parse_external_response(Response),
+    Headers1 = default_or_content_type(CType, Headers),
+    case Json of
+    nil ->
+        couch_httpd:send_response(Req, Code, Headers1, Data);
+    Json ->
+        couch_httpd:send_json(Req, Code, Headers1, Json)
+    end.
+
+parse_external_response({Response}) ->
+    lists:foldl(fun({Key,Value}, Args) ->
+        case {Key, Value} of
+            {"", _} ->
+                Args;
+            {<<"code">>, Value} ->
+                Args#extern_resp_args{code=Value};
+            {<<"stop">>, true} ->
+                Args#extern_resp_args{stop=true};
+            {<<"json">>, Value} ->
+                Args#extern_resp_args{
+                    json=Value,
+                    ctype="application/json"};
+            {<<"body">>, Value} ->
+                Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
+            {<<"base64">>, Value} ->
+                Args#extern_resp_args{
+                    data=base64:decode(Value),
+                    ctype="application/binary"
+                };
+            {<<"headers">>, {Headers}} ->
+                NewHeaders = lists:map(fun({Header, HVal}) ->
+                    {binary_to_list(Header), binary_to_list(HVal)}
+                end, Headers),
+                Args#extern_resp_args{headers=NewHeaders};
+            _ -> % unknown key
+                Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
+                throw({external_response_error, Msg})
+            end
+        end, #extern_resp_args{}, Response).
+
+default_or_content_type(DefaultContentType, Headers) ->
+    IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
+    case lists:any(IsContentType, Headers) of
+    false ->
+        [{"Content-Type", DefaultContentType} | Headers];
+    true ->
+        Headers
+    end.

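For orientation, send_external_response/2 turns the EJSON object returned
by the external process into an HTTP response; the recognized fields are
exactly the clauses of parse_external_response/1. A minimal sketch of the
two common shapes (all values here are made up):

    %% Raw body; "body" implies a text/html content type unless the
    %% headers object supplies its own Content-Type:
    {[{<<"code">>, 200},
      {<<"headers">>, {[{<<"X-Example">>, <<"yes">>}]}},
      {<<"body">>, <<"<h1>hello</h1>">>}]}

    %% JSON reply; "json" sets the default Content-Type to application/json:
    {[{<<"code">>, 200},
      {<<"json">>, {[{<<"ok">>, true}]}}]}
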
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_misc_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_misc_handlers.erl b/src/couch_httpd_misc_handlers.erl
new file mode 100644
index 0000000..96a05c6
--- /dev/null
+++ b/src/couch_httpd_misc_handlers.erl
@@ -0,0 +1,318 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_misc_handlers).
+
+-export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
+    handle_all_dbs_req/1,handle_restart_req/1,
+    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
+    handle_task_status_req/1, handle_file_req/2]).
+
+-export([increment_update_seq_req/2]).
+
+
+-include("couch_db.hrl").
+
+-import(couch_httpd,
+    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
+    start_chunked_response/3, send_error/4]).
+
+% httpd global handlers
+
+handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
+    send_json(Req, {[
+        {couchdb, WelcomeMessage},
+        {uuid, couch_server:get_uuid()},
+        {version, list_to_binary(couch_server:get_version())}
+        ] ++ case couch_config:get("vendor") of
+        [] ->
+            [];
+        Properties ->
+            [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
+        end
+    });
+handle_welcome_req(Req, _) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+    {{Year,Month,Day},Time} = erlang:universaltime(),
+    OneYearFromNow = {{Year+1,Month,Day},Time},
+    CachingHeaders = [
+        %favicon should expire a year from now
+        {"Cache-Control", "public, max-age=31536000"},
+        {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
+    ],
+    couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
+
+handle_favicon_req(Req, _) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+handle_file_req(#httpd{method='GET'}=Req, Document) ->
+    couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
+
+handle_file_req(Req, _) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+    "/" ++ UrlPath = couch_httpd:path(Req),
+    case couch_httpd:partition(UrlPath) of
+    {_ActionKey, "/", RelativePath} ->
+        % GET /_utils/path or GET /_utils/
+        CachingHeaders =
+                [{"Cache-Control", "private, must-revalidate"}],
+        couch_httpd:serve_file(Req, RelativePath, DocumentRoot, CachingHeaders);
+    {_ActionKey, "", _RelativePath} ->
+        % GET /_utils
+        RedirectPath = couch_httpd:path(Req) ++ "/",
+        couch_httpd:send_redirect(Req, RedirectPath)
+    end;
+handle_utils_dir_req(Req, _) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+    {ok, DbNames} = couch_server:all_databases(),
+    send_json(Req, DbNames);
+handle_all_dbs_req(Req) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+
+handle_task_status_req(#httpd{method='GET'}=Req) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    % convert the list of prop lists to a list of json objects
+    send_json(Req, [{Props} || Props <- couch_task_status:all()]);
+handle_task_status_req(Req) ->
+    send_method_not_allowed(Req, "GET,HEAD").
+
+
+handle_restart_req(#httpd{method='POST'}=Req) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    ok = couch_httpd:verify_is_server_admin(Req),
+    Result = send_json(Req, 202, {[{ok, true}]}),
+    couch_server_sup:restart_core_server(),
+    Result;
+handle_restart_req(Req) ->
+    send_method_not_allowed(Req, "POST").
+
+
+handle_uuids_req(#httpd{method='GET'}=Req) ->
+    Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
+    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
+    Etag = couch_httpd:make_etag(UUIDs),
+    couch_httpd:etag_respond(Req, Etag, fun() ->
+        CacheBustingHeaders = [
+            {"Date", couch_util:rfc1123_date()},
+            {"Cache-Control", "no-cache"},
+            % Past date, ON PURPOSE!
+            {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
+            {"Pragma", "no-cache"},
+            {"ETag", Etag}
+        ],
+        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
+    end);
+handle_uuids_req(Req) ->
+    send_method_not_allowed(Req, "GET").
+
+
+% Config request handler
+
+
+% GET /_config/
+% GET /_config
+handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+        case dict:is_key(Section, Acc) of
+        true ->
+            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+        false ->
+            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+        end
+    end, dict:new(), couch_config:all()),
+    KVs = dict:fold(fun(Section, Values, Acc) ->
+        [{list_to_binary(Section), {Values}} | Acc]
+    end, [], Grouped),
+    send_json(Req, 200, {KVs});
+% GET /_config/Section
+handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    KVs = [{list_to_binary(Key), list_to_binary(Value)}
+            || {Key, Value} <- couch_config:get(Section)],
+    send_json(Req, 200, {KVs});
+% GET /_config/Section/Key
+handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    case couch_config:get(Section, Key, null) of
+    null ->
+        throw({not_found, unknown_config_value});
+    Value ->
+        send_json(Req, 200, list_to_binary(Value))
+    end;
+% PUT or DELETE /_config/Section/Key
+handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
+      when (Method == 'PUT') or (Method == 'DELETE') ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
+    case couch_config:get(<<"httpd">>, <<"config_whitelist">>, null) of
+        null ->
+            % No whitelist; allow all changes.
+            handle_approved_config_req(Req, Persist);
+        WhitelistValue ->
+            % Provide a failsafe to protect against inadvertently locking
+            % oneself out of the config by supplying a syntactically-incorrect
+            % Erlang term. To intentionally lock down the whitelist, supply a
+            % well-formed list which does not include the whitelist config
+            % variable itself.
+            FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
+
+            Whitelist = case couch_util:parse_term(WhitelistValue) of
+                {ok, Value} when is_list(Value) ->
+                    Value;
+                {ok, _NonListValue} ->
+                    FallbackWhitelist;
+                {error, _} ->
+                    [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
+                    ?LOG_ERROR("Only whitelisting ~s/~s due to error parsing: ~p",
+                               [WhitelistSection, WhitelistKey, WhitelistValue]),
+                    FallbackWhitelist
+            end,
+
+            IsRequestedKeyVal = fun(Element) ->
+                case Element of
+                    {A, B} ->
+                        % For readability, tuples may be used instead of binaries
+                        % in the whitelist.
+                        case {couch_util:to_binary(A), couch_util:to_binary(B)} of
+                            {Section, Key} ->
+                                true;
+                            {Section, <<"*">>} ->
+                                true;
+                            _Else ->
+                                false
+                        end;
+                    _Else ->
+                        false
+                end
+            end,
+
+            case lists:any(IsRequestedKeyVal, Whitelist) of
+                true ->
+                    % Allow modifying this whitelisted variable.
+                    handle_approved_config_req(Req, Persist);
+                _NotWhitelisted ->
+                    % Disallow modifying this non-whitelisted variable.
+                    send_error(Req, 400, <<"modification_not_allowed">>,
+                               ?l2b("This config variable is read-only"))
+            end
+    end;
+handle_config_req(Req) ->
+    send_method_not_allowed(Req, "GET,PUT,DELETE").
+
+% PUT /_config/Section/Key
+% "value"
+handle_approved_config_req(Req, Persist) ->
+    Query = couch_httpd:qs(Req),
+    UseRawValue = case lists:keyfind("raw", 1, Query) of
+    false            -> false; % Not specified
+    {"raw", ""}      -> false; % Specified with no value, i.e. "?raw" and "?raw="
+    {"raw", "false"} -> false;
+    {"raw", "true"}  -> true;
+    {"raw", InvalidValue} -> InvalidValue
+    end,
+    handle_approved_config_req(Req, Persist, UseRawValue).
+
+handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req,
+                           Persist, UseRawValue)
+        when UseRawValue =:= false orelse UseRawValue =:= true ->
+    RawValue = couch_httpd:json_body(Req),
+    Value = case UseRawValue of
+    true ->
+        % Client requests no change to the provided value.
+        RawValue;
+    false ->
+        % Pre-process the value as necessary.
+        case Section of
+        <<"admins">> ->
+            couch_passwords:hash_admin_password(RawValue);
+        _ ->
+            RawValue
+        end
+    end,
+
+    OldValue = couch_config:get(Section, Key, ""),
+    case couch_config:set(Section, Key, ?b2l(Value), Persist) of
+    ok ->
+        send_json(Req, 200, list_to_binary(OldValue));
+    Error ->
+        throw(Error)
+    end;
+
+handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) ->
+    Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
+    send_json(Req, 400, {[{error, ?l2b(Err)}]});
+
+% DELETE /_config/Section/Key
+handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req,
+                           Persist, _UseRawValue) ->
+    case couch_config:get(Section, Key, null) of
+    null ->
+        throw({not_found, unknown_config_value});
+    OldValue ->
+        couch_config:delete(Section, Key, Persist),
+        send_json(Req, 200, list_to_binary(OldValue))
+    end.
+
+
+% httpd db handlers
+
+increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {ok, NewSeq} = couch_db:increment_update_seq(Db),
+    send_json(Req, {[{ok, true},
+        {update_seq, NewSeq}
+    ]});
+increment_update_seq_req(Req, _Db) ->
+    send_method_not_allowed(Req, "POST").
+
+% httpd log handlers
+
+handle_log_req(#httpd{method='GET'}=Req) ->
+    ok = couch_httpd:verify_is_server_admin(Req),
+    Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
+    Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
+    Chunk = couch_log:read(Bytes, Offset),
+    {ok, Resp} = start_chunked_response(Req, 200, [
+        % send a plaintext response
+        {"Content-Type", "text/plain; charset=utf-8"},
+        {"Content-Length", integer_to_list(length(Chunk))}
+    ]),
+    send_chunk(Resp, Chunk),
+    last_chunk(Resp);
+handle_log_req(#httpd{method='POST'}=Req) ->
+    {PostBody} = couch_httpd:json_body_obj(Req),
+    Level = couch_util:get_value(<<"level">>, PostBody),
+    Message = ?b2l(couch_util:get_value(<<"message">>, PostBody)),
+    case Level of
+    <<"debug">> ->
+        ?LOG_DEBUG(Message, []),
+        send_json(Req, 200, {[{ok, true}]});
+    <<"info">> ->
+        ?LOG_INFO(Message, []),
+        send_json(Req, 200, {[{ok, true}]});
+    <<"error">> ->
+        ?LOG_ERROR(Message, []),
+        send_json(Req, 200, {[{ok, true}]});
+    _ ->
+        send_json(Req, 400, {[{error, ?l2b(io_lib:format("Unrecognized log level '~s'", [Level]))}]})
+    end;
+handle_log_req(Req) ->
+    send_method_not_allowed(Req, "GET,POST").

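One detail worth calling out in handle_config_req/1 above: the
config_whitelist value is parsed with couch_util:parse_term/1, so it is an
Erlang term, not JSON. A well-formed whitelist is a list of {Section, Key}
pairs; the entries below are only an example:

    [{<<"httpd">>, <<"config_whitelist">>},
     {<<"log">>, <<"level">>},
     {<<"admins">>, <<"*">>}]

As the comments in the code explain, <<"*">> matches every key in a
section, and a well-formed list that omits the config_whitelist entry
itself intentionally locks the whitelist down.
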
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_oauth.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_oauth.erl b/src/couch_httpd_oauth.erl
new file mode 100644
index 0000000..2094c08
--- /dev/null
+++ b/src/couch_httpd_oauth.erl
@@ -0,0 +1,387 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_oauth).
+
+-include("couch_db.hrl").
+-include("couch_js_functions.hrl").
+
+-export([oauth_authentication_handler/1, handle_oauth_req/1]).
+
+-define(OAUTH_DDOC_ID, <<"_design/oauth">>).
+-define(OAUTH_VIEW_NAME, <<"oauth_credentials">>).
+
+-record(callback_params, {
+    consumer,
+    token,
+    token_secret,
+    url,
+    signature,
+    params,
+    username
+}).
+
+% OAuth auth handler using per-node user db
+oauth_authentication_handler(Req) ->
+    serve_oauth(Req, fun oauth_auth_callback/2, true).
+
+
+oauth_auth_callback(Req, #callback_params{token_secret = undefined}) ->
+    couch_httpd:send_error(
+         Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>);
+
+oauth_auth_callback(#httpd{mochi_req = MochiReq} = Req, CbParams) ->
+    Method = atom_to_list(MochiReq:get(method)),
+    #callback_params{
+        consumer = Consumer,
+        token_secret = TokenSecret,
+        url = Url,
+        signature = Sig,
+        params = Params,
+        username = User
+    } = CbParams,
+    case oauth:verify(Sig, Method, Url, Params, Consumer, TokenSecret) of
+    true ->
+        set_user_ctx(Req, User);
+    false ->
+        ?LOG_DEBUG("OAuth handler: signature verification failed for user `~p`~n"
+            "Received signature is `~p`~n"
+            "HTTP method is `~p`~n"
+            "URL is `~p`~n"
+            "Parameters are `~p`~n"
+            "Consumer is `~p`, token secret is `~p`~n"
+            "Expected signature was `~p`~n",
+            [User, Sig, Method, Url, Params, Consumer, TokenSecret,
+                oauth:signature(Method, Url, Params, Consumer, TokenSecret)]),
+        Req
+    end.
+
+
+% Look up the consumer key and get the roles to give the consumer
+set_user_ctx(_Req, undefined) ->
+    throw({bad_request, unknown_oauth_token});
+set_user_ctx(Req, Name) ->
+    case couch_auth_cache:get_user_creds(Name) of
+        nil ->
+            ?LOG_DEBUG("OAuth handler: user `~p` credentials not found", [Name]),
+            Req;
+        User ->
+            Roles = couch_util:get_value(<<"roles">>, User, []),
+            Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
+    end.
+
+% OAuth request_token
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req1) ->
+    serve_oauth(Req1, fun(Req, CbParams) ->
+        #callback_params{
+            consumer = Consumer,
+            token_secret = TokenSecret,
+            url = Url,
+            signature = Sig,
+            params = Params
+        } = CbParams,
+        case oauth:verify(
+            Sig, atom_to_list(Method), Url, Params, Consumer, TokenSecret) of
+        true ->
+            ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
+        false ->
+            invalid_signature(Req)
+        end
+    end, false);
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
+    {ok, serve_oauth_authorize(Req)};
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req1) ->
+    serve_oauth(Req1, fun(Req, CbParams) ->
+        #callback_params{
+            consumer = Consumer,
+            token = Token,
+            url = Url,
+            signature = Sig,
+            params = Params
+        } = CbParams,
+        case Token of
+        "requestkey" ->
+            case oauth:verify(
+                Sig, "GET", Url, Params, Consumer, "requestsecret") of
+            true ->
+                ok(Req,
+                    <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
+            false ->
+                invalid_signature(Req)
+            end;
+        _ ->
+            couch_httpd:send_error(
+                Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
+        end
+    end, false);
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
+    couch_httpd:send_method_not_allowed(Req, "GET").
+
+invalid_signature(Req) ->
+    couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
+
+% This needs to be protected, i.e. force the user to log in using HTTP Basic Auth or form-based login.
+serve_oauth_authorize(#httpd{method=Method}=Req1) ->
+    case Method of
+        'GET' ->
+            % Confirm with the User that they want to authenticate the Consumer
+            serve_oauth(Req1, fun(Req, CbParams) ->
+                #callback_params{
+                    consumer = Consumer,
+                    token_secret = TokenSecret,
+                    url = Url,
+                    signature = Sig,
+                    params = Params
+                } = CbParams,
+                case oauth:verify(
+                    Sig, "GET", Url, Params, Consumer, TokenSecret) of
+                true ->
+                    ok(Req, <<"oauth_token=requestkey&",
+                        "oauth_token_secret=requestsecret">>);
+                false ->
+                    invalid_signature(Req)
+                end
+            end, false);
+        'POST' ->
+            % If the User has confirmed, we direct the User back to the Consumer with a verification code
+            serve_oauth(Req1, fun(Req, CbParams) ->
+                #callback_params{
+                    consumer = Consumer,
+                    token_secret = TokenSecret,
+                    url = Url,
+                    signature = Sig,
+                    params = Params
+                } = CbParams,
+                case oauth:verify(
+                    Sig, "POST", Url, Params, Consumer, TokenSecret) of
+                true ->
+                    %redirect(oauth_callback, oauth_token, oauth_verifier),
+                    ok(Req, <<"oauth_token=requestkey&",
+                        "oauth_token_secret=requestsecret">>);
+                false ->
+                    invalid_signature(Req)
+                end
+            end, false);
+        _ ->
+            couch_httpd:send_method_not_allowed(Req1, "GET,POST")
+    end.
+
+serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
+    % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
+    % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
+    % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
+    AuthHeader = case MochiReq:get_header_value("authorization") of
+        undefined ->
+            "";
+        Else ->
+            [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
+            case [string:to_lower(Head) | Tail] of
+                ["oauth", Rest] -> Rest;
+                _ -> ""
+            end
+    end,
+    HeaderParams = oauth:header_params_decode(AuthHeader),
+    %Realm = couch_util:get_value("realm", HeaderParams),
+
+    % get requested path
+    RequestedPath = case MochiReq:get_header_value("x-couchdb-requested-path") of
+        undefined ->
+            case MochiReq:get_header_value("x-couchdb-vhost-path") of
+                undefined ->
+                    MochiReq:get(raw_path);
+                VHostPath ->
+                    VHostPath
+            end;
+        RequestedPath0 ->
+           RequestedPath0
+    end,
+    {_, QueryString, _} = mochiweb_util:urlsplit_path(RequestedPath),
+
+    Params = proplists:delete("realm", HeaderParams) ++ mochiweb_util:parse_qs(QueryString),
+
+    ?LOG_DEBUG("OAuth Params: ~p", [Params]),
+    case couch_util:get_value("oauth_version", Params, "1.0") of
+        "1.0" ->
+            case couch_util:get_value("oauth_consumer_key", Params, undefined) of
+                undefined ->
+                    case FailSilently of
+                        true -> Req;
+                        false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
+                    end;
+                ConsumerKey ->
+                    Url = couch_httpd:absolute_uri(Req, RequestedPath),
+                    case get_callback_params(ConsumerKey, Params, Url) of
+                        {ok, CallbackParams} ->
+                            Fun(Req, CallbackParams);
+                        invalid_consumer_token_pair ->
+                            couch_httpd:send_error(
+                                Req, 400,
+                                <<"invalid_consumer_token_pair">>,
+                                <<"Invalid consumer and token pair.">>);
+                        {error, {Error, Reason}} ->
+                            couch_httpd:send_error(Req, 400, Error, Reason)
+                    end
+            end;
+        _ ->
+            couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
+    end.
+
+
+get_callback_params(ConsumerKey, Params, Url) ->
+    Token = couch_util:get_value("oauth_token", Params),
+    SigMethod = sig_method(Params),
+    CbParams0 = #callback_params{
+        token = Token,
+        signature = couch_util:get_value("oauth_signature", Params),
+        params = proplists:delete("oauth_signature", Params),
+        url = Url
+    },
+    case oauth_credentials_info(Token, ConsumerKey) of
+    nil ->
+        invalid_consumer_token_pair;
+    {error, _} = Err ->
+        Err;
+    {OauthCreds} ->
+        User = couch_util:get_value(<<"username">>, OauthCreds, []),
+        ConsumerSecret = ?b2l(couch_util:get_value(
+            <<"consumer_secret">>, OauthCreds, <<>>)),
+        TokenSecret = ?b2l(couch_util:get_value(
+            <<"token_secret">>, OauthCreds, <<>>)),
+        case (User =:= []) orelse (ConsumerSecret =:= []) orelse
+            (TokenSecret =:= []) of
+        true ->
+            invalid_consumer_token_pair;
+        false ->
+            CbParams = CbParams0#callback_params{
+                consumer = {ConsumerKey, ConsumerSecret, SigMethod},
+                token_secret = TokenSecret,
+                username = User
+            },
+            ?LOG_DEBUG("Got OAuth credentials, for ConsumerKey `~p` and "
+                "Token `~p`, from the views, User: `~p`, "
+                "ConsumerSecret: `~p`, TokenSecret: `~p`",
+                [ConsumerKey, Token, User, ConsumerSecret, TokenSecret]),
+            {ok, CbParams}
+        end
+    end.
+
+
+sig_method(Params) ->
+    sig_method_1(couch_util:get_value("oauth_signature_method", Params)).
+sig_method_1("PLAINTEXT") ->
+    plaintext;
+% sig_method_1("RSA-SHA1") ->
+%    rsa_sha1;
+sig_method_1("HMAC-SHA1") ->
+    hmac_sha1;
+sig_method_1(_) ->
+    undefined.
+
+
+ok(#httpd{mochi_req=MochiReq}, Body) ->
+    {ok, MochiReq:respond({200, [], Body})}.
+
+
+oauth_credentials_info(Token, ConsumerKey) ->
+    case use_auth_db() of
+    {ok, Db} ->
+        Result = case query_oauth_view(Db, [?l2b(ConsumerKey), ?l2b(Token)]) of
+        [] ->
+            nil;
+        [Creds] ->
+            Creds;
+        [_ | _] ->
+            Reason = iolist_to_binary(
+                io_lib:format("Found multiple OAuth credentials for the pair "
+                    " (consumer_key: `~p`, token: `~p`)", [ConsumerKey, Token])),
+            {error, {<<"oauth_token_consumer_key_pair">>, Reason}}
+        end,
+        couch_db:close(Db),
+        Result;
+    nil ->
+        {
+            case couch_config:get("oauth_consumer_secrets", ConsumerKey) of
+            undefined -> [];
+            ConsumerSecret -> [{<<"consumer_secret">>, ?l2b(ConsumerSecret)}]
+            end
+            ++
+            case couch_config:get("oauth_token_secrets", Token) of
+            undefined -> [];
+            TokenSecret -> [{<<"token_secret">>, ?l2b(TokenSecret)}]
+            end
+            ++
+            case couch_config:get("oauth_token_users", Token) of
+            undefined -> [];
+            User -> [{<<"username">>, ?l2b(User)}]
+            end
+        }
+    end.
+
+
+use_auth_db() ->
+    case couch_config:get("couch_httpd_oauth", "use_users_db", "false") of
+    "false" ->
+        nil;
+    "true" ->
+        AuthDb = open_auth_db(),
+        {ok, _AuthDb2} = ensure_oauth_views_exist(AuthDb)
+    end.
+
+
+open_auth_db() ->
+    DbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
+    DbOptions = [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}],
+    {ok, AuthDb} = couch_db:open_int(DbName, DbOptions),
+    AuthDb.
+
+
+ensure_oauth_views_exist(AuthDb) ->
+    case couch_db:open_doc(AuthDb, ?OAUTH_DDOC_ID, []) of
+    {ok, _DDoc} ->
+        {ok, AuthDb};
+    _ ->
+        {ok, DDoc} = get_oauth_ddoc(),
+        {ok, _Rev} = couch_db:update_doc(AuthDb, DDoc, []),
+        {ok, _AuthDb2} = couch_db:reopen(AuthDb)
+    end.
+
+
+get_oauth_ddoc() ->
+    Json = {[
+        {<<"_id">>, ?OAUTH_DDOC_ID},
+        {<<"language">>, <<"javascript">>},
+        {<<"views">>,
+            {[
+                {?OAUTH_VIEW_NAME,
+                    {[
+                        {<<"map">>, ?OAUTH_MAP_FUN}
+                    ]}
+                }
+            ]}
+        }
+    ]},
+    {ok, couch_doc:from_json_obj(Json)}.
+
+
+query_oauth_view(Db, Key) ->
+    ViewOptions = [
+        {start_key, Key},
+        {end_key, Key}
+    ],
+    Callback = fun({row, Row}, Acc) ->
+            {ok, [couch_util:get_value(value, Row) | Acc]};
+        (_, Acc) ->
+            {ok, Acc}
+    end,
+    {ok, Result} = couch_mrview:query_view(
+        Db, ?OAUTH_DDOC_ID, ?OAUTH_VIEW_NAME, ViewOptions, Callback, []),
+    Result.

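The fallback branch of oauth_credentials_info/2 reads credentials straight
from the ini file whenever use_users_db is false, so a minimal static
setup looks roughly like this (consumer and token names and their secrets
are placeholders):

    [couch_httpd_oauth]
    use_users_db = false

    [oauth_consumer_secrets]
    example_consumer = consumer_secret

    [oauth_token_secrets]
    example_token = token_secret

    [oauth_token_users]
    example_token = jan

With use_users_db = true, the same lookup is answered by the
oauth_credentials view that ensure_oauth_views_exist/1 installs in the
authentication database.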

[13/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_cors.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_cors.erl b/couch_httpd_cors.erl
deleted file mode 100644
index d9462d1..0000000
--- a/couch_httpd_cors.erl
+++ /dev/null
@@ -1,351 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc module to handle Cross-Origin Resource Sharing
-%%
-%% This module handles CORS requests and preflight requests for
-%% CouchDB. The configuration is done in the ini file.
-%%
-%% This implements http://www.w3.org/TR/cors/
-
-
--module(couch_httpd_cors).
-
--include("couch_db.hrl").
-
--export([is_preflight_request/1, cors_headers/2]).
-
--define(SUPPORTED_HEADERS, "Accept, Accept-Language, Content-Type," ++
-        "Expires, Last-Modified, Pragma, Origin, Content-Length," ++
-        "If-Match, Destination, X-Requested-With, " ++
-        "X-Http-Method-Override, Content-Range").
-
--define(SUPPORTED_METHODS, "GET, HEAD, POST, PUT, DELETE," ++
-        "TRACE, CONNECT, COPY, OPTIONS").
-
-% as defined in http://www.w3.org/TR/cors/#terminology
--define(SIMPLE_HEADERS, ["Cache-Control", "Content-Language",
-        "Content-Type", "Expires", "Last-Modified", "Pragma"]).
--define(ALLOWED_HEADERS, lists:sort(["Server", "Etag",
-        "Accept-Ranges" | ?SIMPLE_HEADERS])).
--define(SIMPLE_CONTENT_TYPE_VALUES, ["application/x-www-form-urlencoded",
-        "multipart/form-data", "text/plain"]).
-
-% TODO: - pick a sane default
--define(CORS_DEFAULT_MAX_AGE, 12345).
-
-%% is_preflight_request/1
-
-% http://www.w3.org/TR/cors/#resource-preflight-requests
-
-is_preflight_request(#httpd{method=Method}=Req) when Method /= 'OPTIONS' ->
-    Req;
-is_preflight_request(Req) ->
-    EnableCors = enable_cors(),
-    is_preflight_request(Req, EnableCors).
-
-is_preflight_request(Req, false) ->
-    Req;
-is_preflight_request(#httpd{mochi_req=MochiReq}=Req, true) ->
-    case preflight_request(MochiReq) of
-    {ok, PreflightHeaders} ->
-        send_preflight_response(Req, PreflightHeaders);
-    _ ->
-        Req
-    end.
-
-
-preflight_request(MochiReq) ->
-    Origin = MochiReq:get_header_value("Origin"),
-    preflight_request(MochiReq, Origin).
-
-preflight_request(MochiReq, undefined) ->
-    % If the Origin header is not present terminate this set of
-    % steps. The request is outside the scope of this specification.
-    % http://www.w3.org/TR/cors/#resource-preflight-requests
-    MochiReq;
-preflight_request(MochiReq, Origin) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    AcceptAll = lists:member("*", AcceptedOrigins),
-
-    HandlerFun = fun() ->
-        OriginList = couch_util:to_list(Origin),
-        handle_preflight_request(OriginList, Host, MochiReq)
-    end,
-
-    case AcceptAll of
-    true ->
-        % Always matching is acceptable since the list of
-        % origins can be unbounded.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        HandlerFun();
-    false ->
-        case lists:member(Origin, AcceptedOrigins) of
-        % The Origin header can only contain a single origin as
-        % the user agent will not follow redirects.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        % TODO: Square against multi origin thinger in Security Considerations
-        true ->
-            HandlerFun();
-        false ->
-            % If the value of the Origin header is not a
-            % case-sensitive match for any of the values
-            % in list of origins do not set any additional
-            % headers and terminate this set of steps.
-            % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-handle_preflight_request(Origin, Host, MochiReq) ->
-    %% get supported methods
-    SupportedMethods = split_list(cors_config(Host, "methods",
-                                              ?SUPPORTED_METHODS)),
-
-    % get supported headers
-    AllSupportedHeaders = split_list(cors_config(Host, "headers",
-                                                 ?SUPPORTED_HEADERS)),
-
-    SupportedHeaders = [string:to_lower(H) || H <- AllSupportedHeaders],
-
-    % get max age
-    MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
-
-    PreflightHeaders0 = maybe_add_credentials(Origin, Host, [
-        {"Access-Control-Allow-Origin", Origin},
-        {"Access-Control-Max-Age", MaxAge},
-        {"Access-Control-Allow-Methods",
-            string:join(SupportedMethods, ", ")}]),
-
-    case MochiReq:get_header_value("Access-Control-Request-Method") of
-    undefined ->
-        % If there is no Access-Control-Request-Method header
-        % or if parsing failed, do not set any additional headers
-        % and terminate this set of steps. The request is outside
-        % the scope of this specification.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-        {ok, PreflightHeaders0};
-    Method ->
-        case lists:member(Method, SupportedMethods) of
-        true ->
-            % method ok, check headers
-            AccessHeaders = MochiReq:get_header_value(
-                    "Access-Control-Request-Headers"),
-            {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
-                undefined -> {"", []};
-                Headers ->
-                    % transform the header list into something we
-                    % can check; make sure everything is a
-                    % list
-                    RH = [string:to_lower(H)
-                          || H <- split_headers(Headers)],
-                    {Headers, RH}
-            end,
-            % check if headers are supported
-            case ReqHeaders -- SupportedHeaders of
-            [] ->
-                PreflightHeaders = PreflightHeaders0 ++
-                                   [{"Access-Control-Allow-Headers",
-                                     FinalReqHeaders}],
-                {ok, PreflightHeaders};
-            _ ->
-                false
-            end;
-        false ->
-        % If method is not a case-sensitive match for any of
-        % the values in list of methods do not set any additional
-        % headers and terminate this set of steps.
-        % http://www.w3.org/TR/cors/#resource-preflight-requests
-            false
-        end
-    end.
-
-
-send_preflight_response(#httpd{mochi_req=MochiReq}=Req, Headers) ->
-    couch_httpd:log_request(Req, 204),
-    couch_stats_collector:increment({httpd_status_codes, 204}),
-    Headers1 = couch_httpd:http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ couch_httpd:server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    {ok, MochiReq:respond({204, Headers2, <<>>})}.
-
-
-% cors_headers/2
-
-cors_headers(MochiReq, RequestHeaders) ->
-    EnableCors = enable_cors(),
-    CorsHeaders = do_cors_headers(MochiReq, EnableCors),
-    maybe_apply_cors_headers(CorsHeaders, RequestHeaders).
-
-do_cors_headers(#httpd{mochi_req=MochiReq}, true) ->
-    Host = couch_httpd_vhost:host(MochiReq),
-    AcceptedOrigins = get_accepted_origins(Host),
-    case MochiReq:get_header_value("Origin") of
-    undefined ->
-        % If the Origin header is not present terminate
-        % this set of steps. The request is outside the scope
-        % of this specification.
-        % http://www.w3.org/TR/cors/#resource-processing-model
-        [];
-    Origin ->
-        handle_cors_headers(couch_util:to_list(Origin),
-                            Host, AcceptedOrigins)
-    end;
-do_cors_headers(_MochiReq, false) ->
-    [].
-
-maybe_apply_cors_headers([], RequestHeaders) ->
-    RequestHeaders;
-maybe_apply_cors_headers(CorsHeaders, RequestHeaders0) ->
-    % for each RequestHeader that isn't in SimpleHeaders,
-    % (or Content-Type with SIMPLE_CONTENT_TYPE_VALUES)
-    % append to Access-Control-Expose-Headers
-    % return: RequestHeaders ++ CorsHeaders ++ ACEH
-
-    RequestHeaders = [K || {K,_V} <- RequestHeaders0],
-    ExposedHeaders0 = reduce_headers(RequestHeaders, ?ALLOWED_HEADERS),
-
-    % Content-Type may not have been moved into ExposedHeaders above,
-    % so now we check whether the Content-Type value is in
-    % ?SIMPLE_CONTENT_TYPE_VALUES and, if it isn't, add Content-Type
-    % to ExposedHeaders
-    ContentType = proplists:get_value("Content-Type", RequestHeaders0),
-    IncludeContentType = case ContentType of
-    undefined ->
-        false;
-    _ ->
-        ContentType_ = string:to_lower(ContentType),
-        lists:member(ContentType_, ?SIMPLE_CONTENT_TYPE_VALUES)
-    end,
-    ExposedHeaders = case IncludeContentType of
-    false ->
-        lists:umerge(ExposedHeaders0, ["Content-Type"]);
-    true ->
-        ExposedHeaders0
-    end,
-    CorsHeaders
-    ++ RequestHeaders0
-    ++ [{"Access-Control-Expose-Headers",
-            string:join(ExposedHeaders, ", ")}].
-
-
-reduce_headers(A, B) ->
-    reduce_headers0(A, B, []).
-
-reduce_headers0([], _B, Result) ->
-    lists:sort(Result);
-reduce_headers0([ElmA|RestA], B, Result) ->
-    R = case member_nocase(ElmA, B) of
-    false -> Result;
-    _Else -> [ElmA | Result]
-    end,
-    reduce_headers0(RestA, B, R).
-
-member_nocase(ElmA, List) ->
-    lists:any(fun(ElmB) ->
-        string:to_lower(ElmA) =:= string:to_lower(ElmB)
-    end, List).
-
-handle_cors_headers(_Origin, _Host, []) ->
-    [];
-handle_cors_headers(Origin, Host, AcceptedOrigins) ->
-    AcceptAll = lists:member("*", AcceptedOrigins),
-    case {AcceptAll, lists:member(Origin, AcceptedOrigins)} of
-    {true, _} ->
-        make_cors_header(Origin, Host);
-    {false, true}  ->
-        make_cors_header(Origin, Host);
-    _ ->
-        % If the value of the Origin header is not a
-        % case-sensitive match for any of the values
-        % in list of origins, do not set any additional
-        % headers and terminate this set of steps.
-        % http://www.w3.org/TR/cors/#resource-requests
-        []
-    end.
-
-
-make_cors_header(Origin, Host) ->
-    Headers = [{"Access-Control-Allow-Origin", Origin}],
-    maybe_add_credentials(Origin, Host, Headers).
-
-
-%% util
-
-maybe_add_credentials(Origin, Host, Headers) ->
-    maybe_add_credentials(Headers, allow_credentials(Origin, Host)).
-
-maybe_add_credentials(Headers, false) ->
-    Headers;
-maybe_add_credentials(Headers, true) ->
-    Headers ++ [{"Access-Control-Allow-Credentials", "true"}].
-
-
-allow_credentials("*", _Host) ->
-    false;
-allow_credentials(_Origin, Host) ->
-    Default = get_bool_config("cors", "credentials", false),
-    get_bool_config(cors_section(Host), "credentials", Default).
-
-
-
-cors_config(Host, Key, Default) ->
-    couch_config:get(cors_section(Host), Key,
-                     couch_config:get("cors", Key, Default)).
-
-cors_section(Host0) ->
-    {Host, _Port} = split_host_port(Host0),
-    "cors:" ++ Host.
-
-enable_cors() ->
-    get_bool_config("httpd", "enable_cors", false).
-
-get_bool_config(Section, Key, Default) ->
-    case couch_config:get(Section, Key) of
-    undefined ->
-        Default;
-    "true" ->
-        true;
-    "false" ->
-        false
-    end.
-
-get_accepted_origins(Host) ->
-    split_list(cors_config(Host, "origins", [])).
-
-split_list(S) ->
-    re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
-split_headers(H) ->
-    re:split(H, ",\\s*", [{return,list}, trim]).
-
-split_host_port(HostAsString) ->
-    % split at colon ":"
-    Split = string:rchr(HostAsString, $:),
-    split_host_port(HostAsString, Split).
-
-split_host_port(HostAsString, 0) ->
-    % no colon
-    {HostAsString, '*'};
-split_host_port(HostAsString, N) ->
-    HostPart = string:substr(HostAsString, 1, N-1),
-    % parse out port
-    % is there a nicer way?
-    case (catch erlang:list_to_integer(string:substr(HostAsString,
-                    N+1, length(HostAsString)))) of
-    {'EXIT', _} ->
-        {HostAsString, '*'};
-    Port ->
-        {HostPart, Port}
-    end.

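Since the couch_httpd_cors module removed above is driven entirely by the
ini file, the knobs it reads are worth spelling out: enable_cors under
[httpd]; origins, methods, headers, max_age and credentials under [cors];
and per-host overrides in a "cors:" ++ Host section built by
cors_section/1. A sketch (host names and values are examples only):

    [httpd]
    enable_cors = true

    [cors]
    ; origins = * accepts any origin, but credentials are then refused
    origins = http://example.com, https://example.com
    credentials = true
    max_age = 3600

    ; per-vhost override section
    [cors:example.com]
    methods = GET, PUT, POST
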
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_db.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_db.erl b/couch_httpd_db.erl
deleted file mode 100644
index 0a7c17c..0000000
--- a/couch_httpd_db.erl
+++ /dev/null
@@ -1,1226 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_db).
--include("couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
-    db_req/2, couch_doc_open/4,handle_changes_req/2,
-    update_doc_result_to_json/1, update_doc_result_to_json/2,
-    handle_design_info_req/3]).
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, absolute_uri/2, send/2,
-    start_response_length/4, send_error/4]).
-
--record(doc_query_args, {
-    options = [],
-    rev = nil,
-    open_revs = [],
-    update_type = interactive_edit,
-    atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
-        db_url_handlers=DbUrlHandlers}=Req)->
-    case {Method, RestParts} of
-    {'PUT', []} ->
-        create_db_req(Req, DbName);
-    {'DELETE', []} ->
-        % if we get ?rev=... the user is using a faulty script where the
-        % document id is empty by accident. Let them recover safely.
-        case couch_httpd:qs_value(Req, "rev", false) of
-            false -> delete_db_req(Req, DbName);
-            _Rev -> throw({bad_request,
-                "You tried to DELETE a database with a ?=rev parameter. "
-                ++ "Did you mean to DELETE a document instead?"})
-        end;
-    {_, []} ->
-        do_db_req(Req, fun db_req/2);
-    {_, [SecondPart|_]} ->
-        Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
-        do_db_req(Req, Handler)
-    end.
-
-handle_changes_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_changes_req1(Req, #db{name=DbName}=Db) ->
-    AuthDbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
-    case AuthDbName of
-    DbName ->
-        % in the authentication database, _changes is admin-only.
-        ok = couch_db:check_is_admin(Db);
-    _Else ->
-        % on other databases, _changes is free for all.
-        ok
-    end,
-    handle_changes_req2(Req, Db).
-
-handle_changes_req2(Req, Db) ->
-    MakeCallback = fun(Resp) ->
-        fun({change, {ChangeProp}=Change, _}, "eventsource") ->
-            Seq = proplists:get_value(<<"seq">>, ChangeProp),
-            send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
-                              "\n", "id: ", ?JSON_ENCODE(Seq),
-                              "\n\n"]);
-        ({change, Change, _}, "continuous") ->
-            send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
-        ({change, Change, Prepend}, _) ->
-            send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
-        (start, "eventsource") ->
-            ok;
-        (start, "continuous") ->
-            ok;
-        (start, _) ->
-            send_chunk(Resp, "{\"results\":[\n");
-        ({stop, _EndSeq}, "eventsource") ->
-            end_json_response(Resp);
-        ({stop, EndSeq}, "continuous") ->
-            send_chunk(
-                Resp,
-                [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
-            ),
-            end_json_response(Resp);
-        ({stop, EndSeq}, _) ->
-            send_chunk(
-                Resp,
-                io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
-            ),
-            end_json_response(Resp);
-        (timeout, _) ->
-            send_chunk(Resp, "\n")
-        end
-    end,
-    ChangesArgs = parse_changes_query(Req, Db),
-    ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
-    WrapperFun = case ChangesArgs#changes_args.feed of
-    "normal" ->
-        {ok, Info} = couch_db:get_db_info(Db),
-        CurrentEtag = couch_httpd:make_etag(Info),
-        fun(FeedChangesFun) ->
-            couch_httpd:etag_respond(
-                Req,
-                CurrentEtag,
-                fun() ->
-                    {ok, Resp} = couch_httpd:start_json_response(
-                         Req, 200, [{"ETag", CurrentEtag}]
-                    ),
-                    FeedChangesFun(MakeCallback(Resp))
-                end
-            )
-        end;
-    "eventsource" ->
-        Headers = [
-            {"Content-Type", "text/event-stream"},
-            {"Cache-Control", "no-cache"}
-        ],
-        {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end;
-    _ ->
-        % "longpoll" or "continuous"
-        {ok, Resp} = couch_httpd:start_json_response(Req, 200),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end
-    end,
-    couch_stats_collector:increment(
-        {httpd, clients_requesting_changes}
-    ),
-    try
-        WrapperFun(ChangesFun)
-    after
-    couch_stats_collector:decrement(
-        {httpd, clients_requesting_changes}
-    )
-    end.
-
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
-    case Req#httpd.path_parts of
-        [_DbName, <<"_compact">>] ->
-            ok = couch_db:check_is_admin(Db),
-            couch_httpd:validate_ctype(Req, "application/json"),
-            {ok, _} = couch_db:start_compact(Db),
-            send_json(Req, 202, {[{ok, true}]});
-        [_DbName, <<"_compact">>, DesignName | _] ->
-            DesignId = <<"_design/", DesignName/binary>>,
-            DDoc = couch_httpd_db:couch_doc_open(
-                Db, DesignId, nil, [ejson_body]
-            ),
-            couch_mrview_http:handle_compact_req(Req, Db, DDoc)
-    end;
-
-handle_compact_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_design_req(#httpd{
-        path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
-        design_url_handlers = DesignUrlHandlers
-    }=Req, Db) ->
-    case couch_db:is_system_db(Db) of
-    true ->
-        case (catch couch_db:check_is_admin(Db)) of
-        ok -> ok;
-        _ ->
-            throw({forbidden, <<"Only admins can access design document",
-                " actions for system databases.">>})
-        end;
-    false -> ok
-    end,
-
-    % load ddoc
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
-        throw({not_found, <<"missing handler: ", Action/binary>>})
-    end),
-    Handler(Req, Db, DDoc);
-
-handle_design_req(Req, Db) ->
-    db_req(Req, Db).
-
-handle_design_info_req(#httpd{
-            method='GET',
-            path_parts=[_DbName, _Design, DesignName, _]
-        }=Req, Db, _DDoc) ->
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    couch_mrview_http:handle_info_req(Req, Db, DDoc).
-
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        couch_db:close(Db),
-        DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-        send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_server:delete(DbName, [{user_ctx, UserCtx}]) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
-    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        try
-            Fun(Req, Db)
-        after
-            catch couch_db:close(Db)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
-    {ok, DbInfo} = couch_db:get_db_info(Db),
-    send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
-    validate_attachment_names(Doc),
-    Doc2 = case Doc#doc.id of
-        <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
-        _ ->
-            Doc
-    end,
-    DocId = Doc2#doc.id,
-    update_doc(Req, Db, DocId, Doc2);
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    CommittedSeq = couch_db:get_committed_update_seq(Db),
-    {ok, StartTime} =
-    case couch_httpd:qs_value(Req, "seq") of
-    undefined ->
-        couch_db:ensure_full_commit(Db);
-    RequiredStr ->
-        RequiredSeq = list_to_integer(RequiredStr),
-        if RequiredSeq > UpdateSeq ->
-            throw({bad_request,
-                "can't do a full commit ahead of current update_seq"});
-        RequiredSeq > CommittedSeq ->
-            couch_db:ensure_full_commit(Db);
-        true ->
-            {ok, Db#db.instance_start_time}
-        end
-    end,
-    send_json(Req, 201, {[
-        {ok, true},
-        {instance_start_time, StartTime}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
-    couch_stats_collector:increment({httpd, bulk_requests}),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {JsonProps} = couch_httpd:json_body_obj(Req),
-    case couch_util:get_value(<<"docs">>, JsonProps) of
-    undefined ->
-        send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
-    DocsArray ->
-        case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-        "true" ->
-            Options = [full_commit];
-        "false" ->
-            Options = [delay_commit];
-        _ ->
-            Options = []
-        end,
-        case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
-        true ->
-            Docs = lists:map(
-                fun({ObjProps} = JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Id = case Doc#doc.id of
-                        <<>> -> couch_uuids:new();
-                        Id0 -> Id0
-                    end,
-                    case couch_util:get_value(<<"_rev">>, ObjProps) of
-                    undefined ->
-                        Revs = {0, []};
-                    Rev ->
-                        {Pos, RevId} = couch_doc:parse_rev(Rev),
-                        Revs = {Pos, [RevId]}
-                    end,
-                    Doc#doc{id=Id,revs=Revs}
-                end,
-                DocsArray),
-            Options2 =
-            case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
-            true  -> [all_or_nothing|Options];
-            _ -> Options
-            end,
-            case couch_db:update_docs(Db, Docs, Options2) of
-            {ok, Results} ->
-                % output the results
-                DocResults = lists:zipwith(fun update_doc_result_to_json/2,
-                    Docs, Results),
-                send_json(Req, 201, DocResults);
-            {aborted, Errors} ->
-                ErrorsJson =
-                    lists:map(fun update_doc_result_to_json/1, Errors),
-                send_json(Req, 417, ErrorsJson)
-            end;
-        false ->
-            Docs = lists:map(fun(JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Doc
-                end, DocsArray),
-            {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
-            ErrorsJson =
-                lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 201, ErrorsJson)
-        end
-    end;
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {IdsRevs} = couch_httpd:json_body_obj(Req),
-    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-
-    case couch_db:purge_docs(Db, IdsRevs2) of
-    {ok, PurgeSeq, PurgedIdsRevs} ->
-        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
-        send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
-    Error ->
-        throw(Error)
-    end;
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
-    send_json(Req, {[
-        {missing_revs, {Results2}}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 =
-        [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 =
-    lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
-        {Id,
-            {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
-                if PossibleAncestors == [] ->
-                    [];
-                true ->
-                    [{possible_ancestors,
-                        couch_doc:revs_to_strs(PossibleAncestors)}]
-                end}}
-    end, Results),
-    send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    SecObj = couch_httpd:json_body(Req),
-    ok = couch_db:set_security(Db, SecObj),
-    send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
-        Db) ->
-    Limit = couch_httpd:json_body(Req),
-    case is_integer(Limit) of
-    true ->
-        ok = couch_db:set_revs_limit(Db, Limit),
-        send_json(Req, {[{<<"ok">>, true}]});
-    false ->
-        throw({bad_request, <<"Rev limit has to be an integer">>})
-    end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
-    PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
-    [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
-        [{return, list}]),
-    couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
-        mochiweb_util:join(PathTail, "_design%2F"));
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
-    throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
-    db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, DocId, FileNameParts).
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
-    % check for the existence of the doc to handle the 404 case.
-    couch_doc_open(Db, DocId, nil, []),
-    case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
-    Rev ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId,
-                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
-    end;
-
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
-    #doc_query_args{
-        rev = Rev,
-        open_revs = Revs,
-        options = Options1,
-        atts_since = AttsSince
-    } = parse_doc_query(Req),
-    Options = case AttsSince of
-    nil ->
-        Options1;
-    RevList when is_list(RevList) ->
-        [{atts_since, RevList}, attachments | Options1]
-    end,
-    case Revs of
-    [] ->
-        Doc = couch_doc_open(Db, DocId, Rev, Options),
-        send_doc(Req, Doc, Options);
-    _ ->
-        {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
-        case MochiReq:accepts_content_type("multipart/mixed") of
-        false ->
-            {ok, Resp} = start_json_response(Req, 200),
-            send_chunk(Resp, "["),
-            % We loop through the docs. The first separator is the empty
-            % string; each subsequent iteration uses a comma.
-            lists:foldl(
-                fun(Result, AccSeparator) ->
-                    case Result of
-                    {ok, Doc} ->
-                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
-                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
-                        send_chunk(Resp, AccSeparator ++ Json);
-                    {{not_found, missing}, RevId} ->
-                        RevStr = couch_doc:rev_to_str(RevId),
-                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-                        send_chunk(Resp, AccSeparator ++ Json)
-                    end,
-                    "," % AccSeparator now has a comma
-                end,
-                "", Results),
-            send_chunk(Resp, "]"),
-            end_json_response(Resp);
-        true ->
-            send_docs_multipart(Req, Results, Options)
-        end
-    end;
-
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
-    couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
-    couch_httpd:validate_ctype(Req, "multipart/form-data"),
-    Form = couch_httpd:parse_form(Req),
-    case couch_util:get_value("_doc", Form) of
-    undefined ->
-        Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
-        {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
-    Json ->
-        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
-    end,
-    UpdatedAtts = [
-        #att{name=validate_attachment_name(Name),
-            type=list_to_binary(ContentType),
-            data=Content} ||
-        {Name, {ContentType, _}, Content} <-
-        proplists:get_all_values("_attachments", Form)
-    ],
-    #doc{atts=OldAtts} = Doc,
-    OldAtts2 = lists:flatmap(
-        fun(#att{name=OldName}=Att) ->
-            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
-            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
-            _ -> [] % the attachment was in the UpdatedAtts, drop it
-            end
-        end, OldAtts),
-    NewDoc = Doc#doc{
-        atts = UpdatedAtts ++ OldAtts2
-    },
-    update_doc(Req, Db, DocId, NewDoc);
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
-    couch_doc:validate_docid(DocId),
-
-    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
-    ("multipart/related;" ++ _) = ContentType ->
-        {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
-            ContentType, fun() -> receive_request_data(Req) end),
-        Doc = couch_doc_from_req(Req, DocId, Doc0),
-        try
-            Result = update_doc(Req, Db, DocId, Doc),
-            WaitFun(),
-            Result
-        catch throw:Err ->
-            % Document rejected by a validate_doc_update function.
-            couch_doc:abort_multi_part_stream(Parser),
-            throw(Err)
-        end;
-    _Else ->
-        Body = couch_httpd:json_body(Req),
-        Doc = couch_doc_from_req(Req, DocId, Body),
-        update_doc(Req, Db, DocId, Doc)
-    end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
-    SourceRev =
-    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> nil;
-        Rev -> Rev
-    end,
-    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
-    % open old doc
-    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
-    % save new doc
-    update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
-
-db_doc_req(Req, _Db, _DocId) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-
-send_doc(Req, Doc, Options) ->
-    case Doc#doc.meta of
-    [] ->
-        DiskEtag = couch_httpd:doc_etag(Doc),
-        % output etag only when we have no meta
-        couch_httpd:etag_respond(Req, DiskEtag, fun() ->
-            send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
-        end);
-    _ ->
-        send_doc_efficiently(Req, Doc, [], Options)
-    end.
-
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
-    #doc{atts = Atts} = Doc, Headers, Options) ->
-    case lists:member(attachments, Options) of
-    true ->
-        case MochiReq:accepts_content_type("multipart/related") of
-        false ->
-            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-        true ->
-            Boundary = couch_uuids:random(),
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
-                    [attachments, follows, att_encoding_info | Options])),
-            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
-                    Boundary,JsonBytes, Atts, true),
-            CType = {"Content-Type", ?b2l(ContentType)},
-            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
-            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
-                    fun(Data) -> couch_httpd:send(Resp, Data) end, true)
-        end;
-    false ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
-    end.
-
-send_docs_multipart(Req, Results, Options1) ->
-    OuterBoundary = couch_uuids:random(),
-    InnerBoundary = couch_uuids:random(),
-    Options = [attachments, follows, att_encoding_info | Options1],
-    CType = {"Content-Type",
-        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
-    lists:foreach(
-        fun({ok, #doc{atts=Atts}=Doc}) ->
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
-            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
-                    InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
-                    ContentType/binary, "\r\n\r\n">>),
-            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
-                    end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
-        ({{not_found, missing}, RevId}) ->
-             RevStr = couch_doc:rev_to_str(RevId),
-             Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-             couch_httpd:send_chunk(Resp,
-                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
-                Json,
-                <<"\r\n--", OuterBoundary/binary>>])
-         end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
-    Boundary = couch_uuids:random(),
-    CType = {"Content-Type",
-        "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 206, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
-    lists:foreach(fun({From, To}) ->
-        ContentRange = ?l2b(make_content_range(From, To, Len)),
-        couch_httpd:send_chunk(Resp,
-            <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
-            "Content-Range: ", ContentRange/binary, "\r\n",
-           "\r\n">>),
-        couch_doc:range_att_foldl(Att, From, To + 1,
-            fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-        couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
-    end, Ranges),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp),
-    {ok, Resp}.
-
-receive_request_data(Req) ->
-    receive_request_data(Req, couch_httpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
-    Len = erlang:min(4096, LenLeft),
-    Data = couch_httpd:recv(Req, Len),
-    {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
-    throw(<<"expected more data">>).
-
-make_content_range(From, To, Len) ->
-    io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
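-
-% For example, make_content_range(0, 99, 1000) formats the iolist
-% "bytes 0-99/1000", as required for Content-Range headers (RFC 2616).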
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
-        {_Code, Err, Msg} = couch_httpd:error_info(Error),
-        {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
-            {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
-    update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
-    {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
-    {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
-    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
-    Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
-    update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
-update_doc(Req, Db, DocId, Doc) ->
-    update_doc(Req, Db, DocId, Doc, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
-    #doc_query_args{
-        update_type = UpdateType
-    } = parse_doc_query(Req),
-    update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
-    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-    "true" ->
-        Options = [full_commit];
-    "false" ->
-        Options = [delay_commit];
-    _ ->
-        Options = []
-    end,
-    case couch_httpd:qs_value(Req, "batch") of
-    "ok" ->
-        % async batching
-        spawn(fun() ->
-                case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
-                {ok, _} -> ok;
-                Error ->
-                    ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
-                end
-            end),
-        send_json(Req, 202, Headers, {[
-            {ok, true},
-            {id, DocId}
-        ]});
-    _Normal ->
-        % normal
-        {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
-        NewRevStr = couch_doc:rev_to_str(NewRev),
-        ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
-        send_json(Req,
-            if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
-            true -> 201 end,
-            ResponseHeaders, {[
-                {ok, true},
-                {id, DocId},
-                {rev, NewRevStr}]})
-    end.
-
-couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
-    validate_attachment_names(Doc),
-    Rev = case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        undefined;
-    QSRev ->
-        couch_doc:parse_rev(QSRev)
-    end,
-    Revs2 =
-    case Revs of
-    {Start, [RevId|_]} ->
-        if Rev /= undefined andalso Rev /= {Start, RevId} ->
-            throw({bad_request, "Document rev from request body and query "
-                   "string have different values"});
-        true ->
-            case extract_header_rev(Req, {Start, RevId}) of
-            missing_rev -> {0, []};
-            _ -> Revs
-            end
-        end;
-    _ ->
-        case extract_header_rev(Req, Rev) of
-        missing_rev -> {0, []};
-        {Pos, RevId2} -> {Pos, [RevId2]}
-        end
-    end,
-    Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
-    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-%   couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
-    case Rev of
-    nil -> % open most recent rev
-        case couch_db:open_doc(Db, DocId, Options) of
-        {ok, Doc} ->
-            Doc;
-        Error ->
-            throw(Error)
-        end;
-    _ -> % open a specific rev (deletions come back as stubs)
-        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
-        {ok, [{ok, Doc}]} ->
-            Doc;
-        {ok, [{{not_found, missing}, Rev}]} ->
-            throw(not_found);
-        {ok, [Else]} ->
-            throw(Else)
-        end
-    end.
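-
-% For example (illustrative doc id): couch_doc_open(Db, <<"mydoc">>, nil, [])
-% returns the winning revision, while passing a specific Rev returns that
-% revision (deleted revisions come back as stubs) or throws not_found.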
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
-    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
-    #doc_query_args{
-        rev=Rev,
-        options=Options
-    } = parse_doc_query(Req),
-    #doc{
-        atts=Atts
-    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
-    case [A || A <- Atts, A#att.name == FileName] of
-    [] ->
-        throw({not_found, "Document is missing attachment"});
-    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
-        Etag = case Att#att.md5 of
-            <<>> -> couch_httpd:doc_etag(Doc);
-            Md5 -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
-        end,
-        ReqAcceptsAttEnc = lists:member(
-           atom_to_list(Enc),
-           couch_httpd:accepted_encodings(Req)
-        ),
-        Len = case {Enc, ReqAcceptsAttEnc} of
-        {identity, _} ->
-            % stored and served in identity form
-            DiskLen;
-        {_, false} when DiskLen =/= AttLen ->
-            % Stored encoded, but client doesn't accept the encoding we used,
-            % so we need to decode on the fly.  DiskLen is the identity length
-            % of the attachment.
-            DiskLen;
-        {_, true} ->
-            % Stored and served encoded.  AttLen is the encoded length.
-            AttLen;
-        _ ->
-            % We received an encoded attachment and stored it as such, so we
-            % don't know the identity length.  The client doesn't accept the
-            % encoding, and since we cannot serve a correct Content-Length
-            % header we'll fall back to a chunked response.
-            undefined
-        end,
-        Headers = [
-            {"ETag", Etag},
-            {"Cache-Control", "must-revalidate"},
-            {"Content-Type", binary_to_list(Type)}
-        ] ++ case ReqAcceptsAttEnc of
-        true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
-            % the Content-Encoding header
-            [{"Content-Encoding", atom_to_list(Enc)}];
-        _ ->
-            []
-        end ++ case Enc of
-            identity ->
-                [{"Accept-Ranges", "bytes"}];
-            _ ->
-                [{"Accept-Ranges", "none"}]
-        end,
-        AttFun = case ReqAcceptsAttEnc of
-        false ->
-            fun couch_doc:att_foldl_decode/3;
-        true ->
-            fun couch_doc:att_foldl/3
-        end,
-        couch_httpd:etag_respond(
-            Req,
-            Etag,
-            fun() ->
-                case Len of
-                undefined ->
-                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
-                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-                    last_chunk(Resp);
-                _ ->
-                    Ranges = parse_ranges(MochiReq:get(range), Len),
-                    case {Enc, Ranges} of
-                        {identity, [{From, To}]} ->
-                            Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
-                                ++ Headers,
-                            {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
-                            couch_doc:range_att_foldl(Att, From, To + 1,
-                                fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
-                        {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
-                            send_ranges_multipart(Req, Type, Len, Att, Ranges);
-                        _ ->
-                            Headers1 = Headers ++
-                                if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
-                                    [{"Content-MD5", base64:encode(Att#att.md5)}];
-                                true ->
-                                    []
-                            end,
-                            {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
-                            AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
-                    end
-                end
-            end
-        )
-    end;
-
-
-db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
-        when (Method == 'PUT') or (Method == 'DELETE') ->
-    FileName = validate_attachment_name(
-                    mochiweb_util:join(
-                        lists:map(fun binary_to_list/1,
-                            FileNameParts),"/")),
-
-    NewAtt = case Method of
-        'DELETE' ->
-            [];
-        _ ->
-            [#att{
-                name = FileName,
-                type = case couch_httpd:header_value(Req,"Content-Type") of
-                    undefined ->
-                        % We could throw an error here or guess by the FileName.
-                        % Currently, just giving it a default.
-                        <<"application/octet-stream">>;
-                    CType ->
-                        list_to_binary(CType)
-                    end,
-                data = case couch_httpd:body_length(Req) of
-                    undefined ->
-                        <<"">>;
-                    {unknown_transfer_encoding, Unknown} ->
-                        exit({unknown_transfer_encoding, Unknown});
-                    chunked ->
-                        fun(MaxChunkSize, ChunkFun, InitState) ->
-                            couch_httpd:recv_chunked(Req, MaxChunkSize,
-                                ChunkFun, InitState)
-                        end;
-                    0 ->
-                        <<"">>;
-                    Length when is_integer(Length) ->
-                        Expect = case couch_httpd:header_value(Req, "expect") of
-                                     undefined ->
-                                         undefined;
-                                     Value when is_list(Value) ->
-                                         string:to_lower(Value)
-                                 end,
-                        case Expect of
-                            "100-continue" ->
-                                MochiReq:start_raw_response({100, gb_trees:empty()});
-                            _Else ->
-                                ok
-                        end,
-
-                        fun(Size) -> couch_httpd:recv(Req, Size) end
-                    end,
-                att_len = case couch_httpd:header_value(Req,"Content-Length") of
-                    undefined ->
-                        undefined;
-                    Length ->
-                        list_to_integer(Length)
-                    end,
-                md5 = get_md5_header(Req),
-                encoding = case string:to_lower(string:strip(
-                    couch_httpd:header_value(Req,"Content-Encoding","identity")
-                )) of
-                "identity" ->
-                    identity;
-                "gzip" ->
-                    gzip;
-                _ ->
-                    throw({
-                        bad_ctype,
-                        "Only gzip and identity content-encodings are supported"
-                    })
-                end
-            }]
-    end,
-
-    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> % make the new doc
-            couch_doc:validate_docid(DocId),
-            #doc{id=DocId};
-        Rev ->
-            case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
-                {ok, [{ok, Doc0}]} -> Doc0;
-                {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
-                {ok, [Error]} -> throw(Error)
-            end
-    end,
-
-    #doc{atts=Atts} = Doc,
-    DocEdited = Doc#doc{
-        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
-    },
-
-    Headers = case Method of
-    'DELETE' ->
-        [];
-    _ ->
-        [{"Location", absolute_uri(Req, "/" ++
-            ?b2l(Db#db.name) ++ "/" ++
-            ?b2l(DocId) ++ "/" ++
-            ?b2l(FileName)
-        )}]
-    end,
-    update_doc(Req, Db, DocId, DocEdited, Headers);
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-parse_ranges(undefined, _Len) ->
-    undefined;
-parse_ranges(fail, _Len) ->
-    undefined;
-parse_ranges(Ranges, Len) ->
-    parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
-    lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
-    undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
-    throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
-    parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
-    parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
-    parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
-    parse_ranges(Rest, Len, [{From, To}] ++ Acc).
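-
-% For example, with Len = 1000:
-%   parse_ranges([{0, 499}], 1000)    -> [{0, 499}]
-%   parse_ranges([{none, 200}], 1000) -> [{800, 999}]  (suffix range)
-%   parse_ranges([{500, none}], 1000) -> [{500, 999}]  (open-ended range)
-%   parse_ranges([{0, 2000}], 1000)   -> [{0, 999}]    (clamped to Len - 1)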
-
-get_md5_header(Req) ->
-    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
-    Length = couch_httpd:body_length(Req),
-    Trailer = couch_httpd:header_value(Req, "Trailer"),
-    case {ContentMD5, Length, Trailer} of
-        _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
-            base64:decode(ContentMD5);
-        {_, chunked, undefined} ->
-            <<>>;
-        {_, chunked, _} ->
-            case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
-                {match, _} ->
-                    md5_in_footer;
-                _ ->
-                    <<>>
-            end;
-        _ ->
-            <<>>
-    end.
-
-parse_doc_query(Req) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-        {"attachments", "true"} ->
-            Options = [attachments | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"meta", "true"} ->
-            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs", "true"} ->
-            Options = [revs | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"local_seq", "true"} ->
-            Options = [local_seq | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs_info", "true"} ->
-            Options = [revs_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"conflicts", "true"} ->
-            Options = [conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"deleted_conflicts", "true"} ->
-            Options = [deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"rev", Rev} ->
-            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
-        {"open_revs", "all"} ->
-            Args#doc_query_args{open_revs=all};
-        {"open_revs", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
-        {"latest", "true"} ->
-            Options = [latest | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"atts_since", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
-        {"new_edits", "false"} ->
-            Args#doc_query_args{update_type=replicated_changes};
-        {"new_edits", "true"} ->
-            Args#doc_query_args{update_type=interactive_edit};
-        {"att_encoding_info", "true"} ->
-            Options = [att_encoding_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #doc_query_args{}, couch_httpd:qs(Req)).
-
-parse_changes_query(Req, Db) ->
-    ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
-        case {string:to_lower(Key), Value} of
-        {"feed", _} ->
-            Args#changes_args{feed=Value};
-        {"descending", "true"} ->
-            Args#changes_args{dir=rev};
-        {"since", "now"} ->
-            UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
-                                        couch_db:get_update_seq(WDb)
-                                end),
-            Args#changes_args{since=UpdateSeq};
-        {"since", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"last-event-id", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"limit", _} ->
-            Args#changes_args{limit=list_to_integer(Value)};
-        {"style", _} ->
-            Args#changes_args{style=list_to_existing_atom(Value)};
-        {"heartbeat", "true"} ->
-            Args#changes_args{heartbeat=true};
-        {"heartbeat", _} ->
-            Args#changes_args{heartbeat=list_to_integer(Value)};
-        {"timeout", _} ->
-            Args#changes_args{timeout=list_to_integer(Value)};
-        {"include_docs", "true"} ->
-            Args#changes_args{include_docs=true};
-        {"attachments", "true"} ->
-            Opts = Args#changes_args.doc_options,
-            Args#changes_args{doc_options=[attachments|Opts]};
-        {"att_encoding_info", "true"} ->
-            Opts = Args#changes_args.doc_options,
-            Args#changes_args{doc_options=[att_encoding_info|Opts]};
-        {"conflicts", "true"} ->
-            Args#changes_args{conflicts=true};
-        {"filter", _} ->
-            Args#changes_args{filter=Value};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #changes_args{}, couch_httpd:qs(Req)),
-    %% If this is an EventSource request with a Last-Event-ID header, that
-    %% header overrides the `since` query string value, since it's probably
-    %% the browser reconnecting.
-    case ChangesArgs#changes_args.feed of
-        "eventsource" ->
-            case couch_httpd:header_value(Req, "last-event-id") of
-                undefined ->
-                    ChangesArgs;
-                Value ->
-                    ChangesArgs#changes_args{since=list_to_integer(Value)}
-            end;
-        _ ->
-            ChangesArgs
-    end.
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
-    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
-    Etag = case couch_httpd:header_value(Req, "If-Match") of
-        undefined -> undefined;
-        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
-    end,
-    case {ExplicitRev, Etag} of
-    {undefined, undefined} -> missing_rev;
-    {_, undefined} -> ExplicitRev;
-    {undefined, _} -> Etag;
-    _ when ExplicitRev == Etag -> Etag;
-    _ ->
-        throw({bad_request, "Document rev and etag have different values"})
-    end.
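-
-% For example (illustrative rev): with no explicit rev and a request header
-% If-Match: "1-abc", this returns {1, <<"abc">>}; with neither source present
-% it returns missing_rev; conflicting values throw bad_request.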
-
-
-parse_copy_destination_header(Req) ->
-    case couch_httpd:header_value(Req, "Destination") of
-    undefined ->
-        throw({bad_request, "Destination header is mandatory for COPY."});
-    Destination ->
-        case re:run(Destination, "^https?://", [{capture, none}]) of
-        match ->
-            throw({bad_request, "Destination URL must be relative."});
-        nomatch ->
-            % see if ?rev=revid got appended to the Destination header
-            case re:run(Destination, "\\?", [{capture, none}]) of
-            nomatch ->
-                {list_to_binary(Destination), {0, []}};
-            match ->
-                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
-                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
-                {Pos, RevId} = couch_doc:parse_rev(Rev),
-                {list_to_binary(DocId), {Pos, [RevId]}}
-            end
-        end
-    end.
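-
-% For example (illustrative values), a header Destination: newdoc?rev=1-abc
-% parses to {<<"newdoc">>, {1, [<<"abc">>]}}, while a bare Destination: newdoc
-% parses to {<<"newdoc">>, {0, []}}.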
-
-validate_attachment_names(Doc) ->
-    lists:foreach(fun(#att{name=Name}) ->
-        validate_attachment_name(Name)
-    end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
-    validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
-    throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
-    case couch_util:validate_utf8(Name) of
-        true -> Name;
-        false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
-    end.
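-
-% For example, validate_attachment_name(<<"photo.png">>) returns the name
-% unchanged; <<"_secret">> and names that are not valid UTF-8 both throw
-% bad_request.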
-

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_external.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_external.erl b/couch_httpd_external.erl
deleted file mode 100644
index 2036d25..0000000
--- a/couch_httpd_external.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_external).
-
--export([handle_external_req/2, handle_external_req/3]).
--export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
--export([default_or_content_type/2, parse_external_response/1]).
-
--import(couch_httpd,[send_error/4]).
-
--include("couch_db.hrl").
-
-% handle_external_req/2
-% for the old type of config usage:
-% _external = {couch_httpd_external, handle_external_req}
-% with urls like
-% /db/_external/action/design/name
-handle_external_req(#httpd{
-                        path_parts=[_DbName, _External, UrlName | _Path]
-                    }=HttpReq, Db) ->
-    process_external_req(HttpReq, Db, UrlName);
-handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
-handle_external_req(Req, _) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-
-% handle_external_req/3
-% for this type of config usage:
-% _action = {couch_httpd_external, handle_external_req, <<"action">>}
-% with urls like
-% /db/_action/design/name
-handle_external_req(HttpReq, Db, Name) ->
-    process_external_req(HttpReq, Db, Name).
-
-process_external_req(HttpReq, Db, Name) ->
-
-    Response = couch_external_manager:execute(binary_to_list(Name),
-        json_req_obj(HttpReq, Db)),
-
-    case Response of
-    {unknown_external_server, Msg} ->
-        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
-    _ ->
-        send_external_response(HttpReq, Response)
-    end.
-json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
-               method=Method,
-               requested_path_parts=RequestedPath,
-               path_parts=Path,
-               req_body=ReqBody
-            }, Db, DocId) ->
-    Body = case ReqBody of
-        undefined ->
-            MaxSize = list_to_integer(
-                couch_config:get("couchdb", "max_document_size", "4294967296")),
-            Req:recv_body(MaxSize);
-        Else -> Else
-    end,
-    ParsedForm = case Req:get_primary_header_value("content-type") of
-        "application/x-www-form-urlencoded" ++ _ ->
-            case Body of
-            undefined -> [];
-            _ -> mochiweb_util:parse_qs(Body)
-            end;
-        _ ->
-            []
-    end,
-    Headers = Req:get(headers),
-    Hlist = mochiweb_headers:to_list(Headers),
-    {ok, Info} = couch_db:get_db_info(Db),
-
-    % add headers...
-    {[{<<"info">>, {Info}},
-        {<<"id">>, DocId},
-        {<<"uuid">>, couch_uuids:new()},
-        {<<"method">>, Method},
-        {<<"requested_path">>, RequestedPath},
-        {<<"path">>, Path},
-        {<<"raw_path">>, ?l2b(Req:get(raw_path))},
-        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
-        {<<"headers">>, to_json_terms(Hlist)},
-        {<<"body">>, Body},
-        {<<"peer">>, ?l2b(Req:get(peer))},
-        {<<"form">>, to_json_terms(ParsedForm)},
-        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)},
-        {<<"secObj">>, couch_db:get_security(Db)}]}.
-
-to_json_terms(Data) ->
-    to_json_terms(Data, []).
-
-to_json_terms([], Acc) ->
-    {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
-    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
-    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
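-
-% For example, mochiweb header lists carry atom or string keys; both become
-% binaries:
-%   to_json_terms([{'Host', "localhost:5984"}, {"X-Forwarded-For", "10.0.0.1"}])
-%       -> {[{<<"Host">>, <<"localhost:5984">>}, {<<"X-Forwarded-For">>, <<"10.0.0.1">>}]}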
-
-json_query_keys({Json}) ->
-    json_query_keys(Json, []).
-json_query_keys([], Acc) ->
-    {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([Term | Rest], Acc) ->
-    json_query_keys(Rest, [Term|Acc]).
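-
-% For example, startkey/endkey/key arrive as JSON-encoded strings and are
-% decoded in place; other keys pass through untouched:
-%   json_query_keys({[{<<"startkey">>, <<"[\"a\"]">>}, {<<"limit">>, <<"10">>}]})
-%       -> {[{<<"startkey">>, [<<"a">>]}, {<<"limit">>, <<"10">>}]}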
-
-send_external_response(Req, Response) ->
-    #extern_resp_args{
-        code = Code,
-        data = Data,
-        ctype = CType,
-        headers = Headers,
-        json = Json
-    } = parse_external_response(Response),
-    Headers1 = default_or_content_type(CType, Headers),
-    case Json of
-    nil ->
-        couch_httpd:send_response(Req, Code, Headers1, Data);
-    Json ->
-        couch_httpd:send_json(Req, Code, Headers1, Json)
-    end.
-
-parse_external_response({Response}) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-            {"", _} ->
-                Args;
-            {<<"code">>, Value} ->
-                Args#extern_resp_args{code=Value};
-            {<<"stop">>, true} ->
-                Args#extern_resp_args{stop=true};
-            {<<"json">>, Value} ->
-                Args#extern_resp_args{
-                    json=Value,
-                    ctype="application/json"};
-            {<<"body">>, Value} ->
-                Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
-            {<<"base64">>, Value} ->
-                Args#extern_resp_args{
-                    data=base64:decode(Value),
-                    ctype="application/binary"
-                };
-            {<<"headers">>, {Headers}} ->
-                NewHeaders = lists:map(fun({Header, HVal}) ->
-                    {binary_to_list(Header), binary_to_list(HVal)}
-                end, Headers),
-                Args#extern_resp_args{headers=NewHeaders};
-            _ -> % unknown key
-                Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
-                throw({external_response_error, Msg})
-            end
-        end, #extern_resp_args{}, Response).
-
-default_or_content_type(DefaultContentType, Headers) ->
-    IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
-    case lists:any(IsContentType, Headers) of
-    false ->
-        [{"Content-Type", DefaultContentType} | Headers];
-    true ->
-        Headers
-    end.
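-
-% For example:
-%   default_or_content_type("application/json", [])
-%       -> [{"Content-Type", "application/json"}]
-% An existing Content-Type header is detected case-insensitively and kept.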

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_misc_handlers.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_misc_handlers.erl b/couch_httpd_misc_handlers.erl
deleted file mode 100644
index 96a05c6..0000000
--- a/couch_httpd_misc_handlers.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_misc_handlers).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
-    handle_all_dbs_req/1,handle_restart_req/1,
-    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
-    handle_task_status_req/1, handle_file_req/2]).
-
--export([increment_update_seq_req/2]).
-
-
--include("couch_db.hrl").
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, send_error/4]).
-
-% httpd global handlers
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
-    send_json(Req, {[
-        {couchdb, WelcomeMessage},
-        {uuid, couch_server:get_uuid()},
-        {version, list_to_binary(couch_server:get_version())}
-        ] ++ case couch_config:get("vendor") of
-        [] ->
-            [];
-        Properties ->
-            [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
-        end
-    });
-handle_welcome_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    {{Year,Month,Day},Time} = erlang:universaltime(),
-    OneYearFromNow = {{Year+1,Month,Day},Time},
-    CachingHeaders = [
-        % favicon should expire a year from now
-        {"Cache-Control", "public, max-age=31536000"},
-        {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
-    ],
-    couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-
-handle_favicon_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_file_req(#httpd{method='GET'}=Req, Document) ->
-    couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-
-handle_file_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    "/" ++ UrlPath = couch_httpd:path(Req),
-    case couch_httpd:partition(UrlPath) of
-    {_ActionKey, "/", RelativePath} ->
-        % GET /_utils/path or GET /_utils/
-        CachingHeaders =
-                [{"Cache-Control", "private, must-revalidate"}],
-        couch_httpd:serve_file(Req, RelativePath, DocumentRoot, CachingHeaders);
-    {_ActionKey, "", _RelativePath} ->
-        % GET /_utils
-        RedirectPath = couch_httpd:path(Req) ++ "/",
-        couch_httpd:send_redirect(Req, RedirectPath)
-    end;
-handle_utils_dir_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    {ok, DbNames} = couch_server:all_databases(),
-    send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    % convert the list of prop lists to a list of json objects
-    send_json(Req, [{Props} || Props <- couch_task_status:all()]);
-handle_task_status_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_restart_req(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Result = send_json(Req, 202, {[{ok, true}]}),
-    couch_server_sup:restart_core_server(),
-    Result;
-handle_restart_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_uuids_req(#httpd{method='GET'}=Req) ->
-    Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
-    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
-    Etag = couch_httpd:make_etag(UUIDs),
-    couch_httpd:etag_respond(Req, Etag, fun() ->
-        CacheBustingHeaders = [
-            {"Date", couch_util:rfc1123_date()},
-            {"Cache-Control", "no-cache"},
-            % Past date, ON PURPOSE!
-            {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
-            {"Pragma", "no-cache"},
-            {"ETag", Etag}
-        ],
-        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
-    end);
-handle_uuids_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
-        case dict:is_key(Section, Acc) of
-        true ->
-            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
-        false ->
-            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
-        end
-    end, dict:new(), couch_config:all()),
-    KVs = dict:fold(fun(Section, Values, Acc) ->
-        [{list_to_binary(Section), {Values}} | Acc]
-    end, [], Grouped),
-    send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    KVs = [{list_to_binary(Key), list_to_binary(Value)}
-            || {Key, Value} <- couch_config:get(Section)],
-    send_json(Req, 200, {KVs});
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    Value ->
-        send_json(Req, 200, list_to_binary(Value))
-    end;
-% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
-      when (Method == 'PUT') or (Method == 'DELETE') ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
-    case couch_config:get(<<"httpd">>, <<"config_whitelist">>, null) of
-        null ->
-            % No whitelist; allow all changes.
-            handle_approved_config_req(Req, Persist);
-        WhitelistValue ->
-            % Provide a failsafe to protect against inadvertently locking
-            % oneself out of the config by supplying a syntactically incorrect
-            % Erlang term. To intentionally lock down the whitelist, supply a
-            % well-formed list which does not include the whitelist config
-            % variable itself.
-            FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
-
-            Whitelist = case couch_util:parse_term(WhitelistValue) of
-                {ok, Value} when is_list(Value) ->
-                    Value;
-                {ok, _NonListValue} ->
-                    FallbackWhitelist;
-                {error, _} ->
-                    [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
-                    ?LOG_ERROR("Only whitelisting ~s/~s due to error parsing: ~p",
-                               [WhitelistSection, WhitelistKey, WhitelistValue]),
-                    FallbackWhitelist
-            end,
-
-            IsRequestedKeyVal = fun(Element) ->
-                case Element of
-                    {A, B} ->
-                        % For readability, tuples may be used instead of binaries
-                        % in the whitelist.
-                        case {couch_util:to_binary(A), couch_util:to_binary(B)} of
-                            {Section, Key} ->
-                                true;
-                            {Section, <<"*">>} ->
-                                true;
-                            _Else ->
-                                false
-                        end;
-                    _Else ->
-                        false
-                end
-            end,
-
-            case lists:any(IsRequestedKeyVal, Whitelist) of
-                true ->
-                    % Allow modifying this whitelisted variable.
-                    handle_approved_config_req(Req, Persist);
-                _NotWhitelisted ->
-                    % Disallow modifying this non-whitelisted variable.
-                    send_error(Req, 400, <<"modification_not_allowed">>,
-                               ?l2b("This config variable is read-only"))
-            end
-    end;
-handle_config_req(Req) ->
-    send_method_not_allowed(Req, "GET,PUT,DELETE").
-
-% PUT /_config/Section/Key
-% "value"
-handle_approved_config_req(Req, Persist) ->
-    Query = couch_httpd:qs(Req),
-    UseRawValue = case lists:keyfind("raw", 1, Query) of
-    false            -> false; % Not specified
-    {"raw", ""}      -> false; % Specified with no value, i.e. "?raw" and "?raw="
-    {"raw", "false"} -> false;
-    {"raw", "true"}  -> true;
-    {"raw", InvalidValue} -> InvalidValue
-    end,
-    handle_approved_config_req(Req, Persist, UseRawValue).
-
-handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req,
-                           Persist, UseRawValue)
-        when UseRawValue =:= false orelse UseRawValue =:= true ->
-    RawValue = couch_httpd:json_body(Req),
-    Value = case UseRawValue of
-    true ->
-        % Client requests no change to the provided value.
-        RawValue;
-    false ->
-        % Pre-process the value as necessary.
-        case Section of
-        <<"admins">> ->
-            couch_passwords:hash_admin_password(RawValue);
-        _ ->
-            RawValue
-        end
-    end,
-
-    OldValue = couch_config:get(Section, Key, ""),
-    case couch_config:set(Section, Key, ?b2l(Value), Persist) of
-    ok ->
-        send_json(Req, 200, list_to_binary(OldValue));
-    Error ->
-        throw(Error)
-    end;
-
-handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) ->
-    Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
-    send_json(Req, 400, {[{error, ?l2b(Err)}]});
-
-% DELETE /_config/Section/Key
-handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req,
-                           Persist, _UseRawValue) ->
-    case couch_config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    OldValue ->
-        couch_config:delete(Section, Key, Persist),
-        send_json(Req, 200, list_to_binary(OldValue))
-    end.
-
-
-% httpd db handlers
-
-increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {ok, NewSeq} = couch_db:increment_update_seq(Db),
-    send_json(Req, {[{ok, true},
-        {update_seq, NewSeq}
-    ]});
-increment_update_seq_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-% httpd log handlers
-
-handle_log_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
-    Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
-    Chunk = couch_log:read(Bytes, Offset),
-    {ok, Resp} = start_chunked_response(Req, 200, [
-        % send a plaintext response
-        {"Content-Type", "text/plain; charset=utf-8"},
-        {"Content-Length", integer_to_list(length(Chunk))}
-    ]),
-    send_chunk(Resp, Chunk),
-    last_chunk(Resp);
-handle_log_req(#httpd{method='POST'}=Req) ->
-    {PostBody} = couch_httpd:json_body_obj(Req),
-    Level = couch_util:get_value(<<"level">>, PostBody),
-    Message = ?b2l(couch_util:get_value(<<"message">>, PostBody)),
-    case Level of
-    <<"debug">> ->
-        ?LOG_DEBUG(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"info">> ->
-        ?LOG_INFO(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"error">> ->
-        ?LOG_ERROR(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    _ ->
-        send_json(Req, 400, {[{error, ?l2b(io_lib:format("Unrecognized log level '~s'", [Level]))}]})
-    end;
-handle_log_req(Req) ->
-    send_method_not_allowed(Req, "GET,POST").
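
The subtle part of handle_config_req/1 above is the whitelist match: an entry permits a change when it names the exact {Section, Key} pair or uses <<"*">> as a key wildcard. A minimal standalone sketch of the same rule (module and function names are hypothetical, not from this commit):

-module(whitelist_sketch).
-export([allowed/3]).

%% True when {Section, Key} is covered by the whitelist. Entries may hold
%% atoms, lists, or binaries; normalize to binaries first, mirroring the
%% couch_util:to_binary/1 calls in the handler above.
allowed(Section, Key, Whitelist) ->
    lists:any(
        fun({A, B}) ->
                case {to_bin(A), to_bin(B)} of
                    {Section, Key}     -> true;
                    {Section, <<"*">>} -> true;
                    _Else              -> false
                end;
           (_) ->
                false
        end, Whitelist).

to_bin(B) when is_binary(B) -> B;
to_bin(L) when is_list(L)   -> list_to_binary(L);
to_bin(A) when is_atom(A)   -> atom_to_binary(A, utf8).

For example, allowed(<<"httpd">>, <<"port">>, [{httpd, <<"*">>}]) evaluates to true.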


[10/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_server.erl
----------------------------------------------------------------------
diff --git a/couch_server.erl b/couch_server.erl
deleted file mode 100644
index 7cee0f5..0000000
--- a/couch_server.erl
+++ /dev/null
@@ -1,499 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server).
--behaviour(gen_server).
-
--export([open/2,create/2,delete/2,get_version/0,get_version/1,get_uuid/0]).
--export([all_databases/0, all_databases/2]).
--export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
--export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
-
--include("couch_db.hrl").
-
--record(server,{
-    root_dir = [],
-    dbname_regexp,
-    max_dbs_open=100,
-    dbs_open=0,
-    start_time=""
-    }).
-
-dev_start() ->
-    couch:stop(),
-    up_to_date = make:all([load, debug_info]),
-    couch:start().
-
-get_version() ->
-    Apps = application:loaded_applications(),
-    case lists:keysearch(couch, 1, Apps) of
-    {value, {_, _, Vsn}} ->
-        Vsn;
-    false ->
-        "0.0.0"
-    end.
-get_version(short) ->
-    %% strip git hash from version string
-    [Version|_Rest] = string:tokens(get_version(), "+"),
-    Version.
-
-
-get_uuid() ->
-    case couch_config:get("couchdb", "uuid", nil) of
-        nil ->
-            UUID = couch_uuids:random(),
-            couch_config:set("couchdb", "uuid", ?b2l(UUID)),
-            UUID;
-        UUID -> ?l2b(UUID)
-    end.
-
-get_stats() ->
-    {ok, #server{start_time=Time,dbs_open=Open}} =
-            gen_server:call(couch_server, get_server),
-    [{start_time, ?l2b(Time)}, {dbs_open, Open}].
-
-sup_start_link() ->
-    gen_server:start_link({local, couch_server}, couch_server, [], []).
-
-open(DbName, Options0) ->
-    Options = maybe_add_sys_db_callbacks(DbName, Options0),
-    case gen_server:call(couch_server, {open, DbName, Options}, infinity) of
-    {ok, Db} ->
-        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-        {ok, Db#db{user_ctx=Ctx}};
-    Error ->
-        Error
-    end.
-
-create(DbName, Options0) ->
-    Options = maybe_add_sys_db_callbacks(DbName, Options0),
-    case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
-    {ok, Db} ->
-        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
-        {ok, Db#db{user_ctx=Ctx}};
-    Error ->
-        Error
-    end.
-
-delete(DbName, Options) ->
-    gen_server:call(couch_server, {delete, DbName, Options}, infinity).
-
-maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
-    maybe_add_sys_db_callbacks(?b2l(DbName), Options);
-maybe_add_sys_db_callbacks(DbName, Options) ->
-    case couch_config:get("replicator", "db", "_replicator") of
-    DbName ->
-        [
-            {before_doc_update, fun couch_replicator_manager:before_doc_update/2},
-            {after_doc_read, fun couch_replicator_manager:after_doc_read/2},
-            sys_db | Options
-        ];
-    _ ->
-        case couch_config:get("couch_httpd_auth", "authentication_db", "_users") of
-        DbName ->
-            [
-                {before_doc_update, fun couch_users_db:before_doc_update/2},
-                {after_doc_read, fun couch_users_db:after_doc_read/2},
-                sys_db | Options
-            ];
-        _ ->
-            Options
-        end
-    end.
-
-check_dbname(#server{dbname_regexp=RegExp}, DbName) ->
-    case re:run(DbName, RegExp, [{capture, none}]) of
-    nomatch ->
-        case DbName of
-            "_users" -> ok;
-            "_replicator" -> ok;
-            _Else ->
-                {error, illegal_database_name, DbName}
-        end;
-    match ->
-        ok
-    end.
-
-is_admin(User, ClearPwd) ->
-    case couch_config:get("admins", User) of
-    "-hashed-" ++ HashedPwdAndSalt ->
-        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
-        couch_util:to_hex(crypto:sha(ClearPwd ++ Salt)) == HashedPwd;
-    _Else ->
-        false
-    end.
-
-has_admins() ->
-    couch_config:get("admins") /= [].
-
-get_full_filename(Server, DbName) ->
-    filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
-
-hash_admin_passwords() ->
-    hash_admin_passwords(true).
-
-hash_admin_passwords(Persist) ->
-    lists:foreach(
-        fun({User, ClearPassword}) ->
-            HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
-            couch_config:set("admins", User, ?b2l(HashedPassword), Persist)
-        end, couch_passwords:get_unhashed_admins()).
-
-init([]) ->
-    % read config and register for configuration changes
-
-    % just stop if one of the config settings changes. couch_server_sup
-    % will restart us and then we will pick up the new settings.
-
-    RootDir = couch_config:get("couchdb", "database_dir", "."),
-    MaxDbsOpen = list_to_integer(
-            couch_config:get("couchdb", "max_dbs_open")),
-    Self = self(),
-    ok = couch_config:register(
-        fun("couchdb", "database_dir") ->
-            exit(Self, config_change)
-        end),
-    ok = couch_config:register(
-        fun("couchdb", "max_dbs_open", Max) ->
-            gen_server:call(couch_server,
-                    {set_max_dbs_open, list_to_integer(Max)})
-        end),
-    ok = couch_file:init_delete_dir(RootDir),
-    hash_admin_passwords(),
-    ok = couch_config:register(
-        fun("admins", _Key, _Value, Persist) ->
-            % spawn here so couch_config doesn't try to call itself
-            spawn(fun() -> hash_admin_passwords(Persist) end)
-        end, false),
-    {ok, RegExp} = re:compile("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
-    ets:new(couch_dbs_by_name, [set, private, named_table]),
-    ets:new(couch_dbs_by_pid, [set, private, named_table]),
-    ets:new(couch_dbs_by_lru, [ordered_set, private, named_table]),
-    ets:new(couch_sys_dbs, [set, private, named_table]),
-    process_flag(trap_exit, true),
-    {ok, #server{root_dir=RootDir,
-                dbname_regexp=RegExp,
-                max_dbs_open=MaxDbsOpen,
-                start_time=couch_util:rfc1123_date()}}.
-
-terminate(_Reason, _Srv) ->
-    lists:foreach(
-        fun({_, {_, Pid, _}}) ->
-                couch_util:shutdown_sync(Pid)
-        end,
-        ets:tab2list(couch_dbs_by_name)).
-
-all_databases() ->
-    {ok, DbList} = all_databases(
-        fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
-    {ok, lists:usort(DbList)}.
-
-all_databases(Fun, Acc0) ->
-    {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
-    NormRoot = couch_util:normpath(Root),
-    FinalAcc = try
-        filelib:fold_files(Root, "^[a-z0-9\\_\\$()\\+\\-]*[\\.]couch$", true,
-            fun(Filename, AccIn) ->
-                NormFilename = couch_util:normpath(Filename),
-                RelativeFilename = case NormFilename -- NormRoot of
-                [$/ | RelPath] -> RelPath;
-                RelPath -> RelPath
-                end,
-                case Fun(?l2b(filename:rootname(RelativeFilename, ".couch")), AccIn) of
-                {ok, NewAcc} -> NewAcc;
-                {stop, NewAcc} -> throw({stop, Fun, NewAcc})
-                end
-            end, Acc0)
-    catch throw:{stop, Fun, Acc1} ->
-         Acc1
-    end,
-    {ok, FinalAcc}.
-
-
-maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
-        when NumOpen < MaxOpen ->
-    {ok, Server};
-maybe_close_lru_db(#server{dbs_open=NumOpen}=Server) ->
-    % must free up the lru db.
-    case try_close_lru(now()) of
-    ok ->
-        {ok, Server#server{dbs_open=NumOpen - 1}};
-    Error -> Error
-    end.
-
-try_close_lru(StartTime) ->
-    LruTime = get_lru(),
-    if LruTime > StartTime ->
-        % this means we've looped through all our opened dbs and found them
-        % all in use.
-        {error, all_dbs_active};
-    true ->
-        [{_, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
-        [{_, {opened, MainPid, LruTime}}] = ets:lookup(couch_dbs_by_name, DbName),
-        case couch_db:is_idle(MainPid) of
-        true ->
-            ok = shutdown_idle_db(DbName, MainPid, LruTime);
-        false ->
-            % this still has referrers. Go ahead and give it a current lru time
-            % and try the next one in the table.
-            NewLruTime = now(),
-            true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, NewLruTime}}),
-            true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
-            true = ets:delete(couch_dbs_by_lru, LruTime),
-            true = ets:insert(couch_dbs_by_lru, {NewLruTime, DbName}),
-            try_close_lru(StartTime)
-        end
-    end.
-
-get_lru() ->
-    get_lru(ets:first(couch_dbs_by_lru)).
-
-get_lru(LruTime) ->
-    [{LruTime, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
-    case ets:member(couch_sys_dbs, DbName) of
-    false ->
-        LruTime;
-    true ->
-        [{_, {opened, MainPid, _}}] = ets:lookup(couch_dbs_by_name, DbName),
-        case couch_db:is_idle(MainPid) of
-        true ->
-            NextLru = ets:next(couch_dbs_by_lru, LruTime),
-            ok = shutdown_idle_db(DbName, MainPid, LruTime),
-            get_lru(NextLru);
-        false ->
-            get_lru(ets:next(couch_dbs_by_lru, LruTime))
-        end
-    end.
-
-shutdown_idle_db(DbName, MainPid, LruTime) ->
-    couch_util:shutdown_sync(MainPid),
-    true = ets:delete(couch_dbs_by_lru, LruTime),
-    true = ets:delete(couch_dbs_by_name, DbName),
-    true = ets:delete(couch_dbs_by_pid, MainPid),
-    true = ets:delete(couch_sys_dbs, DbName),
-    ok.
-
-open_async(Server, From, DbName, Filepath, Options) ->
-    Parent = self(),
-    Opener = spawn_link(fun() ->
-            Res = couch_db:start_link(DbName, Filepath, Options),
-            gen_server:call(
-                Parent, {open_result, DbName, Res, Options}, infinity
-            ),
-            unlink(Parent),
-            case Res of
-            {ok, DbReader} ->
-                unlink(DbReader);
-            _ ->
-                ok
-            end
-        end),
-    true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From]}}),
-    true = ets:insert(couch_dbs_by_pid, {Opener, DbName}),
-    DbsOpen = case lists:member(sys_db, Options) of
-    true ->
-        true = ets:insert(couch_sys_dbs, {DbName, true}),
-        Server#server.dbs_open;
-    false ->
-        Server#server.dbs_open + 1
-    end,
-    Server#server{dbs_open = DbsOpen}.
-
-handle_call({set_max_dbs_open, Max}, _From, Server) ->
-    {reply, ok, Server#server{max_dbs_open=Max}};
-handle_call(get_server, _From, Server) ->
-    {reply, {ok, Server}, Server};
-handle_call({open_result, DbName, {ok, OpenedDbPid}, Options}, _From, Server) ->
-    link(OpenedDbPid),
-    [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
-    lists:foreach(fun({FromPid,_}=From) ->
-        gen_server:reply(From,
-                catch couch_db:open_ref_counted(OpenedDbPid, FromPid))
-    end, Froms),
-    LruTime = now(),
-    true = ets:insert(couch_dbs_by_name,
-            {DbName, {opened, OpenedDbPid, LruTime}}),
-    true = ets:delete(couch_dbs_by_pid, Opener),
-    true = ets:insert(couch_dbs_by_pid, {OpenedDbPid, DbName}),
-    true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
-    case lists:member(create, Options) of
-    true ->
-        couch_db_update_notifier:notify({created, DbName});
-    false ->
-        ok
-    end,
-    {reply, ok, Server};
-handle_call({open_result, DbName, {error, eexist}, Options}, From, Server) ->
-    handle_call({open_result, DbName, file_exists, Options}, From, Server);
-handle_call({open_result, DbName, Error, Options}, _From, Server) ->
-    [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
-    lists:foreach(fun(From) ->
-        gen_server:reply(From, Error)
-    end, Froms),
-    true = ets:delete(couch_dbs_by_name, DbName),
-    true = ets:delete(couch_dbs_by_pid, Opener),
-    DbsOpen = case lists:member(sys_db, Options) of
-    true ->
-        true = ets:delete(couch_sys_dbs, DbName),
-        Server#server.dbs_open;
-    false ->
-        Server#server.dbs_open - 1
-    end,
-    {reply, ok, Server#server{dbs_open = DbsOpen}};
-handle_call({open, DbName, Options}, {FromPid,_}=From, Server) ->
-    LruTime = now(),
-    case ets:lookup(couch_dbs_by_name, DbName) of
-    [] ->
-        open_db(DbName, Server, Options, From);
-    [{_, {opening, Opener, Froms}}] ->
-        true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From|Froms]}}),
-        {noreply, Server};
-    [{_, {opened, MainPid, PrevLruTime}}] ->
-        true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, LruTime}}),
-        true = ets:delete(couch_dbs_by_lru, PrevLruTime),
-        true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
-        {reply, couch_db:open_ref_counted(MainPid, FromPid), Server}
-    end;
-handle_call({create, DbName, Options}, From, Server) ->
-    case ets:lookup(couch_dbs_by_name, DbName) of
-    [] ->
-        open_db(DbName, Server, [create | Options], From);
-    [_AlreadyRunningDb] ->
-        {reply, file_exists, Server}
-    end;
-handle_call({delete, DbName, _Options}, _From, Server) ->
-    DbNameList = binary_to_list(DbName),
-    case check_dbname(Server, DbNameList) of
-    ok ->
-        FullFilepath = get_full_filename(Server, DbNameList),
-        UpdateState =
-        case ets:lookup(couch_dbs_by_name, DbName) of
-        [] -> false;
-        [{_, {opening, Pid, Froms}}] ->
-            couch_util:shutdown_sync(Pid),
-            true = ets:delete(couch_dbs_by_name, DbName),
-            true = ets:delete(couch_dbs_by_pid, Pid),
-            [gen_server:reply(F, not_found) || F <- Froms],
-            true;
-        [{_, {opened, Pid, LruTime}}] ->
-            couch_util:shutdown_sync(Pid),
-            true = ets:delete(couch_dbs_by_name, DbName),
-            true = ets:delete(couch_dbs_by_pid, Pid),
-            true = ets:delete(couch_dbs_by_lru, LruTime),
-            true
-        end,
-        Server2 = case UpdateState of
-        true ->
-            DbsOpen = case ets:member(couch_sys_dbs, DbName) of
-            true ->
-                true = ets:delete(couch_sys_dbs, DbName),
-                Server#server.dbs_open;
-            false ->
-                Server#server.dbs_open - 1
-            end,
-            Server#server{dbs_open = DbsOpen};
-        false ->
-            Server
-        end,
-
-        %% Delete any leftover .compact files. If we don't do this, a
-        %% subsequent request for this DB will try to open the .compact
-        %% file and use it.
-        couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),
-
-        case couch_file:delete(Server#server.root_dir, FullFilepath) of
-        ok ->
-            couch_db_update_notifier:notify({deleted, DbName}),
-            {reply, ok, Server2};
-        {error, enoent} ->
-            {reply, not_found, Server2};
-        Else ->
-            {reply, Else, Server2}
-        end;
-    Error ->
-        {reply, Error, Server}
-    end.
-
-handle_cast(Msg, _Server) ->
-    exit({unknown_cast_message, Msg}).
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-    
-handle_info({'EXIT', _Pid, config_change}, Server) ->
-    % stop so couch_server_sup restarts us with the new settings
-    {stop, shutdown, Server};
-handle_info({'EXIT', Pid, Reason}, Server) ->
-    Server2 = case ets:lookup(couch_dbs_by_pid, Pid) of
-    [{Pid, DbName}] ->
-
-        % If the Pid is known, the name should be as well.
-        % If not, that's an error, which is why there is no [] clause.
-        case ets:lookup(couch_dbs_by_name, DbName) of
-        [{_, {opening, Pid, Froms}}] ->
-            Msg = case Reason of
-            snappy_nif_not_loaded ->
-                io_lib:format(
-                    "To open the database `~s`, Apache CouchDB "
-                    "must be built with Erlang OTP R13B04 or higher.",
-                    [DbName]
-                );
-            _Else ->
-                io_lib:format("Error opening database ~p: ~p", [DbName, Reason])
-            end,
-            ?LOG_ERROR(Msg, []),
-            lists:foreach(
-              fun(F) -> gen_server:reply(F, {bad_otp_release, Msg}) end,
-              Froms
-            );
-        [{_, {opened, Pid, LruTime}}] ->
-            ?LOG_ERROR(
-                "Unexpected exit of database process ~p [~p]: ~p",
-                [Pid, DbName, Reason]
-            ),
-            true = ets:delete(couch_dbs_by_lru, LruTime)
-        end,
-
-        true = ets:delete(couch_dbs_by_pid, Pid),
-        true = ets:delete(couch_dbs_by_name, DbName),
-
-        case ets:lookup(couch_sys_dbs, DbName) of
-        [{DbName, _}] ->
-            true = ets:delete(couch_sys_dbs, DbName),
-            Server;
-        [] ->
-            Server#server{dbs_open = Server#server.dbs_open - 1}
-        end
-    end,
-    {noreply, Server2};
-handle_info(Error, _Server) ->
-    ?LOG_ERROR("Unexpected message, restarting couch_server: ~p", [Error]),
-    exit(kill).
-
-open_db(DbName, Server, Options, From) ->
-    DbNameList = binary_to_list(DbName),
-    case check_dbname(Server, DbNameList) of
-    ok ->
-        Filepath = get_full_filename(Server, DbNameList),
-        case lists:member(sys_db, Options) of
-        true ->
-            {noreply, open_async(Server, From, DbName, Filepath, Options)};
-        false ->
-            case maybe_close_lru_db(Server) of
-            {ok, Server2} ->
-                {noreply, open_async(Server2, From, DbName, Filepath, Options)};
-            CloseError ->
-                {reply, CloseError, Server}
-            end
-        end;
-    Error ->
-        {reply, Error, Server}
-    end.
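
The database name rule enforced by check_dbname/2 above is easy to exercise in isolation. A sketch with the reserved-name special case folded in (module and function names hypothetical):

-module(dbname_sketch).
-export([valid_dbname/1]).

%% Same pattern as init/1 compiles for check_dbname/2: a lowercase letter
%% followed by lowercase letters, digits, or any of _ $ ( ) + - /.
valid_dbname(Name) ->
    {ok, RE} = re:compile("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
    case re:run(Name, RE, [{capture, none}]) of
        match   -> true;
        nomatch -> lists:member(Name, ["_users", "_replicator"])
    end.

%% valid_dbname("my_db/2014") -> true
%% valid_dbname("_users")     -> true   (reserved name, special-cased)
%% valid_dbname("9lives")     -> false  (must start with a lowercase letter)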

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_server_sup.erl
----------------------------------------------------------------------
diff --git a/couch_server_sup.erl b/couch_server_sup.erl
deleted file mode 100644
index be3c3a3..0000000
--- a/couch_server_sup.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_server_sup).
--behaviour(supervisor).
-
-
--export([start_link/1,stop/0, couch_config_start_link_wrapper/2,
-        restart_core_server/0, config_change/2]).
-
--include("couch_db.hrl").
-
-%% supervisor callbacks
--export([init/1]).
-
-start_link(IniFiles) ->
-    case whereis(couch_server_sup) of
-    undefined ->
-        start_server(IniFiles);
-    _Else ->
-        {error, already_started}
-    end.
-
-restart_core_server() ->
-    init:restart().
-
-couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
-    case is_process_alive(FirstConfigPid) of
-        true ->
-            link(FirstConfigPid),
-            {ok, FirstConfigPid};
-        false -> couch_config:start_link(IniFiles)
-    end.
-
-start_server(IniFiles) ->
-    case init:get_argument(pidfile) of
-    {ok, [PidFile]} ->
-        case file:write_file(PidFile, os:getpid()) of
-        ok -> ok;
-        {error, Reason} ->
-            io:format("Failed to write PID file ~s: ~s",
-                [PidFile, file:format_error(Reason)])
-        end;
-    _ -> ok
-    end,
-
-    {ok, ConfigPid} = couch_config:start_link(IniFiles),
-
-    LogLevel = couch_config:get("log", "level", "info"),
-    % announce startup
-    io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
-        couch_server:get_version(),
-        LogLevel
-    ]),
-    case LogLevel of
-    "debug" ->
-        io:format("Configuration Settings ~p:~n", [IniFiles]),
-        [io:format("  [~s] ~s=~p~n", [Module, Variable, Value])
-            || {{Module, Variable}, Value} <- couch_config:all()];
-    _ -> ok
-    end,
-
-    BaseChildSpecs =
-    {{one_for_all, 10, 3600},
-        [{couch_config,
-            {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
-            permanent,
-            brutal_kill,
-            worker,
-            [couch_config]},
-        {couch_primary_services,
-            {couch_primary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_primary_sup]},
-        {couch_secondary_services,
-            {couch_secondary_sup, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_secondary_sup]}
-        ]},
-
-    % ensure these applications are running
-    application:start(ibrowse),
-    application:start(crypto),
-
-    {ok, Pid} = supervisor:start_link(
-        {local, couch_server_sup}, couch_server_sup, BaseChildSpecs),
-
-    % register for config changes; just restart the affected services
-    % when one of the settings changes.
-    couch_config:register(fun ?MODULE:config_change/2, Pid),
-
-    unlink(ConfigPid),
-
-    Ip = couch_config:get("httpd", "bind_address"),
-    io:format("Apache CouchDB has started. Time to relax.~n"),
-    Uris = [get_uri(Name, Ip) || Name <- [couch_httpd, https]],
-    [begin
-        case Uri of
-            undefined -> ok;
-            Uri -> ?LOG_INFO("Apache CouchDB has started on ~s", [Uri])
-        end
-    end
-    || Uri <- Uris],
-    case couch_config:get("couchdb", "uri_file", null) of 
-    null -> ok;
-    UriFile ->
-        Lines = [begin case Uri of
-            undefined -> [];
-            Uri -> io_lib:format("~s~n", [Uri])
-            end end || Uri <- Uris],
-        case file:write_file(UriFile, Lines) of
-        ok -> ok;
-        {error, Reason2} = Error ->
-            ?LOG_ERROR("Failed to write to URI file ~s: ~s",
-                [UriFile, file:format_error(Reason2)]),
-            throw(Error)
-        end
-    end,
-
-    {ok, Pid}.
-
-stop() ->
-    catch exit(whereis(couch_server_sup), normal).
-
-config_change("daemons", _) ->
-    supervisor:terminate_child(couch_server_sup, couch_secondary_services),
-    supervisor:restart_child(couch_server_sup, couch_secondary_services);
-config_change("couchdb", "util_driver_dir") ->
-    init:restart().
-
-init(ChildSpecs) ->
-    {ok, ChildSpecs}.
-
-get_uri(Name, Ip) ->
-    case get_port(Name) of
-        undefined ->
-            undefined;
-        Port ->
-            io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
-    end.
-
-get_scheme(couch_httpd) -> "http";
-get_scheme(https) -> "https".
-
-get_port(Name) ->
-    try
-        mochiweb_socket_server:get(Name, port)
-    catch
-        exit:{noproc, _} ->
-            undefined
-    end.
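
The BaseChildSpecs built in start_server/1 above use the tuple form of supervisor specifications. A worked illustration of that shape with a hypothetical child (this module is not part of the commit):

-module(sup_sketch).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% {one_for_all, 10, 3600}: if any child exits, all children are
%% restarted; more than 10 restarts within 3600 seconds terminates the
%% supervisor itself, as with couch_server_sup above.
init([]) ->
    {ok, {{one_for_all, 10, 3600},
          [{my_worker,                    % child id (hypothetical)
            {my_worker, start_link, []},  % {Module, Function, Args}
            permanent,                    % always restart this child
            brutal_kill,                  % shutdown: kill without waiting
            worker,                       % worker | supervisor
            [my_worker]}]}}.              % modules, for release handling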

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_stats_aggregator.erl
----------------------------------------------------------------------
diff --git a/couch_stats_aggregator.erl b/couch_stats_aggregator.erl
deleted file mode 100644
index 6090355..0000000
--- a/couch_stats_aggregator.erl
+++ /dev/null
@@ -1,297 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stats_aggregator).
--behaviour(gen_server).
-
--export([start/0, start/1, stop/0]).
--export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--record(aggregate, {
-    description = <<"">>,
-    seconds = 0,
-    count = 0,
-    current = null,
-    sum = null,
-    mean = null,
-    variance = null,
-    stddev = null,
-    min = null,
-    max = null,
-    samples = []
-}).
-
-
-start() ->
-    PrivDir = couch_util:priv_dir(),
-    start(filename:join(PrivDir, "stat_descriptions.cfg")).
-    
-start(FileName) ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [FileName], []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-all() ->
-    ?MODULE:all(0).
-all(Time) when is_binary(Time) ->
-    ?MODULE:all(list_to_integer(binary_to_list(Time)));
-all(Time) when is_atom(Time) ->
-    ?MODULE:all(list_to_integer(atom_to_list(Time)));
-all(Time) when is_integer(Time) ->
-    Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}),
-    Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs),
-    case Stats of
-        [] ->
-            {[]};
-        _ ->
-            Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) ->
-                CurrKeys = case proplists:lookup(Mod, Acc) of
-                    none -> [];
-                    {Mod, {Keys}} -> Keys
-                end,
-                NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]},
-                [{Mod, NewMod} | proplists:delete(Mod, Acc)]
-            end, [], Stats),
-            {Ret}
-    end.
-
-get(Key) ->
-    ?MODULE:get(Key, 0).
-get(Key, Time) when is_binary(Time) ->
-    ?MODULE:get(Key, list_to_integer(binary_to_list(Time)));
-get(Key, Time) when is_atom(Time) ->
-    ?MODULE:get(Key, list_to_integer(atom_to_list(Time)));
-get(Key, Time) when is_integer(Time) ->
-    case ets:lookup(?MODULE, {make_key(Key), Time}) of
-        [] -> #aggregate{seconds=Time};
-        [{_, Agg}] -> Agg
-    end.
-
-get_json(Key) ->
-    get_json(Key, 0).
-get_json(Key, Time) ->
-    to_json_term(?MODULE:get(Key, Time)).
-
-collect_sample() ->
-    gen_server:call(?MODULE, collect_sample, infinity).
-
-
-init(StatDescsFileName) ->
-    % Create an aggregate entry for each {description, rate} pair.
-    ets:new(?MODULE, [named_table, set, protected]),
-    SampleStr = couch_config:get("stats", "samples", "[0]"),
-    {ok, Samples} = couch_util:parse_term(SampleStr),
-    {ok, Descs} = file:consult(StatDescsFileName),
-    lists:foreach(fun({Sect, Key, Value}) ->
-        lists:foreach(fun(Secs) ->
-            Agg = #aggregate{
-                description=list_to_binary(Value),
-                seconds=Secs
-            },
-            ets:insert(?MODULE, {{{Sect, Key}, Secs}, Agg})
-        end, Samples)
-    end, Descs),
-    
-    Self = self(),
-    ok = couch_config:register(
-        fun("stats", _) -> exit(Self, config_change) end
-    ),
-    
-    Rate = list_to_integer(couch_config:get("stats", "rate", "1000")),
-    % TODO: Add timer_start to kernel start options.
-    {ok, TRef} = timer:apply_after(Rate, ?MODULE, collect_sample, []),
-    {ok, {TRef, Rate}}.
-    
-terminate(_Reason, {TRef, _Rate}) ->
-    timer:cancel(TRef),
-    ok.
-
-handle_call(collect_sample, _, {OldTRef, SampleInterval}) ->
-    timer:cancel(OldTRef),
-    {ok, TRef} = timer:apply_after(SampleInterval, ?MODULE, collect_sample, []),
-    % Gather new stats values to add.
-    Incs = lists:map(fun({Key, Value}) ->
-        {Key, {incremental, Value}}
-    end, couch_stats_collector:all(incremental)),
-    Abs = lists:map(fun({Key, Values}) ->
-        couch_stats_collector:clear(Key),
-        Values2 = case Values of
-            X when is_list(X) -> X;
-            Else -> [Else]
-        end,
-        {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) ->
-            {Count+1, Curr + (Val - Curr) / (Count+1)}
-        end, {0, 0}, Values2),
-        {Key, {absolute, Mean}}
-    end, couch_stats_collector:all(absolute)),
-    
-    Values = Incs ++ Abs,
-    Now = erlang:now(),
-    lists:foreach(fun({{Key, Rate}, Agg}) ->
-        NewAgg = case proplists:lookup(Key, Values) of
-            none ->
-                rem_values(Now, Agg);
-            {Key, {Type, Value}} ->
-                NewValue = new_value(Type, Value, Agg#aggregate.current),
-                Agg2 = add_value(Now, NewValue, Agg),
-                rem_values(Now, Agg2)
-        end,
-        ets:insert(?MODULE, {{Key, Rate}, NewAgg})
-    end, ets:tab2list(?MODULE)),
-    {reply, ok, {TRef, SampleInterval}}.
-
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-code_change(_OldVersion, State, _Extra) ->
-    {ok, State}.
-
-
-new_value(incremental, Value, null) ->
-    Value;
-new_value(incremental, Value, Current) ->
-    Value - Current;
-new_value(absolute, Value, _Current) ->
-    Value.
-
-add_value(Time, Value, #aggregate{count=Count, seconds=Secs}=Agg) when Count < 1 ->
-    Samples = case Secs of
-        0 -> [];
-        _ -> [{Time, Value}]
-    end,
-    Agg#aggregate{
-        count=1,
-        current=Value,
-        sum=Value,
-        mean=Value,
-        variance=0.0,
-        stddev=null,
-        min=Value,
-        max=Value,
-        samples=Samples
-    };
-add_value(Time, Value, Agg) ->
-    #aggregate{
-        count=Count,
-        current=Current,
-        sum=Sum,
-        mean=Mean,
-        variance=Variance,
-        samples=Samples
-    } = Agg,
-    
-    NewCount = Count + 1,
-    NewMean = Mean + (Value - Mean) / NewCount,
-    NewVariance = Variance + (Value - Mean) * (Value - NewMean),
-    StdDev = case NewCount > 1 of
-        false -> null;
-        _ -> math:sqrt(NewVariance / (NewCount - 1))
-    end,
-    Agg2 = Agg#aggregate{
-        count=NewCount,
-        current=Current + Value,
-        sum=Sum + Value,
-        mean=NewMean,
-        variance=NewVariance,
-        stddev=StdDev,
-        min=lists:min([Agg#aggregate.min, Value]),
-        max=lists:max([Agg#aggregate.max, Value])
-    },
-    case Agg2#aggregate.seconds of
-        0 -> Agg2;
-        _ -> Agg2#aggregate{samples=[{Time, Value} | Samples]}
-    end.
-
-rem_values(Time, Agg) ->
-    Seconds = Agg#aggregate.seconds,
-    Samples = Agg#aggregate.samples,
-    Pred = fun({When, _Value}) ->
-        timer:now_diff(Time, When) =< (Seconds * 1000000)
-    end,
-    {Keep, Remove} = lists:splitwith(Pred, Samples),
-    Agg2 = lists:foldl(fun({_, Value}, Acc) ->
-        rem_value(Value, Acc)
-    end, Agg, Remove),
-    Agg2#aggregate{samples=Keep}.
-
-rem_value(_Value, #aggregate{count=Count, seconds=Secs}) when Count =< 1 ->
-    #aggregate{seconds=Secs};
-rem_value(Value, Agg) ->
-    #aggregate{
-        count=Count,
-        sum=Sum,
-        mean=Mean,
-        variance=Variance
-    } = Agg,
-
-    OldMean = (Mean * Count - Value) / (Count - 1),
-    OldVariance = Variance - (Value - OldMean) * (Value - Mean),
-    OldCount = Count - 1,
-    StdDev = case OldCount > 1 of
-        false -> null;
-        _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
-    end,
-    Agg#aggregate{
-        count=OldCount,
-        sum=Sum-Value,
-        mean=clamp_value(OldMean),
-        variance=clamp_value(OldVariance),
-        stddev=StdDev
-    }.
-
-to_json_term(Agg) ->
-    {Min, Max} = case Agg#aggregate.seconds > 0 of
-        false ->
-            {Agg#aggregate.min, Agg#aggregate.max};
-        _ ->
-            case length(Agg#aggregate.samples) > 0 of
-                true ->
-                    Extract = fun({_Time, Value}) -> Value end,
-                    Samples = lists:map(Extract, Agg#aggregate.samples),
-                    {lists:min(Samples), lists:max(Samples)};
-                _ ->
-                    {null, null}
-            end
-    end,
-    {[
-        {description, Agg#aggregate.description},
-        {current, round_value(Agg#aggregate.sum)},
-        {sum, round_value(Agg#aggregate.sum)},
-        {mean, round_value(Agg#aggregate.mean)},
-        {stddev, round_value(Agg#aggregate.stddev)},
-        {min, Min},
-        {max, Max}
-    ]}.
-
-make_key({Mod, Val}) when is_integer(Val) ->
-    {Mod, list_to_atom(integer_to_list(Val))};
-make_key(Key) ->
-    Key.
-
-round_value(Val) when not is_number(Val) ->
-    Val;
-round_value(Val) when Val == 0 ->
-    Val;
-round_value(Val) ->
-    erlang:round(Val * 1000.0) / 1000.0.
-
-clamp_value(Val) when Val > 0.00000000000001 ->
-    Val;
-clamp_value(_) ->
-    0.0.
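
The pair add_value/3 and rem_value/2 above maintain the mean and variance incrementally (Welford's online algorithm), so samples can enter and expire from the window without rescanning it. A self-contained check of the forward update, with invented sample values:

-module(welford_sketch).
-export([welford/1]).

%% Same update rule as add_value/3. For the samples [2, 4, 6]:
%%   n=1: mean = 2.0,                 m2 = 0.0
%%   n=2: mean = 2 + (4-2)/2 = 3.0,   m2 = 0 + (4-2)*(4-3) = 2.0
%%   n=3: mean = 3 + (6-3)/3 = 4.0,   m2 = 2 + (6-3)*(6-4) = 8.0
%% sample variance = m2/(n-1) = 4.0, so stddev = 2.0.
welford(Values) ->
    lists:foldl(fun(X, {N0, Mean0, M20}) ->
        N = N0 + 1,
        Mean = Mean0 + (X - Mean0) / N,
        {N, Mean, M20 + (X - Mean0) * (X - Mean)}
    end, {0, 0.0, 0.0}, Values).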

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_stats_collector.erl
----------------------------------------------------------------------
diff --git a/couch_stats_collector.erl b/couch_stats_collector.erl
deleted file mode 100644
index f7b9bb4..0000000
--- a/couch_stats_collector.erl
+++ /dev/null
@@ -1,136 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% todo
-% - remove existence check on increment(), decrement() and record(). Have
-%   modules initialize counters on startup.
-
--module(couch_stats_collector).
-
--behaviour(gen_server).
-
--export([start/0, stop/0]).
--export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
--export([track_process_count/1, track_process_count/2]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--define(HIT_TABLE, stats_hit_table).
--define(ABS_TABLE, stats_abs_table).
-
-start() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
-    gen_server:call(?MODULE, stop).
-
-all() ->
-    ets:tab2list(?HIT_TABLE) ++ abs_to_list().
-
-all(Type) ->
-    case Type of
-        incremental -> ets:tab2list(?HIT_TABLE);
-        absolute -> abs_to_list()
-    end.
-
-get(Key) ->
-    case ets:lookup(?HIT_TABLE, Key) of
-        [] ->
-            case ets:lookup(?ABS_TABLE, Key) of
-                [] ->
-                    nil;
-                AbsVals ->
-                    lists:map(fun({_, Value}) -> Value end, AbsVals)
-            end;
-        [{_, Counter}] ->
-            Counter
-    end.
-
-increment(Key) ->
-    Key2 = make_key(Key),
-    case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
-        {'EXIT', {badarg, _}} ->
-            catch ets:insert(?HIT_TABLE, {Key2, 1}),
-            ok;
-        _ ->
-            ok
-    end.
-
-decrement(Key) ->
-    Key2 = make_key(Key),
-    case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
-        {'EXIT', {badarg, _}} ->
-            catch ets:insert(?HIT_TABLE, {Key2, -1}),
-            ok;
-        _ -> ok
-    end.
-
-record(Key, Value) ->
-    catch ets:insert(?ABS_TABLE, {make_key(Key), Value}).
-
-clear(Key) ->
-    catch ets:delete(?ABS_TABLE, make_key(Key)).
-
-track_process_count(Stat) ->
-    track_process_count(self(), Stat).
-
-track_process_count(Pid, Stat) ->
-    MonitorFun = fun() ->
-        Ref = erlang:monitor(process, Pid),
-        receive {'DOWN', Ref, _, _, _} -> ok end,
-        couch_stats_collector:decrement(Stat)
-    end,
-    case (catch couch_stats_collector:increment(Stat)) of
-        ok -> spawn(MonitorFun);
-        _ -> ok
-    end.
-
-
-init(_) ->
-    ets:new(?HIT_TABLE, [named_table, set, public]),
-    ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
-    {ok, nil}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(stop, _, State) ->
-    {stop, normal, stopped, State}.
-
-handle_cast(foo, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-code_change(_OldVersion, State, _Extra) ->
-    {ok, State}.
-
-
-make_key({Module, Key}) when is_integer(Key) ->
-    {Module, list_to_atom(integer_to_list(Key))};
-make_key(Key) ->
-    Key.
-
-abs_to_list() ->
-    SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
-    lists:foldl(fun({Key, Val}, Acc) ->
-        case Acc of
-            [] ->
-                [{Key, [Val]}];
-            [{Key, Prev} | Rest] ->
-                [{Key, [Val | Prev]} | Rest];
-            Others ->
-                [{Key, [Val]} | Others]
-        end
-    end, [], SortedKVs).
\ No newline at end of file
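
The catch-and-insert dance in increment/1 and decrement/1 above predates ets:update_counter/4. On OTP 18 and later the same effect is a single atomic call; a sketch of the modern equivalent (not what the deleted module did):

-module(counter_sketch).
-export([increment/2, decrement/2]).

%% ets:update_counter/4 inserts the default {Key, 0} when the key is
%% absent, then applies the increment, all in one atomic operation.
increment(Table, Key) ->
    ets:update_counter(Table, Key, 1, {Key, 0}).

decrement(Table, Key) ->
    ets:update_counter(Table, Key, -1, {Key, 0}).

%% Usage:
%%   T = ets:new(hits, [set, public]),
%%   counter_sketch:increment(T, {httpd, requests}).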

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_stream.erl
----------------------------------------------------------------------
diff --git a/couch_stream.erl b/couch_stream.erl
deleted file mode 100644
index 959feef..0000000
--- a/couch_stream.erl
+++ /dev/null
@@ -1,299 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_stream).
--behaviour(gen_server).
-
-% public API
--export([open/1, open/2, close/1]).
--export([foldl/4, foldl/5, foldl_decode/6, range_foldl/6]).
--export([copy_to_new_stream/3, write/2]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_cast/2, handle_call/3, handle_info/2]).
-
--include("couch_db.hrl").
-
--define(DEFAULT_BUFFER_SIZE, 4096).
-
--record(stream,
-    {fd = 0,
-    written_pointers=[],
-    buffer_list = [],
-    buffer_len = 0,
-    max_buffer,
-    written_len = 0,
-    md5,
-    % md5 of the content without any transformation applied (e.g. compression)
-    % needed for the attachment upload integrity check (ticket 558)
-    identity_md5,
-    identity_len = 0,
-    encoding_fun,
-    end_encoding_fun
-    }).
-
-
-%%% Interface functions %%%
-
-open(Fd) ->
-    open(Fd, []).
-
-open(Fd, Options) ->
-    gen_server:start_link(couch_stream, {Fd, Options}, []).
-
-close(Pid) ->
-    gen_server:call(Pid, close, infinity).
-
-copy_to_new_stream(Fd, PosList, DestFd) ->
-    {ok, Dest} = open(DestFd),
-    foldl(Fd, PosList,
-        fun(Bin, _) ->
-            ok = write(Dest, Bin)
-        end, ok),
-    close(Dest).
-
-foldl(_Fd, [], _Fun, Acc) ->
-    Acc;
-foldl(Fd, [Pos|Rest], Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
-
-foldl(Fd, PosList, <<>>, Fun, Acc) ->
-    foldl(Fd, PosList, Fun, Acc);
-foldl(Fd, PosList, Md5, Fun, Acc) ->
-    foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).
-
-foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
-    {DecDataFun, DecEndFun} = case Enc of
-    gzip ->
-        ungzip_init();
-    identity ->
-        identity_enc_dec_funs()
-    end,
-    Result = foldl_decode(
-        DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
-    ),
-    DecEndFun(),
-    Result.
-
-foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
-    Md5 = couch_util:md5_final(Md5Acc),
-    Acc;
-foldl(Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) -> % 0110 UPGRADE CODE
-    foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
-    Fun(Bin, Acc);
-foldl(Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
-    foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
-
-range_foldl(Fd, PosList, From, To, Fun, Acc) ->
-    range_foldl(Fd, PosList, From, To, 0, Fun, Acc).
-
-range_foldl(_Fd, _PosList, _From, To, Off, _Fun, Acc) when Off >= To ->
-    Acc;
-range_foldl(Fd, [Pos|Rest], From, To, Off, Fun, Acc) when is_integer(Pos) -> % old-style attachment
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    range_foldl(Fd, [{Pos, iolist_size(Bin)}] ++ Rest, From, To, Off, Fun, Acc);
-range_foldl(Fd, [{_Pos, Size}|Rest], From, To, Off, Fun, Acc) when From > Off + Size ->
-    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Acc);
-range_foldl(Fd, [{Pos, Size}|Rest], From, To, Off, Fun, Acc) ->
-    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
-    Bin1 = if
-        From =< Off andalso To >= Off + Size -> Bin; %% the whole block is covered
-        true ->
-            PrefixLen = clip(From - Off, 0, Size),
-            PostfixLen = clip(Off + Size - To, 0, Size),
-            MatchLen = Size - PrefixLen - PostfixLen,
-            <<_Prefix:PrefixLen/binary,Match:MatchLen/binary,_Postfix:PostfixLen/binary>> = iolist_to_binary(Bin),
-            Match
-    end,
-    range_foldl(Fd, Rest, From, To, Off + Size, Fun, Fun(Bin1, Acc)).
-
-clip(Value, Lo, Hi) ->
-    if
-        Value < Lo -> Lo;
-        Value > Hi -> Hi;
-        true -> Value
-    end.
-
-foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
-    Md5 = couch_util:md5_final(Md5Acc),
-    Acc;
-foldl_decode(DecFun, Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) ->
-    foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
-    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
-    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)),
-    Bin = DecFun(EncBin),
-    Fun(Bin, Acc);
-foldl_decode(DecFun, Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
-    foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
-    {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
-    Bin = DecFun(EncBin),
-    Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin),
-    foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
-
-gzip_init(Options) ->
-    case couch_util:get_value(compression_level, Options, 0) of
-    Lvl when Lvl >= 1 andalso Lvl =< 9 ->
-        Z = zlib:open(),
-        % 15 = ?MAX_WBITS (defined in the zlib module)
-        % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
-        ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
-        {
-            fun(Data) ->
-                zlib:deflate(Z, Data)
-            end,
-            fun() ->
-                Last = zlib:deflate(Z, [], finish),
-                ok = zlib:deflateEnd(Z),
-                ok = zlib:close(Z),
-                Last
-            end
-        };
-    _ ->
-        identity_enc_dec_funs()
-    end.
-
-ungzip_init() ->
-    Z = zlib:open(),
-    zlib:inflateInit(Z, 16 + 15),
-    {
-        fun(Data) ->
-            zlib:inflate(Z, Data)
-        end,
-        fun() ->
-            ok = zlib:inflateEnd(Z),
-            ok = zlib:close(Z)
-        end
-    }.
-
-identity_enc_dec_funs() ->
-    {
-        fun(Data) -> Data end,
-        fun() -> [] end
-    }.
-
-write(_Pid, <<>>) ->
-    ok;
-write(Pid, Bin) ->
-    gen_server:call(Pid, {write, Bin}, infinity).
-
-
-init({Fd, Options}) ->
-    {EncodingFun, EndEncodingFun} =
-    case couch_util:get_value(encoding, Options, identity) of
-    identity ->
-        identity_enc_dec_funs();
-    gzip ->
-        gzip_init(Options)
-    end,
-    {ok, #stream{
-            fd=Fd,
-            md5=couch_util:md5_init(),
-            identity_md5=couch_util:md5_init(),
-            encoding_fun=EncodingFun,
-            end_encoding_fun=EndEncodingFun,
-            max_buffer=couch_util:get_value(
-                buffer_size, Options, ?DEFAULT_BUFFER_SIZE)
-        }
-    }.
-
-terminate(_Reason, _Stream) ->
-    ok.
-
-handle_call({write, Bin}, _From, Stream) ->
-    BinSize = iolist_size(Bin),
-    #stream{
-        fd = Fd,
-        written_len = WrittenLen,
-        written_pointers = Written,
-        buffer_len = BufferLen,
-        buffer_list = Buffer,
-        max_buffer = Max,
-        md5 = Md5,
-        identity_md5 = IdenMd5,
-        identity_len = IdenLen,
-        encoding_fun = EncodingFun} = Stream,
-    if BinSize + BufferLen > Max ->
-        WriteBin = lists:reverse(Buffer, [Bin]),
-        IdenMd5_2 = couch_util:md5_update(IdenMd5, WriteBin),
-        case EncodingFun(WriteBin) of
-        [] ->
-            % case where the encoder did some internal buffering
-            % (zlib does it for example)
-            WrittenLen2 = WrittenLen,
-            Md5_2 = Md5,
-            Written2 = Written;
-        WriteBin2 ->
-            {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
-            WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
-            Md5_2 = couch_util:md5_update(Md5, WriteBin2),
-            Written2 = [{Pos, iolist_size(WriteBin2)}|Written]
-        end,
-
-        {reply, ok, Stream#stream{
-                        written_len=WrittenLen2,
-                        written_pointers=Written2,
-                        buffer_list=[],
-                        buffer_len=0,
-                        md5=Md5_2,
-                        identity_md5=IdenMd5_2,
-                        identity_len=IdenLen + BinSize}};
-    true ->
-        {reply, ok, Stream#stream{
-                        buffer_list=[Bin|Buffer],
-                        buffer_len=BufferLen + BinSize,
-                        identity_len=IdenLen + BinSize}}
-    end;
-handle_call(close, _From, Stream) ->
-    #stream{
-        fd = Fd,
-        written_len = WrittenLen,
-        written_pointers = Written,
-        buffer_list = Buffer,
-        md5 = Md5,
-        identity_md5 = IdenMd5,
-        identity_len = IdenLen,
-        encoding_fun = EncodingFun,
-        end_encoding_fun = EndEncodingFun} = Stream,
-
-    WriteBin = lists:reverse(Buffer),
-    IdenMd5Final = couch_util:md5_final(couch_util:md5_update(IdenMd5, WriteBin)),
-    WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
-    Md5Final = couch_util:md5_final(couch_util:md5_update(Md5, WriteBin2)),
-    Result = case WriteBin2 of
-    [] ->
-        {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
-    _ ->
-        {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
-        StreamInfo = lists:reverse(Written, [{Pos, iolist_size(WriteBin2)}]),
-        StreamLen = WrittenLen + iolist_size(WriteBin2),
-        {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final}
-    end,
-    {stop, normal, Result, Stream}.
-
-handle_cast(_Msg, State) ->
-    {noreply,State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.

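For reference, the 16 + 15 window size passed to zlib:deflateInit/6 and
zlib:inflateInit/2 above is how Erlang's zlib selects gzip framing
(16 + ?MAX_WBITS) instead of a raw deflate stream. A minimal standalone
sketch of the same round trip, outside any stream process (compression
level 6 is an arbitrary choice here):

    gzip_roundtrip(Data) ->
        % compress with a gzip header/trailer, as gzip_init/1 does
        Z = zlib:open(),
        ok = zlib:deflateInit(Z, 6, deflated, 16 + 15, 8, default),
        Gz = iolist_to_binary(zlib:deflate(Z, Data, finish)),
        ok = zlib:deflateEnd(Z),
        ok = zlib:close(Z),
        % decompress with the same window size, as ungzip_init/0 does
        Z2 = zlib:open(),
        ok = zlib:inflateInit(Z2, 16 + 15),
        Plain = iolist_to_binary(zlib:inflate(Z2, Gz)),
        ok = zlib:inflateEnd(Z2),
        ok = zlib:close(Z2),
        Plain.
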
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_task_status.erl
----------------------------------------------------------------------
diff --git a/couch_task_status.erl b/couch_task_status.erl
deleted file mode 100644
index e23b560..0000000
--- a/couch_task_status.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_task_status).
--behaviour(gen_server).
-
-% This module is used to track the status of long-running tasks.
-% Long-running tasks register themselves via a call to add_task/1 and then
-% update their status properties via update/1. The status of a task is a
-% list of properties. Each property is a tuple whose first element is an
-% atom or a binary and whose second element is an EJSON value. When a task
-% updates its status, it can override some or all of its properties.
-% The properties {started_on, UnixTimestamp}, {updated_on, UnixTimestamp} and
-% {pid, ErlangPid} are automatically added by this module.
-% When a tracked task dies, its status will be automatically removed from
-% memory. To get the tasks list, call the all/0 function.
-
--export([start_link/0, stop/0]).
--export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
--export([is_task_added/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--include("couch_db.hrl").
-
--define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-
-all() ->
-    gen_server:call(?MODULE, all).
-
-
-add_task(Props) ->
-    put(task_status_update, {{0, 0, 0}, 0}),
-    Ts = timestamp(),
-    TaskProps = lists:ukeysort(
-        1, [{started_on, Ts}, {updated_on, Ts} | Props]),
-    put(task_status_props, TaskProps),
-    gen_server:call(?MODULE, {add_task, TaskProps}).
-
-
-is_task_added() ->
-    is_list(erlang:get(task_status_props)).
-
-
-set_update_frequency(Msecs) ->
-    put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
-
-
-update(Props) ->
-    MergeProps = lists:ukeysort(1, Props),
-    TaskProps = lists:ukeymerge(1, MergeProps, erlang:get(task_status_props)),
-    put(task_status_props, TaskProps),
-    maybe_persist(TaskProps).
-
-
-get(Props) when is_list(Props) ->
-    TaskProps = erlang:get(task_status_props),
-    [couch_util:get_value(P, TaskProps) || P <- Props];
-get(Prop) ->
-    TaskProps = erlang:get(task_status_props),
-    couch_util:get_value(Prop, TaskProps).
-
-
-maybe_persist(TaskProps0) ->
-    {LastUpdateTime, Frequency} = erlang:get(task_status_update),
-    case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of
-    true ->
-        put(task_status_update, {Now, Frequency}),
-        TaskProps = ?set(TaskProps0, updated_on, timestamp(Now)),
-        gen_server:cast(?MODULE, {update_status, self(), TaskProps});
-    false ->
-        ok
-    end.
-
-
-init([]) ->
-    % read configuration settings and register for configuration changes
-    ets:new(?MODULE, [ordered_set, protected, named_table]),
-    {ok, nil}.
-
-
-terminate(_Reason,_State) ->
-    ok.
-
-
-handle_call({add_task, TaskProps}, {From, _}, Server) ->
-    case ets:lookup(?MODULE, From) of
-    [] ->
-        true = ets:insert(?MODULE, {From, TaskProps}),
-        erlang:monitor(process, From),
-        {reply, ok, Server};
-    [_] ->
-        {reply, {add_task_error, already_registered}, Server}
-    end;
-handle_call(all, _, Server) ->
-    All = [
-        [{pid, ?l2b(pid_to_list(Pid))} | TaskProps]
-        ||
-        {Pid, TaskProps} <- ets:tab2list(?MODULE)
-    ],
-    {reply, All, Server}.
-
-
-handle_cast({update_status, Pid, NewProps}, Server) ->
-    case ets:lookup(?MODULE, Pid) of
-    [{Pid, _CurProps}] ->
-        ?LOG_DEBUG("New task status for ~p: ~p", [Pid, NewProps]),
-        true = ets:insert(?MODULE, {Pid, NewProps});
-    _ ->
-        % Task finished/died in the meantime and we must have received
-        % a monitor message before this call - ignore.
-        ok
-    end,
-    {noreply, Server};
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
-    %% should we also erlang:demonitor(_MonitorRef) here?
-    ets:delete(?MODULE, Pid),
-    {noreply, Server}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-timestamp() ->
-    timestamp(now()).
-
-timestamp({Mega, Secs, _}) ->
-    Mega * 1000000 + Secs.

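A hypothetical caller of the couch_task_status API deleted above: the task
registers itself once, throttles persisted updates to at most twice per
second, and its status row is removed from the ETS table automatically when
the process dies.

    example_task() ->
        ok = couch_task_status:add_task([{type, <<"example">>}, {progress, 0}]),
        couch_task_status:set_update_frequency(500),
        lists:foreach(fun(Percent) ->
            % only persisted if >= 500 ms have elapsed since the last persist
            couch_task_status:update([{progress, Percent}])
        end, lists:seq(1, 100)).
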
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_users_db.erl
----------------------------------------------------------------------
diff --git a/couch_users_db.erl b/couch_users_db.erl
deleted file mode 100644
index 9b875ba..0000000
--- a/couch_users_db.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_users_db).
-
--export([before_doc_update/2, after_doc_read/2, strip_non_public_fields/1]).
-
--include("couch_db.hrl").
-
--define(NAME, <<"name">>).
--define(PASSWORD, <<"password">>).
--define(DERIVED_KEY, <<"derived_key">>).
--define(PASSWORD_SCHEME, <<"password_scheme">>).
--define(PBKDF2, <<"pbkdf2">>).
--define(ITERATIONS, <<"iterations">>).
--define(SALT, <<"salt">>).
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
-% If the request's userCtx identifies an admin
-%   -> save_doc (see below)
-%
-% If the request's userCtx.name is null:
-%   -> save_doc
-%   // this is an anonymous user registering a new document
-%   // in case a user doc with the same id already exists, the anonymous
-%   // user will get a regular doc update conflict.
-% If the request's userCtx.name doesn't match the doc's name
-%   -> 404 // Not Found
-% Else
-%   -> save_doc
-before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
-    DocName = get_doc_name(Doc),
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        save_doc(Doc);
-    _ when Name =:= DocName orelse Name =:= null ->
-        save_doc(Doc);
-    _ ->
-        throw(not_found)
-    end.
-
-% If newDoc.password == null || newDoc.password == undefined:
-%   ->
-%   noop
-% Else -> // calculate password hash server side
-%    newDoc.password_sha = hash_pw(newDoc.password + salt)
-%    newDoc.salt = salt
-%    newDoc.password = null
-save_doc(#doc{body={Body}} = Doc) ->
-    case couch_util:get_value(?PASSWORD, Body) of
-    null -> % server admins don't have a user-db password entry
-        Doc;
-    undefined ->
-        Doc;
-    ClearPassword ->
-        Iterations = list_to_integer(couch_config:get("couch_httpd_auth", "iterations", "1000")),
-        Salt = couch_uuids:random(),
-        DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
-        Body0 = [{?PASSWORD_SCHEME, ?PBKDF2}, {?ITERATIONS, Iterations}|Body],
-        Body1 = ?replace(Body0, ?DERIVED_KEY, DerivedKey),
-        Body2 = ?replace(Body1, ?SALT, Salt),
-        Body3 = proplists:delete(?PASSWORD, Body2),
-        Doc#doc{body={Body3}}
-    end.
-
-% If the doc is a design doc
-%   If the request's userCtx identifies an admin
-%     -> return doc
-%   Else
-%     -> 403 // Forbidden
-% If the request's userCtx identifies an admin
-%   -> return doc
-% If the request's userCtx.name doesn't match the doc's name
-%   -> 404 // Not Found
-% Else
-%   -> return doc
-after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        Doc;
-    _ ->
-        throw({forbidden,
-        <<"Only administrators can view design docs in the users database.">>})
-    end;
-after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) ->
-    #user_ctx{name=Name} = UserCtx,
-    DocName = get_doc_name(Doc),
-    case (catch couch_db:check_is_admin(Db)) of
-    ok ->
-        Doc;
-    _ when Name =:= DocName ->
-        Doc;
-    _ ->
-        Doc1 = strip_non_public_fields(Doc),
-        case Doc1 of
-          #doc{body={[]}} ->
-              throw(not_found);
-          _ ->
-              Doc1
-        end
-    end.
-
-get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
-    Name;
-get_doc_name(_) ->
-    undefined.
-
-strip_non_public_fields(#doc{body={Props}}=Doc) ->
-    Public = re:split(couch_config:get("couch_httpd_auth", "public_fields", ""),
-                      "\\s*,\\s*", [{return, binary}]),
-    Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.

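To illustrate what save_doc/1 in the deleted module did to a user doc (all
field values below are made up): the cleartext password field is replaced
by a PBKDF2 derived key plus the salt, scheme and iteration count needed to
verify it later.

    % body before before_doc_update/2:
    {[{<<"name">>, <<"alice">>}, {<<"password">>, <<"s3cret">>}]}
    % body after: password is gone, derived fields are appended
    {[{<<"password_scheme">>, <<"pbkdf2">>},
      {<<"iterations">>, 1000},
      {<<"name">>, <<"alice">>},
      {<<"derived_key">>, <<"5e6b...">>},  % couch_passwords:pbkdf2/3 output
      {<<"salt">>, <<"d1f0...">>}]}        % couch_uuids:random()
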
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_util.erl
----------------------------------------------------------------------
diff --git a/couch_util.erl b/couch_util.erl
deleted file mode 100644
index afe3528..0000000
--- a/couch_util.erl
+++ /dev/null
@@ -1,489 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util).
-
--export([priv_dir/0, normpath/1]).
--export([should_flush/0, should_flush/1, to_existing_atom/1]).
--export([rand32/0, implode/2, collate/2, collate/3]).
--export([abs_pathname/1,abs_pathname/2, trim/1]).
--export([encodeBase64Url/1, decodeBase64Url/1]).
--export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
--export([get_nested_json_value/2, json_user_ctx/1]).
--export([proplist_apply_field/2, json_apply_field/2]).
--export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
--export([verify/2,simple_call/2,shutdown_sync/1]).
--export([get_value/2, get_value/3]).
--export([md5/1, md5_init/0, md5_update/2, md5_final/1]).
--export([reorder_results/2]).
--export([url_strip_password/1]).
--export([encode_doc_id/1]).
--export([with_db/2]).
--export([rfc1123_date/0, rfc1123_date/1]).
-
--include("couch_db.hrl").
-
-% arbitrarily chosen amount of memory to use before flushing to disk
--define(FLUSH_MAX_MEM, 10000000).
-
-priv_dir() ->
-    case code:priv_dir(couch) of
-        {error, bad_name} ->
-            % small hack, in dev mode "app" is couchdb. Fixing requires
-            % renaming src/couch to src/couchdb. Not really worth the hassle.
-            % -Damien
-            code:priv_dir(couchdb);
-        Dir -> Dir
-    end.
-
-% Normalize a pathname by removing .. and . components.
-normpath(Path) ->
-    normparts(filename:split(Path), []).
-
-normparts([], Acc) ->
-    filename:join(lists:reverse(Acc));
-normparts([".." | RestParts], [_Drop | RestAcc]) ->
-    normparts(RestParts, RestAcc);
-normparts(["." | RestParts], Acc) ->
-    normparts(RestParts, Acc);
-normparts([Part | RestParts], Acc) ->
-    normparts(RestParts, [Part | Acc]).
-
-% works like list_to_existing_atom, except can be list or binary and it
-% gives you the original value instead of an error if no existing atom.
-to_existing_atom(V) when is_list(V) ->
-    try list_to_existing_atom(V) catch _:_ -> V end;
-to_existing_atom(V) when is_binary(V) ->
-    try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
-to_existing_atom(V) when is_atom(V) ->
-    V.
-
-shutdown_sync(Pid) when not is_pid(Pid)->
-    ok;
-shutdown_sync(Pid) ->
-    MRef = erlang:monitor(process, Pid),
-    try
-        catch unlink(Pid),
-        catch exit(Pid, shutdown),
-        receive
-        {'DOWN', MRef, _, _, _} ->
-            ok
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-simple_call(Pid, Message) ->
-    MRef = erlang:monitor(process, Pid),
-    try
-        Pid ! {self(), Message},
-        receive
-        {Pid, Result} ->
-            Result;
-        {'DOWN', MRef, _, _, Reason} ->
-            exit(Reason)
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-validate_utf8(Data) when is_list(Data) ->
-    validate_utf8(?l2b(Data));
-validate_utf8(Bin) when is_binary(Bin) ->
-    validate_utf8_fast(Bin, 0).
-
-validate_utf8_fast(B, O) ->
-    case B of
-        <<_:O/binary>> ->
-            true;
-        <<_:O/binary, C1, _/binary>> when
-                C1 < 128 ->
-            validate_utf8_fast(B, 1 + O);
-        <<_:O/binary, C1, C2, _/binary>> when
-                C1 >= 194, C1 =< 223,
-                C2 >= 128, C2 =< 191 ->
-            validate_utf8_fast(B, 2 + O);
-        <<_:O/binary, C1, C2, C3, _/binary>> when
-                C1 >= 224, C1 =< 239,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191 ->
-            validate_utf8_fast(B, 3 + O);
-        <<_:O/binary, C1, C2, C3, C4, _/binary>> when
-                C1 >= 240, C1 =< 244,
-                C2 >= 128, C2 =< 191,
-                C3 >= 128, C3 =< 191,
-                C4 >= 128, C4 =< 191 ->
-            validate_utf8_fast(B, 4 + O);
-        _ ->
-            false
-    end.
-
-to_hex([]) ->
-    [];
-to_hex(Bin) when is_binary(Bin) ->
-    to_hex(binary_to_list(Bin));
-to_hex([H|T]) ->
-    [to_digit(H div 16), to_digit(H rem 16) | to_hex(T)].
-
-to_digit(N) when N < 10 -> $0 + N;
-to_digit(N)             -> $a + N-10.
-
-
-parse_term(Bin) when is_binary(Bin) ->
-    parse_term(binary_to_list(Bin));
-parse_term(List) ->
-    {ok, Tokens, _} = erl_scan:string(List ++ "."),
-    erl_parse:parse_term(Tokens).
-
-get_value(Key, List) ->
-    get_value(Key, List, undefined).
-
-get_value(Key, List, Default) ->
-    case lists:keysearch(Key, 1, List) of
-    {value, {Key,Value}} ->
-        Value;
-    false ->
-        Default
-    end.
-
-get_nested_json_value({Props}, [Key|Keys]) ->
-    case couch_util:get_value(Key, Props, nil) of
-    nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
-    Value -> get_nested_json_value(Value, Keys)
-    end;
-get_nested_json_value(Value, []) ->
-    Value;
-get_nested_json_value(_NotJSONObj, _) ->
-    throw({not_found, json_mismatch}).
-
-proplist_apply_field(H, L) ->
-    {R} = json_apply_field(H, {L}),
-    R.
-
-json_apply_field(H, {L}) ->
-    json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
-    json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
-    json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
-    {[{Key, NewValue}|Acc]}.
-
-json_user_ctx(#db{name=DbName, user_ctx=Ctx}) ->
-    {[{<<"db">>, DbName},
-            {<<"name">>,Ctx#user_ctx.name},
-            {<<"roles">>,Ctx#user_ctx.roles}]}.
-
-
-% returns a random 32-bit integer
-rand32() ->
-    crypto:rand_uniform(0, 16#100000000).
-
-% given a pathname "../foo/bar/" it gives back the fully qualified
-% absolute pathname.
-abs_pathname(" " ++ Filename) ->
-    % strip leading whitespace
-    abs_pathname(Filename);
-abs_pathname([$/ |_]=Filename) ->
-    Filename;
-abs_pathname(Filename) ->
-    {ok, Cwd} = file:get_cwd(),
-    {Filename2, Args} = separate_cmd_args(Filename, ""),
-    abs_pathname(Filename2, Cwd) ++ Args.
-
-abs_pathname(Filename, Dir) ->
-    Name = filename:absname(Filename, Dir ++ "/"),
-    OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
-    % If the filename is a dir (last char is a slash), put back the end slash
-    case string:right(Filename,1) of
-    "/" ->
-        OutFilename ++ "/";
-    "\\" ->
-        OutFilename ++ "/";
-    _Else->
-        OutFilename
-    end.
-
-% if this is an executable with arguments, separate out the arguments
-% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
-separate_cmd_args("", CmdAcc) ->
-    {lists:reverse(CmdAcc), ""};
-separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
-    separate_cmd_args(Rest, " \\" ++ CmdAcc);
-separate_cmd_args(" " ++ Rest, CmdAcc) ->
-    {lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char|Rest], CmdAcc) ->
-    separate_cmd_args(Rest, [Char | CmdAcc]).
-
-% Is a character whitespace?
-is_whitespace($\s) -> true;
-is_whitespace($\t) -> true;
-is_whitespace($\n) -> true;
-is_whitespace($\r) -> true;
-is_whitespace(_Else) -> false.
-
-
-% removes leading and trailing whitespace from a string
-trim(String) ->
-    String2 = lists:dropwhile(fun is_whitespace/1, String),
-    lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
-
-% takes a hierarchical list of dirs and removes the dots ".", double dots
-% ".." and the corresponding parent dirs.
-fix_path_list([], Acc) ->
-    lists:reverse(Acc);
-fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
-    fix_path_list(Rest, RestAcc);
-fix_path_list(["."|Rest], Acc) ->
-    fix_path_list(Rest, Acc);
-fix_path_list([Dir | Rest], Acc) ->
-    fix_path_list(Rest, [Dir | Acc]).
-
-
-implode(List, Sep) ->
-    implode(List, Sep, []).
-
-implode([], _Sep, Acc) ->
-    lists:flatten(lists:reverse(Acc));
-implode([H], Sep, Acc) ->
-    implode([], Sep, [H|Acc]);
-implode([H|T], Sep, Acc) ->
-    implode(T, Sep, [Sep,H|Acc]).
-
-
-drv_port() ->
-    case get(couch_drv_port) of
-    undefined ->
-        Port = open_port({spawn, "couch_icu_driver"}, []),
-        put(couch_drv_port, Port),
-        Port;
-    Port ->
-        Port
-    end.
-
-collate(A, B) ->
-    collate(A, B, []).
-
-collate(A, B, Options) when is_binary(A), is_binary(B) ->
-    Operation =
-    case lists:member(nocase, Options) of
-        true -> 1; % Case insensitive
-        false -> 0 % Case sensitive
-    end,
-    SizeA = byte_size(A),
-    SizeB = byte_size(B),
-    Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
-    [Result] = erlang:port_control(drv_port(), Operation, Bin),
-    % Result is 0 for lt, 1 for eq and 2 for gt. Subtract 1 to return the
-    % conventional -1, 0, 1
-    Result - 1.
-
-should_flush() ->
-    should_flush(?FLUSH_MAX_MEM).
-
-should_flush(MemThreshHold) ->
-    {memory, ProcMem} = process_info(self(), memory),
-    BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
-        0, element(2,process_info(self(), binary))),
-    if ProcMem+BinMem > 2*MemThreshHold ->
-        garbage_collect(),
-        {memory, ProcMem2} = process_info(self(), memory),
-        BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
-            0, element(2,process_info(self(), binary))),
-        ProcMem2+BinMem2 > MemThreshHold;
-    true -> false end.
-
-encodeBase64Url(Url) ->
-    Url1 = re:replace(base64:encode(Url), ["=+", $$], ""),
-    Url2 = re:replace(Url1, "/", "_", [global]),
-    re:replace(Url2, "\\+", "-", [global, {return, binary}]).
-
-decodeBase64Url(Url64) ->
-    Url1 = re:replace(Url64, "-", "+", [global]),
-    Url2 = re:replace(Url1, "_", "/", [global]),
-    Padding = lists:duplicate((4 - iolist_size(Url2) rem 4) rem 4, $=),
-    base64:decode(iolist_to_binary([Url2, Padding])).
-
-dict_find(Key, Dict, DefaultValue) ->
-    case dict:find(Key, Dict) of
-    {ok, Value} ->
-        Value;
-    error ->
-        DefaultValue
-    end.
-
-to_binary(V) when is_binary(V) ->
-    V;
-to_binary(V) when is_list(V) ->
-    try
-        list_to_binary(V)
-    catch
-        _:_ ->
-            list_to_binary(io_lib:format("~p", [V]))
-    end;
-to_binary(V) when is_atom(V) ->
-    list_to_binary(atom_to_list(V));
-to_binary(V) ->
-    list_to_binary(io_lib:format("~p", [V])).
-
-to_integer(V) when is_integer(V) ->
-    V;
-to_integer(V) when is_list(V) ->
-    erlang:list_to_integer(V);
-to_integer(V) when is_binary(V) ->
-    erlang:list_to_integer(binary_to_list(V)).
-
-to_list(V) when is_list(V) ->
-    V;
-to_list(V) when is_binary(V) ->
-    binary_to_list(V);
-to_list(V) when is_atom(V) ->
-    atom_to_list(V);
-to_list(V) ->
-    lists:flatten(io_lib:format("~p", [V])).
-
-url_encode(Bin) when is_binary(Bin) ->
-    url_encode(binary_to_list(Bin));
-url_encode([H|T]) ->
-    if
-    H >= $a, $z >= H ->
-        [H|url_encode(T)];
-    H >= $A, $Z >= H ->
-        [H|url_encode(T)];
-    H >= $0, $9 >= H ->
-        [H|url_encode(T)];
-    H == $_; H == $.; H == $-; H == $: ->
-        [H|url_encode(T)];
-    true ->
-        case lists:flatten(io_lib:format("~.16.0B", [H])) of
-        [X, Y] ->
-            [$%, X, Y | url_encode(T)];
-        [X] ->
-            [$%, $0, X | url_encode(T)]
-        end
-    end;
-url_encode([]) ->
-    [].
-
-verify([X|RestX], [Y|RestY], Result) ->
-    verify(RestX, RestY, (X bxor Y) bor Result);
-verify([], [], Result) ->
-    Result == 0.
-
-verify(<<X/binary>>, <<Y/binary>>) ->
-    verify(?b2l(X), ?b2l(Y));
-verify(X, Y) when is_list(X) and is_list(Y) ->
-    case length(X) == length(Y) of
-        true ->
-            verify(X, Y, 0);
-        false ->
-            false
-    end;
-verify(_X, _Y) -> false.
-
--spec md5(Data::(iolist() | binary())) -> Digest::binary().
-md5(Data) ->
-    try crypto:md5(Data) catch error:_ -> erlang:md5(Data) end.
-
--spec md5_init() -> Context::binary().
-md5_init() ->
-    try crypto:md5_init() catch error:_ -> erlang:md5_init() end.
-
--spec md5_update(Context::binary(), Data::(iolist() | binary())) ->
-    NewContext::binary().
-md5_update(Ctx, D) ->
-    try crypto:md5_update(Ctx,D) catch error:_ -> erlang:md5_update(Ctx,D) end.
-
--spec md5_final(Context::binary()) -> Digest::binary().
-md5_final(Ctx) ->
-    try crypto:md5_final(Ctx) catch error:_ -> erlang:md5_final(Ctx) end.
-
-% linear search is faster for small lists; length/1 takes ~0.5 ms on a 100k-element list
-reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
-    [couch_util:get_value(Key, SortedResults) || Key <- Keys];
-reorder_results(Keys, SortedResults) ->
-    KeyDict = dict:from_list(SortedResults),
-    [dict:fetch(Key, KeyDict) || Key <- Keys].
-
-url_strip_password(Url) ->
-    re:replace(Url,
-        "http(s)?://([^:]+):[^@]+@(.*)$",
-        "http\\1://\\2:*****@\\3",
-        [{return, list}]).
-
-encode_doc_id(#doc{id = Id}) ->
-    encode_doc_id(Id);
-encode_doc_id(Id) when is_list(Id) ->
-    encode_doc_id(?l2b(Id));
-encode_doc_id(<<"_design/", Rest/binary>>) ->
-    "_design/" ++ url_encode(Rest);
-encode_doc_id(<<"_local/", Rest/binary>>) ->
-    "_local/" ++ url_encode(Rest);
-encode_doc_id(Id) ->
-    url_encode(Id).
-
-
-with_db(Db, Fun) when is_record(Db, db) ->
-    Fun(Db);
-with_db(DbName, Fun) ->
-    case couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}]) of
-        {ok, Db} ->
-            try
-                Fun(Db)
-            after
-                catch couch_db:close(Db)
-            end;
-        Else ->
-            throw(Else)
-    end.
-
-rfc1123_date() ->
-    {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
-    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
-    lists:flatten(
-      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
-            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
-
-rfc1123_date(undefined) ->
-    undefined;
-rfc1123_date(UniversalTime) ->
-    {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime,
-    DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
-    lists:flatten(
-      io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
-            [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
-
-%% day
-
-day(1) -> "Mon";
-day(2) -> "Tue";
-day(3) -> "Wed";
-day(4) -> "Thu";
-day(5) -> "Fri";
-day(6) -> "Sat";
-day(7) -> "Sun".
-
-%% month
-
-month(1) -> "Jan";
-month(2) -> "Feb";
-month(3) -> "Mar";
-month(4) -> "Apr";
-month(5) -> "May";
-month(6) -> "Jun";
-month(7) -> "Jul";
-month(8) -> "Aug";
-month(9) -> "Sep";
-month(10) -> "Oct";
-month(11) -> "Nov";
-month(12) -> "Dec".

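One deleted helper worth a note: verify/2 compares two secrets in time that
depends only on their length, never on the position of the first differing
byte, by folding bxor results into an accumulator rather than
short-circuiting the way a plain =:= comparison may. Hypothetical calls:

    true  = couch_util:verify(<<"secret">>, <<"secret">>),
    false = couch_util:verify(<<"secret">>, <<"secreT">>),
    false = couch_util:verify("abc", "abcd").  % length mismatch is rejected up front
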
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_uuids.erl
----------------------------------------------------------------------
diff --git a/couch_uuids.erl b/couch_uuids.erl
deleted file mode 100644
index 6ed75a1..0000000
--- a/couch_uuids.erl
+++ /dev/null
@@ -1,103 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_uuids).
--include("couch_db.hrl").
-
--behaviour(gen_server).
-
--export([start/0, stop/0]).
--export([new/0, random/0, utc_random/0]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-start() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-new() ->
-    gen_server:call(?MODULE, create).
-
-random() ->
-    list_to_binary(couch_util:to_hex(crypto:rand_bytes(16))).
-
-utc_random() ->
-    utc_suffix(couch_util:to_hex(crypto:rand_bytes(9))).
-
-utc_suffix(Suffix) ->
-    Now = {_, _, Micro} = now(),
-    Nowish = calendar:now_to_universal_time(Now),
-    Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
-    Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
-    Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]),
-    list_to_binary(Prefix ++ Suffix).
-
-init([]) ->
-    ok = couch_config:register(
-        fun("uuids", _) -> gen_server:cast(?MODULE, change) end
-    ),
-    {ok, state()}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(create, _From, random) ->
-    {reply, random(), random};
-handle_call(create, _From, utc_random) ->
-    {reply, utc_random(), utc_random};
-handle_call(create, _From, {utc_id, UtcIdSuffix}) ->
-    {reply, utc_suffix(UtcIdSuffix), {utc_id, UtcIdSuffix}};
-handle_call(create, _From, {sequential, Pref, Seq}) ->
-    Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
-    case Seq >= 16#fff000 of
-        true ->
-            {reply, Result, {sequential, new_prefix(), inc()}};
-        _ ->
-            {reply, Result, {sequential, Pref, Seq + inc()}}
-    end.
-
-handle_cast(change, _State) ->
-    {noreply, state()};
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-new_prefix() ->
-    couch_util:to_hex((crypto:rand_bytes(13))).
-
-inc() ->
-    crypto:rand_uniform(1, 16#ffe).
-
-state() ->
-    AlgoStr = couch_config:get("uuids", "algorithm", "random"),
-    case couch_util:to_existing_atom(AlgoStr) of
-        random ->
-            random;
-        utc_random ->
-            utc_random;
-        utc_id ->
-            UtcIdSuffix = couch_config:get("uuids", "utc_id_suffix", ""),
-            {utc_id, UtcIdSuffix};
-        sequential ->
-            {sequential, new_prefix(), inc()};
-        Unknown ->
-            throw({unknown_uuid_algorithm, Unknown})
-    end.

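The utc_random and utc_id algorithms above both build on utc_suffix/1: a
14-hex-digit count of microseconds since the Unix epoch, followed by the
suffix, so the generated ids sort by creation time. A sketch of just the
prefix computation (using os:timestamp/0 where the deleted code used now/0):

    utc_prefix() ->
        {_, _, Micro} = Now = os:timestamp(),
        NowSecs = calendar:datetime_to_gregorian_seconds(
            calendar:now_to_universal_time(Now)),
        Epoch = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
        % 14 lowercase hex digits, zero-padded; utc_random/0 appends
        % 18 random hex digits (9 random bytes) for a 32-character id
        io_lib:format("~14.16.0b", [(NowSecs - Epoch) * 1000000 + Micro]).
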
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_work_queue.erl
----------------------------------------------------------------------
diff --git a/couch_work_queue.erl b/couch_work_queue.erl
deleted file mode 100644
index 22968d7..0000000
--- a/couch_work_queue.erl
+++ /dev/null
@@ -1,187 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_work_queue).
--behaviour(gen_server).
-
--include("couch_db.hrl").
-
-% public API
--export([new/1, queue/2, dequeue/1, dequeue/2, close/1, item_count/1, size/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2]).
--export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-
--record(q, {
-    queue = queue:new(),
-    blocked = [],
-    max_size,
-    max_items,
-    items = 0,
-    size = 0,
-    work_waiters = [],
-    close_on_dequeue = false,
-    multi_workers = false
-}).
-
-
-new(Options) ->
-    gen_server:start_link(couch_work_queue, Options, []).
-
-
-queue(Wq, Item) when is_binary(Item) ->
-    gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
-queue(Wq, Item) ->
-    gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
-
-
-dequeue(Wq) ->
-    dequeue(Wq, all).
-
-    
-dequeue(Wq, MaxItems) ->
-    try
-        gen_server:call(Wq, {dequeue, MaxItems}, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-item_count(Wq) ->
-    try
-        gen_server:call(Wq, item_count, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-size(Wq) ->
-    try
-        gen_server:call(Wq, size, infinity)
-    catch
-        _:_ -> closed
-    end.
-
-
-close(Wq) ->
-    gen_server:cast(Wq, close).
-    
-
-init(Options) ->
-    Q = #q{
-        max_size = couch_util:get_value(max_size, Options, nil),
-        max_items = couch_util:get_value(max_items, Options, nil),
-        multi_workers = couch_util:get_value(multi_workers, Options, false)
-    },
-    {ok, Q}.
-
-
-terminate(_Reason, #q{work_waiters=Workers}) ->
-    lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
-    
-handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
-    Q = Q0#q{size = Q0#q.size + Size,
-                items = Q0#q.items + 1,
-                queue = queue:in({Item, Size}, Q0#q.queue)},
-    case (Q#q.size >= Q#q.max_size) orelse
-            (Q#q.items >= Q#q.max_items) of
-    true ->
-        {noreply, Q#q{blocked = [From | Q#q.blocked]}};
-    false ->
-        {reply, ok, Q}
-    end;
-
-handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
-    gen_server:reply(W, {ok, [Item]}),
-    {reply, ok, Q#q{work_waiters = Rest}};
-
-handle_call({dequeue, Max}, From, Q) ->
-    #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
-    case {Workers, Multi} of
-    {[_ | _], false} ->
-        exit("Only one caller allowed to wait for this work at a time");
-    {[_ | _], true} ->
-        {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
-    _ ->
-        case Count of
-        0 ->
-            {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
-        C when C > 0 ->
-            deliver_queue_items(Max, Q)
-        end
-    end;
-
-handle_call(item_count, _From, Q) ->
-    {reply, Q#q.items, Q};
-
-handle_call(size, _From, Q) ->
-    {reply, Q#q.size, Q}.
-
-
-deliver_queue_items(Max, Q) ->
-    #q{
-        queue = Queue,
-        items = Count,
-        size = Size,
-        close_on_dequeue = Close,
-        blocked = Blocked
-    } = Q,
-    case (Max =:= all) orelse (Max >= Count) of
-    false ->
-        {Items, Size2, Queue2, Blocked2} = dequeue_items(
-            Max, Size, Queue, Blocked, []),
-        Q2 = Q#q{
-            items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
-        },
-        {reply, {ok, Items}, Q2};
-    true ->
-        lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
-        Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
-        Items = [Item || {Item, _} <- queue:to_list(Queue)],
-        case Close of
-        false ->
-            {reply, {ok, Items}, Q2};
-        true ->
-            {stop, normal, {ok, Items}, Q2}
-        end
-    end.
-
-
-dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
-    {lists:reverse(DequeuedAcc), Size, Queue, Blocked};
-
-dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
-    {{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
-    case Blocked of
-    [] ->
-        Blocked2 = Blocked;
-    [From | Blocked2] ->
-        gen_server:reply(From, ok)
-    end,
-    dequeue_items(
-        NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
-    
-
-handle_cast(close, #q{items = 0} = Q) ->
-    {stop, normal, Q};
-
-handle_cast(close, Q) ->
-    {noreply, Q#q{close_on_dequeue = true}}.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info(X, Q) ->
-    {stop, X, Q}.

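A hypothetical producer/consumer pair for the deleted work queue: queue/2
blocks the producer once max_items (or max_size) is reached, while
dequeue/1 blocks until items arrive and returns the atom closed once the
queue has shut down and drained.

    example() ->
        {ok, Q} = couch_work_queue:new([{max_items, 100}]),
        spawn_link(fun() ->
            [ok = couch_work_queue:queue(Q, {item, N}) || N <- lists:seq(1, 1000)],
            couch_work_queue:close(Q)
        end),
        consume(Q, 0).

    consume(Q, NumSeen) ->
        case couch_work_queue:dequeue(Q) of
            closed ->
                NumSeen;  % queue drained and closed
            {ok, Items} ->
                consume(Q, NumSeen + length(Items))
        end.
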
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/include/couch_db.hrl
----------------------------------------------------------------------
diff --git a/include/couch_db.hrl b/include/couch_db.hrl
new file mode 100644
index 0000000..e0a1c82
--- /dev/null
+++ b/include/couch_db.hrl
@@ -0,0 +1,286 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(LOCAL_DOC_PREFIX, "_local/").
+-define(DESIGN_DOC_PREFIX0, "_design").
+-define(DESIGN_DOC_PREFIX, "_design/").
+-define(DEFAULT_COMPRESSION, snappy).
+
+-define(MIN_STR, <<"">>).
+-define(MAX_STR, <<255>>). % illegal utf string
+
+% the lowest possible database sequence number
+-define(LOWEST_SEQ, 0).
+
+-define(REWRITE_COUNT, couch_rewrite_count).
+
+-define(JSON_ENCODE(V), jiffy:encode(V, [uescape])).
+-define(JSON_DECODE(V), couch_util:json_decode(V)).
+
+-define(b2l(V), binary_to_list(V)).
+-define(l2b(V), list_to_binary(V)).
+-define(term_to_bin(T), term_to_binary(T, [{minor_version, 1}])).
+-define(term_size(T),
+    try
+        erlang:external_size(T)
+    catch _:_ ->
+        byte_size(?term_to_bin(T))
+    end).
+
+-define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
+
+-define(LOG_DEBUG(Format, Args),
+    case couch_log:debug_on(?MODULE) of
+        true ->
+            couch_log:debug(Format, Args);
+        false -> ok
+    end).
+
+-define(LOG_INFO(Format, Args),
+    case couch_log:info_on(?MODULE) of
+        true ->
+            couch_log:info(Format, Args);
+        false -> ok
+    end).
+
+-define(LOG_WARN(Format, Args),
+    case couch_log:warn_on(?MODULE) of
+        true ->
+            couch_log:warn(Format, Args);
+        false -> ok
+    end).
+
+-define(LOG_ERROR(Format, Args), couch_log:error(Format, Args)).
+
+% Tree::term() is really a tree(), but we don't want to require R13B04 yet
+-type branch() :: {Key::term(), Value::term(), Tree::term()}.
+-type path() :: {Start::pos_integer(), branch()}.
+-type tree() :: [branch()]. % sorted by key
+
+-record(rev_info,
+    {
+    rev,
+    seq = 0,
+    deleted = false,
+    body_sp = nil % stream pointer
+    }).
+
+-record(doc_info,
+    {
+    id = <<"">>,
+    high_seq = 0,
+    revs = [] % rev_info
+    }).
+
+-record(full_doc_info,
+    {id = <<"">>,
+    update_seq = 0,
+    deleted = false,
+    rev_tree = [],
+    leafs_size = 0
+    }).
+
+-record(httpd,
+    {mochi_req,
+    peer,
+    method,
+    requested_path_parts,
+    path_parts,
+    db_url_handlers,
+    user_ctx,
+    req_body = undefined,
+    design_url_handlers,
+    auth,
+    default_fun,
+    url_handlers
+    }).
+
+
+-record(doc,
+    {
+    id = <<"">>,
+    revs = {0, []},
+
+    % the json body object.
+    body = {[]},
+
+    atts = [], % attachments
+
+    deleted = false,
+
+    % key/value tuple of meta information, provided when using special options:
+    % couch_db:open_doc(Db, Id, Options).
+    meta = []
+    }).
+
+
+-record(att,
+    {
+    name,
+    type,
+    att_len,
+    disk_len, % length of the attachment in its identity form
+              % (that is, without a content encoding applied to it)
+              % differs from att_len when encoding /= identity
+    md5= <<>>,
+    revpos=0,
+    data,
+    encoding=identity % currently supported values are:
+                      %     identity, gzip
+                      % additional values to support in the future:
+                      %     deflate, compress
+    }).
+
+
+-record(user_ctx,
+    {
+    name=null,
+    roles=[],
+    handler
+    }).
+
+% This should be updated anytime a header change happens that requires more
+% than filling in new defaults.
+%
+% As long as the changes are limited to new header fields (with inline
+% defaults) added to the end of the record, then there is no need to increment
+% the disk revision number.
+%
+% If the disk revision is incremented, then new upgrade logic will need to be
+% added to couch_db_updater:init_db.
+
+-define(LATEST_DISK_VERSION, 6).
+
+-record(db_header,
+    {disk_version = ?LATEST_DISK_VERSION,
+     update_seq = 0,
+     unused = 0,
+     fulldocinfo_by_id_btree_state = nil,
+     docinfo_by_seq_btree_state = nil,
+     local_docs_btree_state = nil,
+     purge_seq = 0,
+     purged_docs = nil,
+     security_ptr = nil,
+     revs_limit = 1000
+    }).
+
+-record(db,
+    {main_pid = nil,
+    update_pid = nil,
+    compactor_pid = nil,
+    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+    fd,
+    updater_fd,
+    fd_ref_counter,
+    header = #db_header{},
+    committed_update_seq,
+    fulldocinfo_by_id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    update_seq,
+    name,
+    filepath,
+    validate_doc_funs = [],
+    security = [],
+    security_ptr = nil,
+    user_ctx = #user_ctx{},
+    waiting_delayed_commit = nil,
+    revs_limit = 1000,
+    fsync_options = [],
+    options = [],
+    compression,
+    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
+    after_doc_read = nil     % nil | fun(Doc, Db) -> NewDoc
+    }).
+
+
+-record(view_query_args, {
+    start_key,
+    end_key,
+    start_docid = ?MIN_STR,
+    end_docid = ?MAX_STR,
+
+    direction = fwd,
+    inclusive_end=true, % aka a closed-interval
+
+    limit = 10000000000, % Huge number to simplify logic
+    skip = 0,
+
+    group_level = 0,
+
+    view_type = nil,
+    include_docs = false,
+    doc_options = [],
+    conflicts = false,
+    stale = false,
+    multi_get = false,
+    callback = nil,
+    list = nil
+}).
+
+-record(view_fold_helper_funs, {
+    reduce_count,
+    passed_end,
+    start_response,
+    send_row
+}).
+
+-record(reduce_fold_helper_funs, {
+    start_response,
+    send_row
+}).
+
+-record(extern_resp_args, {
+    code = 200,
+    stop = false,
+    data = <<>>,
+    ctype = "application/json",
+    headers = [],
+    json = nil
+}).
+
+-record(index_header,
+    {seq=0,
+    purge_seq=0,
+    id_btree_state=nil,
+    view_states=nil
+    }).
+
+% small value used in revision trees to indicate the revision isn't stored
+-define(REV_MISSING, []).
+
+-record(changes_args, {
+    feed = "normal",
+    dir = fwd,
+    since = 0,
+    limit = 1000000000000000,
+    style = main_only,
+    heartbeat,
+    timeout,
+    filter = "",
+    filter_fun,
+    filter_args = [],
+    include_docs = false,
+    doc_options = [],
+    conflicts = false,
+    db_open_options = []
+}).
+
+-record(btree, {
+    fd,
+    root,
+    extract_kv = fun({_Key, _Value} = KV) -> KV end,
+    assemble_kv = fun(Key, Value) -> {Key, Value} end,
+    less = fun(A, B) -> A < B end,
+    reduce = nil,
+    compression = ?DEFAULT_COMPRESSION
+}).


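A small sketch using the central record from the new header file (the rev
id below is made up): #doc{} carries revision info as {Start, [RevId, ...]}
and the document body as EJSON, i.e. nested {Proplist} tuples.

    -include("couch_db.hrl").

    new_doc() ->
        #doc{
            id = <<"example-doc">>,
            revs = {1, [<<"967a00dff5e02add41819138abb3284d">>]},  % {Start, [RevIds]}
            body = {[{<<"value">>, 42}]}  % EJSON object
        }.
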
[07/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_db_updater.erl
----------------------------------------------------------------------
diff --git a/src/couch_db_updater.erl b/src/couch_db_updater.erl
new file mode 100644
index 0000000..af7578e
--- /dev/null
+++ b/src/couch_db_updater.erl
@@ -0,0 +1,1035 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_updater).
+-behaviour(gen_server).
+
+-export([btree_by_id_reduce/2,btree_by_seq_reduce/2]).
+-export([make_doc_summary/2]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+
+init({MainPid, DbName, Filepath, Fd, Options}) ->
+    process_flag(trap_exit, true),
+    case lists:member(create, Options) of
+    true ->
+        % create a new header and write it to the file
+        Header =  #db_header{},
+        ok = couch_file:write_header(Fd, Header),
+        % delete any old compaction files that might be hanging around
+        RootDir = couch_config:get("couchdb", "database_dir", "."),
+        couch_file:delete(RootDir, Filepath ++ ".compact");
+    false ->
+        case couch_file:read_header(Fd) of
+        {ok, Header} ->
+            ok;
+        no_valid_header ->
+            % create a new header and write it to the file
+            Header =  #db_header{},
+            ok = couch_file:write_header(Fd, Header),
+            % delete any old compaction files that might be hanging around
+            file:delete(Filepath ++ ".compact")
+        end
+    end,
+    ReaderFd = open_reader_fd(Filepath, Options),
+    Db = init_db(DbName, Filepath, Fd, ReaderFd, Header, Options),
+    Db2 = refresh_validate_doc_funs(Db),
+    {ok, Db2#db{main_pid = MainPid}}.
+
+
+terminate(_Reason, Db) ->
+    ok = couch_file:close(Db#db.updater_fd),
+    ok = couch_file:close(Db#db.fd),
+    couch_util:shutdown_sync(Db#db.compactor_pid),
+    couch_util:shutdown_sync(Db#db.fd_ref_counter),
+    ok.
+
+handle_call(get_db, _From, Db) ->
+    {reply, {ok, Db}, Db};
+handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
+    {reply, ok, Db}; % no data waiting, return ok immediately
+handle_call(full_commit, _From,  Db) ->
+    {reply, ok, commit_data(Db)}; % commit the data and return ok
+handle_call(increment_update_seq, _From, Db) ->
+    Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
+    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+    couch_db_update_notifier:notify({updated, Db#db.name}),
+    {reply, {ok, Db2#db.update_seq}, Db2};
+
+handle_call({set_security, NewSec}, _From, #db{compression = Comp} = Db) ->
+    {ok, Ptr, _} = couch_file:append_term(
+        Db#db.updater_fd, NewSec, [{compression, Comp}]),
+    Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
+            update_seq=Db#db.update_seq+1}),
+    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+    {reply, ok, Db2};
+
+handle_call({set_revs_limit, Limit}, _From, Db) ->
+    Db2 = commit_data(Db#db{revs_limit=Limit,
+            update_seq=Db#db.update_seq+1}),
+    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+    {reply, ok, Db2};
+
+handle_call({purge_docs, _IdRevs}, _From,
+        #db{compactor_pid=Pid}=Db) when Pid /= nil ->
+    {reply, {error, purge_during_compaction}, Db};
+handle_call({purge_docs, IdRevs}, _From, Db) ->
+    #db{
+        updater_fd = Fd,
+        fulldocinfo_by_id_btree = DocInfoByIdBTree,
+        docinfo_by_seq_btree = DocInfoBySeqBTree,
+        update_seq = LastSeq,
+        header = Header = #db_header{purge_seq=PurgeSeq},
+        compression = Comp
+        } = Db,
+    DocLookups = couch_btree:lookup(DocInfoByIdBTree,
+            [Id || {Id, _Revs} <- IdRevs]),
+
+    NewDocInfos = lists:zipwith(
+        fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
+            case couch_key_tree:remove_leafs(Tree, Revs) of
+            {_, []=_RemovedRevs} -> % no change
+                nil;
+            {NewTree, RemovedRevs} ->
+                {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs}
+            end;
+        (_, not_found) ->
+            nil
+        end,
+        IdRevs, DocLookups),
+
+    SeqsToRemove = [Seq
+            || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
+
+    FullDocInfoToUpdate = [FullInfo
+            || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
+            <- NewDocInfos, Tree /= []],
+
+    IdRevsPurged = [{Id, Revs}
+            || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
+
+    {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
+        fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
+            Tree2 = couch_key_tree:map_leafs(
+                fun(_RevId, LeafVal) ->
+                    IsDeleted = element(1, LeafVal),
+                    BodyPointer = element(2, LeafVal),
+                    {IsDeleted, BodyPointer, SeqAcc + 1}
+                end, Tree),
+            {couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}),
+                SeqAcc + 1}
+        end, LastSeq, FullDocInfoToUpdate),
+
+    IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
+            <- NewDocInfos],
+
+    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
+            DocInfoToUpdate, SeqsToRemove),
+    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
+            FullDocInfoToUpdate, IdsToRemove),
+    {ok, Pointer, _} = couch_file:append_term(
+            Fd, IdRevsPurged, [{compression, Comp}]),
+
+    Db2 = commit_data(
+        Db#db{
+            fulldocinfo_by_id_btree = DocInfoByIdBTree2,
+            docinfo_by_seq_btree = DocInfoBySeqBTree2,
+            update_seq = NewSeq + 1,
+            header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
+
+    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+    couch_db_update_notifier:notify({updated, Db#db.name}),
+    {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2};
+handle_call(start_compact, _From, Db) ->
+    case Db#db.compactor_pid of
+    nil ->
+        ?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]),
+        Pid = spawn_link(fun() -> start_copy_compact(Db) end),
+        Db2 = Db#db{compactor_pid=Pid},
+        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
+        {reply, {ok, Pid}, Db2};
+    _ ->
+        % compaction already running, this is a no-op
+        {reply, {ok, Db#db.compactor_pid}, Db}
+    end;
+handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
+    {reply, ok, Db};
+handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
+    unlink(Pid),
+    exit(Pid, kill),
+    RootDir = couch_config:get("couchdb", "database_dir", "."),
+    ok = couch_file:delete(RootDir, Db#db.filepath ++ ".compact"),
+    {reply, ok, Db#db{compactor_pid = nil}};
+
+
+handle_call({compact_done, CompactFilepath}, _From, #db{filepath=Filepath}=Db) ->
+    {ok, NewFd} = couch_file:open(CompactFilepath),
+    ReaderFd = open_reader_fd(CompactFilepath, Db#db.options),
+    {ok, NewHeader} = couch_file:read_header(NewFd),
+    #db{update_seq=NewSeq} = NewDb =
+        init_db(Db#db.name, Filepath, NewFd, ReaderFd, NewHeader, Db#db.options),
+    unlink(NewFd),
+    case Db#db.update_seq == NewSeq of
+    true ->
+        % suck up all the local docs into memory and write them to the new db
+        {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
+                fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
+        {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
+
+        NewDb2 = commit_data(NewDb#db{
+            local_docs_btree = NewLocalBtree,
+            main_pid = Db#db.main_pid,
+            filepath = Filepath,
+            instance_start_time = Db#db.instance_start_time,
+            revs_limit = Db#db.revs_limit
+        }),
+
+        ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
+                [Filepath, CompactFilepath]),
+        RootDir = couch_config:get("couchdb", "database_dir", "."),
+        couch_file:delete(RootDir, Filepath),
+        ok = file:rename(CompactFilepath, Filepath),
+        close_db(Db),
+        NewDb3 = refresh_validate_doc_funs(NewDb2),
+        ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity),
+        couch_db_update_notifier:notify({compacted, NewDb3#db.name}),
+        ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
+        {reply, ok, NewDb3#db{compactor_pid=nil}};
+    false ->
+        ?LOG_INFO("Compaction file still behind main file "
+            "(update seq=~p. compact update seq=~p). Retrying.",
+            [Db#db.update_seq, NewSeq]),
+        close_db(NewDb),
+        {reply, {retry, Db}, Db}
+    end.
+
+
+handle_cast(Msg, #db{name = Name} = Db) ->
+    ?LOG_ERROR("Database `~s` updater received unexpected cast: ~p", [Name, Msg]),
+    {stop, Msg, Db}.
+
+
+handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
+        FullCommit}, Db) ->
+    GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
+    if NonRepDocs == [] ->
+        {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
+                [Client], MergeConflicts, FullCommit);
+    true ->
+        GroupedDocs3 = GroupedDocs2,
+        FullCommit2 = FullCommit,
+        Clients = [Client]
+    end,
+    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
+    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
+                FullCommit2) of
+    {ok, Db2, UpdatedDDocIds} ->
+        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
+        if Db2#db.update_seq /= Db#db.update_seq ->
+            couch_db_update_notifier:notify({updated, Db2#db.name});
+        true -> ok
+        end,
+        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
+        lists:foreach(fun(DDocId) ->
+            couch_db_update_notifier:notify({ddoc_updated, {Db#db.name, DDocId}})
+        end, UpdatedDDocIds),
+        {noreply, Db2}
+    catch
+        throw: retry ->
+            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
+            {noreply, Db}
+    end;
+handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
+    % no outstanding delayed commits, ignore
+    {noreply, Db};
+handle_info(delayed_commit, Db) ->
+    case commit_data(Db) of
+        Db ->
+            {noreply, Db};
+        Db2 ->
+            ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+            {noreply, Db2}
+    end;
+handle_info({'EXIT', _Pid, normal}, Db) ->
+    {noreply, Db};
+handle_info({'EXIT', _Pid, Reason}, Db) ->
+    {stop, Reason, Db}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+merge_updates([], RestB, AccOutGroups) ->
+    lists:reverse(AccOutGroups, RestB);
+merge_updates(RestA, [], AccOutGroups) ->
+    lists:reverse(AccOutGroups, RestA);
+merge_updates([[{_, {#doc{id=IdA}, _}}|_]=GroupA | RestA],
+        [[{_, {#doc{id=IdB}, _}}|_]=GroupB | RestB], AccOutGroups) ->
+    if IdA == IdB ->
+        merge_updates(RestA, RestB, [GroupA ++ GroupB | AccOutGroups]);
+    IdA < IdB ->
+        merge_updates(RestA, [GroupB | RestB], [GroupA | AccOutGroups]);
+    true ->
+        merge_updates([GroupA | RestA], RestB, [GroupB | AccOutGroups])
+    end.
+
+collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
+    receive
+        % Only collect updates with the same MergeConflicts flag and without
+        % local docs. It's easier to just avoid multiple _local doc
+        % updaters than deal with their possible conflicts, and local doc
+        % writes are relatively rare. Can be optimized later if really needed.
+        {update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
+            GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
+                    || DocGroup <- GroupedDocs],
+            GroupedDocsAcc2 =
+                merge_updates(GroupedDocsAcc, GroupedDocs2, []),
+            collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
+                    MergeConflicts, (FullCommit or FullCommit2))
+    after 0 ->
+        {GroupedDocsAcc, ClientsAcc, FullCommit}
+    end.
+
+
+btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
+    {RevInfos, DeletedRevInfos} = lists:foldl(
+        fun(#rev_info{deleted = false, seq = Seq} = Ri, {Acc, AccDel}) ->
+                {[{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | Acc], AccDel};
+            (#rev_info{deleted = true, seq = Seq} = Ri, {Acc, AccDel}) ->
+                {Acc, [{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | AccDel]}
+        end,
+        {[], []}, Revs),
+    {KeySeq, {Id, lists:reverse(RevInfos), lists:reverse(DeletedRevInfos)}}.
+
+btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
+    #doc_info{
+        id = Id,
+        high_seq=KeySeq,
+        revs =
+            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
+                {Rev, Seq, Bp} <- RevInfos] ++
+            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
+                {Rev, Seq, Bp} <- DeletedRevInfos]}.
+
+btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
+        deleted=Deleted, rev_tree=Tree}) ->
+    DiskTree =
+    couch_key_tree:map(
+        fun(_RevId, ?REV_MISSING) ->
+            ?REV_MISSING;
+        (_RevId, RevValue) ->
+            IsDeleted = element(1, RevValue),
+            BodyPointer = element(2, RevValue),
+            UpdateSeq = element(3, RevValue),
+            Size = case tuple_size(RevValue) of
+            4 ->
+                element(4, RevValue);
+            3 ->
+                % pre 1.2 format, will be upgraded on compaction
+                nil
+            end,
+            {if IsDeleted -> 1; true -> 0 end, BodyPointer, UpdateSeq, Size}
+        end, Tree),
+    {Id, {Seq, if Deleted -> 1; true -> 0 end, DiskTree}}.
+
+btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
+    {Tree, LeafsSize} =
+    couch_key_tree:mapfold(
+        fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}, leaf, _Acc) ->
+            % pre 1.2 format, will be upgraded on compaction
+            {{IsDeleted == 1, BodyPointer, UpdateSeq, nil}, nil};
+        (_RevId, {IsDeleted, BodyPointer, UpdateSeq}, branch, Acc) ->
+            {{IsDeleted == 1, BodyPointer, UpdateSeq, nil}, Acc};
+        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, leaf, Acc) ->
+            Acc2 = sum_leaf_sizes(Acc, Size),
+            {{IsDeleted == 1, BodyPointer, UpdateSeq, Size}, Acc2};
+        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, branch, Acc) ->
+            {{IsDeleted == 1, BodyPointer, UpdateSeq, Size}, Acc};
+        (_RevId, ?REV_MISSING, _Type, Acc) ->
+            {?REV_MISSING, Acc}
+        end, 0, DiskTree),
+    #full_doc_info{
+        id = Id,
+        update_seq = HighSeq,
+        deleted = (Deleted == 1),
+        rev_tree = Tree,
+        leafs_size = LeafsSize
+    }.
+
+btree_by_id_reduce(reduce, FullDocInfos) ->
+    lists:foldl(
+        fun(Info, {NotDeleted, Deleted, Size}) ->
+            Size2 = sum_leaf_sizes(Size, Info#full_doc_info.leafs_size),
+            case Info#full_doc_info.deleted of
+            true ->
+                {NotDeleted, Deleted + 1, Size2};
+            false ->
+                {NotDeleted + 1, Deleted, Size2}
+            end
+        end,
+        {0, 0, 0}, FullDocInfos);
+btree_by_id_reduce(rereduce, Reds) ->
+    lists:foldl(
+        fun({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSize}) ->
+            % pre 1.2 format, will be upgraded on compaction
+            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
+        ({NotDeleted, Deleted, Size}, {AccNotDeleted, AccDeleted, AccSize}) ->
+            AccSize2 = sum_leaf_sizes(AccSize, Size),
+            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSize2}
+        end,
+        {0, 0, 0}, Reds).
+
+sum_leaf_sizes(nil, _) ->
+    nil;
+sum_leaf_sizes(_, nil) ->
+    nil;
+sum_leaf_sizes(Size1, Size2) ->
+    Size1 + Size2.
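+
+% nil marks a leaf size recorded in the pre 1.2 on-disk format and is
+% absorbing: once either operand is unknown, the sum is unknown. For
+% example, sum_leaf_sizes(10, 32) is 42, while sum_leaf_sizes(10, nil)
+% and sum_leaf_sizes(nil, 32) are both nil.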
+
+btree_by_seq_reduce(reduce, DocInfos) ->
+    % count the number of documents
+    length(DocInfos);
+btree_by_seq_reduce(rereduce, Reds) ->
+    lists:sum(Reds).
+
+simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) ->
+    OldSz = tuple_size(Old),
+    NewValuesTail =
+        lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz),
+    list_to_tuple(tuple_to_list(Old) ++ NewValuesTail);
+simple_upgrade_record(Old, _New) ->
+    Old.
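+
+% A hypothetical example of the upgrade (tuple shapes invented for
+% illustration): an old, shorter record keeps its fields and is padded
+% with the trailing defaults taken from the new record.
+%
+%   Old = {db_header, 5},
+%   New = {db_header, 6, 0, nil},
+%   simple_upgrade_record(Old, New).
+%   % => {db_header, 5, 0, nil}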
+
+-define(OLD_DISK_VERSION_ERROR,
+    "Database files from versions smaller than 0.10.0 are no longer supported").
+
+init_db(DbName, Filepath, Fd, ReaderFd, Header0, Options) ->
+    Header1 = simple_upgrade_record(Header0, #db_header{}),
+    Header =
+    case element(2, Header1) of
+    1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+    2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+    3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+    4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11
+    5 -> Header1; % pre 1.2
+    ?LATEST_DISK_VERSION -> Header1;
+    _ -> throw({database_disk_version_error, "Incorrect disk header version"})
+    end,
+
+    {ok, FsyncOptions} = couch_util:parse_term(
+            couch_config:get("couchdb", "fsync_options",
+                    "[before_header, after_header, on_file_open]")),
+
+    case lists:member(on_file_open, FsyncOptions) of
+    true -> ok = couch_file:sync(Fd);
+    _ -> ok
+    end,
+
+    Compression = couch_compress:get_compression_method(),
+
+    {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
+        [{split, fun(X) -> btree_by_id_split(X) end},
+        {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
+        {reduce, fun(X,Y) -> btree_by_id_reduce(X,Y) end},
+        {compression, Compression}]),
+    {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
+            [{split, fun(X) -> btree_by_seq_split(X) end},
+            {join, fun(X,Y) -> btree_by_seq_join(X,Y) end},
+            {reduce, fun(X,Y) -> btree_by_seq_reduce(X,Y) end},
+            {compression, Compression}]),
+    {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd,
+        [{compression, Compression}]),
+    case Header#db_header.security_ptr of
+    nil ->
+        Security = [],
+        SecurityPtr = nil;
+    SecurityPtr ->
+        {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
+    end,
+    % convert start time tuple to microsecs and store as a binary string
+    {MegaSecs, Secs, MicroSecs} = now(),
+    StartTime = ?l2b(io_lib:format("~p",
+            [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
+    {ok, RefCntr} = couch_ref_counter:start([Fd, ReaderFd]),
+    #db{
+        update_pid=self(),
+        fd = ReaderFd,
+        updater_fd = Fd,
+        fd_ref_counter = RefCntr,
+        header=Header,
+        fulldocinfo_by_id_btree = IdBtree,
+        docinfo_by_seq_btree = SeqBtree,
+        local_docs_btree = LocalDocsBtree,
+        committed_update_seq = Header#db_header.update_seq,
+        update_seq = Header#db_header.update_seq,
+        name = DbName,
+        filepath = Filepath,
+        security = Security,
+        security_ptr = SecurityPtr,
+        instance_start_time = StartTime,
+        revs_limit = Header#db_header.revs_limit,
+        fsync_options = FsyncOptions,
+        options = Options,
+        compression = Compression,
+        before_doc_update = couch_util:get_value(before_doc_update, Options, nil),
+        after_doc_read = couch_util:get_value(after_doc_read, Options, nil)
+        }.
+
+open_reader_fd(Filepath, Options) ->
+    {ok, Fd} = case lists:member(sys_db, Options) of
+    true ->
+        couch_file:open(Filepath, [read_only, sys_db]);
+    false ->
+        couch_file:open(Filepath, [read_only])
+    end,
+    unlink(Fd),
+    Fd.
+
+close_db(#db{fd_ref_counter = RefCntr}) ->
+    couch_ref_counter:drop(RefCntr).
+
+
+refresh_validate_doc_funs(Db0) ->
+    Db = Db0#db{user_ctx = #user_ctx{roles=[<<"_admin">>]}},
+    DesignDocs = couch_db:get_design_docs(Db),
+    ProcessDocFuns = lists:flatmap(
+        fun(DesignDocInfo) ->
+            {ok, DesignDoc} = couch_db:open_doc_int(
+                Db, DesignDocInfo, [ejson_body]),
+            case couch_doc:get_validate_doc_fun(DesignDoc) of
+            nil -> [];
+            Fun -> [Fun]
+            end
+        end, DesignDocs),
+    Db0#db{validate_doc_funs=ProcessDocFuns}.
+
+% rev tree functions
+
+flush_trees(_Db, [], AccFlushedTrees) ->
+    {ok, lists:reverse(AccFlushedTrees)};
+flush_trees(#db{updater_fd = Fd} = Db,
+        [InfoUnflushed | RestUnflushed], AccFlushed) ->
+    #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
+    {Flushed, LeafsSize} = couch_key_tree:mapfold(
+        fun(_Rev, Value, Type, Acc) ->
+            case Value of
+            #doc{deleted = IsDeleted, body = {summary, Summary, AttsFd}} ->
+                % This node value is actually an unwritten document summary;
+                % write it to disk. First make sure the Fd the attachment
+                % bins were written to is the same Fd we are writing to.
+                % All bins should have been written to disk already.
+                case {AttsFd, Fd} of
+                {nil, _} ->
+                    ok;
+                {SameFd, SameFd} ->
+                    ok;
+                _ ->
+                    % The Fd the attachments were written to is not the
+                    % same as our Fd. This can happen when a database file
+                    % is switched out during a compaction.
+                    ?LOG_DEBUG("File where the attachments are written has"
+                            " changed. Possibly retrying.", []),
+                    throw(retry)
+                end,
+                {ok, NewSummaryPointer, SummarySize} =
+                    couch_file:append_raw_chunk(Fd, Summary),
+                TotalSize = lists:foldl(
+                    fun(#att{att_len = L}, A) -> A + L end,
+                    SummarySize, Value#doc.atts),
+                NewValue = {IsDeleted, NewSummaryPointer, UpdateSeq, TotalSize},
+                case Type of
+                leaf ->
+                    {NewValue, Acc + TotalSize};
+                branch ->
+                    {NewValue, Acc}
+                end;
+            {_, _, _, LeafSize} when Type =:= leaf, LeafSize =/= nil ->
+                {Value, Acc + LeafSize};
+            _ ->
+                {Value, Acc}
+            end
+        end, 0, Unflushed),
+    InfoFlushed = InfoUnflushed#full_doc_info{
+        rev_tree = Flushed,
+        leafs_size = LeafsSize
+    },
+    flush_trees(Db, RestUnflushed, [InfoFlushed | AccFlushed]).
+
+
+send_result(Client, Ref, NewResult) ->
+    % used to send a result to the client
+    catch(Client ! {result, self(), {Ref, NewResult}}).
+
+merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
+    {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
+merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
+        [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
+    #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted0,update_seq=OldSeq}
+            = OldDocInfo,
+    {NewRevTree, _} = lists:foldl(
+        fun({Client, {#doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc, Ref}}, {AccTree, OldDeleted}) ->
+            if not MergeConflicts ->
+                case couch_key_tree:merge(AccTree, couch_doc:to_path(NewDoc),
+                    Limit) of
+                {_NewTree, conflicts} when (not OldDeleted) ->
+                    send_result(Client, Ref, conflict),
+                    {AccTree, OldDeleted};
+                {NewTree, conflicts} when PrevRevs /= [] ->
+                    % If a previous revision was specified, check that it
+                    % is a leaf node in the tree
+                    Leafs = couch_key_tree:get_all_leafs(AccTree),
+                    IsPrevLeaf = lists:any(fun({_, {LeafPos, [LeafRevId|_]}}) ->
+                            {LeafPos, LeafRevId} == {Pos-1, hd(PrevRevs)}
+                        end, Leafs),
+                    if IsPrevLeaf ->
+                        {NewTree, OldDeleted};
+                    true ->
+                        send_result(Client, Ref, conflict),
+                        {AccTree, OldDeleted}
+                    end;
+                {NewTree, no_conflicts} when AccTree == NewTree ->
+                    % The tree didn't change at all, meaning we are saving
+                    % a rev that has already been edited again.
+                    if (Pos == 1) and OldDeleted ->
+                        % This means we are recreating a brand new document
+                        % in a state that existed before. Put the rev into
+                        % a subsequent edit of the deletion.
+                        #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
+                                couch_doc:to_doc_info(OldDocInfo),
+                        NewRevId = couch_db:new_revid(
+                                NewDoc#doc{revs={OldPos, [OldRev]}}),
+                        NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
+                        {NewTree2, _} = couch_key_tree:merge(AccTree,
+                                couch_doc:to_path(NewDoc2), Limit),
+                        % we changed the rev id, so tell the caller we did
+                        send_result(Client, Ref, {ok, {OldPos + 1, NewRevId}}),
+                        {NewTree2, OldDeleted};
+                    true ->
+                        send_result(Client, Ref, conflict),
+                        {AccTree, OldDeleted}
+                    end;
+                {NewTree, _} ->
+                    {NewTree, NewDoc#doc.deleted}
+                end;
+            true ->
+                {NewTree, _} = couch_key_tree:merge(AccTree,
+                            couch_doc:to_path(NewDoc), Limit),
+                {NewTree, OldDeleted}
+            end
+        end,
+        {OldTree, OldDeleted0}, NewDocs),
+    if NewRevTree == OldTree ->
+        % nothing changed
+        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+            AccNewInfos, AccRemoveSeqs, AccSeq);
+    true ->
+        % we have updated the document, give it a new seq #
+        NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
+        RemoveSeqs = case OldSeq of
+            0 -> AccRemoveSeqs;
+            _ -> [OldSeq | AccRemoveSeqs]
+        end,
+        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+            [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
+    end.
+
+
+
+new_index_entries([], AccById, AccBySeq, AccDDocIds) ->
+    {AccById, AccBySeq, AccDDocIds};
+new_index_entries([FullDocInfo|RestInfos], AccById, AccBySeq, AccDDocIds) ->
+    #doc_info{revs=[#rev_info{deleted=Deleted}|_], id=Id} = DocInfo =
+            couch_doc:to_doc_info(FullDocInfo),
+    AccDDocIds2 = case Id of
+    <<?DESIGN_DOC_PREFIX, _/binary>> ->
+        [Id | AccDDocIds];
+    _ ->
+        AccDDocIds
+    end,
+    new_index_entries(RestInfos,
+        [FullDocInfo#full_doc_info{deleted=Deleted}|AccById],
+        [DocInfo|AccBySeq],
+        AccDDocIds2).
+
+
+stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
+    [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
+            #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
+
+update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
+    #db{
+        fulldocinfo_by_id_btree = DocInfoByIdBTree,
+        docinfo_by_seq_btree = DocInfoBySeqBTree,
+        update_seq = LastSeq,
+        revs_limit = RevsLimit
+        } = Db,
+    Ids = [Id || [{_Client, {#doc{id=Id}, _Ref}}|_] <- DocsList],
+    % look up the old documents, if they exist.
+    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
+    OldDocInfos = lists:zipwith(
+        fun(_Id, {ok, FullDocInfo}) ->
+            FullDocInfo;
+        (Id, not_found) ->
+            #full_doc_info{id=Id}
+        end,
+        Ids, OldDocLookups),
+    % Merge the new docs into the revision trees.
+    {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
+            MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
+
+    % All documents are now ready to write.
+
+    {ok, Db2}  = update_local_docs(Db, NonRepDocs),
+
+    % Write out the document summaries (the bodies are stored in the nodes of
+    % the trees, the attachments are already written to disk)
+    {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
+
+    {IndexFullDocInfos, IndexDocInfos, UpdatedDDocIds} =
+            new_index_entries(FlushedFullDocInfos, [], [], []),
+
+    % ... and update the by-id and by-seq indexes
+    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
+    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexDocInfos, RemoveSeqs),
+
+    Db3 = Db2#db{
+        fulldocinfo_by_id_btree = DocInfoByIdBTree2,
+        docinfo_by_seq_btree = DocInfoBySeqBTree2,
+        update_seq = NewSeq},
+
+    % Check if we just updated any design documents, and update the validation
+    % funs if we did.
+    Db4 = case UpdatedDDocIds of
+    [] ->
+        Db3;
+    _ ->
+        refresh_validate_doc_funs(Db3)
+    end,
+
+    {ok, commit_data(Db4, not FullCommit), UpdatedDDocIds}.
+
+update_local_docs(Db, []) ->
+    {ok, Db};
+update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
+    Ids = [Id || {_Client, {#doc{id=Id}, _Ref}} <- Docs],
+    OldDocLookups = couch_btree:lookup(Btree, Ids),
+    BtreeEntries = lists:zipwith(
+        fun({Client, {#doc{id=Id,deleted=Delete,revs={0,PrevRevs},body=Body}, Ref}}, OldDocLookup) ->
+            case PrevRevs of
+            [RevStr|_] ->
+                PrevRev = list_to_integer(?b2l(RevStr));
+            [] ->
+                PrevRev = 0
+            end,
+            OldRev =
+            case OldDocLookup of
+                {ok, {_, {OldRev0, _}}} -> OldRev0;
+                not_found -> 0
+            end,
+            case OldRev == PrevRev of
+            true ->
+                case Delete of
+                    false ->
+                        send_result(Client, Ref, {ok,
+                                {0, ?l2b(integer_to_list(PrevRev + 1))}}),
+                        {update, {Id, {PrevRev + 1, Body}}};
+                    true  ->
+                        send_result(Client, Ref,
+                                {ok, {0, <<"0">>}}),
+                        {remove, Id}
+                end;
+            false ->
+                send_result(Client, Ref, conflict),
+                ignore
+            end
+        end, Docs, OldDocLookups),
+
+    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
+    BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],
+
+    {ok, Btree2} =
+        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
+
+    {ok, Db#db{local_docs_btree = Btree2}}.
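+
+% Local docs use plain integer revisions rendered as <<"0-N">>. For example
+% (illustrative values): writing a _local doc whose revs field is
+% {0, [<<"3">>]} over a stored revision 3 succeeds and the client receives
+% {ok, {0, <<"4">>}}; any other stored revision produces a conflict.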
+
+
+commit_data(Db) ->
+    commit_data(Db, false).
+
+db_to_header(Db, Header) ->
+    Header#db_header{
+        update_seq = Db#db.update_seq,
+        docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
+        fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
+        local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
+        security_ptr = Db#db.security_ptr,
+        revs_limit = Db#db.revs_limit}.
+
+commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
+    Db#db{waiting_delayed_commit=erlang:send_after(1000,self(),delayed_commit)};
+commit_data(Db, true) ->
+    Db;
+commit_data(Db, _) ->
+    #db{
+        updater_fd = Fd,
+        filepath = Filepath,
+        header = OldHeader,
+        fsync_options = FsyncOptions,
+        waiting_delayed_commit = Timer
+    } = Db,
+    if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
+    case db_to_header(Db, OldHeader) of
+    OldHeader ->
+        Db#db{waiting_delayed_commit=nil};
+    Header ->
+        case lists:member(before_header, FsyncOptions) of
+        true -> ok = couch_file:sync(Filepath);
+        _    -> ok
+        end,
+
+        ok = couch_file:write_header(Fd, Header),
+
+        case lists:member(after_header, FsyncOptions) of
+        true -> ok = couch_file:sync(Filepath);
+        _    -> ok
+        end,
+
+        Db#db{waiting_delayed_commit=nil,
+            header=Header,
+            committed_update_seq=Db#db.update_seq}
+    end.
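+
+% To sketch the delayed-commit flow: commit_data(Db, true) never touches
+% disk itself; the first such call only arms a one-second timer, and the
+% resulting delayed_commit message is handled by handle_info/2, which calls
+% commit_data(Db) to fsync (per fsync_options), write the header and record
+% committed_update_seq.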
+
+
+copy_doc_attachments(#db{updater_fd = SrcFd} = SrcDb, SrcSp, DestFd) ->
+    {ok, {BodyData, BinInfos0}} = couch_db:read_doc(SrcDb, SrcSp),
+    BinInfos = case BinInfos0 of
+    _ when is_binary(BinInfos0) ->
+        couch_compress:decompress(BinInfos0);
+    _ when is_list(BinInfos0) ->
+        % pre 1.2 file format
+        BinInfos0
+    end,
+    % copy the bin values
+    NewBinInfos = lists:map(
+        fun({Name, Type, BinSp, AttLen, RevPos, Md5}) ->
+            % 010 UPGRADE CODE
+            {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity};
+        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) ->
+            {NewBinSp, AttLen, _, Md5, _IdentityMd5} =
+                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+            Enc = case Enc1 of
+            true ->
+                % 0110 UPGRADE CODE
+                gzip;
+            false ->
+                % 0110 UPGRADE CODE
+                identity;
+            _ ->
+                Enc1
+            end,
+            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc}
+        end, BinInfos),
+    {BodyData, NewBinInfos}.
+
+copy_docs(Db, #db{updater_fd = DestFd} = NewDb, InfoBySeq0, Retry) ->
+    % COUCHDB-968, make sure we prune duplicates during compaction
+    InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end,
+        InfoBySeq0),
+    Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
+    LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
+
+    NewFullDocInfos1 = lists:map(
+        fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
+            Info#full_doc_info{rev_tree=couch_key_tree:map(
+                fun(_, _, branch) ->
+                    ?REV_MISSING;
+                (_Rev, LeafVal, leaf) ->
+                    IsDel = element(1, LeafVal),
+                    Sp = element(2, LeafVal),
+                    Seq = element(3, LeafVal),
+                    {_Body, AttsInfo} = Summary = copy_doc_attachments(
+                        Db, Sp, DestFd),
+                    SummaryChunk = make_doc_summary(NewDb, Summary),
+                    {ok, Pos, SummarySize} = couch_file:append_raw_chunk(
+                        DestFd, SummaryChunk),
+                    TotalLeafSize = lists:foldl(
+                        fun({_, _, _, AttLen, _, _, _, _}, S) -> S + AttLen end,
+                        SummarySize, AttsInfo),
+                    {IsDel, Pos, Seq, TotalLeafSize}
+                end, RevTree)}
+        end, LookupResults),
+
+    NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1),
+    NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
+    RemoveSeqs =
+    case Retry of
+    false ->
+        [];
+    true ->
+        % We are retrying a compaction, meaning the documents we are copying may
+        % already exist in our file and must be removed from the by_seq index.
+        Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids),
+        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
+    end,
+
+    {ok, DocInfoBTree} = couch_btree:add_remove(
+            NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs),
+    {ok, FullDocInfoBTree} = couch_btree:add_remove(
+            NewDb#db.fulldocinfo_by_id_btree, NewFullDocInfos, []),
+    update_compact_task(length(NewFullDocInfos)),
+    NewDb#db{ fulldocinfo_by_id_btree=FullDocInfoBTree,
+              docinfo_by_seq_btree=DocInfoBTree}.
+
+
+
+copy_compact(Db, NewDb0, Retry) ->
+    FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
+    Compression = couch_compress:get_compression_method(),
+    NewDb = NewDb0#db{fsync_options=FsyncOptions, compression=Compression},
+    TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
+    BufferSize = list_to_integer(
+        couch_config:get("database_compaction", "doc_buffer_size", "524288")),
+    CheckpointAfter = couch_util:to_integer(
+        couch_config:get("database_compaction", "checkpoint_after",
+            BufferSize * 10)),
+
+    EnumBySeqFun =
+    fun(#doc_info{high_seq=Seq}=DocInfo, _Offset,
+        {AccNewDb, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
+
+        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
+        if AccUncopiedSize2 >= BufferSize ->
+            NewDb2 = copy_docs(
+                Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
+            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
+            if AccCopiedSize2 >= CheckpointAfter ->
+                {ok, {commit_data(NewDb2#db{update_seq = Seq}), [], 0, 0}};
+            true ->
+                {ok, {NewDb2#db{update_seq = Seq}, [], 0, AccCopiedSize2}}
+            end;
+        true ->
+            {ok, {AccNewDb, [DocInfo | AccUncopied], AccUncopiedSize2,
+                AccCopiedSize}}
+        end
+    end,
+
+    TaskProps0 = [
+        {type, database_compaction},
+        {database, Db#db.name},
+        {progress, 0},
+        {changes_done, 0},
+        {total_changes, TotalChanges}
+    ],
+    case Retry and couch_task_status:is_task_added() of
+    true ->
+        couch_task_status:update([
+            {retry, true},
+            {progress, 0},
+            {changes_done, 0},
+            {total_changes, TotalChanges}
+        ]);
+    false ->
+        couch_task_status:add_task(TaskProps0),
+        couch_task_status:set_update_frequency(500)
+    end,
+
+    {ok, _, {NewDb2, Uncopied, _, _}} =
+        couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun,
+            {NewDb, [], 0, 0},
+            [{start_key, NewDb#db.update_seq + 1}]),
+
+    NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
+
+    % copy misc header values
+    if NewDb3#db.security /= Db#db.security ->
+        {ok, Ptr, _} = couch_file:append_term(
+            NewDb3#db.updater_fd, Db#db.security,
+            [{compression, NewDb3#db.compression}]),
+        NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
+    true ->
+        NewDb4 = NewDb3
+    end,
+
+    commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
+
+start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) ->
+    CompactFile = Filepath ++ ".compact",
+    ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
+    case couch_file:open(CompactFile, [nologifmissing]) of
+    {ok, Fd} ->
+        Retry = true,
+        case couch_file:read_header(Fd) of
+        {ok, Header} ->
+            ok;
+        no_valid_header ->
+            ok = couch_file:write_header(Fd, Header=#db_header{})
+        end;
+    {error, enoent} ->
+        {ok, Fd} = couch_file:open(CompactFile, [create]),
+        Retry = false,
+        ok = couch_file:write_header(Fd, Header=#db_header{})
+    end,
+    ReaderFd = open_reader_fd(CompactFile, Db#db.options),
+    NewDb = init_db(Name, CompactFile, Fd, ReaderFd, Header, Db#db.options),
+    NewDb2 = if PurgeSeq > 0 ->
+        {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
+        {ok, Pointer, _} = couch_file:append_term(
+            Fd, PurgedIdsRevs, [{compression, NewDb#db.compression}]),
+        NewDb#db{header=Header#db_header{purge_seq=PurgeSeq, purged_docs=Pointer}};
+    true ->
+        NewDb
+    end,
+    unlink(Fd),
+
+    NewDb3 = copy_compact(Db, NewDb2, Retry),
+    close_db(NewDb3),
+    case gen_server:call(
+        Db#db.update_pid, {compact_done, CompactFile}, infinity) of
+    ok ->
+        ok;
+    {retry, CurrentDb} ->
+        start_copy_compact(CurrentDb)
+    end.
+
+update_compact_task(NumChanges) ->
+    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
+    Changes2 = Changes + NumChanges,
+    Progress = case Total of
+    0 ->
+        0;
+    _ ->
+        (Changes2 * 100) div Total
+    end,
+    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
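+
+% Worked example with invented numbers: with 40 changes already done out of
+% 200 total, update_compact_task(10) reports changes_done = 50 and
+% progress = (50 * 100) div 200 = 25.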
+
+make_doc_summary(#db{compression = Comp}, {Body0, Atts0}) ->
+    Body = case couch_compress:is_compressed(Body0, Comp) of
+    true ->
+        Body0;
+    false ->
+        % pre 1.2 database file format
+        couch_compress:compress(Body0, Comp)
+    end,
+    Atts = case couch_compress:is_compressed(Atts0, Comp) of
+    true ->
+        Atts0;
+    false ->
+        couch_compress:compress(Atts0, Comp)
+    end,
+    SummaryBin = ?term_to_bin({Body, Atts}),
+    couch_file:assemble_file_chunk(SummaryBin, couch_util:md5(SummaryBin)).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_doc.erl
----------------------------------------------------------------------
diff --git a/src/couch_doc.erl b/src/couch_doc.erl
new file mode 100644
index 0000000..4047370
--- /dev/null
+++ b/src/couch_doc.erl
@@ -0,0 +1,650 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc).
+
+-export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
+-export([att_foldl/3,range_att_foldl/5,att_foldl_decode/3,get_validate_doc_fun/1]).
+-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
+-export([validate_docid/1]).
+-export([doc_from_multi_part_stream/2]).
+-export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
+-export([abort_multi_part_stream/1]).
+-export([to_path/1]).
+-export([mp_parse_doc/2]).
+-export([with_ejson_body/1]).
+
+-include("couch_db.hrl").
+
+-spec to_path(#doc{}) -> path().
+to_path(#doc{revs={Start, RevIds}}=Doc) ->
+    [Branch] = to_branch(Doc, lists:reverse(RevIds)),
+    {Start - length(RevIds) + 1, Branch}.
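+
+% A hypothetical example (rev ids shortened for readability): a doc whose
+% revs are {3, [<<"c">>, <<"b">>, <<"a">>]} (newest rev id first) maps to
+% the single-branch path
+%
+%   {1, {<<"a">>, ?REV_MISSING,
+%           [{<<"b">>, ?REV_MISSING,
+%               [{<<"c">>, Doc, []}]}]}}
+%
+% i.e. the branch starts at position 1 and only the leaf carries the doc.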
+
+-spec to_branch(#doc{}, [RevId::binary()]) -> [branch()].
+to_branch(Doc, [RevId]) ->
+    [{RevId, Doc, []}];
+to_branch(Doc, [RevId | Rest]) ->
+    [{RevId, ?REV_MISSING, to_branch(Doc, Rest)}].
+
+% helpers used by to_json_obj
+to_json_rev(0, []) ->
+    [];
+to_json_rev(Start, [FirstRevId|_]) ->
+    [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
+
+to_json_body(true, {Body}) ->
+    Body ++ [{<<"_deleted">>, true}];
+to_json_body(false, {Body}) ->
+    Body.
+
+to_json_revisions(Options, Start, RevIds) ->
+    case lists:member(revs, Options) of
+    false -> [];
+    true ->
+        [{<<"_revisions">>, {[{<<"start">>, Start},
+                {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
+    end.
+
+revid_to_str(RevId) when size(RevId) =:= 16 ->
+    ?l2b(couch_util:to_hex(RevId));
+revid_to_str(RevId) ->
+    RevId.
+
+rev_to_str({Pos, RevId}) ->
+    ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
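+
+% For example (values invented): a 16-byte rev id is hex-encoded, while
+% anything of another size passes through unchanged:
+%
+%   rev_to_str({1, <<"0123456789abcdef">>}).
+%   % => <<"1-30313233343536373839616263646566">>
+%   rev_to_str({2, <<"deadbeef">>}).
+%   % => <<"2-deadbeef">>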
+
+
+revs_to_strs([]) ->
+    [];
+revs_to_strs([{Pos, RevId}| Rest]) ->
+    [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
+
+to_json_meta(Meta) ->
+    lists:map(
+        fun({revs_info, Start, RevsInfo}) ->
+            {JsonRevsInfo, _Pos} = lists:mapfoldl(
+                fun({RevId, Status}, PosAcc) ->
+                    JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
+                        {<<"status">>, ?l2b(atom_to_list(Status))}]},
+                    {JsonObj, PosAcc - 1}
+                end, Start, RevsInfo),
+            {<<"_revs_info">>, JsonRevsInfo};
+        ({local_seq, Seq}) ->
+            {<<"_local_seq">>, Seq};
+        ({conflicts, Conflicts}) ->
+            {<<"_conflicts">>, revs_to_strs(Conflicts)};
+        ({deleted_conflicts, DConflicts}) ->
+            {<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}
+        end, Meta).
+
+to_json_attachments(Attachments, Options) ->
+    to_json_attachments(
+        Attachments,
+        lists:member(attachments, Options),
+        lists:member(follows, Options),
+        lists:member(att_encoding_info, Options)
+    ).
+
+to_json_attachments([], _OutputData, _DataToFollow, _ShowEncInfo) ->
+    [];
+to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) ->
+    AttProps = lists:map(
+        fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) ->
+            {Att#att.name, {[
+                {<<"content_type">>, Att#att.type},
+                {<<"revpos">>, Att#att.revpos}] ++
+                case Att#att.md5 of
+                    <<>> ->
+                        [];
+                    Md5 ->
+                        EncodedMd5 = base64:encode(Md5),
+                        [{<<"digest">>, <<"md5-",EncodedMd5/binary>>}]
+                end ++
+                if not OutputData orelse Att#att.data == stub ->
+                    [{<<"length">>, DiskLen}, {<<"stub">>, true}];
+                true ->
+                    if DataToFollow ->
+                        [{<<"length">>, DiskLen}, {<<"follows">>, true}];
+                    true ->
+                        AttData = case Enc of
+                        gzip ->
+                            zlib:gunzip(att_to_bin(Att));
+                        identity ->
+                            att_to_bin(Att)
+                        end,
+                        [{<<"data">>, base64:encode(AttData)}]
+                    end
+                end ++
+                    case {ShowEncInfo, Enc} of
+                    {false, _} ->
+                        [];
+                    {true, identity} ->
+                        [];
+                    {true, _} ->
+                        [
+                            {<<"encoding">>, couch_util:to_binary(Enc)},
+                            {<<"encoded_length">>, AttLen}
+                        ]
+                    end
+            }}
+        end, Atts),
+    [{<<"_attachments">>, {AttProps}}].
+
+to_json_obj(Doc, Options) ->
+    doc_to_json_obj(with_ejson_body(Doc), Options).
+
+doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
+            meta=Meta}=Doc, Options) ->
+    {[{<<"_id">>, Id}]
+        ++ to_json_rev(Start, RevIds)
+        ++ to_json_body(Del, Body)
+        ++ to_json_revisions(Options, Start, RevIds)
+        ++ to_json_meta(Meta)
+        ++ to_json_attachments(Doc#doc.atts, Options)
+    }.
+
+from_json_obj({Props}) ->
+    transfer_fields(Props, #doc{body=[]});
+
+from_json_obj(_Other) ->
+    throw({bad_request, "Document must be a JSON object"}).
+
+parse_revid(RevId) when size(RevId) =:= 32 ->
+    RevInt = erlang:list_to_integer(?b2l(RevId), 16),
+    <<RevInt:128>>;
+parse_revid(RevId) when length(RevId) =:= 32 ->
+    RevInt = erlang:list_to_integer(RevId, 16),
+    <<RevInt:128>>;
+parse_revid(RevId) when is_binary(RevId) ->
+    RevId;
+parse_revid(RevId) when is_list(RevId) ->
+    ?l2b(RevId).
+
+
+parse_rev(Rev) when is_binary(Rev) ->
+    parse_rev(?b2l(Rev));
+parse_rev(Rev) when is_list(Rev) ->
+    SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
+    case SplitRev of
+        {Pos, [$- | RevId]} -> {list_to_integer(Pos), parse_revid(RevId)};
+        _Else -> throw({bad_request, <<"Invalid rev format">>})
+    end;
+parse_rev(_BadRev) ->
+    throw({bad_request, <<"Invalid rev format">>}).
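+
+% Examples (values invented): parse_rev splits on the first dash, so
+%
+%   parse_rev(<<"2-deadbeef">>).
+%   % => {2, <<"deadbeef">>}
+%
+% and a 32-character hex rev id is packed back into a 16-byte binary by
+% parse_revid/1. A rev without a dash throws
+% {bad_request, <<"Invalid rev format">>}.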
+
+parse_revs([]) ->
+    [];
+parse_revs([Rev | Rest]) ->
+    [parse_rev(Rev) | parse_revs(Rest)].
+
+
+validate_docid(<<"">>) ->
+    throw({bad_request, <<"Document id must not be empty">>});
+validate_docid(Id) when is_binary(Id) ->
+    case couch_util:validate_utf8(Id) of
+        false -> throw({bad_request, <<"Document id must be valid UTF-8">>});
+        true -> ok
+    end,
+    case Id of
+    <<"_design/", _/binary>> -> ok;
+    <<"_local/", _/binary>> -> ok;
+    <<"_", _/binary>> ->
+        throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
+    _Else -> ok
+    end;
+validate_docid(Id) ->
+    ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
+    throw({bad_request, <<"Document id must be a string">>}).
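+
+% For example: validate_docid(<<"mydoc">>) and
+% validate_docid(<<"_design/app">>) return ok, while
+% validate_docid(<<"_secret">>) throws the reserved-underscore bad_request
+% error above.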
+
+transfer_fields([], #doc{body=Fields}=Doc) ->
+    % convert fields back to json object
+    Doc#doc{body={lists:reverse(Fields)}};
+
+transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
+    validate_docid(Id),
+    transfer_fields(Rest, Doc#doc{id=Id});
+
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
+    {Pos, RevId} = parse_rev(Rev),
+    transfer_fields(Rest,
+            Doc#doc{revs={Pos, [RevId]}});
+
+transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
+    % we already got the rev from the _revisions
+    transfer_fields(Rest,Doc);
+
+transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
+    Atts = lists:map(fun({Name, {BinProps}}) ->
+        Md5 = case couch_util:get_value(<<"digest">>, BinProps) of
+            <<"md5-",EncodedMd5/binary>> ->
+                base64:decode(EncodedMd5);
+            _ ->
+               <<>>
+        end,
+        case couch_util:get_value(<<"stub">>, BinProps) of
+        true ->
+            Type = couch_util:get_value(<<"content_type">>, BinProps),
+            RevPos = couch_util:get_value(<<"revpos">>, BinProps, nil),
+            DiskLen = couch_util:get_value(<<"length">>, BinProps),
+            {Enc, EncLen} = att_encoding_info(BinProps),
+            #att{name=Name, data=stub, type=Type, att_len=EncLen,
+                disk_len=DiskLen, encoding=Enc, revpos=RevPos, md5=Md5};
+        _ ->
+            Type = couch_util:get_value(<<"content_type">>, BinProps,
+                    ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
+            RevPos = couch_util:get_value(<<"revpos">>, BinProps, 0),
+            case couch_util:get_value(<<"follows">>, BinProps) of
+            true ->
+                DiskLen = couch_util:get_value(<<"length">>, BinProps),
+                {Enc, EncLen} = att_encoding_info(BinProps),
+                #att{name=Name, data=follows, type=Type, encoding=Enc,
+                    att_len=EncLen, disk_len=DiskLen, revpos=RevPos, md5=Md5};
+            _ ->
+                Value = couch_util:get_value(<<"data">>, BinProps),
+                Bin = base64:decode(Value),
+                LenBin = size(Bin),
+                #att{name=Name, data=Bin, type=Type, att_len=LenBin,
+                        disk_len=LenBin, revpos=RevPos}
+            end
+        end
+    end, JsonBins),
+    transfer_fields(Rest, Doc#doc{atts=Atts});
+
+transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
+    RevIds = couch_util:get_value(<<"ids">>, Props),
+    Start = couch_util:get_value(<<"start">>, Props),
+    if not is_integer(Start) ->
+        throw({doc_validation, "_revisions.start isn't an integer."});
+    not is_list(RevIds) ->
+        throw({doc_validation, "_revisions.ids isn't an array."});
+    true ->
+        ok
+    end,
+    [throw({doc_validation, "RevId isn't a string"}) ||
+            RevId <- RevIds, not is_binary(RevId)],
+    RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
+    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
+
+transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
+    transfer_fields(Rest, Doc#doc{deleted=B});
+
+% ignored fields
+transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
+    transfer_fields(Rest, Doc);
+transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
+    transfer_fields(Rest, Doc);
+transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
+    transfer_fields(Rest, Doc);
+transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
+    transfer_fields(Rest, Doc);
+
+% special fields for replication documents
+transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
+    #doc{body=Fields} = Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
+    #doc{body=Fields} = Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
+    #doc{body=Fields} = Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
+    #doc{body=Fields} = Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
+    #doc{body=Fields} = Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+
+% unknown special field
+transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
+    throw({doc_validation,
+            ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
+
+transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
+    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
+
+att_encoding_info(BinProps) ->
+    DiskLen = couch_util:get_value(<<"length">>, BinProps),
+    case couch_util:get_value(<<"encoding">>, BinProps) of
+    undefined ->
+        {identity, DiskLen};
+    Enc ->
+        EncodedLen = couch_util:get_value(<<"encoded_length">>, BinProps, DiskLen),
+        {list_to_existing_atom(?b2l(Enc)), EncodedLen}
+    end.
+
+to_doc_info(FullDocInfo) ->
+    {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
+    DocInfo.
+
+max_seq(Tree, UpdateSeq) ->
+    FoldFun = fun({_Pos, _Key}, Value, _Type, MaxOldSeq) ->
+        case Value of
+            {_Deleted, _DiskPos, OldTreeSeq} ->
+                % Older versions didn't track data sizes.
+                erlang:max(MaxOldSeq, OldTreeSeq);
+            {_Deleted, _DiskPos, OldTreeSeq, _Size} ->
+                erlang:max(MaxOldSeq, OldTreeSeq);
+            _ ->
+                MaxOldSeq
+        end
+    end,
+    couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
+
+to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=Seq}) ->
+    RevInfosAndPath = [
+        {#rev_info{
+            deleted = element(1, LeafVal),
+            body_sp = element(2, LeafVal),
+            seq = element(3, LeafVal),
+            rev = {Pos, RevId}
+        }, Path} || {LeafVal, {Pos, [RevId | _]} = Path} <-
+            couch_key_tree:get_all_leafs(Tree)
+    ],
+    SortedRevInfosAndPath = lists:sort(
+            fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
+                {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
+            % sort descending by {not deleted, rev}
+            {not DeletedA, RevA} > {not DeletedB, RevB}
+        end, RevInfosAndPath),
+    [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
+    RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
+    {#doc_info{id=Id, high_seq=max_seq(Tree, Seq), revs=RevInfos}, WinPath}.
+
+
+
+
+att_foldl(#att{data=Bin}, Fun, Acc) when is_binary(Bin) ->
+    Fun(Bin, Acc);
+att_foldl(#att{data={Fd,Sp},md5=Md5}, Fun, Acc) ->
+    couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
+att_foldl(#att{data=DataFun,att_len=Len}, Fun, Acc) when is_function(DataFun) ->
+    fold_streamed_data(DataFun, Len, Fun, Acc).
+
+range_att_foldl(#att{data={Fd,Sp}}, From, To, Fun, Acc) ->
+    couch_stream:range_foldl(Fd, Sp, From, To, Fun, Acc).
+
+att_foldl_decode(#att{data={Fd,Sp},md5=Md5,encoding=Enc}, Fun, Acc) ->
+    couch_stream:foldl_decode(Fd, Sp, Md5, Enc, Fun, Acc);
+att_foldl_decode(#att{data=Fun2,att_len=Len, encoding=identity}, Fun, Acc) ->
+    fold_streamed_data(Fun2, Len, Fun, Acc).
+
+att_to_bin(#att{data=Bin}) when is_binary(Bin) ->
+    Bin;
+att_to_bin(#att{data=Iolist}) when is_list(Iolist) ->
+    iolist_to_binary(Iolist);
+att_to_bin(#att{data={_Fd,_Sp}}=Att) ->
+    iolist_to_binary(
+        lists:reverse(att_foldl(
+                Att,
+                fun(Bin,Acc) -> [Bin|Acc] end,
+                []
+        ))
+    );
+att_to_bin(#att{data=DataFun, att_len=Len}) when is_function(DataFun) ->
+    iolist_to_binary(
+        lists:reverse(fold_streamed_data(
+            DataFun,
+            Len,
+            fun(Data, Acc) -> [Data | Acc] end,
+            []
+        ))
+    ).
+
+get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
+    case couch_util:get_value(<<"validate_doc_update">>, Props) of
+    undefined ->
+        nil;
+    _Else ->
+        fun(EditDoc, DiskDoc, Ctx, SecObj) ->
+            couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
+        end
+    end.
+
+
+has_stubs(#doc{atts=Atts}) ->
+    has_stubs(Atts);
+has_stubs([]) ->
+    false;
+has_stubs([#att{data=stub}|_]) ->
+    true;
+has_stubs([_Att|Rest]) ->
+    has_stubs(Rest).
+
+merge_stubs(#doc{id = Id}, nil) ->
+    throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
+merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
+    BinDict = dict:from_list([{Name, Att} || #att{name=Name}=Att <- DiskBins]),
+    MergedBins = lists:map(
+        fun(#att{name=Name, data=stub, revpos=StubRevPos}) ->
+            case dict:find(Name, BinDict) of
+            {ok, #att{revpos=DiskRevPos}=DiskAtt}
+                    when DiskRevPos == StubRevPos orelse StubRevPos == nil ->
+                DiskAtt;
+            _ ->
+                throw({missing_stub,
+                        <<"id:", Id/binary, ", name:", Name/binary>>})
+            end;
+        (Att) ->
+            Att
+        end, MemBins),
+    StubsDoc#doc{atts= MergedBins}.
+
+fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
+    Acc;
+fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
+    Bin = RcvFun(),
+    ResultAcc = Fun(Bin, Acc),
+    fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
+
+len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
+    AttsSize = lists:foldl(fun(Att, AccAttsSize) ->
+            #att{
+                data=Data,
+                name=Name,
+                att_len=AttLen,
+                disk_len=DiskLen,
+                type=Type,
+                encoding=Encoding
+            } = Att,
+            case Data of
+            stub ->
+                AccAttsSize;
+            _ ->
+                AccAttsSize +
+                4 + % "\r\n\r\n"
+                case SendEncodedAtts of
+                true ->
+                    % header
+                    length(integer_to_list(AttLen)) +
+                    AttLen;
+                _ ->
+                    % header
+                    length(integer_to_list(DiskLen)) +
+                    DiskLen
+                end +
+                4 + % "\r\n--"
+                size(Boundary) +
+
+                % attachment headers
+                % (the length of the Content-Length has already been set)
+                size(Name) +
+                size(Type) +
+                length("\r\nContent-Disposition: attachment; filename=\"\"") +
+                length("\r\nContent-Type: ") +
+                length("\r\nContent-Length: ") +
+                case Encoding of
+                identity ->
+                    0;
+                _ ->
+                    length(atom_to_list(Encoding)) +
+                    length("\r\nContent-Encoding: ")
+                end
+            end
+        end, 0, Atts),
+    if AttsSize == 0 ->
+        {<<"application/json">>, iolist_size(JsonBytes)};
+    true ->
+        {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
+            2 + % "--"
+            size(Boundary) +
+            36 + % "\r\ncontent-type: application/json\r\n\r\n"
+            iolist_size(JsonBytes) +
+            4 + % "\r\n--"
+            size(Boundary) +
+            AttsSize +
+            2 % "--"
+            }
+    end.
+
+doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
+    SendEncodedAtts) ->
+    case lists:any(fun(#att{data=Data})-> Data /= stub end, Atts) of
+    true ->
+        WriteFun([<<"--", Boundary/binary,
+                "\r\nContent-Type: application/json\r\n\r\n">>,
+                JsonBytes, <<"\r\n--", Boundary/binary>>]),
+        atts_to_mp(Atts, Boundary, WriteFun, SendEncodedAtts);
+    false ->
+        WriteFun(JsonBytes)
+    end.
+
+atts_to_mp([], _Boundary, WriteFun, _SendEncAtts) ->
+    WriteFun(<<"--">>);
+atts_to_mp([#att{data=stub} | RestAtts], Boundary, WriteFun,
+        SendEncodedAtts) ->
+    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts);
+atts_to_mp([Att | RestAtts], Boundary, WriteFun,
+        SendEncodedAtts)  ->
+    #att{
+        name=Name,
+        att_len=AttLen,
+        disk_len=DiskLen,
+        type=Type,
+        encoding=Encoding
+    } = Att,
+
+    % write headers
+    LengthBin = case SendEncodedAtts of
+    true -> list_to_binary(integer_to_list(AttLen));
+    false -> list_to_binary(integer_to_list(DiskLen))
+    end,
+    WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
+    WriteFun(<<"\r\nContent-Type: ", Type/binary>>),
+    WriteFun(<<"\r\nContent-Length: ", LengthBin/binary>>),
+    case Encoding of
+    identity ->
+        ok;
+    _ ->
+        EncodingBin = atom_to_binary(Encoding, latin1),
+        WriteFun(<<"\r\nContent-Encoding: ", EncodingBin/binary>>)
+    end,
+
+    % write data
+    WriteFun(<<"\r\n\r\n">>),
+    AttFun = case SendEncodedAtts of
+    false ->
+        fun att_foldl_decode/3;
+    true ->
+        fun att_foldl/3
+    end,
+    AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
+    WriteFun(<<"\r\n--", Boundary/binary>>),
+    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts).
+
+
+doc_from_multi_part_stream(ContentType, DataFun) ->
+    Parent = self(),
+    Parser = spawn_link(fun() ->
+        {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request(
+            ContentType, DataFun,
+            fun(Next) -> mp_parse_doc(Next, []) end),
+        unlink(Parent),
+        Parent ! {self(), finished}
+        end),
+    Ref = make_ref(),
+    Parser ! {get_doc_bytes, Ref, self()},
+    receive
+    {doc_bytes, Ref, DocBytes} ->
+        Doc = from_json_obj(?JSON_DECODE(DocBytes)),
+        % go through the attachments looking for 'follows' in the data,
+        % and replace it with a function that reads the data from the
+        % MIME stream.
+        ReadAttachmentDataFun = fun() ->
+            Parser ! {get_bytes, Ref, self()},
+            receive {bytes, Ref, Bytes} -> Bytes end
+        end,
+        Atts2 = lists:map(
+            fun(#att{data=follows}=A) ->
+                A#att{data=ReadAttachmentDataFun};
+            (A) ->
+                A
+            end, Doc#doc.atts),
+        WaitFun = fun() ->
+            receive {Parser, finished} -> ok end,
+            erlang:put(mochiweb_request_recv, true)
+        end,
+        {ok, Doc#doc{atts=Atts2}, WaitFun, Parser}
+    end.
+
+mp_parse_doc({headers, H}, []) ->
+    case couch_util:get_value("content-type", H) of
+    {"application/json", _} ->
+        fun (Next) ->
+            mp_parse_doc(Next, [])
+        end
+    end;
+mp_parse_doc({body, Bytes}, AccBytes) ->
+    fun (Next) ->
+        mp_parse_doc(Next, [Bytes | AccBytes])
+    end;
+mp_parse_doc(body_end, AccBytes) ->
+    receive {get_doc_bytes, Ref, From} ->
+        From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
+    end,
+    fun mp_parse_atts/1.
+
+mp_parse_atts(eof) ->
+    ok;
+mp_parse_atts({headers, _H}) ->
+    fun mp_parse_atts/1;
+mp_parse_atts({body, Bytes}) ->
+    receive {get_bytes, Ref, From} ->
+        From ! {bytes, Ref, Bytes}
+    end,
+    fun mp_parse_atts/1;
+mp_parse_atts(body_end) ->
+    fun mp_parse_atts/1.
+
+
+abort_multi_part_stream(Parser) ->
+    abort_multi_part_stream(Parser, erlang:monitor(process, Parser)).
+
+abort_multi_part_stream(Parser, MonRef) ->
+    case is_process_alive(Parser) of
+    true ->
+        Parser ! {get_bytes, nil, self()},
+        receive
+        {bytes, nil, _Bytes} ->
+             abort_multi_part_stream(Parser, MonRef);
+        {'DOWN', MonRef, _, _, _} ->
+             ok
+        end;
+    false ->
+        erlang:demonitor(MonRef, [flush])
+    end.
+
+
+with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
+    Doc#doc{body = couch_compress:decompress(Body)};
+with_ejson_body(#doc{body = {_}} = Doc) ->
+    Doc.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_ejson_compare.erl
----------------------------------------------------------------------
diff --git a/src/couch_ejson_compare.erl b/src/couch_ejson_compare.erl
new file mode 100644
index 0000000..f46ec35
--- /dev/null
+++ b/src/couch_ejson_compare.erl
@@ -0,0 +1,81 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ejson_compare).
+
+-export([less/2, less_json_ids/2, less_json/2]).
+
+less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
+    case less(JsonA, JsonB) of
+    0 ->
+        IdA < IdB;
+    Result ->
+        Result < 0
+    end.
+
+less_json(A,B) ->
+    less(A, B) < 0.
+
+less(A,A)                                 -> 0;
+
+less(A,B) when is_atom(A), is_atom(B)     -> atom_sort(A) - atom_sort(B);
+less(A,_) when is_atom(A)                 -> -1;
+less(_,B) when is_atom(B)                 -> 1;
+
+less(A,B) when is_number(A), is_number(B) -> A - B;
+less(A,_) when is_number(A)               -> -1;
+less(_,B) when is_number(B)               -> 1;
+
+less(A,B) when is_binary(A), is_binary(B) -> couch_collate:collate(A,B);
+less(A,_) when is_binary(A)               -> -1;
+less(_,B) when is_binary(B)               -> 1;
+
+less(A,B) when is_list(A), is_list(B)     -> less_list(A,B);
+less(A,_) when is_list(A)                 -> -1;
+less(_,B) when is_list(B)                 -> 1;
+
+less({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
+less({A},_) when is_list(A)               -> -1;
+less(_,{B}) when is_list(B)               -> 1.
+
+atom_sort(null) -> 1;
+atom_sort(false) -> 2;
+atom_sort(true) -> 3.
+
+less_props([], [_|_]) ->
+    -1;
+less_props(_, []) ->
+    1;
+less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+    case couch_collate:collate(AKey, BKey) of
+    0 ->
+        case less(AValue, BValue) of
+        0 ->
+            less_props(RestA, RestB);
+        Result ->
+            Result
+        end;
+    Result ->
+        Result
+    end.
+
+less_list([], [_|_]) ->
+    -1;
+less_list(_, []) ->
+    1;
+less_list([A|RestA], [B|RestB]) ->
+    case less(A,B) of
+    0 ->
+        less_list(RestA, RestB);
+    Result ->
+        Result
+    end.
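+
+% Putting the clauses together, the collation order is
+% null < false < true < numbers < strings < arrays < objects. For example
+% (values invented): less(null, true) < 0, less(1, <<"1">>) < 0 and
+% less([1], {[{<<"a">>, 1}]}) < 0, while less(true, false) > 0.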

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_event_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_event_sup.erl b/src/couch_event_sup.erl
new file mode 100644
index 0000000..07c4879
--- /dev/null
+++ b/src/couch_event_sup.erl
@@ -0,0 +1,73 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% The purpose of this module is to allow event handlers to participate in
+%% Erlang supervisor trees. It provides a monitorable process that crashes
+%% if the event handler fails. The process, when shut down, deregisters the
+%% event handler.
+
+-module(couch_event_sup).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+-export([start_link/3,start_link/4, stop/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+
+%
+% Instead of calling
+% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
+%
+% do this:
+% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
+%
+% The benefit is that the event handler is now part of the process tree and
+% can be started, restarted and shut down consistently like the rest of the
+% server components.
+%
+% And if the event handler crashes, the supervisor is notified and can
+% restart it.
+%
+% Use this form to named process:
+% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
+%
+
+start_link(EventMgr, EventHandler, Args) ->
+    gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+start_link(ServerName, EventMgr, EventHandler, Args) ->
+    gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+stop(Pid) ->
+    gen_server:cast(Pid, stop).
+
+init({EventMgr, EventHandler, Args}) ->
+    case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
+    ok ->
+        {ok, {EventMgr, EventHandler}};
+    {stop, Error} ->
+        {stop, Error}
+    end.
+
+terminate(_Reason, _State) ->
+    ok.
+
+handle_call(_Whatever, _From, State) ->
+    {ok, State}.
+
+handle_cast(stop, State) ->
+    {stop, normal, State}.
+
+handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
+    {stop, Reason, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
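
As a sketch of how this slots into a supervision tree, a hypothetical
supervisor could embed the handler like this (my_event_mgr, my_handler and
the restart values are illustrative, not from this patch):

    %% In some supervisor's init/1; names are placeholders.
    init([]) ->
        EventHandler = {my_handler,
                        {couch_event_sup, start_link,
                         [my_event_mgr, my_handler, []]},
                        permanent, brutal_kill, worker, [couch_event_sup]},
        {ok, {{one_for_one, 10, 3600}, [EventHandler]}}.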

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_external_manager.erl
----------------------------------------------------------------------
diff --git a/src/couch_external_manager.erl b/src/couch_external_manager.erl
new file mode 100644
index 0000000..0c66ef8
--- /dev/null
+++ b/src/couch_external_manager.erl
@@ -0,0 +1,101 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_external_manager).
+-behaviour(gen_server).
+
+-export([start_link/0, execute/2, config_change/2]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]).
+
+-include("couch_db.hrl").
+
+start_link() ->
+    gen_server:start_link({local, couch_external_manager},
+        couch_external_manager, [], []).
+
+execute(UrlName, JsonReq) ->
+    Pid = gen_server:call(couch_external_manager, {get, UrlName}),
+    case Pid of
+    {error, Reason} ->
+        Reason;
+    _ ->
+        couch_external_server:execute(Pid, JsonReq)
+    end.
+
+config_change("external", UrlName) ->
+    gen_server:call(couch_external_manager, {config, UrlName}).
+
+% gen_server API
+
+init([]) ->
+    process_flag(trap_exit, true),
+    Handlers = ets:new(couch_external_manager_handlers, [set, private]),
+    couch_config:register(fun ?MODULE:config_change/2),
+    {ok, Handlers}.
+
+terminate(_Reason, Handlers) ->
+    ets:foldl(fun({_UrlName, Pid}, nil) ->
+        couch_external_server:stop(Pid),
+        nil
+    end, nil, Handlers),
+    ok.
+
+handle_call({get, UrlName}, _From, Handlers) ->
+    case ets:lookup(Handlers, UrlName) of
+    [] ->
+        case couch_config:get("external", UrlName, nil) of
+        nil ->
+            Msg = lists:flatten(
+                io_lib:format("No server configured for ~p.", [UrlName])),
+            {reply, {error, {unknown_external_server, ?l2b(Msg)}}, Handlers};
+        Command ->
+            {ok, NewPid} = couch_external_server:start_link(UrlName, Command),
+            true = ets:insert(Handlers, {UrlName, NewPid}),
+            {reply, NewPid, Handlers}
+        end;
+    [{UrlName, Pid}] ->
+        {reply, Pid, Handlers}
+    end;
+handle_call({config, UrlName}, _From, Handlers) ->
+    % A newly added handler and a handler whose command has
+    % changed are treated exactly the same.
+
+    % Shut down the old handler.
+    case ets:lookup(Handlers, UrlName) of
+    [{UrlName, Pid}] ->
+        couch_external_server:stop(Pid);
+    [] ->
+        ok
+    end,
+    % Wait for next request to boot the handler.
+    {reply, ok, Handlers}.
+
+handle_cast(_Whatever, State) ->
+    {noreply, State}.
+
+handle_info({'EXIT', Pid, normal}, Handlers) ->
+    ?LOG_INFO("EXTERNAL: Server ~p terminated normally", [Pid]),
+    % The process terminated normally without us asking - Remove Pid from the
+    % handlers table so we don't attempt to reuse it
+    ets:match_delete(Handlers, {'_', Pid}),
+    {noreply, Handlers};
+
+handle_info({'EXIT', Pid, Reason}, Handlers) ->
+    ?LOG_INFO("EXTERNAL: Server ~p died. (reason: ~p)", [Pid, Reason]),
+    % Remove Pid from the handlers table so we don't try closing
+    % it a second time in terminate/2.
+    ets:match_delete(Handlers, {'_', Pid}),
+    {stop, normal, Handlers}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
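
For reference, handler commands come from the "external" section of the ini
file, and execute/2 is keyed by that section's entry name. A hypothetical
entry (name and path are illustrative):

    [external]
    test = /usr/local/bin/my-external-handler

    %% then, from Erlang:
    Response = couch_external_manager:execute("test", JsonReq).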

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_external_server.erl
----------------------------------------------------------------------
diff --git a/src/couch_external_server.erl b/src/couch_external_server.erl
new file mode 100644
index 0000000..b52c7ff
--- /dev/null
+++ b/src/couch_external_server.erl
@@ -0,0 +1,70 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_external_server).
+-behaviour(gen_server).
+
+-export([start_link/2, stop/1, execute/2]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+% External API
+
+start_link(Name, Command) ->
+    gen_server:start_link(couch_external_server, [Name, Command], []).
+
+stop(Pid) ->
+    gen_server:cast(Pid, stop).
+
+execute(Pid, JsonReq) ->
+    {json, Json} = gen_server:call(Pid, {execute, JsonReq}, infinity),
+    ?JSON_DECODE(Json).
+
+% Gen Server Handlers
+
+init([Name, Command]) ->
+    ?LOG_INFO("EXTERNAL: Starting process for: ~s", [Name]),
+    ?LOG_INFO("COMMAND: ~s", [Command]),
+    process_flag(trap_exit, true),
+    Timeout = list_to_integer(couch_config:get("couchdb", "os_process_timeout",
+        "5000")),
+    {ok, Pid} = couch_os_process:start_link(Command, [{timeout, Timeout}]),
+    couch_config:register(fun("couchdb", "os_process_timeout", NewTimeout) ->
+        couch_os_process:set_timeout(Pid, list_to_integer(NewTimeout))
+    end),
+    {ok, {Name, Command, Pid}}.
+
+terminate(_Reason, {_Name, _Command, Pid}) ->
+    couch_os_process:stop(Pid),
+    ok.
+
+handle_call({execute, JsonReq}, _From, {Name, Command, Pid}) ->
+    {reply, couch_os_process:prompt(Pid, JsonReq), {Name, Command, Pid}}.
+
+handle_info({'EXIT', _Pid, normal}, State) ->
+    {noreply, State};
+handle_info({'EXIT', Pid, Reason}, {Name, Command, Pid}) ->
+    ?LOG_INFO("EXTERNAL: Process for ~s exiting. (reason: ~w)", [Name, Reason]),
+    {stop, Reason, {Name, Command, Pid}}.
+
+handle_cast(stop, {Name, Command, Pid}) ->
+    ?LOG_INFO("EXTERNAL: Shutting down ~s", [Name]),
+    exit(Pid, normal),
+    {stop, normal, {Name, Command, Pid}};
+handle_cast(_Whatever, State) ->
+    {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
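
The timeout read in init/1 corresponds to this ini setting; thanks to the
couch_config:register/1 callback above, changing it at runtime updates the
OS process without a restart:

    [couchdb]
    os_process_timeout = 5000 ; milliseconds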


[16/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_compaction_daemon.erl
----------------------------------------------------------------------
diff --git a/couch_compaction_daemon.erl b/couch_compaction_daemon.erl
deleted file mode 100644
index 18a51a4..0000000
--- a/couch_compaction_daemon.erl
+++ /dev/null
@@ -1,504 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compaction_daemon).
--behaviour(gen_server).
-
-% public API
--export([start_link/0, config_change/3]).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
--include("couch_db.hrl").
-
--define(CONFIG_ETS, couch_compaction_daemon_config).
-
--record(state, {
-    loop_pid
-}).
-
--record(config, {
-    db_frag = nil,
-    view_frag = nil,
-    period = nil,
-    cancel = false,
-    parallel_view_compact = false
-}).
-
--record(period, {
-    from = nil,
-    to = nil
-}).
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
-    process_flag(trap_exit, true),
-    ?CONFIG_ETS = ets:new(?CONFIG_ETS, [named_table, set, protected]),
-    ok = couch_config:register(fun ?MODULE:config_change/3),
-    load_config(),
-    Server = self(),
-    Loop = spawn_link(fun() -> compact_loop(Server) end),
-    {ok, #state{loop_pid = Loop}}.
-
-
-config_change("compactions", DbName, NewValue) ->
-    ok = gen_server:cast(?MODULE, {config_update, DbName, NewValue}).
-
-
-handle_cast({config_update, DbName, deleted}, State) ->
-    true = ets:delete(?CONFIG_ETS, ?l2b(DbName)),
-    {noreply, State};
-
-handle_cast({config_update, DbName, Config}, #state{loop_pid = Loop} = State) ->
-    case parse_config(DbName, Config) of
-    {ok, NewConfig} ->
-        WasEmpty = (ets:info(?CONFIG_ETS, size) =:= 0),
-        true = ets:insert(?CONFIG_ETS, {?l2b(DbName), NewConfig}),
-        case WasEmpty of
-        true ->
-            Loop ! {self(), have_config};
-        false ->
-            ok
-        end;
-    error ->
-        ok
-    end,
-    {noreply, State}.
-
-
-handle_call(Msg, _From, State) ->
-    {stop, {unexpected_call, Msg}, State}.
-
-
-handle_info({'EXIT', Pid, Reason}, #state{loop_pid = Pid} = State) ->
-    {stop, {compaction_loop_died, Reason}, State}.
-
-
-terminate(_Reason, _State) ->
-    true = ets:delete(?CONFIG_ETS).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-compact_loop(Parent) ->
-    {ok, _} = couch_server:all_databases(
-        fun(DbName, Acc) ->
-            case ets:info(?CONFIG_ETS, size) =:= 0 of
-            true ->
-                {stop, Acc};
-            false ->
-                case get_db_config(DbName) of
-                nil ->
-                    ok;
-                {ok, Config} ->
-                    case check_period(Config) of
-                    true ->
-                        maybe_compact_db(DbName, Config);
-                    false ->
-                        ok
-                    end
-                end,
-                {ok, Acc}
-            end
-        end, ok),
-    case ets:info(?CONFIG_ETS, size) =:= 0 of
-    true ->
-        receive {Parent, have_config} -> ok end;
-    false ->
-        PausePeriod = list_to_integer(
-            couch_config:get("compaction_daemon", "check_interval", "300")),
-        ok = timer:sleep(PausePeriod * 1000)
-    end,
-    compact_loop(Parent).
-
-
-maybe_compact_db(DbName, Config) ->
-    case (catch couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}])) of
-    {ok, Db} ->
-        DDocNames = db_ddoc_names(Db),
-        case can_db_compact(Config, Db) of
-        true ->
-            {ok, DbCompactPid} = couch_db:start_compact(Db),
-            TimeLeft = compact_time_left(Config),
-            case Config#config.parallel_view_compact of
-            true ->
-                ViewsCompactPid = spawn_link(fun() ->
-                    maybe_compact_views(DbName, DDocNames, Config)
-                end),
-                ViewsMonRef = erlang:monitor(process, ViewsCompactPid);
-            false ->
-                ViewsCompactPid = nil,
-                ViewsMonRef = nil
-            end,
-            DbMonRef = erlang:monitor(process, DbCompactPid),
-            receive
-            {'DOWN', DbMonRef, process, _, normal} ->
-                couch_db:close(Db),
-                case Config#config.parallel_view_compact of
-                true ->
-                    ok;
-                false ->
-                    maybe_compact_views(DbName, DDocNames, Config)
-                end;
-            {'DOWN', DbMonRef, process, _, Reason} ->
-                couch_db:close(Db),
-                ?LOG_ERROR("Compaction daemon - an error ocurred while"
-                    " compacting the database `~s`: ~p", [DbName, Reason])
-            after TimeLeft ->
-                ?LOG_INFO("Compaction daemon - canceling compaction for database"
-                    " `~s` because it's exceeding the allowed period.",
-                    [DbName]),
-                erlang:demonitor(DbMonRef, [flush]),
-                ok = couch_db:cancel_compact(Db),
-                couch_db:close(Db)
-            end,
-            case ViewsMonRef of
-            nil ->
-                ok;
-            _ ->
-                receive
-                {'DOWN', ViewsMonRef, process, _, _Reason} ->
-                    ok
-                after TimeLeft + 1000 ->
-                    % Under normal circumstances, the view compaction process
-                    % should have finished already.
-                    erlang:demonitor(ViewsMonRef, [flush]),
-                    unlink(ViewsCompactPid),
-                    exit(ViewsCompactPid, kill)
-                end
-            end;
-        false ->
-            couch_db:close(Db),
-            maybe_compact_views(DbName, DDocNames, Config)
-        end;
-    _ ->
-        ok
-    end.
-
-
-maybe_compact_views(_DbName, [], _Config) ->
-    ok;
-maybe_compact_views(DbName, [DDocName | Rest], Config) ->
-    case check_period(Config) of
-    true ->
-        case maybe_compact_view(DbName, DDocName, Config) of
-        ok ->
-            maybe_compact_views(DbName, Rest, Config);
-        timeout ->
-            ok
-        end;
-    false ->
-        ok
-    end.
-
-
-db_ddoc_names(Db) ->
-    {ok, _, DDocNames} = couch_db:enum_docs(
-        Db,
-        fun(#full_doc_info{id = <<"_design/", _/binary>>, deleted = true}, _, Acc) ->
-            {ok, Acc};
-        (#full_doc_info{id = <<"_design/", Id/binary>>}, _, Acc) ->
-            {ok, [Id | Acc]};
-        (_, _, Acc) ->
-            {stop, Acc}
-        end, [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
-    DDocNames.
-
-
-maybe_compact_view(DbName, GroupId, Config) ->
-    DDocId = <<"_design/", GroupId/binary>>,
-    case (catch couch_mrview:get_info(DbName, DDocId)) of
-    {ok, GroupInfo} ->
-        case can_view_compact(Config, DbName, GroupId, GroupInfo) of
-        true ->
-            {ok, MonRef} = couch_mrview:compact(DbName, DDocId, [monitor]),
-            TimeLeft = compact_time_left(Config),
-            receive
-            {'DOWN', MonRef, process, _, normal} ->
-                ok;
-            {'DOWN', MonRef, process, _, Reason} ->
-                ?LOG_ERROR("Compaction daemon - an error ocurred while compacting"
-                    " the view group `~s` from database `~s`: ~p",
-                    [GroupId, DbName, Reason]),
-                ok
-            after TimeLeft ->
-                ?LOG_INFO("Compaction daemon - canceling the compaction for the "
-                    "view group `~s` of the database `~s` because it's exceeding"
-                    " the allowed period.", [GroupId, DbName]),
-                erlang:demonitor(MonRef, [flush]),
-                ok = couch_mrview:cancel_compaction(DbName, DDocId),
-                timeout
-            end;
-        false ->
-            ok
-        end;
-    Error ->
-        ?LOG_ERROR("Error opening view group `~s` from database `~s`: ~p",
-            [GroupId, DbName, Error]),
-        ok
-    end.
-
-
-compact_time_left(#config{cancel = false}) ->
-    infinity;
-compact_time_left(#config{period = nil}) ->
-    infinity;
-compact_time_left(#config{period = #period{to = {ToH, ToM} = To}}) ->
-    {H, M, _} = time(),
-    case To > {H, M} of
-    true ->
-        ((ToH - H) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000);
-    false ->
-        ((24 - H + ToH) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000)
-    end.
-
-
-get_db_config(DbName) ->
-    case ets:lookup(?CONFIG_ETS, DbName) of
-    [] ->
-        case ets:lookup(?CONFIG_ETS, <<"_default">>) of
-        [] ->
-            nil;
-        [{<<"_default">>, Config}] ->
-            {ok, Config}
-        end;
-    [{DbName, Config}] ->
-        {ok, Config}
-    end.
-
-
-can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
-    case check_period(Config) of
-    false ->
-        false;
-    true ->
-        {ok, DbInfo} = couch_db:get_db_info(Db),
-        {Frag, SpaceRequired} = frag(DbInfo),
-        ?LOG_DEBUG("Fragmentation for database `~s` is ~p%, estimated space for"
-           " compaction is ~p bytes.", [Db#db.name, Frag, SpaceRequired]),
-        case check_frag(Threshold, Frag) of
-        false ->
-            false;
-        true ->
-            Free = free_space(couch_config:get("couchdb", "database_dir")),
-            case Free >= SpaceRequired of
-            true ->
-                true;
-            false ->
-                ?LOG_WARN("Compaction daemon - skipping database `~s` "
-                    "compaction: the estimated necessary disk space is about ~p"
-                    " bytes but the currently available disk space is ~p bytes.",
-                   [Db#db.name, SpaceRequired, Free]),
-                false
-            end
-        end
-    end.
-
-can_view_compact(Config, DbName, GroupId, GroupInfo) ->
-    case check_period(Config) of
-    false ->
-        false;
-    true ->
-        case couch_util:get_value(updater_running, GroupInfo) of
-        true ->
-            false;
-        false ->
-            {Frag, SpaceRequired} = frag(GroupInfo),
-            ?LOG_DEBUG("Fragmentation for view group `~s` (database `~s`) is "
-                "~p%, estimated space for compaction is ~p bytes.",
-                [GroupId, DbName, Frag, SpaceRequired]),
-            case check_frag(Config#config.view_frag, Frag) of
-            false ->
-                false;
-            true ->
-                Free = free_space(couch_index_util:root_dir()),
-                case Free >= SpaceRequired of
-                true ->
-                    true;
-                false ->
-                    ?LOG_WARN("Compaction daemon - skipping view group `~s` "
-                        "compaction (database `~s`): the estimated necessary "
-                        "disk space is about ~p bytes but the currently available"
-                        " disk space is ~p bytes.",
-                        [GroupId, DbName, SpaceRequired, Free]),
-                    false
-                end
-            end
-        end
-    end.
-
-
-check_period(#config{period = nil}) ->
-    true;
-check_period(#config{period = #period{from = From, to = To}}) ->
-    {HH, MM, _} = erlang:time(),
-    case From < To of
-    true ->
-        ({HH, MM} >= From) andalso ({HH, MM} < To);
-    false ->
-        ({HH, MM} >= From) orelse ({HH, MM} < To)
-    end.
-
-
-check_frag(nil, _) ->
-    true;
-check_frag(Threshold, Frag) ->
-    Frag >= Threshold.
-
-
-frag(Props) ->
-    FileSize = couch_util:get_value(disk_size, Props),
-    MinFileSize = list_to_integer(
-        couch_config:get("compaction_daemon", "min_file_size", "131072")),
-    case FileSize < MinFileSize of
-    true ->
-        {0, FileSize};
-    false ->
-        case couch_util:get_value(data_size, Props) of
-        null ->
-            {100, FileSize};
-        0 ->
-            {0, FileSize};
-        DataSize ->
-            Frag = round(((FileSize - DataSize) / FileSize * 100)),
-            {Frag, space_required(DataSize)}
-        end
-    end.
-
-% Rough, and pessimistic, estimation of necessary disk space to compact a
-% database or view index.
-space_required(DataSize) ->
-    round(DataSize * 2.0).
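
For example, a 10 MiB file with 4 MiB of live data yields
round((10485760 - 4194304) / 10485760 * 100) = 60% fragmentation, and
space_required(4194304) estimates 8388608 bytes (about 8 MiB) of free disk
needed before compaction is attempted.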
-
-
-load_config() ->
-    lists:foreach(
-        fun({DbName, ConfigString}) ->
-            case parse_config(DbName, ConfigString) of
-            {ok, Config} ->
-                true = ets:insert(?CONFIG_ETS, {?l2b(DbName), Config});
-            error ->
-                ok
-            end
-        end,
-        couch_config:get("compactions")).
-
-parse_config(DbName, ConfigString) ->
-    case (catch do_parse_config(ConfigString)) of
-    {ok, Conf} ->
-        {ok, Conf};
-    incomplete_period ->
-        ?LOG_ERROR("Incomplete period ('to' or 'from' missing) in the compaction"
-            " configuration for database `~s`", [DbName]),
-        error;
-    _ ->
-        ?LOG_ERROR("Invalid compaction configuration for database "
-            "`~s`: `~s`", [DbName, ConfigString]),
-        error
-    end.
-
-do_parse_config(ConfigString) ->
-    {ok, ConfProps} = couch_util:parse_term(ConfigString),
-    {ok, #config{period = Period} = Conf} = config_record(ConfProps, #config{}),
-    case Period of
-    nil ->
-        {ok, Conf};
-    #period{from = From, to = To} when From =/= nil, To =/= nil ->
-        {ok, Conf};
-    #period{} ->
-        incomplete_period
-    end.
-
-config_record([], Config) ->
-    {ok, Config};
-
-config_record([{db_fragmentation, V} | Rest], Config) ->
-    [Frag] = string:tokens(V, "%"),
-    config_record(Rest, Config#config{db_frag = list_to_integer(Frag)});
-
-config_record([{view_fragmentation, V} | Rest], Config) ->
-    [Frag] = string:tokens(V, "%"),
-    config_record(Rest, Config#config{view_frag = list_to_integer(Frag)});
-
-config_record([{from, V} | Rest], #config{period = Period0} = Config) ->
-    Time = parse_time(V),
-    Period = case Period0 of
-    nil ->
-        #period{from = Time};
-    #period{} ->
-        Period0#period{from = Time}
-    end,
-    config_record(Rest, Config#config{period = Period});
-
-config_record([{to, V} | Rest], #config{period = Period0} = Config) ->
-    Time = parse_time(V),
-    Period = case Period0 of
-    nil ->
-        #period{to = Time};
-    #period{} ->
-        Period0#period{to = Time}
-    end,
-    config_record(Rest, Config#config{period = Period});
-
-config_record([{strict_window, true} | Rest], Config) ->
-    config_record(Rest, Config#config{cancel = true});
-
-config_record([{strict_window, false} | Rest], Config) ->
-    config_record(Rest, Config#config{cancel = false});
-
-config_record([{parallel_view_compaction, true} | Rest], Config) ->
-    config_record(Rest, Config#config{parallel_view_compact = true});
-
-config_record([{parallel_view_compaction, false} | Rest], Config) ->
-    config_record(Rest, Config#config{parallel_view_compact = false}).
-
-
-parse_time(String) ->
-    [HH, MM] = string:tokens(String, ":"),
-    {list_to_integer(HH), list_to_integer(MM)}.
-
-
-free_space(Path) ->
-    DiskData = lists:sort(
-        fun({PathA, _, _}, {PathB, _, _}) ->
-            length(filename:split(PathA)) > length(filename:split(PathB))
-        end,
-        disksup:get_disk_data()),
-    free_space_rec(abs_path(Path), DiskData).
-
-free_space_rec(_Path, []) ->
-    undefined;
-free_space_rec(Path, [{MountPoint0, Total, Usage} | Rest]) ->
-    MountPoint = abs_path(MountPoint0),
-    case MountPoint =:= string:substr(Path, 1, length(MountPoint)) of
-    false ->
-        free_space_rec(Path, Rest);
-    true ->
-        trunc(Total - (Total * (Usage / 100))) * 1024
-    end.
-
-abs_path(Path0) ->
-    Path = filename:absname(Path0),
-    case lists:last(Path) of
-    $/ ->
-        Path;
-    _ ->
-        Path ++ "/"
-    end.
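
For context, the configuration strings parsed above live in the
"compactions" ini section, one entry per database plus an optional
"_default". A representative entry exercising every config_record/2 clause
(the values are illustrative):

    [compactions]
    _default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, false}]

    [compaction_daemon]
    check_interval = 300   ; seconds between scans of all databases
    min_file_size = 131072 ; files below this are never considered fragmented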

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_compress.erl
----------------------------------------------------------------------
diff --git a/couch_compress.erl b/couch_compress.erl
deleted file mode 100644
index ac386fd..0000000
--- a/couch_compress.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compress).
-
--export([compress/2, decompress/1, is_compressed/2]).
--export([get_compression_method/0]).
-
--include("couch_db.hrl").
-
-% binaries compressed with snappy have their first byte set to this value
--define(SNAPPY_PREFIX, 1).
-% Term prefixes documented at:
-%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
--define(TERM_PREFIX, 131).
--define(COMPRESSED_TERM_PREFIX, 131, 80).
-
-
-get_compression_method() ->
-    case couch_config:get("couchdb", "file_compression") of
-    undefined ->
-        ?DEFAULT_COMPRESSION;
-    Method1 ->
-        case string:tokens(Method1, "_") of
-        [Method] ->
-            list_to_existing_atom(Method);
-        [Method, Level] ->
-            {list_to_existing_atom(Method), list_to_integer(Level)}
-        end
-    end.
-
-
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
-    Bin;
-compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
-    compress(decompress(Bin), Method);
-compress(Term, none) ->
-    ?term_to_bin(Term);
-compress(Term, {deflate, Level}) ->
-    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
-compress(Term, snappy) ->
-    Bin = ?term_to_bin(Term),
-    try
-        {ok, CompressedBin} = snappy:compress(Bin),
-        case byte_size(CompressedBin) < byte_size(Bin) of
-        true ->
-            <<?SNAPPY_PREFIX, CompressedBin/binary>>;
-        false ->
-            Bin
-        end
-    catch exit:snappy_nif_not_loaded ->
-        Bin
-    end.
-
-
-decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
-    {ok, TermBin} = snappy:decompress(Rest),
-    binary_to_term(TermBin);
-decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
-    binary_to_term(Bin).
-
-
-is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
-    Method =:= snappy;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
-    true;
-is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
-    false;
-is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
-    Method =:= none;
-is_compressed(Term, _Method) when not is_binary(Term) ->
-    false.
-
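
A short round-trip sketch of this API (if the snappy NIF is not loaded, or
compression does not shrink the binary, compress/2 falls back to the plain
external term format, which decompress/1 also accepts):

    Term = {[{<<"a">>, 1}]},
    Bin = couch_compress:compress(Term, snappy),
    Term = couch_compress:decompress(Bin).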

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_config.erl
----------------------------------------------------------------------
diff --git a/couch_config.erl b/couch_config.erl
deleted file mode 100644
index 22d7cdc..0000000
--- a/couch_config.erl
+++ /dev/null
@@ -1,251 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% Reads CouchDB's ini files and can be queried for configuration parameters.
-% This module is initialized with a list of ini files from which it
-% consecutively reads Key/Value pairs and saves them in an ets table. If more
-% than one ini file is specified, the last one is used to write back changes
-% made with set/3 and set/4.
-
--module(couch_config).
--behaviour(gen_server).
-
--include("couch_db.hrl").
-
-
--export([start_link/1, stop/0]).
--export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
--export([register/1, register/2]).
--export([parse_ini_file/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--record(config, {
-    notify_funs=[],
-    write_filename=undefined
-}).
-
-
-start_link(IniFiles) ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).
-
-stop() ->
-    gen_server:cast(?MODULE, stop).
-
-
-all() ->
-    lists:sort(gen_server:call(?MODULE, all, infinity)).
-
-
-get(Section) when is_binary(Section) ->
-    ?MODULE:get(?b2l(Section));
-get(Section) ->
-    Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
-    [{Key, Value} || [Key, Value] <- Matches].
-
-get(Section, Key) ->
-    ?MODULE:get(Section, Key, undefined).
-
-get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
-    ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
-get(Section, Key, Default) ->
-    case ets:lookup(?MODULE, {Section, Key}) of
-        [] -> Default;
-        [{_, Match}] -> Match
-    end.
-
-set(Section, Key, Value) ->
-    ?MODULE:set(Section, Key, Value, true).
-
-set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key)  ->
-    ?MODULE:set(?b2l(Section), ?b2l(Key), Value, Persist);
-set(Section, Key, Value, Persist) ->
-    gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).
-
-
-delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
-    delete(?b2l(Section), ?b2l(Key));
-delete(Section, Key) ->
-    delete(Section, Key, true).
-
-delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
-    delete(?b2l(Section), ?b2l(Key), Persist);
-delete(Section, Key, Persist) ->
-    gen_server:call(?MODULE, {delete, Section, Key, Persist}).
-
-
-register(Fun) ->
-    ?MODULE:register(Fun, self()).
-
-register(Fun, Pid) ->
-    gen_server:call(?MODULE, {register, Fun, Pid}).
-
-
-init(IniFiles) ->
-    ets:new(?MODULE, [named_table, set, protected]),
-    try
-        lists:foreach(fun(IniFile) ->
-            {ok, ParsedIniValues} = parse_ini_file(IniFile),
-            ets:insert(?MODULE, ParsedIniValues)
-        end, IniFiles),
-        WriteFile = case IniFiles of
-            [_|_] -> lists:last(IniFiles);
-            _ -> undefined
-        end,
-        {ok, #config{write_filename = WriteFile}}
-    catch _Tag:Error ->
-        {stop, Error}
-    end.
-
-
-terminate(_Reason, _State) ->
-    ok.
-
-
-handle_call(all, _From, Config) ->
-    Resp = lists:sort((ets:tab2list(?MODULE))),
-    {reply, Resp, Config};
-handle_call({set, Sec, Key, Val, Persist}, From, Config) ->
-    Result = case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            couch_config_writer:save_to_file({{Sec, Key}, Val}, FileName);
-        _ ->
-            ok
-    end,
-    case Result of
-    ok ->
-        true = ets:insert(?MODULE, {{Sec, Key}, Val}),
-        spawn_link(fun() ->
-            [catch F(Sec, Key, Val, Persist) || {_Pid, F} <- Config#config.notify_funs],
-                gen_server:reply(From, ok)
-        end),
-        {noreply, Config};
-    _Error ->
-        {reply, Result, Config}
-    end;
-handle_call({delete, Sec, Key, Persist}, From, Config) ->
-    true = ets:delete(?MODULE, {Sec,Key}),
-    case {Persist, Config#config.write_filename} of
-        {true, undefined} ->
-            ok;
-        {true, FileName} ->
-            couch_config_writer:save_to_file({{Sec, Key}, ""}, FileName);
-        _ ->
-            ok
-    end,
-    spawn_link(fun() ->
-        [catch F(Sec, Key, deleted, Persist) || {_Pid, F} <- Config#config.notify_funs],
-            gen_server:reply(From, ok)
-    end),
-    {noreply, Config};
-handle_call({register, Fun, Pid}, _From, #config{notify_funs=PidFuns}=Config) ->
-    erlang:monitor(process, Pid),
-    % convert 1 and 2 arity to 3 arity
-    Fun2 =
-    case Fun of
-        _ when is_function(Fun, 1) ->
-            fun(Section, _Key, _Value, _Persist) -> Fun(Section) end;
-        _ when is_function(Fun, 2) ->
-            fun(Section, Key, _Value, _Persist) -> Fun(Section, Key) end;
-        _ when is_function(Fun, 3) ->
-            fun(Section, Key, Value, _Persist) -> Fun(Section, Key, Value) end;
-        _ when is_function(Fun, 4) ->
-            Fun
-    end,
-    {reply, ok, Config#config{notify_funs=[{Pid, Fun2} | PidFuns]}}.
-
-
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info({'DOWN', _, _, DownPid, _}, #config{notify_funs=PidFuns}=Config) ->
-    % remove any funs registered by the downed process
-    FilteredPidFuns = [{Pid,Fun} || {Pid,Fun} <- PidFuns, Pid /= DownPid],
-    {noreply, Config#config{notify_funs=FilteredPidFuns}}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-parse_ini_file(IniFile) ->
-    IniFilename = couch_util:abs_pathname(IniFile),
-    IniBin =
-    case file:read_file(IniFilename) of
-        {ok, IniBin0} ->
-            IniBin0;
-        {error, Reason} = Error ->
-            ?LOG_ERROR("Could not read server configuration file ~s: ~s",
-                [IniFilename, file:format_error(Reason)]),
-            throw(Error)
-    end,
-
-    Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
-    {_, ParsedIniValues} =
-    lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
-            case string:strip(Line) of
-            "[" ++ Rest ->
-                case re:split(Rest, "\\]", [{return, list}]) of
-                [NewSectionName, ""] ->
-                    {NewSectionName, AccValues};
-                _Else -> % end bracket not at end, ignore this line
-                    {AccSectionName, AccValues}
-                end;
-            ";" ++ _Comment ->
-                {AccSectionName, AccValues};
-            Line2 ->
-                case re:split(Line2, "\s*=\s*", [{return, list}]) of
-                [Value] ->
-                    MultiLineValuePart = case re:run(Line, "^ \\S", []) of
-                    {match, _} ->
-                        true;
-                    _ ->
-                        false
-                    end,
-                    case {MultiLineValuePart, AccValues} of
-                    {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
-                        % remove comment
-                        case re:split(Value, " ;|\t;", [{return, list}]) of
-                        [[]] ->
-                            % empty line
-                            {AccSectionName, AccValues};
-                        [LineValue | _Rest] ->
-                            E = {{AccSectionName, ValueName},
-                                PrevValue ++ " " ++ LineValue},
-                            {AccSectionName, [E | AccValuesRest]}
-                        end;
-                    _ ->
-                        {AccSectionName, AccValues}
-                    end;
-                [""|_LineValues] -> % line begins with "=", ignore
-                    {AccSectionName, AccValues};
-                [ValueName|LineValues] -> % yeehaw, got a line!
-                    RemainingLine = couch_util:implode(LineValues, "="),
-                    % removes comments
-                    case re:split(RemainingLine, " ;|\t;", [{return, list}]) of
-                    [[]] ->
-                        % empty line means delete this key
-                        ets:delete(?MODULE, {AccSectionName, ValueName}),
-                        {AccSectionName, AccValues};
-                    [LineValue | _Rest] ->
-                        {AccSectionName,
-                            [{{AccSectionName, ValueName}, LineValue} | AccValues]}
-                    end
-                end
-            end
-        end, {"", []}, Lines),
-    {ok, ParsedIniValues}.
-
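
A minimal usage sketch of the public API (file names, section, key and
values are illustrative):

    {ok, _Pid} = couch_config:start_link(["default.ini", "local.ini"]),
    Port = couch_config:get("httpd", "port", "5984"),
    ok = couch_config:set("httpd", "port", "5985", false),   % don't persist
    ok = couch_config:register(fun("httpd", "port", NewPort) ->
        io:format("port is now ~s~n", [NewPort])
    end).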

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_config_writer.erl
----------------------------------------------------------------------
diff --git a/couch_config_writer.erl b/couch_config_writer.erl
deleted file mode 100644
index 21f1c3f..0000000
--- a/couch_config_writer.erl
+++ /dev/null
@@ -1,88 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% @doc Saves a Key/Value pair to an ini file. The Key consists of a Section
-%%      and Option combination. If that combination is found in the ini file
-%%      the new value replaces the old value. If only the Section is found the
-%%      Option and value combination is appended to the Section. If the Section
-%%      does not yet exist in the ini file, it is added and the Option/Value
-%%      pair is appended.
-%% @see couch_config
-
--module(couch_config_writer).
-
--export([save_to_file/2]).
-
--include("couch_db.hrl").
-
-%% @spec save_to_file(
-%%           Config::{{Section::string(), Option::string()}, Value::string()},
-%%           File::filename()) -> ok
-%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
-save_to_file({{Section, Key}, Value}, File) ->
-    {ok, OldFileContents} = file:read_file(File),
-    Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),
-
-    SectionLine = "[" ++ Section ++ "]",
-    {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),
-
-    NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
-    NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
-    case file:write_file(File, NewFileContents) of
-    ok ->
-        ok;
-    {error, Reason} = Error ->
-        ?LOG_ERROR("Could not write config file ~s: ~s",
-            [File, file:format_error(Reason)]),
-        Error
-    end.
-
-
-process_file_lines([Section|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_section_lines(Rest, [Section|SeenLines], Pattern, Key, Value);
-
-process_file_lines([Line|Rest], SeenLines, Section, Pattern, Key, Value) ->
-    process_file_lines(Rest, [Line|SeenLines], Section, Pattern, Key, Value);
-
-process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
-    % Section wasn't found.  Append it with the option here.
-    [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].
-
-
-process_section_lines([Line|Rest], SeenLines, Pattern, Key, Value) ->
-    case re:run(Line, Pattern, [{capture, all_but_first}]) of
-    nomatch -> % Found nothing interesting. Move on.
-        process_section_lines(Rest, [Line|SeenLines], Pattern, Key, Value);
-    {match, []} -> % Found another section. Append the option here.
-        lists:reverse(Rest) ++
-        [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
-    {match, _} -> % Found the option itself. Replace it.
-        lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
-    end;
-
-process_section_lines([], SeenLines, _Pattern, Key, Value) ->
-    % Found end of file within the section. Append the option here.
-    [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
-
-
-reverse_and_add_newline([Line|Rest], Content) ->
-    reverse_and_add_newline(Rest, [Line, "\n", Content]);
-
-reverse_and_add_newline([], Content) ->
-    Content.
-
-
-strip_empty_lines(["" | Rest]) ->
-    strip_empty_lines(Rest);
-
-strip_empty_lines(All) ->
-    All.
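
Usage is a single call; for example (section, key and path illustrative):

    ok = couch_config_writer:save_to_file(
        {{"httpd", "port"}, "5985"}, "/etc/couchdb/local.ini").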

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_db.erl
----------------------------------------------------------------------
diff --git a/couch_db.erl b/couch_db.erl
deleted file mode 100644
index 11ea0fd..0000000
--- a/couch_db.erl
+++ /dev/null
@@ -1,1358 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db).
--behaviour(gen_server).
-
--export([open/2,open_int/2,close/1,create/2,get_db_info/1,get_design_docs/1]).
--export([start_compact/1, cancel_compact/1]).
--export([open_ref_counted/2,is_idle/1,monitor/1,count_changes_since/2]).
--export([update_doc/3,update_doc/4,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
--export([get_doc_info/2,get_full_doc_info/2,get_full_doc_infos/2]).
--export([open_doc/2,open_doc/3,open_doc_revs/4]).
--export([set_revs_limit/2,get_revs_limit/1]).
--export([get_missing_revs/2,name/1,get_update_seq/1,get_committed_update_seq/1]).
--export([enum_docs/4,enum_docs_since/5]).
--export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
--export([increment_update_seq/1,get_purge_seq/1,purge_docs/2,get_last_purged/1]).
--export([start_link/3,open_doc_int/3,ensure_full_commit/1]).
--export([set_security/2,get_security/1]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
--export([changes_since/4,changes_since/5,read_doc/2,new_revid/1]).
--export([check_is_admin/1, check_is_member/1]).
--export([reopen/1, is_system_db/1, compression/1]).
-
--include("couch_db.hrl").
-
-
-start_link(DbName, Filepath, Options) ->
-    case open_db_file(Filepath, Options) of
-    {ok, Fd} ->
-        StartResult = gen_server:start_link(couch_db, {DbName, Filepath, Fd, Options}, []),
-        unlink(Fd),
-        StartResult;
-    Else ->
-        Else
-    end.
-
-open_db_file(Filepath, Options) ->
-    case couch_file:open(Filepath, Options) of
-    {ok, Fd} ->
-        {ok, Fd};
-    {error, enoent} ->
-        % Couldn't find the file. Is there a compact version? This can happen
-        % if we crashed during the file switch.
-        case couch_file:open(Filepath ++ ".compact", [nologifmissing]) of
-        {ok, Fd} ->
-            ?LOG_INFO("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
-            ok = file:rename(Filepath ++ ".compact", Filepath),
-            ok = couch_file:sync(Fd),
-            {ok, Fd};
-        {error, enoent} ->
-            {not_found, no_db_file}
-        end;
-    Error ->
-        Error
-    end.
-
-
-create(DbName, Options) ->
-    couch_server:create(DbName, Options).
-
-% This is for opening a database for internal purposes like the replicator
-% or the view indexer. It never throws a reader error.
-open_int(DbName, Options) ->
-    couch_server:open(DbName, Options).
-
-% This should be called any time an HTTP request opens the database.
-% It ensures that the HTTP userCtx is a valid reader.
-open(DbName, Options) ->
-    case couch_server:open(DbName, Options) of
-        {ok, Db} ->
-            try
-                check_is_member(Db),
-                {ok, Db}
-            catch
-                throw:Error ->
-                    close(Db),
-                    throw(Error)
-            end;
-        Else -> Else
-    end.
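
For instance, internal callers such as the compaction daemon above open with
an admin user_ctx via open_int/2 and so skip the member check (database name
illustrative):

    {ok, Db} = couch_db:open_int(<<"mydb">>,
        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
    try
        %% ... read or write ...
        ok
    after
        couch_db:close(Db)
    end.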
-
-reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr, user_ctx = UserCtx}) ->
-    {ok, #db{fd_ref_counter = NewRefCntr} = NewDb} =
-        gen_server:call(Pid, get_db, infinity),
-    case NewRefCntr =:= OldRefCntr of
-    true ->
-        ok;
-    false ->
-        couch_ref_counter:add(NewRefCntr),
-        catch couch_ref_counter:drop(OldRefCntr)
-    end,
-    {ok, NewDb#db{user_ctx = UserCtx}}.
-
-is_system_db(#db{options = Options}) ->
-    lists:member(sys_db, Options).
-
-ensure_full_commit(#db{update_pid=UpdatePid,instance_start_time=StartTime}) ->
-    ok = gen_server:call(UpdatePid, full_commit, infinity),
-    {ok, StartTime}.
-
-close(#db{fd_ref_counter=RefCntr}) ->
-    couch_ref_counter:drop(RefCntr).
-
-open_ref_counted(MainPid, OpenedPid) ->
-    gen_server:call(MainPid, {open_ref_count, OpenedPid}).
-
-is_idle(#db{main_pid = MainPid}) ->
-    is_idle(MainPid);
-is_idle(MainPid) ->
-    gen_server:call(MainPid, is_idle).
-
-monitor(#db{main_pid=MainPid}) ->
-    erlang:monitor(process, MainPid).
-
-start_compact(#db{update_pid=Pid}) ->
-    gen_server:call(Pid, start_compact).
-
-cancel_compact(#db{update_pid=Pid}) ->
-    gen_server:call(Pid, cancel_compact).
-
-delete_doc(Db, Id, Revisions) ->
-    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
-    {ok, Result}.
-
-open_doc(Db, IdOrDocInfo) ->
-    open_doc(Db, IdOrDocInfo, []).
-
-open_doc(Db, Id, Options) ->
-    increment_stat(Db, {couchdb, database_reads}),
-    case open_doc_int(Db, Id, Options) of
-    {ok, #doc{deleted=true}=Doc} ->
-        case lists:member(deleted, Options) of
-        true ->
-            apply_open_options({ok, Doc},Options);
-        false ->
-            {not_found, deleted}
-        end;
-    Else ->
-        apply_open_options(Else,Options)
-    end.
-
-apply_open_options({ok, Doc},Options) ->
-    apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
-    Else.
-
-apply_open_options2(Doc,[]) ->
-    {ok, Doc};
-apply_open_options2(#doc{atts=Atts,revs=Revs}=Doc,
-        [{atts_since, PossibleAncestors}|Rest]) ->
-    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
-    apply_open_options2(Doc#doc{atts=[A#att{data=
-        if AttPos>RevPos -> Data; true -> stub end}
-        || #att{revpos=AttPos,data=Data}=A <- Atts]}, Rest);
-apply_open_options2(Doc, [ejson_body | Rest]) ->
-    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
-    apply_open_options2(Doc,Rest).
-
-
-find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
-    0;
-find_ancestor_rev_pos(_DocRevs, []) ->
-    0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
-    case lists:member({RevPos, RevId}, AttsSinceRevs) of
-    true ->
-        RevPos;
-    false ->
-        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
-    end.
-
-open_doc_revs(Db, Id, Revs, Options) ->
-    increment_stat(Db, {couchdb, database_reads}),
-    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
-    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
-
-% Each returned result is a list of tuples:
-% {Id, MissingRevs, PossibleAncestors}
-% if no revs are missing, it's omitted from the results.
-get_missing_revs(Db, IdRevsList) ->
-    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
-    {ok, find_missing(IdRevsList, Results)}.
-
-find_missing([], []) ->
-    [];
-find_missing([{Id, Revs}|RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
-    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
-    [] ->
-        find_missing(RestIdRevs, RestLookupInfo);
-    MissingRevs ->
-        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
-        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
-        % Find the revs that are possible parents of this rev
-        PossibleAncestors =
-        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
-            % this leaf is a "possible ancestor" of the missing
-            % revs if this LeafPos is less than any of the missing revs
-            case lists:any(fun({MissingPos, _}) ->
-                    LeafPos < MissingPos end, MissingRevs) of
-            true ->
-                [{LeafPos, LeafRevId} | Acc];
-            false ->
-                Acc
-            end
-        end, [], LeafRevs),
-        [{Id, MissingRevs, PossibleAncestors} |
-                find_missing(RestIdRevs, RestLookupInfo)]
-    end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
-    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
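
Concretely, for a doc whose rev 3 is unknown locally while rev 2 is a leaf,
the result entry has this shape (rev ids illustrative):

    {ok, [{<<"docid">>, [{3, Rev3}], [{2, Rev2}]}]} =
        couch_db:get_missing_revs(Db, [{<<"docid">>, [{3, Rev3}]}]).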
-
-get_doc_info(Db, Id) ->
-    case get_full_doc_info(Db, Id) of
-    {ok, DocInfo} ->
-        {ok, couch_doc:to_doc_info(DocInfo)};
-    Else ->
-        Else
-    end.
-
-%   returns {ok, FullDocInfo} or not_found
-get_full_doc_info(Db, Id) ->
-    [Result] = get_full_doc_infos(Db, [Id]),
-    Result.
-
-get_full_doc_infos(Db, Ids) ->
-    couch_btree:lookup(by_id_btree(Db), Ids).
-
-increment_update_seq(#db{update_pid=UpdatePid}) ->
-    gen_server:call(UpdatePid, increment_update_seq).
-
-purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
-    gen_server:call(UpdatePid, {purge_docs, IdsRevs}).
-
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
-    Seq.
-
-get_update_seq(#db{update_seq=Seq})->
-    Seq.
-
-get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
-    PurgeSeq.
-
-get_last_purged(#db{header=#db_header{purged_docs=nil}}) ->
-    {ok, []};
-get_last_purged(#db{fd=Fd, header=#db_header{purged_docs=PurgedPointer}}) ->
-    couch_file:pread_term(Fd, PurgedPointer).
-
-get_db_info(Db) ->
-    #db{fd=Fd,
-        header=#db_header{disk_version=DiskVersion},
-        compactor_pid=Compactor,
-        update_seq=SeqNum,
-        name=Name,
-        instance_start_time=StartTime,
-        committed_update_seq=CommittedUpdateSeq,
-        fulldocinfo_by_id_btree = IdBtree,
-        docinfo_by_seq_btree = SeqBtree,
-        local_docs_btree = LocalBtree
-    } = Db,
-    {ok, Size} = couch_file:bytes(Fd),
-    {ok, DbReduction} = couch_btree:full_reduce(by_id_btree(Db)),
-    InfoList = [
-        {db_name, Name},
-        {doc_count, element(1, DbReduction)},
-        {doc_del_count, element(2, DbReduction)},
-        {update_seq, SeqNum},
-        {purge_seq, couch_db:get_purge_seq(Db)},
-        {compact_running, Compactor/=nil},
-        {disk_size, Size},
-        {data_size, db_data_size(DbReduction, [SeqBtree, IdBtree, LocalBtree])},
-        {instance_start_time, StartTime},
-        {disk_format_version, DiskVersion},
-        {committed_update_seq, CommittedUpdateSeq}
-        ],
-    {ok, InfoList}.
-
-db_data_size({_Count, _DelCount}, _Trees) ->
-    % pre 1.2 format, upgraded on compaction
-    null;
-db_data_size({_Count, _DelCount, nil}, _Trees) ->
-    null;
-db_data_size({_Count, _DelCount, DocAndAttsSize}, Trees) ->
-    sum_tree_sizes(DocAndAttsSize, Trees).
-
-sum_tree_sizes(Acc, []) ->
-    Acc;
-sum_tree_sizes(Acc, [T | Rest]) ->
-    case couch_btree:size(T) of
-    nil ->
-        null;
-    Sz ->
-        sum_tree_sizes(Acc + Sz, Rest)
-    end.
-
-get_design_docs(Db) ->
-    FoldFun = skip_deleted(fun
-        (#full_doc_info{deleted = true}, _Reds, Acc) ->
-            {ok, Acc};
-        (#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, Acc) ->
-            {ok, [FullDocInfo | Acc]};
-        (_, _Reds, Acc) ->
-            {stop, Acc}
-    end),
-    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
-    {ok, _, Docs} = couch_btree:fold(by_id_btree(Db), FoldFun, [], KeyOpts),
-    Docs.
-
-check_is_admin(#db{user_ctx=#user_ctx{name=Name,roles=Roles}}=Db) ->
-    {Admins} = get_admins(Db),
-    AdminRoles = [<<"_admin">> | couch_util:get_value(<<"roles">>, Admins, [])],
-    AdminNames = couch_util:get_value(<<"names">>, Admins,[]),
-    case AdminRoles -- Roles of
-    AdminRoles -> % same list, not an admin role
-        case AdminNames -- [Name] of
-        AdminNames -> % same names, not an admin
-            throw({unauthorized, <<"You are not a db or server admin.">>});
-        _ ->
-            ok
-        end;
-    _ ->
-        ok
-    end.
-
-check_is_member(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
-    case (catch check_is_admin(Db)) of
-    ok -> ok;
-    _ ->
-        {Members} = get_members(Db),
-        ReaderRoles = couch_util:get_value(<<"roles">>, Members,[]),
-        WithAdminRoles = [<<"_admin">> | ReaderRoles],
-        ReaderNames = couch_util:get_value(<<"names">>, Members,[]),
-        case ReaderRoles ++ ReaderNames of
-        [] -> ok; % no readers == public access
-        _Else ->
-            case WithAdminRoles -- Roles of
-            WithAdminRoles -> % same list, not a reader role
-                case ReaderNames -- [Name] of
-                ReaderNames -> % same names, not a reader
-                    ?LOG_DEBUG("Not a reader: UserCtx ~p vs Names ~p Roles ~p",[UserCtx, ReaderNames, WithAdminRoles]),
-                    throw({unauthorized, <<"You are not authorized to access this db.">>});
-                _ ->
-                    ok
-                end;
-            _ ->
-                ok
-            end
-        end
-    end.
-
-get_admins(#db{security=SecProps}) ->
-    couch_util:get_value(<<"admins">>, SecProps, {[]}).
-
-get_members(#db{security=SecProps}) ->
-    % we fall back to readers here for backwards compatibility
-    couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})).
-
-get_security(#db{security=SecProps}) ->
-    {SecProps}.
-
-set_security(#db{update_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
-    check_is_admin(Db),
-    ok = validate_security_object(NewSecProps),
-    ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
-    {ok, _} = ensure_full_commit(Db),
-    ok;
-set_security(_, _) ->
-    throw(bad_request).
-
-validate_security_object(SecProps) ->
-    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
-    % we fall back to readers here for backwards compatibility
-    Members = couch_util:get_value(<<"members">>, SecProps,
-        couch_util:get_value(<<"readers">>, SecProps, {[]})),
-    ok = validate_names_and_roles(Admins),
-    ok = validate_names_and_roles(Members),
-    ok.
-
-% validate user input
-validate_names_and_roles({Props}) when is_list(Props) ->
-    case couch_util:get_value(<<"names">>,Props,[]) of
-    Ns when is_list(Ns) ->
-            [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
-            Ns;
-    _ -> throw("names must be a JSON list of strings")
-    end,
-    case couch_util:get_value(<<"roles">>,Props,[]) of
-    Rs when is_list(Rs) ->
-        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
-        Rs;
-    _ -> throw("roles must be a JSON list of strings")
-    end,
-    ok.
-
-get_revs_limit(#db{revs_limit=Limit}) ->
-    Limit.
-
-set_revs_limit(#db{update_pid=Pid}=Db, Limit) when Limit > 0 ->
-    check_is_admin(Db),
-    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
-set_revs_limit(_Db, _Limit) ->
-    throw(invalid_revs_limit).
-
-name(#db{name=Name}) ->
-    Name.
-
-compression(#db{compression=Compression}) ->
-    Compression.
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(Db, Doc, Options, UpdateType) ->
-    case update_docs(Db, [Doc], Options, UpdateType) of
-    {ok, [{ok, NewRev}]} ->
-        {ok, NewRev};
-    {ok, [{{_Id, _Rev}, Error}]} ->
-        throw(Error);
-    {ok, [Error]} ->
-        throw(Error);
-    {ok, []} ->
-        % replication success
-        {Pos, [RevId | _]} = Doc#doc.revs,
-        {ok, {Pos, RevId}}
-    end.
-
-update_docs(Db, Docs) ->
-    update_docs(Db, Docs, []).
-
-% group_alike_docs groups the sorted documents into sublist buckets, by id.
-% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
-group_alike_docs(Docs) ->
-    Sorted = lists:sort(fun({#doc{id=A},_},{#doc{id=B},_})-> A < B end, Docs),
-    group_alike_docs(Sorted, []).
-
-group_alike_docs([], Buckets) ->
-    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
-    group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([{Doc,Ref}|Rest], [Bucket|RestBuckets]) ->
-    [{#doc{id=BucketId},_Ref}|_] = Bucket,
-    case Doc#doc.id == BucketId of
-    true ->
-        % add to existing bucket
-        group_alike_docs(Rest, [[{Doc,Ref}|Bucket]|RestBuckets]);
-    false ->
-        % add to new bucket
-        group_alike_docs(Rest, [[{Doc,Ref}]|[Bucket|RestBuckets]])
-    end.
-
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
-    catch check_is_admin(Db);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
-    ok;
-validate_doc_update(Db, Doc, GetDiskDocFun) ->
-    DiskDoc = GetDiskDocFun(),
-    JsonCtx = couch_util:json_user_ctx(Db),
-    SecObj = get_security(Db),
-    try [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
-            ok -> ok;
-            Error -> throw(Error)
-        end || Fun <- Db#db.validate_doc_funs],
-        ok
-    catch
-        throw:Error ->
-            Error
-    end.
-
-
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
-        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
-    case Revs of
-    [PrevRev|_] ->
-        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
-        {ok, {Deleted, DiskSp, DiskRevs}} ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
-                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
-            false ->
-                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
-                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
-            end;
-        error when AllowConflict ->
-            couch_doc:merge_stubs(Doc, #doc{}), % will generate an error
-                                                % if there are stubs
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        error ->
-            {conflict, Doc}
-        end;
-    [] ->
-        % new doc, and we have existing revs.
-        % reuse existing deleted doc
-        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
-            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
-        true ->
-            {conflict, Doc}
-        end
-    end.
-
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
-        AccFatalErrors) ->
-   {AccPrepped, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun({#doc{revs=Revs}=Doc,Ref}, {AccBucket, AccErrors2}) ->
-            case couch_doc:has_stubs(Doc) of
-            true ->
-                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-            false -> ok
-            end,
-            case Revs of
-            {0, []} ->
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[{Doc, Ref} | AccBucket], AccErrors2};
-                Error ->
-                    {AccBucket, [{Ref, Error} | AccErrors2]}
-                end;
-            _ ->
-                % old revs specified but none exist, a conflict
-                {AccBucket, [{Ref, conflict} | AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [lists:reverse(PreppedBucket) | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
-        [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
-        AllowConflict, AccPrepped, AccErrors) ->
-    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
-    LeafRevsDict = dict:from_list([
-        begin
-            Deleted = element(1, LeafVal),
-            Sp = element(2, LeafVal),
-            {{Start, RevId}, {Deleted, Sp, Revs}}
-        end ||
-        {LeafVal, {Start, [RevId | _]} = Revs} <- Leafs
-    ]),
-    {PreppedBucket, AccErrors3} = lists:foldl(
-        fun({Doc, Ref}, {Docs2Acc, AccErrors2}) ->
-            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
-                    LeafRevsDict, AllowConflict) of
-            {ok, Doc2} ->
-                {[{Doc2, Ref} | Docs2Acc], AccErrors2};
-            {Error, #doc{}} ->
-                % Record the error
-                {Docs2Acc, [{Ref, Error} |AccErrors2]}
-            end
-        end,
-        {[], AccErrors}, DocBucket),
-    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
-            [PreppedBucket | AccPrepped], AccErrors3).
-
-
-update_docs(Db, Docs, Options) ->
-    update_docs(Db, Docs, Options, interactive_edit).
-
-
-prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
-            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
-    {lists:reverse(AccPrepped), lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
-    case OldInfo of
-    not_found ->
-        {ValidatedBucket, AccErrors3} = lists:foldl(
-            fun({Doc, Ref}, {AccPrepped2, AccErrors2}) ->
-                case couch_doc:has_stubs(Doc) of
-                true ->
-                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
-                false -> ok
-                end,
-                case validate_doc_update(Db, Doc, fun() -> nil end) of
-                ok ->
-                    {[{Doc, Ref} | AccPrepped2], AccErrors2};
-                Error ->
-                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
-    {ok, #full_doc_info{rev_tree=OldTree}} ->
-        NewRevTree = lists:foldl(
-            fun({NewDoc, _Ref}, AccTree) ->
-                {NewTree, _} = couch_key_tree:merge(AccTree,
-                    couch_doc:to_path(NewDoc), Db#db.revs_limit),
-                NewTree
-            end,
-            OldTree, Bucket),
-        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
-        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
-        {ValidatedBucket, AccErrors3} =
-        lists:foldl(
-            fun({#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, Ref}, {AccValidated, AccErrors2}) ->
-                case dict:find({Pos, RevId}, LeafRevsFullDict) of
-                {ok, {Start, Path}} ->
-                    % our unflushed doc is a leaf node. Go back on the path
-                    % to find the previous rev that's on disk.
-
-                    LoadPrevRevFun = fun() ->
-                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
-                            end,
-
-                    case couch_doc:has_stubs(Doc) of
-                    true ->
-                        DiskDoc = LoadPrevRevFun(),
-                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
-                        GetDiskDocFun = fun() -> DiskDoc end;
-                    false ->
-                        Doc2 = Doc,
-                        GetDiskDocFun = LoadPrevRevFun
-                    end,
-
-                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
-                    ok ->
-                        {[{Doc2, Ref} | AccValidated], AccErrors2};
-                    Error ->
-                        {AccValidated, [{Doc, Error} | AccErrors2]}
-                    end;
-                _ ->
-                    % this doc isn't a leaf, or it already exists in the tree.
-                    % ignore but consider it a success.
-                    {AccValidated, AccErrors2}
-                end
-            end,
-            {[], AccErrors}, Bucket),
-        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
-                [ValidatedBucket | AccPrepped], AccErrors3)
-    end.
-
-
-
-new_revid(#doc{body=Body,revs={OldStart,OldRevs},
-        atts=Atts,deleted=Deleted}) ->
-    case [{N, T, M} || #att{name=N,type=T,md5=M} <- Atts, M =/= <<>>] of
-    Atts2 when length(Atts) =/= length(Atts2) ->
-        % We must have old style non-md5 attachments
-        ?l2b(integer_to_list(couch_util:rand32()));
-    Atts2 ->
-        OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
-        couch_util:md5(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2]))
-    end.
-
-new_revs([], OutBuckets, IdRevsAcc) ->
-    {lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
-    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
-        fun({#doc{revs={Start, RevIds}}=Doc, Ref}, IdRevsAcc2)->
-        NewRevId = new_revid(Doc),
-        {{Doc#doc{revs={Start+1, [NewRevId | RevIds]}}, Ref},
-            [{Ref, {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
-    end, IdRevsAcc, Bucket),
-    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
-    Atts2 = lists:sort(fun(#att{name=N1}, #att{name=N2}) -> N1 < N2 end, Atts),
-    check_dup_atts2(Atts2),
-    Doc.
-
-check_dup_atts2([#att{name=N}, #att{name=N} | _]) ->
-    throw({bad_request, <<"Duplicate attachments">>});
-check_dup_atts2([_ | Rest]) ->
-    check_dup_atts2(Rest);
-check_dup_atts2(_) ->
-    ok.
-
-
-update_docs(Db, Docs, Options, replicated_changes) ->
-    increment_stat(Db, {couchdb, database_writes}),
-    % associate reference with each doc in order to track duplicates
-    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end, Docs),
-    DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)),
-    case (Db#db.validate_doc_funs /= []) orelse
-        lists:any(
-            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) -> true;
-            ({#doc{atts=Atts}, _Ref}) ->
-                Atts /= []
-            end, Docs2) of
-    true ->
-        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
-        ExistingDocs = get_full_doc_infos(Db, Ids),
-
-        {DocBuckets2, DocErrors} =
-                prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
-        DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
-    false ->
-        DocErrors = [],
-        DocBuckets3 = DocBuckets
-    end,
-    DocBuckets4 = [[{doc_flush_atts(check_dup_atts(Doc), Db#db.updater_fd), Ref}
-            || {Doc, Ref} <- Bucket] || Bucket <- DocBuckets3],
-    {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
-    {ok, DocErrors};
-
-update_docs(Db, Docs, Options, interactive_edit) ->
-    increment_stat(Db, {couchdb, database_writes}),
-    AllOrNothing = lists:member(all_or_nothing, Options),
-    % go ahead and generate the new revision ids for the documents.
-    % separate out the NonRep (_local) documents from the rest of the documents.
-
-    % associate reference with each doc in order to track duplicates
-    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end,Docs),
-    {Docs3, NonRepDocs} = lists:foldl(
-         fun({#doc{id=Id},_Ref}=Doc, {DocsAcc, NonRepDocsAcc}) ->
-            case Id of
-            <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                {DocsAcc, [Doc | NonRepDocsAcc]};
-            Id->
-                {[Doc | DocsAcc], NonRepDocsAcc}
-            end
-        end, {[], []}, Docs2),
-
-    DocBuckets = before_docs_update(Db, group_alike_docs(Docs3)),
-
-    case (Db#db.validate_doc_funs /= []) orelse
-        lists:any(
-            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) ->
-                true;
-            ({#doc{atts=Atts}, _Ref}) ->
-                Atts /= []
-            end, Docs3) of
-    true ->
-        % lookup the doc by id and get the most recent
-        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
-        ExistingDocInfos = get_full_doc_infos(Db, Ids),
-
-        {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
-                DocBuckets, ExistingDocInfos, AllOrNothing, [], []),
-
-        % strip out any empty buckets
-        DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
-    false ->
-        PreCommitFailures = [],
-        DocBuckets2 = DocBuckets
-    end,
-
-    if (AllOrNothing) and (PreCommitFailures /= []) ->
-        {aborted,
-         lists:foldl(fun({#doc{id=Id,revs=Revs}, Ref},Acc) ->
-                         case lists:keyfind(Ref,1,PreCommitFailures) of
-                         {Ref, Error} ->
-                             case Revs of
-                             {Pos, [RevId|_]} ->
-                                 [{{Id,{Pos, RevId}}, Error} | Acc];
-                             {0, []} ->
-                                 [{{Id,{0, <<>>}}, Error} | Acc]
-                             end;
-                         false ->
-                             Acc
-                         end
-                     end,[],Docs3)};
-
-    true ->
-        Options2 = if AllOrNothing -> [merge_conflicts];
-                true -> [] end ++ Options,
-        DocBuckets3 = [[
-                {doc_flush_atts(set_new_att_revpos(
-                        check_dup_atts(Doc)), Db#db.updater_fd), Ref}
-                || {Doc, Ref} <- B] || B <- DocBuckets2],
-        {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
-
-        {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),
-
-        ResultsDict = dict:from_list(IdRevs ++ CommitResults ++ PreCommitFailures),
-        {ok, lists:map(
-            fun({#doc{}, Ref}) ->
-                {ok, Result} = dict:find(Ref, ResultsDict),
-                Result
-            end, Docs2)}
-    end.
-
-% Returns the first available document on disk. Input list is a full rev path
-% for the doc.
-make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
-    nil;
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
-    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, RevValue} |_]=DocPath) ->
-    IsDel = element(1, RevValue),
-    Sp = element(2, RevValue),
-    Revs = [Rev || {Rev, _} <- DocPath],
-    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
-
-set_commit_option(Options) ->
-    CommitSettings = {
-        [true || O <- Options, O==full_commit orelse O==delay_commit],
-        couch_config:get("couchdb", "delayed_commits", "false")
-    },
-    case CommitSettings of
-    {[true], _} ->
-        Options; % user requested explicit commit setting, do not change it
-    {_, "true"} ->
-        Options; % delayed commits are enabled, do nothing
-    {_, "false"} ->
-        [full_commit|Options];
-    {_, Else} ->
-        ?LOG_ERROR("[couchdb] delayed_commits setting must be true/false, not ~p",
-            [Else]),
-        [full_commit|Options]
-    end.
-
-collect_results(UpdatePid, MRef, ResultsAcc) ->
-    receive
-    {result, UpdatePid, Result} ->
-        collect_results(UpdatePid, MRef, [Result | ResultsAcc]);
-    {done, UpdatePid} ->
-        {ok, ResultsAcc};
-    {retry, UpdatePid} ->
-        retry;
-    {'DOWN', MRef, _, _, Reason} ->
-        exit(Reason)
-    end.
-
-write_and_commit(#db{update_pid=UpdatePid}=Db, DocBuckets1,
-        NonRepDocs, Options0) ->
-    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
-    Options = set_commit_option(Options0),
-    MergeConflicts = lists:member(merge_conflicts, Options),
-    FullCommit = lists:member(full_commit, Options),
-    MRef = erlang:monitor(process, UpdatePid),
-    try
-        UpdatePid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts, FullCommit},
-        case collect_results(UpdatePid, MRef, []) of
-        {ok, Results} -> {ok, Results};
-        retry ->
-            % This can happen if the db file we wrote to was swapped out by
-            % compaction. Retry by reopening the db and writing to the current file
-            {ok, Db2} = open_ref_counted(Db#db.main_pid, self()),
-            DocBuckets2 = [
-                [{doc_flush_atts(Doc, Db2#db.updater_fd), Ref} || {Doc, Ref} <- Bucket] ||
-                Bucket <- DocBuckets1
-            ],
-            % We only retry once
-            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
-            close(Db2),
-            UpdatePid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts, FullCommit},
-            case collect_results(UpdatePid, MRef, []) of
-            {ok, Results} -> {ok, Results};
-            retry -> throw({update_error, compaction_retry})
-            end
-        end
-    after
-        erlang:demonitor(MRef, [flush])
-    end.
-
-
-prepare_doc_summaries(Db, BucketList) ->
-    [lists:map(
-        fun({#doc{body = Body, atts = Atts} = Doc, Ref}) ->
-            DiskAtts = [{N, T, P, AL, DL, R, M, E} ||
-                #att{name = N, type = T, data = {_, P}, md5 = M, revpos = R,
-                    att_len = AL, disk_len = DL, encoding = E} <- Atts],
-            AttsFd = case Atts of
-            [#att{data = {Fd, _}} | _] ->
-                Fd;
-            [] ->
-                nil
-            end,
-            SummaryChunk = couch_db_updater:make_doc_summary(Db, {Body, DiskAtts}),
-            {Doc#doc{body = {summary, SummaryChunk, AttsFd}}, Ref}
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-before_docs_update(#db{before_doc_update = nil}, BucketList) ->
-    BucketList;
-before_docs_update(#db{before_doc_update = Fun} = Db, BucketList) ->
-    [lists:map(
-        fun({Doc, Ref}) ->
-            NewDoc = Fun(couch_doc:with_ejson_body(Doc), Db),
-            {NewDoc, Ref}
-        end,
-        Bucket) || Bucket <- BucketList].
-
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts}=Doc) ->
-    Doc#doc{atts= lists:map(fun(#att{data={_Fd,_Sp}}=Att) ->
-            % already committed to disk, do not set a new rev
-            Att;
-        (Att) ->
-            Att#att{revpos=RevPos+1}
-        end, Atts)}.
-
-
-doc_flush_atts(Doc, Fd) ->
-    Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}.
-
-check_md5(_NewSig, <<>>) -> ok;
-check_md5(Sig, Sig) -> ok;
-check_md5(_, _) -> throw(md5_mismatch).
-
-flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd ->
-    % already written to our file, nothing to write
-    Att;
-
-flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5,
-    disk_len=InDiskLen} = Att) ->
-    {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
-            couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
-    check_md5(IdentityMd5, InMd5),
-    Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=InDiskLen};
-
-flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) ->
-    with_stream(Fd, Att, fun(OutputStream) ->
-        couch_stream:write(OutputStream, Data)
-    end);
-
-flush_att(Fd, #att{data=Fun,att_len=undefined}=Att) when is_function(Fun) ->
-    MaxChunkSize = list_to_integer(
-        couch_config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    with_stream(Fd, Att, fun(OutputStream) ->
-        % Fun(MaxChunkSize, WriterFun) must call WriterFun
-        % once for each chunk of the attachment.
-        Fun(MaxChunkSize,
-            % WriterFun({Length, Binary}, State)
-            % WriterFun({0, _Footers}, State)
-            % Called with Length == 0 on the last time.
-            % WriterFun returns NewState.
-            fun({0, Footers}, _) ->
-                F = mochiweb_headers:from_binary(Footers),
-                case mochiweb_headers:get_value("Content-MD5", F) of
-                undefined ->
-                    ok;
-                Md5 ->
-                    {md5, base64:decode(Md5)}
-                end;
-            ({_Length, Chunk}, _) ->
-                couch_stream:write(OutputStream, Chunk)
-            end, ok)
-    end);
-
-flush_att(Fd, #att{data=Fun,att_len=AttLen}=Att) when is_function(Fun) ->
-    with_stream(Fd, Att, fun(OutputStream) ->
-        write_streamed_attachment(OutputStream, Fun, AttLen)
-    end).
-
-
-compressible_att_type(MimeType) when is_binary(MimeType) ->
-    compressible_att_type(?b2l(MimeType));
-compressible_att_type(MimeType) ->
-    TypeExpList = re:split(
-        couch_config:get("attachments", "compressible_types", ""),
-        "\\s*,\\s*",
-        [{return, list}]
-    ),
-    lists:any(
-        fun(TypeExp) ->
-            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-                "(?:\\s*;.*?)?\\s*", $$],
-            re:run(MimeType, Regexp, [caseless]) =/= nomatch
-        end,
-        [T || T <- TypeExpList, T /= []]
-    ).
-
-% From RFC 2616 3.6.1 - Chunked Transfer Coding
-%
-%   In other words, the origin server is willing to accept
-%   the possibility that the trailer fields might be silently
-%   discarded along the path to the client.
-%
-% I take this to mean that if "Trailers: Content-MD5\r\n"
-% is present in the request, but there is no Content-MD5
-% trailer, we're free to ignore this inconsistency and
-% pretend that no Content-MD5 exists.
-with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
-    BufferSize = list_to_integer(
-        couch_config:get("couchdb", "attachment_stream_buffer_size", "4096")),
-    {ok, OutputStream} = case (Enc =:= identity) andalso
-        compressible_att_type(Type) of
-    true ->
-        CompLevel = list_to_integer(
-            couch_config:get("attachments", "compression_level", "0")
-        ),
-        couch_stream:open(Fd, [{buffer_size, BufferSize},
-            {encoding, gzip}, {compression_level, CompLevel}]);
-    _ ->
-        couch_stream:open(Fd, [{buffer_size, BufferSize}])
-    end,
-    ReqMd5 = case Fun(OutputStream) of
-        {md5, FooterMd5} ->
-            case InMd5 of
-                md5_in_footer -> FooterMd5;
-                _ -> InMd5
-            end;
-        _ ->
-            InMd5
-    end,
-    {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
-        couch_stream:close(OutputStream),
-    check_md5(IdentityMd5, ReqMd5),
-    {AttLen, DiskLen, NewEnc} = case Enc of
-    identity ->
-        case {Md5, IdentityMd5} of
-        {Same, Same} ->
-            {Len, IdentityLen, identity};
-        _ ->
-            {Len, IdentityLen, gzip}
-        end;
-    gzip ->
-        case {Att#att.att_len, Att#att.disk_len} of
-        {AL, DL} when AL =:= undefined orelse DL =:= undefined ->
-            % Compressed attachment uploaded through the standalone API.
-            {Len, Len, gzip};
-        {AL, DL} ->
-            % This case is used for efficient push-replication, where a
-            % compressed attachment is located in the body of multipart
-            % content-type request.
-            {AL, DL, gzip}
-        end
-    end,
-    Att#att{
-        data={Fd,StreamInfo},
-        att_len=AttLen,
-        disk_len=DiskLen,
-        md5=Md5,
-        encoding=NewEnc
-    }.
-
-
-write_streamed_attachment(_Stream, _F, 0) ->
-    ok;
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
-    Bin = read_next_chunk(F, LenLeft),
-    ok = couch_stream:write(Stream, Bin),
-    write_streamed_attachment(Stream, F, LenLeft - size(Bin)).
-
-read_next_chunk(F, _) when is_function(F, 0) ->
-    F();
-read_next_chunk(F, LenLeft) when is_function(F, 1) ->
-    F(lists:min([LenLeft, 16#2000])).
-
-enum_docs_since_reduce_to_count(Reds) ->
-    couch_btree:final_reduce(
-            fun couch_db_updater:btree_by_seq_reduce/2, Reds).
-
-enum_docs_reduce_to_count(Reds) ->
-    FinalRed = couch_btree:final_reduce(
-            fun couch_db_updater:btree_by_id_reduce/2, Reds),
-    element(1, FinalRed).
-
-changes_since(Db, StartSeq, Fun, Acc) ->
-    changes_since(Db, StartSeq, Fun, [], Acc).
-
-changes_since(Db, StartSeq, Fun, Options, Acc) ->
-    Wrapper = fun(DocInfo, _Offset, Acc2) -> Fun(DocInfo, Acc2) end,
-    {ok, _LastReduction, AccOut} = couch_btree:fold(by_seq_btree(Db),
-        Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
-    {ok, AccOut}.
-
-count_changes_since(Db, SinceSeq) ->
-    BTree = by_seq_btree(Db),
-    {ok, Changes} =
-    couch_btree:fold_reduce(BTree,
-        fun(_SeqStart, PartialReds, 0) ->
-            {ok, couch_btree:final_reduce(BTree, PartialReds)}
-        end,
-        0, [{start_key, SinceSeq + 1}]),
-    Changes.
-
-enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
-    {ok, LastReduction, AccOut} = couch_btree:fold(
-        by_seq_btree(Db), InFun, Acc, [{start_key, SinceSeq + 1} | Options]),
-    {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
-
-enum_docs(Db, InFun, InAcc, Options) ->
-    FoldFun = skip_deleted(InFun),
-    {ok, LastReduce, OutAcc} = couch_btree:fold(
-        by_id_btree(Db), FoldFun, InAcc, Options),
-    {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
-
-% server functions
-
-init({DbName, Filepath, Fd, Options}) ->
-    {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {self(), DbName, Filepath, Fd, Options}, []),
-    {ok, #db{fd_ref_counter=RefCntr}=Db} = gen_server:call(UpdaterPid, get_db),
-    couch_ref_counter:add(RefCntr),
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats_collector:track_process_count({couchdb, open_databases})
-    end,
-    process_flag(trap_exit, true),
-    {ok, Db}.
-
-terminate(_Reason, Db) ->
-    couch_util:shutdown_sync(Db#db.update_pid),
-    ok.
-
-handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
-    ok = couch_ref_counter:add(RefCntr, OpenerPid),
-    {reply, {ok, Db}, Db};
-handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
-            waiting_delayed_commit=Delay}=Db) ->
-    % Idle means no referrers. Unless in the middle of a compaction file switch,
-    % there are always at least 2 referrers, couch_db_updater and us.
-    {reply, (Delay == nil) andalso (Compact == nil) andalso (couch_ref_counter:count(RefCntr) == 2), Db};
-handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
-    #db{fd_ref_counter=NewRefCntr}=NewDb,
-    case NewRefCntr =:= OldRefCntr of
-    true -> ok;
-    false ->
-        couch_ref_counter:add(NewRefCntr),
-        couch_ref_counter:drop(OldRefCntr)
-    end,
-    {reply, ok, NewDb};
-handle_call(get_db, _From, Db) ->
-    {reply, {ok, Db}, Db}.
-
-
-handle_cast(Msg, Db) ->
-    ?LOG_ERROR("Bad cast message received for db ~s: ~p", [Db#db.name, Msg]),
-    exit({error, Msg}).
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info({'EXIT', _Pid, normal}, Db) ->
-    {noreply, Db};
-handle_info({'EXIT', _Pid, Reason}, Server) ->
-    {stop, Reason, Server};
-handle_info(Msg, Db) ->
-    ?LOG_ERROR("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
-    exit({error, Msg}).
-
-
-%%% Internal functions %%%
-open_doc_revs_int(Db, IdRevs, Options) ->
-    Ids = [Id || {Id, _Revs} <- IdRevs],
-    LookupResults = get_full_doc_infos(Db, Ids),
-    lists:zipwith(
-        fun({Id, Revs}, Lookup) ->
-            case Lookup of
-            {ok, #full_doc_info{rev_tree=RevTree}} ->
-                {FoundRevs, MissingRevs} =
-                case Revs of
-                all ->
-                    {couch_key_tree:get_all_leafs(RevTree), []};
-                _ ->
-                    case lists:member(latest, Options) of
-                    true ->
-                        couch_key_tree:get_key_leafs(RevTree, Revs);
-                    false ->
-                        couch_key_tree:get(RevTree, Revs)
-                    end
-                end,
-                FoundResults =
-                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
-                    case Value of
-                    ?REV_MISSING ->
-                        % we have the rev in our list but know nothing about it
-                        {{not_found, missing}, {Pos, Rev}};
-                    RevValue ->
-                        IsDeleted = element(1, RevValue),
-                        SummaryPtr = element(2, RevValue),
-                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
-                    end
-                end, FoundRevs),
-                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
-                {ok, Results};
-            not_found when Revs == all ->
-                {ok, []};
-            not_found ->
-                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
-            end
-        end,
-        IdRevs, LookupResults).
-
-open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
-    case couch_btree:lookup(local_btree(Db), [Id]) of
-    [{ok, {_, {Rev, BodyData}}}] ->
-        Doc = #doc{id=Id, revs={0, [?l2b(integer_to_list(Rev))]}, body=BodyData},
-        apply_open_options({ok, Doc}, Options);
-    [not_found] ->
-        {not_found, missing}
-    end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
-    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
-    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
-    apply_open_options(
-       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
-        DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
-    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
-    apply_open_options(
-        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
-open_doc_int(Db, Id, Options) ->
-    case get_full_doc_info(Db, Id) of
-    {ok, FullDocInfo} ->
-        open_doc_int(Db, FullDocInfo, Options);
-    not_found ->
-        {not_found, missing}
-    end.
-
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
-    case lists:member(revs_info, Options) of
-    false -> [];
-    true ->
-        {[{Pos, RevPath}],[]} =
-            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
-        [{revs_info, Pos, lists:map(
-            fun({Rev1, ?REV_MISSING}) ->
-                {Rev1, missing};
-            ({Rev1, RevValue}) ->
-                case element(1, RevValue) of
-                true ->
-                    {Rev1, deleted};
-                false ->
-                    {Rev1, available}
-                end
-            end, RevPath)}]
-    end ++
-    case lists:member(conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
-        [] -> [];
-        ConflictRevs -> [{conflicts, ConflictRevs}]
-        end
-    end ++
-    case lists:member(deleted_conflicts, Options) of
-    false -> [];
-    true ->
-        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
-        [] -> [];
-        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
-        end
-    end ++
-    case lists:member(local_seq, Options) of
-    false -> [];
-    true -> [{local_seq, Seq}]
-    end.
-
-read_doc(#db{fd=Fd}, Pos) ->
-    couch_file:pread_term(Fd, Pos).
-
-
-make_doc(#db{updater_fd = Fd} = Db, Id, Deleted, Bp, RevisionPath) ->
-    {BodyData, Atts} =
-    case Bp of
-    nil ->
-        {[], []};
-    _ ->
-        {ok, {BodyData0, Atts00}} = read_doc(Db, Bp),
-        Atts0 = case Atts00 of
-        _ when is_binary(Atts00) ->
-            couch_compress:decompress(Atts00);
-        _ when is_list(Atts00) ->
-            % pre 1.2 format
-            Atts00
-        end,
-        {BodyData0,
-            lists:map(
-                fun({Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=DiskLen,
-                        md5=Md5,
-                        revpos=RevPos,
-                        data={Fd,Sp},
-                        encoding=
-                            case Enc of
-                            true ->
-                                % 0110 UPGRADE CODE
-                                gzip;
-                            false ->
-                                % 0110 UPGRADE CODE
-                                identity;
-                            _ ->
-                                Enc
-                            end
-                    };
-                ({Name,Type,Sp,AttLen,RevPos,Md5}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=AttLen,
-                        md5=Md5,
-                        revpos=RevPos,
-                        data={Fd,Sp}};
-                ({Name,{Type,Sp,AttLen}}) ->
-                    #att{name=Name,
-                        type=Type,
-                        att_len=AttLen,
-                        disk_len=AttLen,
-                        md5= <<>>,
-                        revpos=0,
-                        data={Fd,Sp}}
-                end, Atts0)}
-    end,
-    Doc = #doc{
-        id = Id,
-        revs = RevisionPath,
-        body = BodyData,
-        atts = Atts,
-        deleted = Deleted
-    },
-    after_doc_read(Db, Doc).
-
-
-after_doc_read(#db{after_doc_read = nil}, Doc) ->
-    Doc;
-after_doc_read(#db{after_doc_read = Fun} = Db, Doc) ->
-    Fun(couch_doc:with_ejson_body(Doc), Db).
-
-
-increment_stat(#db{options = Options}, Stat) ->
-    case lists:member(sys_db, Options) of
-    true ->
-        ok;
-    false ->
-        couch_stats_collector:increment(Stat)
-    end.
-
-local_btree(#db{local_docs_btree = BTree, fd = ReaderFd}) ->
-    BTree#btree{fd = ReaderFd}.
-
-by_seq_btree(#db{docinfo_by_seq_btree = BTree, fd = ReaderFd}) ->
-    BTree#btree{fd = ReaderFd}.
-
-by_id_btree(#db{fulldocinfo_by_id_btree = BTree, fd = ReaderFd}) ->
-    BTree#btree{fd = ReaderFd}.
-
-skip_deleted(FoldFun) ->
-    fun
-        (visit, KV, Reds, Acc) ->
-            FoldFun(KV, Reds, Acc);
-        (traverse, _LK, {Undeleted, _Del, _Size}, Acc) when Undeleted == 0 ->
-            {skip, Acc};
-        (traverse, _, _, Acc) ->
-            {ok, Acc}
-    end.
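
For reference, a minimal sketch of driving the module's exported API, assuming
it is registered as couch_db and opened with an admin user_ctx (the export
list sits outside this hunk; names and values below are illustrative, not
part of the commit):

    %% sketch: a document + security round trip against the API above
    AdminCtx = #user_ctx{roles = [<<"_admin">>]},   % record from couch_db.hrl
    {ok, Db} = couch_db:open(<<"testdb">>, [{user_ctx, AdminCtx}]),
    Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"mydoc">>}]}),
    {ok, {_Pos, _RevId}} = couch_db:update_doc(Db, Doc, []),
    SecObj = {[{<<"admins">>,
        {[{<<"names">>, [<<"alice">>]}, {<<"roles">>, []}]}}]},
    ok = couch_db:set_security(Db, SecObj),
    {_SecProps} = couch_db:get_security(Db),
    ok = couch_db:close(Db).

The comments inside flush_att/2 above also pin down the streaming-attachment
protocol: when att_len is undefined, the att data Fun is called as
Fun(MaxChunkSize, WriterFun) and must invoke WriterFun once per chunk, ending
with a {0, Footers} call. A sketch of a conforming data fun (illustrative
only; it streams a binary and returns the writer's final result):

    %% sketch: an att data fun obeying the WriterFun contract above
    StreamFun = fun(MaxChunkSize, WriterFun) ->
        Send = fun(_Send, <<>>, Acc) ->
                    WriterFun({0, <<>>}, Acc);   % Length == 0 on the last call
                  (Send, Bin, Acc) ->
                    Size = erlang:min(MaxChunkSize, byte_size(Bin)),
                    <<Chunk:Size/binary, Rest/binary>> = Bin,
                    Send(Send, Rest, WriterFun({Size, Chunk}, Acc))
               end,
        Send(Send, <<"some attachment bytes">>, ok)
    end.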


[20/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
working release


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/661d4305
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/661d4305
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/661d4305

Branch: refs/heads/import-rcouch
Commit: 661d43051ef9bd0ee6f47dff8d0ec49e3931965f
Parents: bfa5ec5
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 16:14:56 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 priv/stat_descriptions.cfg    | 50 ++++++++++++++++++++++++++++++++++++++
 priv/stat_descriptions.cfg.in | 50 --------------------------------------
 rebar.config.script           | 22 +++++++++++------
 src/couch.app.src             |  4 +--
 4 files changed, 66 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661d4305/priv/stat_descriptions.cfg
----------------------------------------------------------------------
diff --git a/priv/stat_descriptions.cfg b/priv/stat_descriptions.cfg
new file mode 100644
index 0000000..b80d768
--- /dev/null
+++ b/priv/stat_descriptions.cfg
@@ -0,0 +1,50 @@
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+% Style guide for descriptions: Start with a lowercase letter & do not add
+% a trailing full-stop / period
+% Please keep this in alphabetical order
+
+{couchdb, database_writes, "number of times a database was changed"}.
+{couchdb, database_reads, "number of times a document was read from a database"}.
+{couchdb, open_databases, "number of open databases"}.
+{couchdb, open_os_files, "number of file descriptors CouchDB has open"}.
+{couchdb, request_time, "length of a request inside CouchDB without MochiWeb"}.
+{couchdb, auth_cache_hits, "number of authentication cache hits"}.
+{couchdb, auth_cache_misses, "number of authentication cache misses"}.
+
+{httpd, bulk_requests, "number of bulk requests"}.
+{httpd, requests, "number of HTTP requests"}.
+{httpd, temporary_view_reads, "number of temporary view reads"}.
+{httpd, view_reads, "number of view reads"}.
+{httpd, clients_requesting_changes, "number of clients for continuous _changes"}.
+
+{httpd_request_methods, 'COPY', "number of HTTP COPY requests"}.
+{httpd_request_methods, 'DELETE', "number of HTTP DELETE requests"}.
+{httpd_request_methods, 'GET', "number of HTTP GET requests"}.
+{httpd_request_methods, 'HEAD', "number of HTTP HEAD requests"}.
+{httpd_request_methods, 'POST', "number of HTTP POST requests"}.
+{httpd_request_methods, 'PUT', "number of HTTP PUT requests"}.
+
+{httpd_status_codes, '200', "number of HTTP 200 OK responses"}.
+{httpd_status_codes, '201', "number of HTTP 201 Created responses"}.
+{httpd_status_codes, '202', "number of HTTP 202 Accepted responses"}.
+{httpd_status_codes, '301', "number of HTTP 301 Moved Permanently responses"}.
+{httpd_status_codes, '304', "number of HTTP 304 Not Modified responses"}.
+{httpd_status_codes, '400', "number of HTTP 400 Bad Request responses"}.
+{httpd_status_codes, '401', "number of HTTP 401 Unauthorized responses"}.
+{httpd_status_codes, '403', "number of HTTP 403 Forbidden responses"}.
+{httpd_status_codes, '404', "number of HTTP 404 Not Found responses"}.
+{httpd_status_codes, '405', "number of HTTP 405 Method Not Allowed responses"}.
+{httpd_status_codes, '409', "number of HTTP 409 Conflict responses"}.
+{httpd_status_codes, '412', "number of HTTP 412 Precondition Failed responses"}.
+{httpd_status_codes, '500', "number of HTTP 500 Internal Server Error responses"}.
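
Since stat_descriptions.cfg now ships as plain, dot-terminated Erlang terms,
it can be read back directly with file:consult/1. A minimal sketch (the
in-tree consumer is couch's stats machinery, which is not part of this diff):

    %% sketch: reading the descriptions as {Section, Key, Description} tuples
    {ok, Descs} = file:consult(filename:join(code:priv_dir(couch),
                                             "stat_descriptions.cfg")),
    [{couchdb, database_writes, _Desc} | _] = Descs.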

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661d4305/priv/stat_descriptions.cfg.in
----------------------------------------------------------------------
diff --git a/priv/stat_descriptions.cfg.in b/priv/stat_descriptions.cfg.in
deleted file mode 100644
index b80d768..0000000
--- a/priv/stat_descriptions.cfg.in
+++ /dev/null
@@ -1,50 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-%% use this file except in compliance with the License. You may obtain a copy of
-%% the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-%% License for the specific language governing permissions and limitations under
-%% the License.
-
-% Style guide for descriptions: Start with a lowercase letter & do not add
-% a trailing full-stop / period
-% Please keep this in alphabetical order
-
-{couchdb, database_writes, "number of times a database was changed"}.
-{couchdb, database_reads, "number of times a document was read from a database"}.
-{couchdb, open_databases, "number of open databases"}.
-{couchdb, open_os_files, "number of file descriptors CouchDB has open"}.
-{couchdb, request_time, "length of a request inside CouchDB without MochiWeb"}.
-{couchdb, auth_cache_hits, "number of authentication cache hits"}.
-{couchdb, auth_cache_misses, "number of authentication cache misses"}.
-
-{httpd, bulk_requests, "number of bulk requests"}.
-{httpd, requests, "number of HTTP requests"}.
-{httpd, temporary_view_reads, "number of temporary view reads"}.
-{httpd, view_reads, "number of view reads"}.
-{httpd, clients_requesting_changes, "number of clients for continuous _changes"}.
-
-{httpd_request_methods, 'COPY', "number of HTTP COPY requests"}.
-{httpd_request_methods, 'DELETE', "number of HTTP DELETE requests"}.
-{httpd_request_methods, 'GET', "number of HTTP GET requests"}.
-{httpd_request_methods, 'HEAD', "number of HTTP HEAD requests"}.
-{httpd_request_methods, 'POST', "number of HTTP POST requests"}.
-{httpd_request_methods, 'PUT', "number of HTTP PUT requests"}.
-
-{httpd_status_codes, '200', "number of HTTP 200 OK responses"}.
-{httpd_status_codes, '201', "number of HTTP 201 Created responses"}.
-{httpd_status_codes, '202', "number of HTTP 202 Accepted responses"}.
-{httpd_status_codes, '301', "number of HTTP 301 Moved Permanently responses"}.
-{httpd_status_codes, '304', "number of HTTP 304 Not Modified responses"}.
-{httpd_status_codes, '400', "number of HTTP 400 Bad Request responses"}.
-{httpd_status_codes, '401', "number of HTTP 401 Unauthorized responses"}.
-{httpd_status_codes, '403', "number of HTTP 403 Forbidden responses"}.
-{httpd_status_codes, '404', "number of HTTP 404 Not Found responses"}.
-{httpd_status_codes, '405', "number of HTTP 405 Method Not Allowed responses"}.
-{httpd_status_codes, '409', "number of HTTP 409 Conflict responses"}.
-{httpd_status_codes, '412', "number of HTTP 412 Precondition Failed responses"}.
-{httpd_status_codes, '500', "number of HTTP 500 Internal Server Error responses"}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661d4305/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index e8b987c..fb93f97 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -38,21 +38,27 @@ Version = case os:getenv("RELEASE") of
         BaseVersion ++ SecondaryVersion
 end,
 
-Cfg1 = [{package_string, proplists:get_value(vendor_name, Cfg, "")
-                         ++ " " ++ Version },
-        {package_version, Version}] ++ Cfg,
+%% couchjs name
+CouchJSName = proplists:get_value(couchjs_name, Cfg, "couchjs"),
+
+%% build config.h
+ConfigH = [{package_name, proplists:get_value(vendor_name, Cfg, "")},
+           {package_string, proplists:get_value(vendor_name, Cfg, "")
+                            ++ " " ++ Version },
+           {package_bugreport, proplists:get_value(package_bugreport, Cfg, "")},
+           {package_version, Version},
+           {couchjs_name, CouchJSName}],
 
 %% write config.h
-CfgStr = lists:foldl(fun({K, V}, Acc) ->
+ConfigHStr = lists:foldl(fun({K, V}, Acc) ->
             K1 = string:to_upper(atom_to_list(K)),
             case K1 of
                 "VERSION_" ++ _ -> Acc;
                 _ ->
                     ["#define ", K1, " ", $", V, $", $\n | Acc]
             end
-        end, [], Cfg1),
-ok = file:write_file("c_src/couch_js/config.h", iolist_to_binary(CfgStr)),
-
+        end, [], ConfigH),
+ok = file:write_file("c_src/couch_js/config.h", iolist_to_binary(ConfigHStr)),
 
 GetFlag = fun(Name, Default) ->
         case os:getenv(Name) of
@@ -84,7 +90,7 @@ PortEnv = [{port_env, [
             {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1}]},
 
            {port_specs, [
-            {filename:join(["priv", "couchjs"]),
+            {filename:join(["priv", CouchJSName]),
             ["c_src/couch_js/*.c"]}]}
 ],
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/661d4305/src/couch.app.src
----------------------------------------------------------------------
diff --git a/src/couch.app.src b/src/couch.app.src
index 2d14148..53cee43 100644
--- a/src/couch.app.src
+++ b/src/couch.app.src
@@ -1,6 +1,6 @@
 {application, couch, [
-    {description, "@package_name@"},
-    {vsn, "@version@"},
+    {description, "Apache CouchDB"},
+    {vsn, "1.6.1"},
     {registered, [
         couch_config,
         couch_db_update,


[29/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
add release build


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/bfa5ec51
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/bfa5ec51
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/bfa5ec51

Branch: refs/heads/import-rcouch
Commit: bfa5ec51d1860fa4900c1c66af0f138db8bbed6a
Parents: 64779a3
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 15:18:21 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/couch_js/help.h | 2 +-
 rebar.config.script   | 6 ++----
 2 files changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/bfa5ec51/c_src/couch_js/help.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/help.h b/c_src/couch_js/help.h
index f4ddb24..81bae4d 100644
--- a/c_src/couch_js/help.h
+++ b/c_src/couch_js/help.h
@@ -73,7 +73,7 @@ static const char USAGE_TEMPLATE[] =
             USAGE_TEMPLATE,                     \
             basename,                           \
             basename,                           \
-            PACKAGE_NAME,                       \
+            VENDOR_NAME,                       \
             basename,                           \
             PACKAGE_BUGREPORT)
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/bfa5ec51/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index df8c48e..e8b987c 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -14,14 +14,13 @@
 %% the License.
 
 
-Cfg = case file:consult("../../vars.config") of
+Cfg = case file:consult("../../pkg.vars.config") of
           {ok, Terms} ->
               Terms;
           _Err ->
               []
       end,
 
-
 %% get version infos
 MajorVersion = integer_to_list(proplists:get_value(version_major, Cfg, 0)),
 MinorVersion = integer_to_list(proplists:get_value(version_minor, Cfg, 0)),
@@ -39,7 +38,7 @@ Version = case os:getenv("RELEASE") of
         BaseVersion ++ SecondaryVersion
 end,
 
-Cfg1 = [{package_string, proplists:get_value(package_name, Cfg, "")
+Cfg1 = [{package_string, proplists:get_value(vendor_name, Cfg, "")
                          ++ " " ++ Version },
         {package_version, Version}] ++ Cfg,
 
@@ -55,7 +54,6 @@ CfgStr = lists:foldl(fun({K, V}, Acc) ->
 ok = file:write_file("c_src/couch_js/config.h", iolist_to_binary(CfgStr)),
 
 
-
 GetFlag = fun(Name, Default) ->
         case os:getenv(Name) of
             false -> Default;


[39/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
add `make test` target. All Erlang tests pass.

Note: from time to time there is a timing issue in the 200- test that
needs to be fixed. It is most probably due to the way the indexer is
supervised.


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/c7c431a8
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/c7c431a8
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/c7c431a8

Branch: refs/heads/import-rcouch
Commit: c7c431a85121b9022b8c5a3aaf471749f72cd784
Parents: 9e429fd
Author: benoitc <be...@apache.org>
Authored: Thu Jan 9 16:24:32 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch.erl            |  2 ++
 src/couch_app.erl        |  2 +-
 src/couch_os_daemons.erl | 11 ++++++-----
 src/couch_server_sup.erl |  2 +-
 4 files changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c7c431a8/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch.erl b/src/couch.erl
index 92c2b74..8526e68 100644
--- a/src/couch.erl
+++ b/src/couch.erl
@@ -43,6 +43,8 @@ release_version() ->
     end.
 
 start() ->
+    application:load(couch),
+    couch_util:start_app_deps(couch),
     application:start(couch).
 
 stop() ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c7c431a8/src/couch_app.erl
----------------------------------------------------------------------
diff --git a/src/couch_app.erl b/src/couch_app.erl
index a8d215e..414a5c9 100644
--- a/src/couch_app.erl
+++ b/src/couch_app.erl
@@ -18,7 +18,7 @@
 
 -export([start/2, stop/1]).
 
--define(CONF_FILES, ["couch.ini", "couch_httpd.ini", "local.ini"]).
+-define(CONF_FILES, ["couch.ini", "local.ini"]).
 
 start(_Type, _Args) ->
     couch_util:start_app_deps(couch),

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c7c431a8/src/couch_os_daemons.erl
----------------------------------------------------------------------
diff --git a/src/couch_os_daemons.erl b/src/couch_os_daemons.erl
index cac031a..75a14d8 100644
--- a/src/couch_os_daemons.erl
+++ b/src/couch_os_daemons.erl
@@ -164,6 +164,7 @@ handle_info({Port, {data, {eol, Data}}}, Table) ->
         _Else ->
             D2 = case (catch ?JSON_DECODE(Line)) of
                 {invalid_json, Rejected} ->
+                    io:format("fuckk so ~p~n", [Line]),
                     ?LOG_ERROR("Ignoring OS daemon request: ~p", [Rejected]),
                     D;
                 JSON ->
@@ -271,12 +272,12 @@ handle_log_message(Name, Msg, Level) ->
 reload_daemons(Table) ->
     % List of daemons we want to have running.
     Configured = lists:sort(couch_config:get("os_daemons")),
-    
+
     % Remove records for daemons that were halted.
     MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
     Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
     ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
-    
+
     % Stop daemons that are running
     % Start newly configured daemons
     MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
@@ -304,7 +305,7 @@ restart_daemons(Table, Sect, Key, Port) ->
             ok
     end,
     restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
-    
+
 
 stop_os_daemons(_Table, []) ->
     ok;
@@ -320,14 +321,14 @@ stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
             true = ets:insert(Table, D2)
     end,
     stop_os_daemons(Table, Rest).
-    
+
 boot_os_daemons(_Table, []) ->
     ok;
 boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
     {ok, Port} = start_port(Cmd),
     true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
     boot_os_daemons(Table, Rest).
-    
+
 % Elements unique to the configured set need to be booted.
 find_to_boot([], _Rest, Acc) ->
     % Nothing else configured.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/c7c431a8/src/couch_server_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_server_sup.erl b/src/couch_server_sup.erl
index be3c3a3..379b384 100644
--- a/src/couch_server_sup.erl
+++ b/src/couch_server_sup.erl
@@ -114,7 +114,7 @@ start_server(IniFiles) ->
         end
     end
     || Uri <- Uris],
-    case couch_config:get("couchdb", "uri_file", null) of 
+    case couch_config:get("couchdb", "uri_file", null) of
     null -> ok;
     UriFile ->
         Lines = [begin case Uri of


[18/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
fix build on Mac OS X


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/370ab883
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/370ab883
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/370ab883

Branch: refs/heads/import-rcouch
Commit: 370ab88325a3786f30d9c6a3793393dad729d8cd
Parents: 4c23323
Author: benoitc <bc...@gmail.com>
Authored: Tue Jan 7 22:42:39 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/couch_js/http.c | 2 +-
 c_src/couch_js/main.c | 2 +-
 c_src/couch_js/utf8.c | 2 +-
 c_src/couch_js/util.c | 2 +-
 c_src/couch_js/util.h | 2 +-
 rebar.config.script   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/c_src/couch_js/http.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/http.c b/c_src/couch_js/http.c
index 3baa59d..da29b5f 100644
--- a/c_src/couch_js/http.c
+++ b/c_src/couch_js/http.c
@@ -16,7 +16,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
-#include <jsapi.h>
+#include <js/jsapi.h>
 #include "utf8.h"
 #include "util.h"
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/c_src/couch_js/main.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/main.c b/c_src/couch_js/main.c
index a0fc143..d4f4ef0 100644
--- a/c_src/couch_js/main.c
+++ b/c_src/couch_js/main.c
@@ -14,7 +14,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#include <jsapi.h>
+#include <js/jsapi.h>
 #include "http.h"
 #include "utf8.h"
 #include "util.h"

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/c_src/couch_js/utf8.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/utf8.c b/c_src/couch_js/utf8.c
index 2b3735a..75a4315 100644
--- a/c_src/couch_js/utf8.c
+++ b/c_src/couch_js/utf8.c
@@ -10,7 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include <jsapi.h>
+#include <js/jsapi.h>
 
 static int
 enc_char(uint8 *utf8Buffer, uint32 ucs4Char)

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/c_src/couch_js/util.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.c b/c_src/couch_js/util.c
index 9b46ceb..ef984d1 100644
--- a/c_src/couch_js/util.c
+++ b/c_src/couch_js/util.c
@@ -13,7 +13,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <jsapi.h>
+#include <js/jsapi.h>
 
 #include "help.h"
 #include "util.h"

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/c_src/couch_js/util.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.h b/c_src/couch_js/util.h
index 65a2a06..a1e2b6c 100644
--- a/c_src/couch_js/util.h
+++ b/c_src/couch_js/util.h
@@ -13,7 +13,7 @@
 #ifndef COUCHJS_UTIL_H
 #define COUCHJS_UTIL_H
 
-#include <jsapi.h>
+#include <js/jsapi.h>
 
 typedef struct {
     int          use_http;

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/370ab883/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index ca79b39..ab4501d 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -69,7 +69,7 @@ GetFlag = fun(Name, Default) ->
 
 
 JSLIBS = GetFlag("JS_LIBS", "-lmozjs185"),
-JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
+JSCFLAGS = GetFlag("JS_CFLAGS", "$CFLAGS"),
 
 {CFLAGS, LDFLAGS}  = case os:type() of
     {unix, darwin} ->


[14/41] initial move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_external_manager.erl
----------------------------------------------------------------------
diff --git a/couch_external_manager.erl b/couch_external_manager.erl
deleted file mode 100644
index 0c66ef8..0000000
--- a/couch_external_manager.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_external_manager).
--behaviour(gen_server).
-
--export([start_link/0, execute/2, config_change/2]).
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]).
-
--include("couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link({local, couch_external_manager},
-        couch_external_manager, [], []).
-
-execute(UrlName, JsonReq) ->
-    Pid = gen_server:call(couch_external_manager, {get, UrlName}),
-    case Pid of
-    {error, Reason} ->
-        Reason;
-    _ ->
-        couch_external_server:execute(Pid, JsonReq)
-    end.
-
-config_change("external", UrlName) ->
-    gen_server:call(couch_external_manager, {config, UrlName}).
-
-% gen_server API
-
-init([]) ->
-    process_flag(trap_exit, true),
-    Handlers = ets:new(couch_external_manager_handlers, [set, private]),
-    couch_config:register(fun ?MODULE:config_change/2),
-    {ok, Handlers}.
-
-terminate(_Reason, Handlers) ->
-    ets:foldl(fun({_UrlName, Pid}, nil) ->
-        couch_external_server:stop(Pid),
-        nil
-    end, nil, Handlers),
-    ok.
-
-handle_call({get, UrlName}, _From, Handlers) ->
-    case ets:lookup(Handlers, UrlName) of
-    [] ->
-        case couch_config:get("external", UrlName, nil) of
-        nil ->
-            Msg = lists:flatten(
-                io_lib:format("No server configured for ~p.", [UrlName])),
-            {reply, {error, {unknown_external_server, ?l2b(Msg)}}, Handlers};
-        Command ->
-            {ok, NewPid} = couch_external_server:start_link(UrlName, Command),
-            true = ets:insert(Handlers, {UrlName, NewPid}),
-            {reply, NewPid, Handlers}
-        end;
-    [{UrlName, Pid}] ->
-        {reply, Pid, Handlers}
-    end;
-handle_call({config, UrlName}, _From, Handlers) ->
-    % A newly added handler and a handler that had its command
-    % changed are treated exactly the same.
-
-    % Shutdown the old handler.
-    case ets:lookup(Handlers, UrlName) of
-    [{UrlName, Pid}] ->
-        couch_external_server:stop(Pid);
-    [] ->
-        ok
-    end,
-    % Wait for next request to boot the handler.
-    {reply, ok, Handlers}.
-
-handle_cast(_Whatever, State) ->
-    {noreply, State}.
-
-handle_info({'EXIT', Pid, normal}, Handlers) ->
-    ?LOG_INFO("EXTERNAL: Server ~p terminated normally", [Pid]),
-    % The process terminated normally without us asking. Remove Pid from the
-    % handlers table so we don't attempt to reuse it.
-    ets:match_delete(Handlers, {'_', Pid}),
-    {noreply, Handlers};
-
-handle_info({'EXIT', Pid, Reason}, Handlers) ->
-    ?LOG_INFO("EXTERNAL: Server ~p died. (reason: ~p)", [Pid, Reason]),
-    % Remove Pid from the handlers table so we don't try closing
-    % it a second time in terminate/2.
-    ets:match_delete(Handlers, {'_', Pid}),
-    {stop, normal, Handlers}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-

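The deleted manager above is a lazy start-or-reuse cache: the first execute/2 call for a configured URL name boots a couch_external_server child and stores its Pid in a private ETS table, later calls reuse that Pid, and a config change merely stops the old process so the next request restarts it with the new command. A hedged usage sketch follows; the "_fti" key and the wrapper function are hypothetical, standing in for any entry under the [external] config section:

    % Hypothetical caller; "_fti" is an illustrative [external] config key.
    query_external(JsonReq) ->
        case couch_external_manager:execute("_fti", JsonReq) of
            {unknown_external_server, Msg} ->
                {error, Msg};    % no command configured for this key
            JsonResp ->
                JsonResp         % EJSON decoded from the OS process reply
        end.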
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_external_server.erl
----------------------------------------------------------------------
diff --git a/couch_external_server.erl b/couch_external_server.erl
deleted file mode 100644
index b52c7ff..0000000
--- a/couch_external_server.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_external_server).
--behaviour(gen_server).
-
--export([start_link/2, stop/1, execute/2]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-
--include("couch_db.hrl").
-
-% External API
-
-start_link(Name, Command) ->
-    gen_server:start_link(couch_external_server, [Name, Command], []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-execute(Pid, JsonReq) ->
-    {json, Json} = gen_server:call(Pid, {execute, JsonReq}, infinity),
-    ?JSON_DECODE(Json).
-
-% Gen Server Handlers
-
-init([Name, Command]) ->
-    ?LOG_INFO("EXTERNAL: Starting process for: ~s", [Name]),
-    ?LOG_INFO("COMMAND: ~s", [Command]),
-    process_flag(trap_exit, true),
-    Timeout = list_to_integer(couch_config:get("couchdb", "os_process_timeout",
-        "5000")),
-    {ok, Pid} = couch_os_process:start_link(Command, [{timeout, Timeout}]),
-    couch_config:register(fun("couchdb", "os_process_timeout", NewTimeout) ->
-        couch_os_process:set_timeout(Pid, list_to_integer(NewTimeout))
-    end),
-    {ok, {Name, Command, Pid}}.
-
-terminate(_Reason, {_Name, _Command, Pid}) ->
-    couch_os_process:stop(Pid),
-    ok.
-
-handle_call({execute, JsonReq}, _From, {Name, Command, Pid}) ->
-    {reply, couch_os_process:prompt(Pid, JsonReq), {Name, Command, Pid}}.
-
-handle_info({'EXIT', _Pid, normal}, State) ->
-    {noreply, State};
-handle_info({'EXIT', Pid, Reason}, {Name, Command, Pid}) ->
-    ?LOG_INFO("EXTERNAL: Process for ~s exiting. (reason: ~w)", [Name, Reason]),
-    {stop, Reason, {Name, Command, Pid}}.
-
-handle_cast(stop, {Name, Command, Pid}) ->
-    ?LOG_INFO("EXTERNAL: Shutting down ~s", [Name]),
-    exit(Pid, normal),
-    {stop, normal, {Name, Command, Pid}};
-handle_cast(_Whatever, State) ->
-    {noreply, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-

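couch_external_server, also removed here, is a thin gen_server around couch_os_process: execute/2 forwards the EJSON request and decodes the {json, Body} reply, while init/1 registers a config callback so a new "os_process_timeout" value reaches the running OS process. A minimal sketch of driving one directly, bypassing the manager (the name, command path, and request shape are illustrative only):

    % Sketch: start, query, and stop an external server by hand.
    {ok, Pid} = couch_external_server:start_link("demo", "/usr/local/bin/demo-handler"),
    JsonResp = couch_external_server:execute(Pid, {[{<<"path">>, [<<"db">>, <<"_demo">>]}]}),
    ok = couch_external_server:stop(Pid).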
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_file.erl
----------------------------------------------------------------------
diff --git a/couch_file.erl b/couch_file.erl
deleted file mode 100644
index ee5dafb..0000000
--- a/couch_file.erl
+++ /dev/null
@@ -1,532 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_file).
--behaviour(gen_server).
-
--include("couch_db.hrl").
-
--define(SIZE_BLOCK, 4096).
-
--record(file, {
-    fd,
-    eof = 0
-}).
-
-% public API
--export([open/1, open/2, close/1, bytes/1, sync/1, truncate/2]).
--export([pread_term/2, pread_iolist/2, pread_binary/2]).
--export([append_binary/2, append_binary_md5/2]).
--export([append_raw_chunk/2, assemble_file_chunk/1, assemble_file_chunk/2]).
--export([append_term/2, append_term/3, append_term_md5/2, append_term_md5/3]).
--export([write_header/2, read_header/1]).
--export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
-
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-%%----------------------------------------------------------------------
-%% Args:   Valid Options are [create] and [create,overwrite].
-%%  Files are opened in read/write mode.
-%% Returns: On success, {ok, Fd}
-%%  or {error, Reason} if the file could not be opened.
-%%----------------------------------------------------------------------
-
-open(Filepath) ->
-    open(Filepath, []).
-
-open(Filepath, Options) ->
-    case gen_server:start_link(couch_file,
-            {Filepath, Options, self(), Ref = make_ref()}, []) of
-    {ok, Fd} ->
-        {ok, Fd};
-    ignore ->
-        % get the error
-        receive
-        {Ref, Pid, {error, Reason} = Error} ->
-            case process_info(self(), trap_exit) of
-            {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
-            {trap_exit, false} -> ok
-            end,
-            case {lists:member(nologifmissing, Options), Reason} of
-            {true, enoent} -> ok;
-            _ ->
-            ?LOG_ERROR("Could not open file ~s: ~s",
-                [Filepath, file:format_error(Reason)])
-            end,
-            Error
-        end;
-    Error ->
-        % We can't say much here, because it could be any kind of error.
-        % Just let it bubble up; an encapsulating subcomponent can perhaps
-        % be more informative. It will likely appear in the SASL log, anyway.
-        Error
-    end.
-
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang term to the end of the file.
-%% Args:    Erlang term to serialize and append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to
-%%  the beginning of the serialized term. Use pread_term to read the term
-%%  back.
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_term(Fd, Term) ->
-    append_term(Fd, Term, []).
-
-append_term(Fd, Term, Options) ->
-    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
-    append_binary(Fd, couch_compress:compress(Term, Comp)).
-
-append_term_md5(Fd, Term) ->
-    append_term_md5(Fd, Term, []).
-
-append_term_md5(Fd, Term, Options) ->
-    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
-    append_binary_md5(Fd, couch_compress:compress(Term, Comp)).
-
-%%----------------------------------------------------------------------
-%% Purpose: To append an Erlang binary to the end of the file.
-%% Args:    Erlang binary to append to the file.
-%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to the
-%%  beginning of the serialized binary. Use pread_binary to read it back.
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-append_binary(Fd, Bin) ->
-    gen_server:call(Fd, {append_bin, assemble_file_chunk(Bin)}, infinity).
-    
-append_binary_md5(Fd, Bin) ->
-    gen_server:call(Fd,
-        {append_bin, assemble_file_chunk(Bin, couch_util:md5(Bin))}, infinity).
-
-append_raw_chunk(Fd, Chunk) ->
-    gen_server:call(Fd, {append_bin, Chunk}, infinity).
-
-
-assemble_file_chunk(Bin) ->
-    [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
-
-assemble_file_chunk(Bin, Md5) ->
-    [<<1:1/integer, (iolist_size(Bin)):31/integer>>, Md5, Bin].
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a term from a file that was written with append_term
-%% Args:    Pos, the offset into the file where the term is serialized.
-%% Returns: {ok, Term}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-
-pread_term(Fd, Pos) ->
-    {ok, Bin} = pread_binary(Fd, Pos),
-    {ok, couch_compress:decompress(Bin)}.
-
-
-%%----------------------------------------------------------------------
-%% Purpose: Reads a binary from a file that was written with append_binary
-%% Args:    Pos, the offset into the file where the binary is stored.
-%% Returns: {ok, Bin}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-pread_binary(Fd, Pos) ->
-    {ok, L} = pread_iolist(Fd, Pos),
-    {ok, iolist_to_binary(L)}.
-
-
-pread_iolist(Fd, Pos) ->
-    case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of
-    {ok, IoList, <<>>} ->
-        {ok, IoList};
-    {ok, IoList, Md5} ->
-        case couch_util:md5(IoList) of
-        Md5 ->
-            {ok, IoList};
-        _ ->
-            exit({file_corruption, <<"file corruption">>})
-        end;
-    Error ->
-        Error
-    end.
-
-%%----------------------------------------------------------------------
-%% Purpose: The length of a file, in bytes.
-%% Returns: {ok, Bytes}
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-% length in bytes
-bytes(Fd) ->
-    gen_server:call(Fd, bytes, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Truncate a file to the given position, in bytes.
-%% Returns: ok
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-truncate(Fd, Pos) ->
-    gen_server:call(Fd, {truncate, Pos}, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Ensure all bytes written to the file are flushed to disk.
-%% Returns: ok
-%%  or {error, Reason}.
-%%----------------------------------------------------------------------
-
-sync(Filepath) when is_list(Filepath) ->
-    {ok, Fd} = file:open(Filepath, [append, raw]),
-    try ok = file:sync(Fd) after ok = file:close(Fd) end;
-sync(Fd) ->
-    gen_server:call(Fd, sync, infinity).
-
-%%----------------------------------------------------------------------
-%% Purpose: Close the file.
-%% Returns: ok
-%%----------------------------------------------------------------------
-close(Fd) ->
-    couch_util:shutdown_sync(Fd).
-
-
-delete(RootDir, Filepath) ->
-    delete(RootDir, Filepath, true).
-
-
-delete(RootDir, Filepath, Async) ->
-    DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
-    case file:rename(Filepath, DelFile) of
-    ok ->
-        if (Async) ->
-            spawn(file, delete, [DelFile]),
-            ok;
-        true ->
-            file:delete(DelFile)
-        end;
-    Error ->
-        Error
-    end.
-
-
-nuke_dir(RootDelDir, Dir) ->
-    FoldFun = fun(File) ->
-        Path = Dir ++ "/" ++ File,
-        case filelib:is_dir(Path) of
-            true ->
-                ok = nuke_dir(RootDelDir, Path),
-                file:del_dir(Path);
-            false ->
-                delete(RootDelDir, Path, false)
-        end
-    end,
-    case file:list_dir(Dir) of
-        {ok, Files} ->
-            lists:foreach(FoldFun, Files),
-            ok = file:del_dir(Dir);
-        {error, enoent} ->
-            ok
-    end.
-
-
-init_delete_dir(RootDir) ->
-    Dir = filename:join(RootDir,".delete"),
-    % note: ensure_dir requires an actual filename component, which is the
-    % reason for "foo".
-    filelib:ensure_dir(filename:join(Dir,"foo")),
-    filelib:fold_files(Dir, ".*", true,
-        fun(Filename, _) ->
-            ok = file:delete(Filename)
-        end, ok).
-
-
-read_header(Fd) ->
-    case gen_server:call(Fd, find_header, infinity) of
-    {ok, Bin} ->
-        {ok, binary_to_term(Bin)};
-    Else ->
-        Else
-    end.
-
-write_header(Fd, Data) ->
-    Bin = term_to_binary(Data),
-    Md5 = couch_util:md5(Bin),
-    % now we assemble the final header binary and write to disk
-    FinalBin = <<Md5/binary, Bin/binary>>,
-    gen_server:call(Fd, {write_header, FinalBin}, infinity).
-
-
-
-
-init_status_error(ReturnPid, Ref, Error) ->
-    ReturnPid ! {Ref, self(), Error},
-    ignore.
-
-% server functions
-
-init({Filepath, Options, ReturnPid, Ref}) ->
-    process_flag(trap_exit, true),
-    OpenOptions = file_open_options(Options),
-    case lists:member(create, Options) of
-    true ->
-        filelib:ensure_dir(Filepath),
-        case file:open(Filepath, OpenOptions) of
-        {ok, Fd} ->
-            {ok, Length} = file:position(Fd, eof),
-            case Length > 0 of
-            true ->
-                % this means the file already exists and has data.
-                % FYI: We don't differentiate between empty files and non-existent
-                % files here.
-                case lists:member(overwrite, Options) of
-                true ->
-                    {ok, 0} = file:position(Fd, 0),
-                    ok = file:truncate(Fd),
-                    ok = file:sync(Fd),
-                    maybe_track_open_os_files(Options),
-                    {ok, #file{fd=Fd}};
-                false ->
-                    ok = file:close(Fd),
-                    init_status_error(ReturnPid, Ref, {error, eexist})
-                end;
-            false ->
-                maybe_track_open_os_files(Options),
-                {ok, #file{fd=Fd}}
-            end;
-        Error ->
-            init_status_error(ReturnPid, Ref, Error)
-        end;
-    false ->
-        % open in read mode first, so we don't create the file if it doesn't exist.
-        case file:open(Filepath, [read, raw]) of
-        {ok, Fd_Read} ->
-            {ok, Fd} = file:open(Filepath, OpenOptions),
-            ok = file:close(Fd_Read),
-            maybe_track_open_os_files(Options),
-            {ok, Eof} = file:position(Fd, eof),
-            {ok, #file{fd=Fd, eof=Eof}};
-        Error ->
-            init_status_error(ReturnPid, Ref, Error)
-        end
-    end.
-
-file_open_options(Options) ->
-    [read, raw, binary] ++ case lists:member(read_only, Options) of
-    true ->
-        [];
-    false ->
-        [append]
-    end.
-
-maybe_track_open_os_files(FileOptions) ->
-    case lists:member(sys_db, FileOptions) of
-    true ->
-        ok;
-    false ->
-        couch_stats_collector:track_process_count({couchdb, open_os_files})
-    end.
-
-terminate(_Reason, #file{fd = Fd}) ->
-    ok = file:close(Fd).
-
-
-handle_call({pread_iolist, Pos}, _From, File) ->
-    {RawData, NextPos} = try
-        % up to 8KB of read ahead
-        read_raw_iolist_int(File, Pos, 2 * ?SIZE_BLOCK - (Pos rem ?SIZE_BLOCK))
-    catch
-    _:_ ->
-        read_raw_iolist_int(File, Pos, 4)
-    end,
-    <<Prefix:1/integer, Len:31/integer, RestRawData/binary>> =
-        iolist_to_binary(RawData),
-    case Prefix of
-    1 ->
-        {Md5, IoList} = extract_md5(
-            maybe_read_more_iolist(RestRawData, 16 + Len, NextPos, File)),
-        {reply, {ok, IoList, Md5}, File};
-    0 ->
-        IoList = maybe_read_more_iolist(RestRawData, Len, NextPos, File),
-        {reply, {ok, IoList, <<>>}, File}
-    end;
-
-handle_call(bytes, _From, #file{fd = Fd} = File) ->
-    {reply, file:position(Fd, eof), File};
-
-handle_call(sync, _From, #file{fd=Fd}=File) ->
-    {reply, file:sync(Fd), File};
-
-handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
-    {ok, Pos} = file:position(Fd, Pos),
-    case file:truncate(Fd) of
-    ok ->
-        {reply, ok, File#file{eof = Pos}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
-    Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
-    Size = iolist_size(Blocks),
-    case file:write(Fd, Blocks) of
-    ok ->
-        {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
-    BinSize = byte_size(Bin),
-    case Pos rem ?SIZE_BLOCK of
-    0 ->
-        Padding = <<>>;
-    BlockOffset ->
-        Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
-    end,
-    FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
-    case file:write(Fd, FinalBin) of
-    ok ->
-        {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
-    Error ->
-        {reply, Error, File}
-    end;
-
-handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
-    {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-
-handle_cast(close, Fd) ->
-    {stop,normal,Fd}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-handle_info({'EXIT', _, normal}, Fd) ->
-    {noreply, Fd};
-handle_info({'EXIT', _, Reason}, Fd) ->
-    {stop, Reason, Fd}.
-
-
-find_header(_Fd, -1) ->
-    no_valid_header;
-find_header(Fd, Block) ->
-    case (catch load_header(Fd, Block)) of
-    {ok, Bin} ->
-        {ok, Bin};
-    _Error ->
-        find_header(Fd, Block -1)
-    end.
-
-load_header(Fd, Block) ->
-    {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
-        file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
-    TotalBytes = calculate_total_read_len(5, HeaderLen),
-    case TotalBytes > byte_size(RestBlock) of
-    false ->
-        <<RawBin:TotalBytes/binary, _/binary>> = RestBlock;
-    true ->
-        {ok, Missing} = file:pread(
-            Fd, (Block * ?SIZE_BLOCK) + 5 + byte_size(RestBlock),
-            TotalBytes - byte_size(RestBlock)),
-        RawBin = <<RestBlock/binary, Missing/binary>>
-    end,
-    <<Md5Sig:16/binary, HeaderBin/binary>> =
-        iolist_to_binary(remove_block_prefixes(5, RawBin)),
-    Md5Sig = couch_util:md5(HeaderBin),
-    {ok, HeaderBin}.
-
-maybe_read_more_iolist(Buffer, DataSize, _, _)
-    when DataSize =< byte_size(Buffer) ->
-    <<Data:DataSize/binary, _/binary>> = Buffer,
-    [Data];
-maybe_read_more_iolist(Buffer, DataSize, NextPos, File) ->
-    {Missing, _} =
-        read_raw_iolist_int(File, NextPos, DataSize - byte_size(Buffer)),
-    [Buffer, Missing].
-
--spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
-    {Data::iolist(), CurPos::non_neg_integer()}.
-read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
-    read_raw_iolist_int(Fd, Pos, Len);
-read_raw_iolist_int(#file{fd = Fd}, Pos, Len) ->
-    BlockOffset = Pos rem ?SIZE_BLOCK,
-    TotalBytes = calculate_total_read_len(BlockOffset, Len),
-    {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
-    {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes}.
-
--spec extract_md5(iolist()) -> {binary(), iolist()}.
-extract_md5(FullIoList) ->
-    {Md5List, IoList} = split_iolist(FullIoList, 16, []),
-    {iolist_to_binary(Md5List), IoList}.
-
-calculate_total_read_len(0, FinalLen) ->
-    calculate_total_read_len(1, FinalLen) + 1;
-calculate_total_read_len(BlockOffset, FinalLen) ->
-    case ?SIZE_BLOCK - BlockOffset of
-    BlockLeft when BlockLeft >= FinalLen ->
-        FinalLen;
-    BlockLeft ->
-        FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
-            if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
-                true -> 1 end
-    end.
-
-remove_block_prefixes(_BlockOffset, <<>>) ->
-    [];
-remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
-    remove_block_prefixes(1, Rest);
-remove_block_prefixes(BlockOffset, Bin) ->
-    BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
-    case size(Bin) of
-    Size when Size > BlockBytesAvailable ->
-        <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
-        [DataBlock | remove_block_prefixes(0, Rest)];
-    _Size ->
-        [Bin]
-    end.
-
-make_blocks(_BlockOffset, []) ->
-    [];
-make_blocks(0, IoList) ->
-    [<<0>> | make_blocks(1, IoList)];
-make_blocks(BlockOffset, IoList) ->
-    case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
-    {Begin, End} ->
-        [Begin | make_blocks(0, End)];
-    _SplitRemaining ->
-        IoList
-    end.
-
-%% @doc Returns a tuple where the first element contains the leading SplitAt
-%% bytes of the original iolist, and the second element is the tail. If SplitAt
-%% is larger than byte_size(IoList), returns the number of bytes still missing.
--spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
-    {iolist(), iolist()} | non_neg_integer().
-split_iolist(List, 0, BeginAcc) ->
-    {lists:reverse(BeginAcc), List};
-split_iolist([], SplitAt, _BeginAcc) ->
-    SplitAt;
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
-    split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
-split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
-    <<Begin:SplitAt/binary,End/binary>> = Bin,
-    split_iolist([End | Rest], 0, [Begin | BeginAcc]);
-split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
-    case split_iolist(Sublist, SplitAt, BeginAcc) of
-    {Begin, End} ->
-        {Begin, [End | Rest]};
-    SplitRemaining ->
-        split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
-    end;
-split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
-    split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).

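For readers tracking the deleted couch_file module: the on-disk format inserts a one-byte prefix at every 4096-byte block boundary (0 for ordinary blocks, 1 marking a block that starts a header), and every appended chunk begins with a 4-byte word whose top bit records whether an MD5 checksum precedes the payload. A worked example, written as comments consistent with the functions above:

    % assemble_file_chunk(<<"abc">>) builds [<<0:1, 3:31>>, <<"abc">>],
    % i.e. the bytes <<0,0,0,3, "abc">> before block prefixes are added.
    %
    % Reading 10 payload bytes starting at file offset 0:
    %   calculate_total_read_len(0, 10)
    %     -> calculate_total_read_len(1, 10) + 1   % +1 for the block-prefix byte
    %     -> 10 + 1 = 11                           % 11 raw bytes carry 10 data bytes
    %
    % pread_iolist/2 reads ahead, strips prefixes via remove_block_prefixes/2,
    % then uses the leading <<Prefix:1, Len:31>> word to decide whether a
    % 16-byte MD5 precedes the payload.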
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd.erl
----------------------------------------------------------------------
diff --git a/couch_httpd.erl b/couch_httpd.erl
deleted file mode 100644
index 28932ba..0000000
--- a/couch_httpd.erl
+++ /dev/null
@@ -1,1114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd).
--include("couch_db.hrl").
-
--export([start_link/0, start_link/1, stop/0, config_change/2,
-        handle_request/5]).
-
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
--export([path/1,absolute_uri/2,body_length/1]).
--export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
--export([make_fun_spec_strs/1]).
--export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
--export([doc_etag/1, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
--export([start_chunked_response/3,send_chunk/2,log_request/2]).
--export([start_response_length/4, start_response/3, send/2]).
--export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]).
--export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
--export([http_1_0_keep_alive/2]).
-
-start_link() ->
-    start_link(http).
-start_link(http) ->
-    Port = couch_config:get("httpd", "port", "5984"),
-    start_link(?MODULE, [{port, Port}]);
-start_link(https) ->
-    Port = couch_config:get("ssl", "port", "6984"),
-    CertFile = couch_config:get("ssl", "cert_file", nil),
-    KeyFile = couch_config:get("ssl", "key_file", nil),
-    Options = case CertFile /= nil andalso KeyFile /= nil of
-        true ->
-            SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
-
-            %% set password if one is needed for the cert
-            SslOpts1 = case couch_config:get("ssl", "password", nil) of
-                nil -> SslOpts;
-                Password ->
-                    SslOpts ++ [{password, Password}]
-            end,
-            % do we verify certificates?
-            FinalSslOpts = case couch_config:get("ssl",
-                    "verify_ssl_certificates", "false") of
-                "false" -> SslOpts1;
-                "true" ->
-                    case couch_config:get("ssl",
-                            "cacert_file", nil) of
-                        nil ->
-                            io:format("Verify SSL certificate "
-                                ++"enabled but file containing "
-                                ++"PEM encoded CA certificates is "
-                                ++"missing", []),
-                            throw({error, missing_cacerts});
-                        CaCertFile ->
-                            Depth = list_to_integer(couch_config:get("ssl",
-                                    "ssl_certificate_max_depth",
-                                    "1")),
-                            FinalOpts = [
-                                {cacertfile, CaCertFile},
-                                {depth, Depth},
-                                {verify, verify_peer}],
-                            % allows custom verify fun.
-                            case couch_config:get("ssl",
-                                    "verify_fun", nil) of
-                                nil -> FinalOpts;
-                                SpecStr ->
-                                    FinalOpts
-                                    ++ [{verify_fun, make_arity_3_fun(SpecStr)}]
-                            end
-                    end
-            end,
-
-            [{port, Port},
-                {ssl, true},
-                {ssl_opts, FinalSslOpts}];
-        false ->
-            io:format("SSL enabled but PEM certificates are missing.", []),
-            throw({error, missing_certs})
-    end,
-    start_link(https, Options).
-start_link(Name, Options) ->
-    % read config and register for configuration changes
-
-    % just stop if one of the config settings changes. couch_server_sup
-    % will restart us and then we will pick up the new settings.
-
-    BindAddress = couch_config:get("httpd", "bind_address", any),
-    validate_bind_address(BindAddress),
-    DefaultSpec = "{couch_httpd_db, handle_request}",
-    DefaultFun = make_arity_1_fun(
-        couch_config:get("httpd", "default_handler", DefaultSpec)
-    ),
-
-    UrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
-        end, couch_config:get("httpd_global_handlers")),
-
-    DbUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
-        end, couch_config:get("httpd_db_handlers")),
-
-    DesignUrlHandlersList = lists:map(
-        fun({UrlKey, SpecStr}) ->
-            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
-        end, couch_config:get("httpd_design_handlers")),
-
-    UrlHandlers = dict:from_list(UrlHandlersList),
-    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
-    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
-    {ok, ServerOptions} = couch_util:parse_term(
-        couch_config:get("httpd", "server_options", "[]")),
-    {ok, SocketOptions} = couch_util:parse_term(
-        couch_config:get("httpd", "socket_options", "[]")),
-
-    set_auth_handlers(),
-
-    % ensure uuid is set so that concurrent replications
-    % get the same value.
-    couch_server:get_uuid(),
-
-    Loop = fun(Req)->
-        case SocketOptions of
-        [] ->
-            ok;
-        _ ->
-            ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
-        end,
-        apply(?MODULE, handle_request, [
-            Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
-        ])
-    end,
-
-    % set mochiweb options
-    FinalOptions = lists:append([Options, ServerOptions, [
-            {loop, Loop},
-            {name, Name},
-            {ip, BindAddress}]]),
-
-    % launch mochiweb
-    {ok, Pid} = case mochiweb_http:start(FinalOptions) of
-        {ok, MochiPid} ->
-            {ok, MochiPid};
-        {error, Reason} ->
-            io:format("Failure to start Mochiweb: ~s~n",[Reason]),
-            throw({error, Reason})
-    end,
-
-    ok = couch_config:register(fun ?MODULE:config_change/2, Pid),
-    {ok, Pid}.
-
-
-stop() ->
-    mochiweb_http:stop(couch_httpd),
-    mochiweb_http:stop(https).
-
-config_change("httpd", "bind_address") ->
-    ?MODULE:stop();
-config_change("httpd", "port") ->
-    ?MODULE:stop();
-config_change("httpd", "default_handler") ->
-    ?MODULE:stop();
-config_change("httpd", "server_options") ->
-    ?MODULE:stop();
-config_change("httpd", "socket_options") ->
-    ?MODULE:stop();
-config_change("httpd", "authentication_handlers") ->
-    set_auth_handlers();
-config_change("httpd_global_handlers", _) ->
-    ?MODULE:stop();
-config_change("httpd_db_handlers", _) ->
-    ?MODULE:stop();
-config_change("ssl", _) ->
-    ?MODULE:stop().
-
-set_auth_handlers() ->
-    AuthenticationSrcs = make_fun_spec_strs(
-        couch_config:get("httpd", "authentication_handlers", "")),
-    AuthHandlers = lists:map(
-        fun(A) -> {make_arity_1_fun(A), ?l2b(A)} end, AuthenticationSrcs),
-    ok = application:set_env(couch, auth_handlers, AuthHandlers).
-
-% SpecStr is a string like "{my_module, my_fun}"
-%  or "{my_module, my_fun, <<"my_arg">>}"
-make_arity_1_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg) -> Mod:Fun(Arg) end
-    end.
-
-make_arity_2_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
-    end.
-
-make_arity_3_fun(SpecStr) ->
-    case couch_util:parse_term(SpecStr) of
-    {ok, {Mod, Fun, SpecArg}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
-    {ok, {Mod, Fun}} ->
-        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
-    end.
-
-% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
-make_fun_spec_strs(SpecStr) ->
-    re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
-
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
-    DesignUrlHandlers) ->
-    %% reset rewrite count for new request
-    erlang:put(?REWRITE_COUNT, 0),
-
-    MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
-
-    handle_request_int(MochiReq1, DefaultFun,
-                UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
-
-handle_request_int(MochiReq, DefaultFun,
-            UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
-    Begin = now(),
-    % for the path, use the raw path with the query string and fragment
-    % removed, but URL quoting left intact
-    RawUri = MochiReq:get(raw_path),
-    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
-    Headers = MochiReq:get(headers),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-requested-path") of
-                undefined -> RawUri;
-                R -> R
-            end;
-        P -> P
-    end,
-
-    HandlerKey =
-    case mochiweb_util:partition(Path, "/") of
-    {"", "", ""} ->
-        <<"/">>; % Special case the root url handler
-    {FirstPart, _, _} ->
-        list_to_binary(FirstPart)
-    end,
-    ?LOG_DEBUG("~p ~s ~p from ~p~nHeaders: ~p", [
-        MochiReq:get(method),
-        RawUri,
-        MochiReq:get(version),
-        MochiReq:get(peer),
-        mochiweb_headers:to_list(MochiReq:get(headers))
-    ]),
-
-    Method1 =
-    case MochiReq:get(method) of
-        % already an atom
-        Meth when is_atom(Meth) -> Meth;
-
-        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.) so convert when
-        % possible (if any module references the atom, it already exists).
-        Meth -> couch_util:to_existing_atom(Meth)
-    end,
-    increment_method_stats(Method1),
-
-    % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-Method-Override header
-    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
-    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
-                                                 "PUT", "DELETE",
-                                                 "TRACE", "CONNECT",
-                                                 "COPY"]) of
-    true ->
-        ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
-        case Method1 of
-        'POST' -> couch_util:to_existing_atom(MethodOverride);
-        _ ->
-            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
-            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
-            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
-            Method1
-        end;
-    _ -> Method1
-    end,
-
-    % alias HEAD to GET as mochiweb takes care of stripping the body
-    Method = case Method2 of
-        'HEAD' -> 'GET';
-        Other -> Other
-    end,
-
-    HttpReq = #httpd{
-        mochi_req = MochiReq,
-        peer = MochiReq:get(peer),
-        method = Method,
-        requested_path_parts =
-            [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
-        path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
-        db_url_handlers = DbUrlHandlers,
-        design_url_handlers = DesignUrlHandlers,
-        default_fun = DefaultFun,
-        url_handlers = UrlHandlers,
-        user_ctx = erlang:erase(pre_rewrite_user_ctx),
-        auth = erlang:erase(pre_rewrite_auth)
-    },
-
-    HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
-    {ok, AuthHandlers} = application:get_env(couch, auth_handlers),
-
-    {ok, Resp} =
-    try
-        case couch_httpd_cors:is_preflight_request(HttpReq) of
-        #httpd{} ->
-            case authenticate_request(HttpReq, AuthHandlers) of
-            #httpd{} = Req ->
-                HandlerFun(Req);
-            Response ->
-                Response
-            end;
-        Response ->
-            Response
-        end
-    catch
-        throw:{http_head_abort, Resp0} ->
-            {ok, Resp0};
-        throw:{invalid_json, S} ->
-            ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []),
-            ?LOG_DEBUG("Invalid JSON: ~p",[S]),
-            send_error(HttpReq, {bad_request, invalid_json});
-        throw:unacceptable_encoding ->
-            ?LOG_ERROR("unsupported encoding method for the response", []),
-            send_error(HttpReq, {not_acceptable, "unsupported encoding"});
-        throw:bad_accept_encoding_value ->
-            ?LOG_ERROR("received invalid Accept-Encoding header", []),
-            send_error(HttpReq, bad_request);
-        exit:normal ->
-            exit(normal);
-        exit:snappy_nif_not_loaded ->
-            ErrorReason = "To access the database or view index, Apache CouchDB"
-                " must be built with Erlang OTP R13B04 or higher.",
-            ?LOG_ERROR("~s", [ErrorReason]),
-            send_error(HttpReq, {bad_otp_release, ErrorReason});
-        exit:{body_too_large, _} ->
-            send_error(HttpReq, request_entity_too_large);
-        throw:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]),
-            ?LOG_DEBUG("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error);
-        error:badarg ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Badarg error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, badarg);
-        error:function_clause ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("function_clause error in HTTP request",[]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, function_clause);
-        Tag:Error ->
-            Stack = erlang:get_stacktrace(),
-            ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
-            ?LOG_INFO("Stacktrace: ~p",[Stack]),
-            send_error(HttpReq, Error)
-    end,
-    RequestTime = round(timer:now_diff(now(), Begin)/1000),
-    couch_stats_collector:record({couchdb, request_time}, RequestTime),
-    couch_stats_collector:increment({httpd, requests}),
-    {ok, Resp}.
-
-% Try authentication handlers in order until one sets a user_ctx.
-% The auth funs also have the option of returning a response.
-% TODO: move this to couch_httpd_auth?
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthHandlers) ->
-    Req;
-authenticate_request(#httpd{} = Req, []) ->
-    case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-    "true" ->
-        throw({unauthorized, <<"Authentication required.">>});
-    "false" ->
-        Req#httpd{user_ctx=#user_ctx{}}
-    end;
-authenticate_request(#httpd{} = Req, [{AuthFun, AuthSrc} | RestAuthHandlers]) ->
-    R = case AuthFun(Req) of
-        #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 ->
-            Req2#httpd{user_ctx=UserCtx#user_ctx{handler=AuthSrc}};
-        Else -> Else
-    end,
-    authenticate_request(R, RestAuthHandlers);
-authenticate_request(Response, _AuthSrcs) ->
-    Response.
-
-increment_method_stats(Method) ->
-    couch_stats_collector:increment({httpd_request_methods, Method}).
-
-validate_referer(Req) ->
-    Host = host_for_request(Req),
-    Referer = header_value(Req, "Referer", fail),
-    case Referer of
-    fail ->
-        throw({bad_request, <<"Referer header required.">>});
-    Referer ->
-        {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
-        if
-            RefererHost =:= Host -> ok;
-            true -> throw({bad_request, <<"Referer header must match host.">>})
-        end
-    end.
-
-validate_ctype(Req, Ctype) ->
-    case header_value(Req, "Content-Type") of
-    undefined ->
-        throw({bad_ctype, "Content-Type must be "++Ctype});
-    ReqCtype ->
-        case string:tokens(ReqCtype, ";") of
-        [Ctype] -> ok;
-        [Ctype, _Rest] -> ok;
-        _Else ->
-            throw({bad_ctype, "Content-Type must be "++Ctype})
-        end
-    end.
-
-% Utilities
-
-partition(Path) ->
-    mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
-    case MochiReq:get_header_value(Key) of
-    undefined -> Default;
-    Value -> Value
-    end.
-
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
-    MochiReq:get_primary_header_value(Key).
-
-accepted_encodings(#httpd{mochi_req=MochiReq}) ->
-    case MochiReq:accepted_encodings(["gzip", "identity"]) of
-    bad_accept_encoding_value ->
-        throw(bad_accept_encoding_value);
-    [] ->
-        throw(unacceptable_encoding);
-    EncList ->
-        EncList
-    end.
-
-serve_file(Req, RelativePath, DocumentRoot) ->
-    serve_file(Req, RelativePath, DocumentRoot, []).
-
-serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot,
-           ExtraHeaders) ->
-    log_request(Req, 200),
-    ResponseHeaders = server_header()
-        ++ couch_httpd_auth:cookie_auth_header(Req, [])
-        ++ ExtraHeaders,
-    {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
-            couch_httpd_cors:cors_headers(Req, ResponseHeaders))}.
-
-qs_value(Req, Key) ->
-    qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
-    couch_util:get_value(Key, qs(Req), Default).
-
-qs_json_value(Req, Key, Default) ->
-    case qs_value(Req, Key, Default) of
-    Default ->
-        Default;
-    Result ->
-        ?JSON_DECODE(Result)
-    end.
-
-qs(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:parse_qs().
-
-path(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(path).
-
-host_for_request(#httpd{mochi_req=MochiReq}) ->
-    XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
-    case MochiReq:get_header_value(XHost) of
-        undefined ->
-            case MochiReq:get_header_value("Host") of
-                undefined ->
-                    {ok, {Address, Port}} = case MochiReq:get(socket) of
-                        {ssl, SslSocket} -> ssl:sockname(SslSocket);
-                        Socket -> inet:sockname(Socket)
-                    end,
-                    inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
-                Value1 ->
-                    Value1
-            end;
-        Value -> Value
-    end.
-
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
-    Host = host_for_request(Req),
-    XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
-    Scheme = case MochiReq:get_header_value(XSsl) of
-                 "on" -> "https";
-                 _ ->
-                     XProto = couch_config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"),
-                     case MochiReq:get_header_value(XProto) of
-                         %% Restrict to "https" and "http" schemes only
-                         "https" -> "https";
-                         _ -> case MochiReq:get(scheme) of
-                                  https -> "https";
-                                  http -> "http"
-                              end
-                     end
-             end,
-    Scheme ++ "://" ++ Host ++ Path.
-
-unquote(UrlEncodedString) ->
-    mochiweb_util:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
-    mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req=MochiReq}) ->
-    mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req=MochiReq}, Len) ->
-    MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
-    % Fun is called once with each chunk
-    % Fun({Length, Binary}, State)
-    % called with Length == 0 for the final chunk.
-    MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-
-body_length(#httpd{mochi_req=MochiReq}) ->
-    MochiReq:get(body_length).
-
-body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
-    MaxSize = list_to_integer(
-        couch_config:get("couchdb", "max_document_size", "4294967296")),
-    MochiReq:recv_body(MaxSize);
-body(#httpd{req_body=ReqBody}) ->
-    ReqBody.
-
-json_body(Httpd) ->
-    ?JSON_DECODE(body(Httpd)).
-
-json_body_obj(Httpd) ->
-    case json_body(Httpd) of
-        {Props} -> {Props};
-        _Else ->
-            throw({bad_request, "Request body must be a JSON object"})
-    end.
-
-
-
-doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
-    "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
-
-make_etag(Term) ->
-    <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)),
-    iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
-    etag_match(Req, binary_to_list(CurrentEtag));
-
-etag_match(Req, CurrentEtag) ->
-    EtagsToMatch = string:tokens(
-        header_value(Req, "If-None-Match", ""), ", "),
-    lists:member(CurrentEtag, EtagsToMatch).
-
-etag_respond(Req, CurrentEtag, RespFun) ->
-    case etag_match(Req, CurrentEtag) of
-    true ->
-        % the client has this in their cache.
-        send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
-    false ->
-        % Run the function.
-        RespFun()
-    end.
-
-etag_maybe(Req, RespFun) ->
-    try
-        RespFun()
-    catch
-        throw:{etag_match, ETag} ->
-            send_response(Req, 304, [{"ETag", ETag}], <<>>)
-    end.
-
-verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
-    verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles=Roles}) ->
-    case lists:member(<<"_admin">>, Roles) of
-    true -> ok;
-    false -> throw({unauthorized, <<"You are not a server admin.">>})
-    end.
-
-log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) ->
-    ?LOG_INFO("~s - - ~s ~s ~B", [
-        Peer,
-        MochiReq:get(method),
-        MochiReq:get(raw_path),
-        Code
-    ]),
-    gen_event:notify(couch_plugin, {log_request, Req, Code}).
-
-
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = Headers ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response_length({Code, Headers2, Length}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    CookieHeader = couch_httpd_auth:cookie_auth_header(Req, Headers),
-    Headers1 = Headers ++ server_header() ++ CookieHeader,
-    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
-    Resp = MochiReq:start_response({Code, Headers2}),
-    case MochiReq:get(method) of
-        'HEAD' -> throw({http_head_abort, Resp});
-        _ -> ok
-    end,
-    {ok, Resp}.
-
-send(Resp, Data) ->
-    Resp:send(Data),
-    {ok, Resp}.
-
-no_resp_conn_header([]) ->
-    true;
-no_resp_conn_header([{Hdr, _}|Rest]) ->
-    case string:to_lower(Hdr) of
-        "connection" -> false;
-        _ -> no_resp_conn_header(Rest)
-    end.
-
-http_1_0_keep_alive(Req, Headers) ->
-    KeepOpen = Req:should_close() == false,
-    IsHttp10 = Req:get(version) == {1, 0},
-    NoRespHeader = no_resp_conn_header(Headers),
-    case KeepOpen andalso IsHttp10 andalso NoRespHeader of
-        true -> [{"Connection", "Keep-Alive"} | Headers];
-        false -> Headers
-    end.
-
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-    Resp = MochiReq:respond({Code, Headers3, chunked}),
-    case MochiReq:get(method) of
-    'HEAD' -> throw({http_head_abort, Resp});
-    _ -> ok
-    end,
-    {ok, Resp}.
-
-send_chunk(Resp, Data) ->
-    case iolist_size(Data) of
-    0 -> ok; % do nothing
-    _ -> Resp:write_chunk(Data)
-    end,
-    {ok, Resp}.
-
-last_chunk(Resp) ->
-    Resp:write_chunk([]),
-    {ok, Resp}.
-
-send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
-    log_request(Req, Code),
-    couch_stats_collector:increment({httpd_status_codes, Code}),
-    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
-    if Code >= 500 ->
-        ?LOG_ERROR("httpd ~p error response:~n ~s", [Code, Body]);
-    Code >= 400 ->
-        ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
-    true -> ok
-    end,
-    Headers2 = Headers1 ++ server_header() ++
-               couch_httpd_auth:cookie_auth_header(Req, Headers1),
-    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
-
-    {ok, MochiReq:respond({Code, Headers3, Body})}.
-
-send_method_not_allowed(Req, Methods) ->
-    send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
-
-send_json(Req, Value) ->
-    send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
-    send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    Body = [start_jsonp(), ?JSON_ENCODE(Value), end_jsonp(), $\n],
-    send_response(Req, Code, DefaultHeaders ++ Headers, Body).
-
-start_json_response(Req, Code) ->
-    start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
-    initialize_jsonp(Req),
-    DefaultHeaders = [
-        {"Content-Type", negotiate_content_type(Req)},
-        {"Cache-Control", "must-revalidate"}
-    ],
-    {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
-    case start_jsonp() of
-        [] -> ok;
-        Start -> send_chunk(Resp, Start)
-    end,
-    {ok, Resp}.
-
-end_json_response(Resp) ->
-    send_chunk(Resp, end_jsonp() ++ [$\n]),
-    last_chunk(Resp).
-
-initialize_jsonp(Req) ->
-    case get(jsonp) of
-        undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
-        _ -> ok
-    end,
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack ->
-            try
-                % make sure jsonp is configured on (default off)
-                case couch_config:get("httpd", "allow_jsonp", "false") of
-                "true" ->
-                    validate_callback(CallBack);
-                _Else ->
-                    put(jsonp, no_jsonp)
-                end
-            catch
-                Error ->
-                    put(jsonp, no_jsonp),
-                    throw(Error)
-            end
-    end.
-
-start_jsonp() ->
-    case get(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        CallBack -> ["/* CouchDB */", CallBack, "("]
-    end.
-
-end_jsonp() ->
-    case erlang:erase(jsonp) of
-        no_jsonp -> [];
-        [] -> [];
-        _ -> ");"
-    end.
-
-validate_callback(CallBack) when is_binary(CallBack) ->
-    validate_callback(binary_to_list(CallBack));
-validate_callback([]) ->
-    ok;
-validate_callback([Char | Rest]) ->
-    case Char of
-        _ when Char >= $a andalso Char =< $z -> ok;
-        _ when Char >= $A andalso Char =< $Z -> ok;
-        _ when Char >= $0 andalso Char =< $9 -> ok;
-        _ when Char == $. -> ok;
-        _ when Char == $_ -> ok;
-        _ when Char == $[ -> ok;
-        _ when Char == $] -> ok;
-        _ ->
-            throw({bad_request, invalid_callback})
-    end,
-    validate_callback(Rest).
-
-
-error_info({Error, Reason}) when is_list(Reason) ->
-    error_info({Error, ?l2b(Reason)});
-error_info(bad_request) ->
-    {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
-    {400, <<"bad_request">>, Reason};
-error_info({query_parse_error, Reason}) ->
-    {400, <<"query_parse_error">>, Reason};
-% Prior art for md5 mismatch resulting in a 400 is from AWS S3
-error_info(md5_mismatch) ->
-    {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
-error_info(not_found) ->
-    {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
-    {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
-    {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
-    {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Msg}) ->
-    {403, <<"forbidden">>, Msg};
-error_info({unauthorized, Msg}) ->
-    {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
-    {412, <<"file_exists">>, <<"The database could not be "
-        "created, the file already exists.">>};
-error_info(request_entity_too_large) ->
-    {413, <<"too_large">>, <<"the request entity is too large">>};
-error_info({bad_ctype, Reason}) ->
-    {415, <<"bad_content_type">>, Reason};
-error_info(requested_range_not_satisfiable) ->
-    {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
-error_info({error, illegal_database_name, Name}) ->
-    Message = "Name: '" ++ Name ++ "'. Only lowercase characters (a-z), "
-        ++ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
-        ++ "are allowed. Must begin with a letter.",
-    {400, <<"illegal_database_name">>, couch_util:to_binary(Message)};
-error_info({missing_stub, Reason}) ->
-    {412, <<"missing_stub">>, Reason};
-error_info({Error, Reason}) ->
-    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info(Error) ->
-    {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
-    if Code == 401 ->
-        % this is where the basic auth popup is triggered
-        case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
-        undefined ->
-            case couch_config:get("httpd", "WWW-Authenticate", nil) of
-            nil ->
-                % If the client is a browser and the basic auth popup isn't turned on,
-                % redirect to the session page.
-                case ErrorStr of
-                <<"unauthorized">> ->
-                    case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of
-                    nil -> {Code, []};
-                    AuthRedirect ->
-                        case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-                        "true" ->
-                            % always send the browser popup header when require_valid_user is set
-                            {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
-                        _False ->
-                            case MochiReq:accepts_content_type("application/json") of
-                            true ->
-                                {Code, []};
-                            false ->
-                                case MochiReq:accepts_content_type("text/html") of
-                                true ->
-                                    % Redirect to the path the user requested, not
-                                    % the one that is used internally.
-                                    UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                                    undefined ->
-                                        MochiReq:get(path);
-                                    VHostPath ->
-                                        VHostPath
-                                    end,
-                                    RedirectLocation = lists:flatten([
-                                        AuthRedirect,
-                                        "?return=", couch_util:url_encode(UrlReturnRaw),
-                                        "&reason=", couch_util:url_encode(ReasonStr)
-                                    ]),
-                                    {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
-                                false ->
-                                    {Code, []}
-                                end
-                            end
-                        end
-                    end;
-                _Else ->
-                    {Code, []}
-                end;
-            Type ->
-                {Code, [{"WWW-Authenticate", Type}]}
-            end;
-        Type ->
-           {Code, [{"WWW-Authenticate", Type}]}
-        end;
-    true ->
-        {Code, []}
-    end.
-
-send_error(_Req, {already_sent, Resp, _Error}) ->
-    {ok, Resp};
-
-send_error(Req, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
-    send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
-    send_error(Req, Code, [], ErrorStr, ReasonStr).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
-    send_json(Req, Code, Headers,
-        {[{<<"error">>,  ErrorStr},
-         {<<"reason">>, ReasonStr}]}).
-
-% give the option for list functions to output html or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
-    send_chunk(Resp, Reason),
-    last_chunk(Resp);
-
-send_chunked_error(Resp, Error) ->
-    {Code, ErrorStr, ReasonStr} = error_info(Error),
-    JsonError = {[{<<"code">>, Code},
-        {<<"error">>,  ErrorStr},
-        {<<"reason">>, ReasonStr}]},
-    send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
-    last_chunk(Resp).
-
-send_redirect(Req, Path) ->
-     send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
-
-negotiate_content_type(Req) ->
-    case get(jsonp) of
-        no_jsonp -> negotiate_content_type1(Req);
-        [] -> negotiate_content_type1(Req);
-        _Callback -> "text/javascript"
-    end.
-
-negotiate_content_type1(#httpd{mochi_req=MochiReq}) ->
-    %% Determine the appropriate Content-Type header for a JSON response
-    %% depending on the Accept header in the request. A request that explicitly
-    %% lists the correct JSON MIME type will get that type, otherwise the
-    %% response will have the generic MIME type "text/plain"
-    AcceptedTypes = case MochiReq:get_header_value("Accept") of
-        undefined       -> [];
-        AcceptHeader    -> string:tokens(AcceptHeader, ", ")
-    end,
-    case lists:member("application/json", AcceptedTypes) of
-        true  -> "application/json";
-        false -> "text/plain; charset=utf-8"
-    end.
-
-server_header() ->
-    [{"Server", "CouchDB/" ++ couch_server:get_version() ++
-                " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
-
-
--record(mp, {boundary, buffer, data_fun, callback}).
-
-
-parse_multipart_request(ContentType, DataFun, Callback) ->
-    Boundary0 = iolist_to_binary(get_boundary(ContentType)),
-    Boundary = <<"\r\n--", Boundary0/binary>>,
-    Mp = #mp{boundary= Boundary,
-            buffer= <<>>,
-            data_fun=DataFun,
-            callback=Callback},
-    {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
-        fun nil_callback/1),
-    #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
-            parse_part_header(Mp2),
-    {Buffer, DataFun2, Callback2}.
-
-nil_callback(_Data)->
-    fun nil_callback/1.
-
-get_boundary({"multipart/" ++ _, Opts}) ->
-    case couch_util:get_value("boundary", Opts) of
-        S when is_list(S) ->
-            S
-    end;
-get_boundary(ContentType) ->
-    {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
-    get_boundary({"multipart/", Opts}).
-
-
-
-split_header(<<>>) ->
-    [];
-split_header(Line) ->
-    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
-                                           binary_to_list(Line)),
-    [{string:to_lower(string:strip(Name)),
-     mochiweb_util:parse_header(Value)}].
-
-read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
-    case find_in_binary(Pattern, Buffer) of
-    not_found ->
-        Callback2 = Callback(Buffer),
-        {Buffer2, DataFun2} = DataFun(),
-        Buffer3 = iolist_to_binary(Buffer2),
-        read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
-    {partial, 0} ->
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Buffer,NewData])},
-                Pattern, Callback);
-    {partial, Skip} ->
-        <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {NewData, DataFun2} = DataFun(),
-        read_until(Mp#mp{data_fun=DataFun2,
-                buffer= iolist_to_binary([Rest | NewData])},
-                Pattern, Callback2);
-    {exact, 0} ->
-        PatternLen = size(Pattern),
-        <<_:PatternLen/binary, Rest/binary>> = Buffer,
-        {Mp#mp{buffer= Rest}, Callback};
-    {exact, Skip} ->
-        PatternLen = size(Pattern),
-        <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
-        Callback2 = Callback(DataChunk),
-        {Mp#mp{buffer= Rest}, Callback2}
-    end.
-
-
-parse_part_header(#mp{callback=UserCallBack}=Mp) ->
-    {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
-            fun(Next) -> acc_callback(Next, []) end),
-    HeaderData = AccCallback(get_data),
-
-    Headers =
-    lists:foldl(fun(Line, Acc) ->
-            split_header(Line) ++ Acc
-        end, [], re:split(HeaderData,<<"\r\n">>, [])),
-    NextCallback = UserCallBack({headers, Headers}),
-    parse_part_body(Mp2#mp{callback=NextCallback}).
-
-parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
-    {Mp2, WrappedCallback} = read_until(Mp, Prefix,
-            fun(Data) -> body_callback_wrapper(Data, Callback) end),
-    Callback2 = WrappedCallback(get_callback),
-    Callback3 = Callback2(body_end),
-    case check_for_last(Mp2#mp{callback=Callback3}) of
-    {last, #mp{callback=Callback3}=Mp3} ->
-        Mp3#mp{callback=Callback3(eof)};
-    {more, Mp3} ->
-        parse_part_header(Mp3)
-    end.
-
-acc_callback(get_data, Acc)->
-    iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc)->
-    fun(Next) -> acc_callback(Next, [Data | Acc]) end.
-
-body_callback_wrapper(get_callback, Callback) ->
-    Callback;
-body_callback_wrapper(Data, Callback) ->
-    Callback2 = Callback({body, Data}),
-    fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-
-check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
-    case Buffer of
-    <<"--",_/binary>> -> {last, Mp};
-    <<_, _, _/binary>> -> {more, Mp};
-    _ -> % not long enough
-        {Data, DataFun2} = DataFun(),
-        check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
-                data_fun = DataFun2})
-    end.
-
-find_in_binary(_B, <<>>) ->
-    not_found;
-
-find_in_binary(B, Data) ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), 1);
-    {Pos, _Len} ->
-        {exact, Pos}
-    end.
-
-partial_find(<<>>, _Data, _Pos) ->
-    not_found;
-
-partial_find(B, Data, N) when byte_size(Data) > 0 ->
-    case binary:match(Data, [B], []) of
-    nomatch ->
-        partial_find(binary:part(B, {0, byte_size(B) - 1}),
-                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), N + 1);
-    {Pos, _Len} ->
-        {partial, N + Pos}
-    end;
-
-partial_find(_B, _Data, _N) ->
-    not_found.
-
-
-validate_bind_address(Address) ->
-    case inet_parse:address(Address) of
-        {ok, _} -> ok;
-        _ -> throw({error, invalid_bind_address})
-    end.
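
For context on the error plumbing above: a handler throws a term, error_info/1
maps it to a {Code, ErrorStr, ReasonStr} triple, and send_error/2 serializes
that triple as a JSON body. A minimal sketch of the round trip (the handler
below is made up for illustration, not part of the module):

    %% Hypothetical handler: any thrown term is funneled through send_error/2.
    my_handler(Req) ->
        try
            throw(conflict)  % e.g. a document update conflict
        catch
            throw:Error ->
                %% error_info(conflict) -> {409, <<"conflict">>,
                %%                          <<"Document update conflict.">>}
                %% so the client receives HTTP 409 with the body:
                %%   {"error":"conflict","reason":"Document update conflict."}
                couch_httpd:send_error(Req, Error)
        end.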

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_httpd_auth.erl
----------------------------------------------------------------------
diff --git a/couch_httpd_auth.erl b/couch_httpd_auth.erl
deleted file mode 100644
index b8c4e26..0000000
--- a/couch_httpd_auth.erl
+++ /dev/null
@@ -1,380 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_auth).
--include("couch_db.hrl").
-
--export([default_authentication_handler/1,special_test_authentication_handler/1]).
--export([cookie_authentication_handler/1]).
--export([null_authentication_handler/1]).
--export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
--export([cookie_auth_header/2]).
--export([handle_session_req/1]).
-
--import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
-
-special_test_authentication_handler(Req) ->
-    case header_value(Req, "WWW-Authenticate") of
-    "X-Couch-Test-Auth " ++ NamePass ->
-        % NamePass is a colon separated string: "joe schmoe:a password".
-        [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
-        case {Name, Pass} of
-        {"Jan Lehnardt", "apple"} -> ok;
-        {"Christopher Lenz", "dog food"} -> ok;
-        {"Noah Slater", "biggiesmalls endian"} -> ok;
-        {"Chris Anderson", "mp3"} -> ok;
-        {"Damien Katz", "pecan pie"} -> ok;
-        {_, _} ->
-            throw({unauthorized, <<"Name or password is incorrect.">>})
-        end,
-        Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
-    _ ->
-        % No X-Couch-Test-Auth credentials sent, give admin access so the
-        % previous authentication can be restored after the test
-        Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-    end.
-
-basic_name_pw(Req) ->
-    AuthorizationHeader = header_value(Req, "Authorization"),
-    case AuthorizationHeader of
-    "Basic " ++ Base64Value ->
-        case re:split(base64:decode(Base64Value), ":",
-                      [{return, list}, {parts, 2}]) of
-        ["_", "_"] ->
-            % special name and pass to be logged out
-            nil;
-        [User, Pass] ->
-            {User, Pass};
-        _ ->
-            nil
-        end;
-    _ ->
-        nil
-    end.
-
-default_authentication_handler(Req) ->
-    case basic_name_pw(Req) of
-    {User, Pass} ->
-        case couch_auth_cache:get_user_creds(User) of
-            nil ->
-                throw({unauthorized, <<"Name or password is incorrect.">>});
-            UserProps ->
-                case authenticate(?l2b(Pass), UserProps) of
-                    true ->
-                        Req#httpd{user_ctx=#user_ctx{
-                            name=?l2b(User),
-                            roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                        }};
-                    _Else ->
-                        throw({unauthorized, <<"Name or password is incorrect.">>})
-                end
-        end;
-    nil ->
-        case couch_server:has_admins() of
-        true ->
-            Req;
-        false ->
-            case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
-                "true" -> Req;
-                % If no admins, and no user required, then everyone is admin!
-                % Yay, admin party!
-                _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
-            end
-        end
-    end.
-
-null_authentication_handler(Req) ->
-    Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
-
-%% @doc proxy auth handler.
-%
-% This handler allows creation of a userCtx object for a user authenticated remotely.
-% The client just passes specific headers to CouchDB and the handler creates the
-% userCtx. Header names can be defined in local.ini. By default they are:
-%
-%   * X-Auth-CouchDB-UserName : contains the username (x_auth_username in the
-%   couch_httpd_auth section)
-%   * X-Auth-CouchDB-Roles : contains the user roles, a list of roles separated
-%   by commas (x_auth_roles in the couch_httpd_auth section)
-%   * X-Auth-CouchDB-Token : token to authenticate the authorization (x_auth_token
-%   in the couch_httpd_auth section). This token is an hmac-sha1 built from the
-%   secret key and the username; the secret key must be the same on the client
-%   and the CouchDB node, and is the "secret" key of the couch_httpd_auth ini
-%   section. This token is only required when proxy_use_secret in the
-%   couch_httpd_auth section is set to true.
-%
-proxy_authentication_handler(Req) ->
-    case proxy_auth_user(Req) of
-        nil -> Req;
-        Req2 -> Req2
-    end.
-
-%% @deprecated
-proxy_authentification_handler(Req) ->
-    proxy_authentication_handler(Req).
-    
-proxy_auth_user(Req) ->
-    XHeaderUserName = couch_config:get("couch_httpd_auth", "x_auth_username",
-                                "X-Auth-CouchDB-UserName"),
-    XHeaderRoles = couch_config:get("couch_httpd_auth", "x_auth_roles",
-                                "X-Auth-CouchDB-Roles"),
-    XHeaderToken = couch_config:get("couch_httpd_auth", "x_auth_token",
-                                "X-Auth-CouchDB-Token"),
-    case header_value(Req, XHeaderUserName) of
-        undefined -> nil;
-        UserName ->
-            Roles = case header_value(Req, XHeaderRoles) of
-                undefined -> [];
-                Else ->
-                    [?l2b(R) || R <- string:tokens(Else, ",")]
-            end,
-            case couch_config:get("couch_httpd_auth", "proxy_use_secret", "false") of
-                "true" ->
-                    case couch_config:get("couch_httpd_auth", "secret", nil) of
-                        nil ->
-                            Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
-                        Secret ->
-                            ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
-                            case header_value(Req, XHeaderToken) of
-                                Token when Token == ExpectedToken ->
-                                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
-                                                            roles=Roles}};
-                                _ -> nil
-                            end
-                    end;
-                _ ->
-                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
-            end
-    end.
-
-
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
-    case MochiReq:get_cookie_value("AuthSession") of
-    undefined -> Req;
-    [] -> Req;
-    Cookie ->
-        [User, TimeStr, HashStr] = try
-            AuthSession = couch_util:decodeBase64Url(Cookie),
-            [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
-                                     [{return, list}, {parts, 3}])
-        catch
-            _:_Error ->
-                Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
-                throw({bad_request, Reason})
-        end,
-        % Verify expiry and hash
-        CurrentTime = make_cookie_time(),
-        case couch_config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            ?LOG_DEBUG("cookie auth secret is not set",[]),
-            Req;
-        SecretStr ->
-            Secret = ?l2b(SecretStr),
-            case couch_auth_cache:get_user_creds(User) of
-            nil -> Req;
-            UserProps ->
-                UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
-                FullSecret = <<Secret/binary, UserSalt/binary>>,
-                ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
-                Hash = ?l2b(HashStr),
-                Timeout = list_to_integer(
-                    couch_config:get("couch_httpd_auth", "timeout", "600")),
-                ?LOG_DEBUG("timeout ~p", [Timeout]),
-                case (catch erlang:list_to_integer(TimeStr, 16)) of
-                    TimeStamp when CurrentTime < TimeStamp + Timeout ->
-                        case couch_passwords:verify(ExpectedHash, Hash) of
-                            true ->
-                                TimeLeft = TimeStamp + Timeout - CurrentTime,
-                                ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
-                                Req#httpd{user_ctx=#user_ctx{
-                                    name=?l2b(User),
-                                    roles=couch_util:get_value(<<"roles">>, UserProps, [])
-                                }, auth={FullSecret, TimeLeft < Timeout*0.9}};
-                            _Else ->
-                                Req
-                        end;
-                    _Else ->
-                        Req
-                end
-            end
-        end
-    end.
-
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
-    % Note: we only set the AuthSession cookie if:
-    %  * a valid AuthSession cookie has been received
-    %  * more than 10% of the session timeout has elapsed (the cookie needs a refresh)
-    %  * and if an AuthSession cookie hasn't already been set e.g. by a login
-    %    or logout handler.
-    % The login and logout handlers need to set the AuthSession cookie
-    % themselves.
-    CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
-    Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
-    AuthSession = couch_util:get_value("AuthSession", Cookies),
-    if AuthSession == undefined ->
-        TimeStamp = make_cookie_time(),
-        [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
-    true ->
-        []
-    end;
-cookie_auth_header(_Req, _Headers) -> [].
-
-cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
-    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
-    Hash = crypto:sha_mac(Secret, SessionData),
-    mochiweb_cookies:cookie("AuthSession",
-        couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
-        [{path, "/"}] ++ cookie_scheme(Req) ++ max_age()).
-
-ensure_cookie_auth_secret() ->
-    case couch_config:get("couch_httpd_auth", "secret", nil) of
-        nil ->
-            NewSecret = ?b2l(couch_uuids:random()),
-            couch_config:set("couch_httpd_auth", "secret", NewSecret),
-            NewSecret;
-        Secret -> Secret
-    end.
-
-% session handlers
-% Login handler with user db
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
-    ReqBody = MochiReq:recv_body(),
-    Form = case MochiReq:get_primary_header_value("content-type") of
-        % content type should be json
-        "application/x-www-form-urlencoded" ++ _ ->
-            mochiweb_util:parse_qs(ReqBody);
-        "application/json" ++ _ ->
-            {Pairs} = ?JSON_DECODE(ReqBody),
-            lists:map(fun({Key, Value}) ->
-              {?b2l(Key), ?b2l(Value)}
-            end, Pairs);
-        _ ->
-            []
-    end,
-    UserName = ?l2b(couch_util:get_value("name", Form, "")),
-    Password = ?l2b(couch_util:get_value("password", Form, "")),
-    ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
-    User = case couch_auth_cache:get_user_creds(UserName) of
-        nil -> [];
-        Result -> Result
-    end,
-    UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
-    case authenticate(Password, User) of
-        true ->
-            % setup the session cookie
-            Secret = ?l2b(ensure_cookie_auth_secret()),
-            CurrentTime = make_cookie_time(),
-            Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
-            % TODO document the "next" feature in Futon
-            {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-                nil ->
-                    {200, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
-                {[
-                    {ok, true},
-                    {name, couch_util:get_value(<<"name">>, User, null)},
-                    {roles, couch_util:get_value(<<"roles">>, User, [])}
-                ]});
-        _Else ->
-            % clear the session
-            Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-            {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
-                nil ->
-                    {401, [Cookie]};
-                Redirect ->
-                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-            end,
-            send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
-    end;
-% get user info
-% GET /_session
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
-    Name = UserCtx#user_ctx.name,
-    ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
-    case {Name, ForceLogin} of
-        {null, "true"} ->
-            throw({unauthorized, <<"Please login.">>});
-        {Name, _} ->
-            send_json(Req, {[
-                % remove this ok
-                {ok, true},
-                {<<"userCtx">>, {[
-                    {name, Name},
-                    {roles, UserCtx#user_ctx.roles}
-                ]}},
-                {info, {[
-                    {authentication_db, ?l2b(couch_config:get("couch_httpd_auth", "authentication_db"))},
-                    {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
-                            couch_config:get("httpd", "authentication_handlers"))]}
-                ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
-                        auth_name(?b2l(Handler))
-                    end)}}
-            ]})
-    end;
-% logout by deleting the session
-handle_session_req(#httpd{method='DELETE'}=Req) ->
-    Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
-    {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
-        nil ->
-            {200, [Cookie]};
-        Redirect ->
-            {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
-    end,
-    send_json(Req, Code, Headers, {[{ok, true}]});
-handle_session_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
-
-maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) ->
-    [{Key, Fun(Else)}].
-
-authenticate(Pass, UserProps) ->
-    UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
-    {PasswordHash, ExpectedHash} =
-        case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
-        <<"simple">> ->
-            {couch_passwords:simple(Pass, UserSalt),
-            couch_util:get_value(<<"password_sha">>, UserProps, nil)};
-        <<"pbkdf2">> ->
-            Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
-            {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
-             couch_util:get_value(<<"derived_key">>, UserProps, nil)}
-    end,
-    couch_passwords:verify(PasswordHash, ExpectedHash).
-
-auth_name(String) when is_list(String) ->
-    [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
-    ?l2b(Name).
-
-make_cookie_time() ->
-    {NowMS, NowS, _} = erlang:now(),
-    NowMS * 1000000 + NowS.
-
-cookie_scheme(#httpd{mochi_req=MochiReq}) ->
-    [{http_only, true}] ++
-    case MochiReq:get(scheme) of
-        http -> [];
-        https -> [{secure, true}]
-    end.
-
-max_age() ->
-    case couch_config:get("couch_httpd_auth", "allow_persistent_cookies", "false") of
-        "false" ->
-            [];
-        "true" ->
-            Timeout = list_to_integer(
-                couch_config:get("couch_httpd_auth", "timeout", "600")),
-            [{max_age, Timeout}]
-    end.
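
A note on the proxy handler in this module: when proxy_use_secret is enabled,
the client must send an X-Auth-CouchDB-Token matching what proxy_auth_user/1
computes, i.e. the hex-encoded hmac-sha1 of the username keyed with the shared
secret. A sketch of the client side, using the same calls the module uses (the
header values shown are made up):

    %% Must equal couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
    %% which is exactly what proxy_auth_user/1 checks.
    make_proxy_token(Secret, UserName) ->
        couch_util:to_hex(crypto:sha_mac(Secret, UserName)).

    %% The client then sends, for example:
    %%   X-Auth-CouchDB-UserName: foo
    %%   X-Auth-CouchDB-Roles: users,blogger
    %%   X-Auth-CouchDB-Token: <result of make_proxy_token(Secret, "foo")>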


[37/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
move src/* to the root

There is no need for an src/ folder except for aesthetic reasons; we
are obviously distributing source. This layout is also more common in
the Erlang world, and most editors and package managers understand it.


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/a3f94781
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/a3f94781
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/a3f94781

Branch: refs/heads/import-rcouch
Commit: a3f94781e77a9236976dfb447e8dddd02656eb15
Parents: a278e0d
Author: benoitc <be...@apache.org>
Authored: Sun Jan 12 10:17:31 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 rebar.config.script      | 2 +-
 src/couch.app.src.script | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a3f94781/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index 5a75a3d..3acdfae 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -14,7 +14,7 @@
 %% the License.
 
 
-Cfg = case file:consult("../../../pkg.vars.config") of
+Cfg = case file:consult("../../pkg.vars.config") of
           {ok, Terms} ->
               Terms;
           _Err ->

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a3f94781/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index d8962fa..1e14e3d 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -14,7 +14,7 @@
 %% the License.
 
 
-Cfg = case file:consult("../../../pkg.vars.config") of
+Cfg = case file:consult("../../pkg.vars.config") of
           {ok, Terms} ->
               Terms;
           _Err ->


[08/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_compaction_daemon.erl
----------------------------------------------------------------------
diff --git a/src/couch_compaction_daemon.erl b/src/couch_compaction_daemon.erl
new file mode 100644
index 0000000..18a51a4
--- /dev/null
+++ b/src/couch_compaction_daemon.erl
@@ -0,0 +1,504 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_compaction_daemon).
+-behaviour(gen_server).
+
+% public API
+-export([start_link/0, config_change/3]).
+
+% gen_server callbacks
+-export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
+-export([code_change/3, terminate/2]).
+
+-include("couch_db.hrl").
+
+-define(CONFIG_ETS, couch_compaction_daemon_config).
+
+-record(state, {
+    loop_pid
+}).
+
+-record(config, {
+    db_frag = nil,
+    view_frag = nil,
+    period = nil,
+    cancel = false,
+    parallel_view_compact = false
+}).
+
+-record(period, {
+    from = nil,
+    to = nil
+}).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+    process_flag(trap_exit, true),
+    ?CONFIG_ETS = ets:new(?CONFIG_ETS, [named_table, set, protected]),
+    ok = couch_config:register(fun ?MODULE:config_change/3),
+    load_config(),
+    Server = self(),
+    Loop = spawn_link(fun() -> compact_loop(Server) end),
+    {ok, #state{loop_pid = Loop}}.
+
+
+config_change("compactions", DbName, NewValue) ->
+    ok = gen_server:cast(?MODULE, {config_update, DbName, NewValue}).
+
+
+handle_cast({config_update, DbName, deleted}, State) ->
+    true = ets:delete(?CONFIG_ETS, ?l2b(DbName)),
+    {noreply, State};
+
+handle_cast({config_update, DbName, Config}, #state{loop_pid = Loop} = State) ->
+    case parse_config(DbName, Config) of
+    {ok, NewConfig} ->
+        WasEmpty = (ets:info(?CONFIG_ETS, size) =:= 0),
+        true = ets:insert(?CONFIG_ETS, {?l2b(DbName), NewConfig}),
+        case WasEmpty of
+        true ->
+            Loop ! {self(), have_config};
+        false ->
+            ok
+        end;
+    error ->
+        ok
+    end,
+    {noreply, State}.
+
+
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, State}.
+
+
+handle_info({'EXIT', Pid, Reason}, #state{loop_pid = Pid} = State) ->
+    {stop, {compaction_loop_died, Reason}, State}.
+
+
+terminate(_Reason, _State) ->
+    true = ets:delete(?CONFIG_ETS).
+
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+compact_loop(Parent) ->
+    {ok, _} = couch_server:all_databases(
+        fun(DbName, Acc) ->
+            case ets:info(?CONFIG_ETS, size) =:= 0 of
+            true ->
+                {stop, Acc};
+            false ->
+                case get_db_config(DbName) of
+                nil ->
+                    ok;
+                {ok, Config} ->
+                    case check_period(Config) of
+                    true ->
+                        maybe_compact_db(DbName, Config);
+                    false ->
+                        ok
+                    end
+                end,
+                {ok, Acc}
+            end
+        end, ok),
+    case ets:info(?CONFIG_ETS, size) =:= 0 of
+    true ->
+        receive {Parent, have_config} -> ok end;
+    false ->
+        PausePeriod = list_to_integer(
+            couch_config:get("compaction_daemon", "check_interval", "300")),
+        ok = timer:sleep(PausePeriod * 1000)
+    end,
+    compact_loop(Parent).
+
+
+maybe_compact_db(DbName, Config) ->
+    case (catch couch_db:open_int(DbName, [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}])) of
+    {ok, Db} ->
+        DDocNames = db_ddoc_names(Db),
+        case can_db_compact(Config, Db) of
+        true ->
+            {ok, DbCompactPid} = couch_db:start_compact(Db),
+            TimeLeft = compact_time_left(Config),
+            case Config#config.parallel_view_compact of
+            true ->
+                ViewsCompactPid = spawn_link(fun() ->
+                    maybe_compact_views(DbName, DDocNames, Config)
+                end),
+                ViewsMonRef = erlang:monitor(process, ViewsCompactPid);
+            false ->
+                ViewsCompactPid = nil,
+                ViewsMonRef = nil
+            end,
+            DbMonRef = erlang:monitor(process, DbCompactPid),
+            receive
+            {'DOWN', DbMonRef, process, _, normal} ->
+                couch_db:close(Db),
+                case Config#config.parallel_view_compact of
+                true ->
+                    ok;
+                false ->
+                    maybe_compact_views(DbName, DDocNames, Config)
+                end;
+            {'DOWN', DbMonRef, process, _, Reason} ->
+                couch_db:close(Db),
+                ?LOG_ERROR("Compaction daemon - an error ocurred while"
+                    " compacting the database `~s`: ~p", [DbName, Reason])
+            after TimeLeft ->
+                ?LOG_INFO("Compaction daemon - canceling compaction for database"
+                    " `~s` because it's exceeding the allowed period.",
+                    [DbName]),
+                erlang:demonitor(DbMonRef, [flush]),
+                ok = couch_db:cancel_compact(Db),
+                couch_db:close(Db)
+            end,
+            case ViewsMonRef of
+            nil ->
+                ok;
+            _ ->
+                receive
+                {'DOWN', ViewsMonRef, process, _, _Reason} ->
+                    ok
+                after TimeLeft + 1000 ->
+                    % Under normal circumstances, the view compaction process
+                    % should have finished already.
+                    erlang:demonitor(ViewsMonRef, [flush]),
+                    unlink(ViewsCompactPid),
+                    exit(ViewsCompactPid, kill)
+                end
+            end;
+        false ->
+            couch_db:close(Db),
+            maybe_compact_views(DbName, DDocNames, Config)
+        end;
+    _ ->
+        ok
+    end.
+
+
+maybe_compact_views(_DbName, [], _Config) ->
+    ok;
+maybe_compact_views(DbName, [DDocName | Rest], Config) ->
+    case check_period(Config) of
+    true ->
+        case maybe_compact_view(DbName, DDocName, Config) of
+        ok ->
+            maybe_compact_views(DbName, Rest, Config);
+        timeout ->
+            ok
+        end;
+    false ->
+        ok
+    end.
+
+
+db_ddoc_names(Db) ->
+    {ok, _, DDocNames} = couch_db:enum_docs(
+        Db,
+        fun(#full_doc_info{id = <<"_design/", _/binary>>, deleted = true}, _, Acc) ->
+            {ok, Acc};
+        (#full_doc_info{id = <<"_design/", Id/binary>>}, _, Acc) ->
+            {ok, [Id | Acc]};
+        (_, _, Acc) ->
+            {stop, Acc}
+        end, [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
+    DDocNames.
+
+
+maybe_compact_view(DbName, GroupId, Config) ->
+    DDocId = <<"_design/", GroupId/binary>>,
+    case (catch couch_mrview:get_info(DbName, DDocId)) of
+    {ok, GroupInfo} ->
+        case can_view_compact(Config, DbName, GroupId, GroupInfo) of
+        true ->
+            {ok, MonRef} = couch_mrview:compact(DbName, DDocId, [monitor]),
+            TimeLeft = compact_time_left(Config),
+            receive
+            {'DOWN', MonRef, process, _, normal} ->
+                ok;
+            {'DOWN', MonRef, process, _, Reason} ->
+                ?LOG_ERROR("Compaction daemon - an error ocurred while compacting"
+                    " the view group `~s` from database `~s`: ~p",
+                    [GroupId, DbName, Reason]),
+                ok
+            after TimeLeft ->
+                ?LOG_INFO("Compaction daemon - canceling the compaction for the "
+                    "view group `~s` of the database `~s` because it's exceeding"
+                    " the allowed period.", [GroupId, DbName]),
+                erlang:demonitor(MonRef, [flush]),
+                ok = couch_mrview:cancel_compaction(DbName, DDocId),
+                timeout
+            end;
+        false ->
+            ok
+        end;
+    Error ->
+        ?LOG_ERROR("Error opening view group `~s` from database `~s`: ~p",
+            [GroupId, DbName, Error]),
+        ok
+    end.
+
+
+compact_time_left(#config{cancel = false}) ->
+    infinity;
+compact_time_left(#config{period = nil}) ->
+    infinity;
+compact_time_left(#config{period = #period{to = {ToH, ToM} = To}}) ->
+    {H, M, _} = time(),
+    case To > {H, M} of
+    true ->
+        ((ToH - H) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000);
+    false ->
+        ((24 - H + ToH) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000)
+    end.
+
+
+get_db_config(DbName) ->
+    case ets:lookup(?CONFIG_ETS, DbName) of
+    [] ->
+        case ets:lookup(?CONFIG_ETS, <<"_default">>) of
+        [] ->
+            nil;
+        [{<<"_default">>, Config}] ->
+            {ok, Config}
+        end;
+    [{DbName, Config}] ->
+        {ok, Config}
+    end.
+
+
+can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
+    case check_period(Config) of
+    false ->
+        false;
+    true ->
+        {ok, DbInfo} = couch_db:get_db_info(Db),
+        {Frag, SpaceRequired} = frag(DbInfo),
+        ?LOG_DEBUG("Fragmentation for database `~s` is ~p%, estimated space for"
+           " compaction is ~p bytes.", [Db#db.name, Frag, SpaceRequired]),
+        case check_frag(Threshold, Frag) of
+        false ->
+            false;
+        true ->
+            Free = free_space(couch_config:get("couchdb", "database_dir")),
+            case Free >= SpaceRequired of
+            true ->
+                true;
+            false ->
+                ?LOG_WARN("Compaction daemon - skipping database `~s` "
+                    "compaction: the estimated necessary disk space is about ~p"
+                    " bytes but the currently available disk space is ~p bytes.",
+                   [Db#db.name, SpaceRequired, Free]),
+                false
+            end
+        end
+    end.
+
+can_view_compact(Config, DbName, GroupId, GroupInfo) ->
+    case check_period(Config) of
+    false ->
+        false;
+    true ->
+        case couch_util:get_value(updater_running, GroupInfo) of
+        true ->
+            false;
+        false ->
+            {Frag, SpaceRequired} = frag(GroupInfo),
+            ?LOG_DEBUG("Fragmentation for view group `~s` (database `~s`) is "
+                "~p%, estimated space for compaction is ~p bytes.",
+                [GroupId, DbName, Frag, SpaceRequired]),
+            case check_frag(Config#config.view_frag, Frag) of
+            false ->
+                false;
+            true ->
+                Free = free_space(couch_index_util:root_dir()),
+                case Free >= SpaceRequired of
+                true ->
+                    true;
+                false ->
+                    ?LOG_WARN("Compaction daemon - skipping view group `~s` "
+                        "compaction (database `~s`): the estimated necessary "
+                        "disk space is about ~p bytes but the currently available"
+                        " disk space is ~p bytes.",
+                        [GroupId, DbName, SpaceRequired, Free]),
+                    false
+                end
+            end
+        end
+    end.
+
+
+check_period(#config{period = nil}) ->
+    true;
+check_period(#config{period = #period{from = From, to = To}}) ->
+    {HH, MM, _} = erlang:time(),
+    case From < To of
+    true ->
+        ({HH, MM} >= From) andalso ({HH, MM} < To);
+    false ->
+        ({HH, MM} >= From) orelse ({HH, MM} < To)
+    end.
+
+
+check_frag(nil, _) ->
+    true;
+check_frag(Threshold, Frag) ->
+    Frag >= Threshold.
+
+
+frag(Props) ->
+    FileSize = couch_util:get_value(disk_size, Props),
+    MinFileSize = list_to_integer(
+        couch_config:get("compaction_daemon", "min_file_size", "131072")),
+    case FileSize < MinFileSize of
+    true ->
+        {0, FileSize};
+    false ->
+        case couch_util:get_value(data_size, Props) of
+        null ->
+            {100, FileSize};
+        0 ->
+            {0, FileSize};
+        DataSize ->
+            Frag = round(((FileSize - DataSize) / FileSize * 100)),
+            {Frag, space_required(DataSize)}
+        end
+    end.
+
+% Rough, and pessimistic, estimation of necessary disk space to compact a
+% database or view index.
+space_required(DataSize) ->
+    round(DataSize * 2.0).
+
+
+load_config() ->
+    lists:foreach(
+        fun({DbName, ConfigString}) ->
+            case parse_config(DbName, ConfigString) of
+            {ok, Config} ->
+                true = ets:insert(?CONFIG_ETS, {?l2b(DbName), Config});
+            error ->
+                ok
+            end
+        end,
+        couch_config:get("compactions")).
+
+parse_config(DbName, ConfigString) ->
+    case (catch do_parse_config(ConfigString)) of
+    {ok, Conf} ->
+        {ok, Conf};
+    incomplete_period ->
+        ?LOG_ERROR("Incomplete period ('to' or 'from' missing) in the compaction"
+            " configuration for database `~s`", [DbName]),
+        error;
+    _ ->
+        ?LOG_ERROR("Invalid compaction configuration for database "
+            "`~s`: `~s`", [DbName, ConfigString]),
+        error
+    end.
+
+do_parse_config(ConfigString) ->
+    {ok, ConfProps} = couch_util:parse_term(ConfigString),
+    {ok, #config{period = Period} = Conf} = config_record(ConfProps, #config{}),
+    case Period of
+    nil ->
+        {ok, Conf};
+    #period{from = From, to = To} when From =/= nil, To =/= nil ->
+        {ok, Conf};
+    #period{} ->
+        incomplete_period
+    end.
+
+config_record([], Config) ->
+    {ok, Config};
+
+config_record([{db_fragmentation, V} | Rest], Config) ->
+    [Frag] = string:tokens(V, "%"),
+    config_record(Rest, Config#config{db_frag = list_to_integer(Frag)});
+
+config_record([{view_fragmentation, V} | Rest], Config) ->
+    [Frag] = string:tokens(V, "%"),
+    config_record(Rest, Config#config{view_frag = list_to_integer(Frag)});
+
+config_record([{from, V} | Rest], #config{period = Period0} = Config) ->
+    Time = parse_time(V),
+    Period = case Period0 of
+    nil ->
+        #period{from = Time};
+    #period{} ->
+        Period0#period{from = Time}
+    end,
+    config_record(Rest, Config#config{period = Period});
+
+config_record([{to, V} | Rest], #config{period = Period0} = Config) ->
+    Time = parse_time(V),
+    Period = case Period0 of
+    nil ->
+        #period{to = Time};
+    #period{} ->
+        Period0#period{to = Time}
+    end,
+    config_record(Rest, Config#config{period = Period});
+
+config_record([{strict_window, true} | Rest], Config) ->
+    config_record(Rest, Config#config{cancel = true});
+
+config_record([{strict_window, false} | Rest], Config) ->
+    config_record(Rest, Config#config{cancel = false});
+
+config_record([{parallel_view_compaction, true} | Rest], Config) ->
+    config_record(Rest, Config#config{parallel_view_compact = true});
+
+config_record([{parallel_view_compaction, false} | Rest], Config) ->
+    config_record(Rest, Config#config{parallel_view_compact = false}).
+
+
+parse_time(String) ->
+    [HH, MM] = string:tokens(String, ":"),
+    {list_to_integer(HH), list_to_integer(MM)}.
+
+
+free_space(Path) ->
+    DiskData = lists:sort(
+        fun({PathA, _, _}, {PathB, _, _}) ->
+            length(filename:split(PathA)) > length(filename:split(PathB))
+        end,
+        disksup:get_disk_data()),
+    free_space_rec(abs_path(Path), DiskData).
+
+free_space_rec(_Path, []) ->
+    undefined;
+free_space_rec(Path, [{MountPoint0, Total, Usage} | Rest]) ->
+    MountPoint = abs_path(MountPoint0),
+    case MountPoint =:= string:substr(Path, 1, length(MountPoint)) of
+    false ->
+        free_space_rec(Path, Rest);
+    true ->
+        trunc(Total - (Total * (Usage / 100))) * 1024
+    end.
+
+abs_path(Path0) ->
+    Path = filename:absname(Path0),
+    case lists:last(Path) of
+    $/ ->
+        Path;
+    _ ->
+        Path ++ "/"
+    end.
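
The daemon is driven by the [compactions] section of the ini files: each value
is an Erlang proplist that parse_config/2 reads via couch_util:parse_term/1. A
plausible entry (the thresholds and times here are illustrative, not defaults):

    [compactions]
    _default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}, {strict_window, true}]

With strict_window set, #config.cancel is true, so a compaction still running
when the period ends is cancelled rather than allowed to overrun.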

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_compress.erl
----------------------------------------------------------------------
diff --git a/src/couch_compress.erl b/src/couch_compress.erl
new file mode 100644
index 0000000..ac386fd
--- /dev/null
+++ b/src/couch_compress.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_compress).
+
+-export([compress/2, decompress/1, is_compressed/2]).
+-export([get_compression_method/0]).
+
+-include("couch_db.hrl").
+
+% binaries compressed with snappy have their first byte set to this value
+-define(SNAPPY_PREFIX, 1).
+% Term prefixes documented at:
+%      http://www.erlang.org/doc/apps/erts/erl_ext_dist.html
+-define(TERM_PREFIX, 131).
+-define(COMPRESSED_TERM_PREFIX, 131, 80).
+
+
+get_compression_method() ->
+    case couch_config:get("couchdb", "file_compression") of
+    undefined ->
+        ?DEFAULT_COMPRESSION;
+    Method1 ->
+        case string:tokens(Method1, "_") of
+        [Method] ->
+            list_to_existing_atom(Method);
+        [Method, Level] ->
+            {list_to_existing_atom(Method), list_to_integer(Level)}
+        end
+    end.
+
+
+compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
+    Bin;
+compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
+    compress(decompress(Bin), Method);
+compress(<<?TERM_PREFIX, _/binary>> = Bin, Method) ->
+    compress(decompress(Bin), Method);
+compress(Term, none) ->
+    ?term_to_bin(Term);
+compress(Term, {deflate, Level}) ->
+    term_to_binary(Term, [{minor_version, 1}, {compressed, Level}]);
+compress(Term, snappy) ->
+    Bin = ?term_to_bin(Term),
+    try
+        {ok, CompressedBin} = snappy:compress(Bin),
+        case byte_size(CompressedBin) < byte_size(Bin) of
+        true ->
+            <<?SNAPPY_PREFIX, CompressedBin/binary>>;
+        false ->
+            Bin
+        end
+    catch exit:snappy_nif_not_loaded ->
+        Bin
+    end.
+
+
+decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
+    {ok, TermBin} = snappy:decompress(Rest),
+    binary_to_term(TermBin);
+decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
+    binary_to_term(Bin).
+
+
+is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
+    Method =:= snappy;
+is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
+    true;
+is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, _Method) ->
+    false;
+is_compressed(<<?TERM_PREFIX, _/binary>>, Method) ->
+    Method =:= none;
+is_compressed(Term, _Method) when not is_binary(Term) ->
+    false.
+
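
Usage is symmetric: compress/2 keeps the plain external term format whenever
snappy does not actually shrink the term, and decompress/1 dispatches on the
prefix byte, so callers never need to know which branch was taken. A short
sketch (Doc is any Erlang term):

    Bin = couch_compress:compress(Doc, couch_compress:get_compression_method()),
    Doc = couch_compress:decompress(Bin),
    %% is_compressed/2 only inspects the leading byte(s):
    %%   <<1, ...>>       -> snappy-compressed
    %%   <<131, 80, ...>> -> deflate-compressed term
    %%   <<131, ...>>     -> uncompressed external term format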

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_config.erl
----------------------------------------------------------------------
diff --git a/src/couch_config.erl b/src/couch_config.erl
new file mode 100644
index 0000000..22d7cdc
--- /dev/null
+++ b/src/couch_config.erl
@@ -0,0 +1,251 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Reads CouchDB's ini files and gets queried for configuration parameters.
+% This module is initialized with a list of ini files that it consecutively
+% reads Key/Value pairs from and saves them in an ets table. If more than one
+% ini file is specified, the last one is used to write changes that are made
+% with set/3 and set/4 back to that ini file.
+
+-module(couch_config).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+
+-export([start_link/1, stop/0]).
+-export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
+-export([register/1, register/2]).
+-export([parse_ini_file/1]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-record(config, {
+    notify_funs=[],
+    write_filename=undefined
+}).
+
+
+start_link(IniFiles) ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).
+
+stop() ->
+    gen_server:cast(?MODULE, stop).
+
+
+all() ->
+    lists:sort(gen_server:call(?MODULE, all, infinity)).
+
+
+get(Section) when is_binary(Section) ->
+    ?MODULE:get(?b2l(Section));
+get(Section) ->
+    Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
+    [{Key, Value} || [Key, Value] <- Matches].
+
+get(Section, Key) ->
+    ?MODULE:get(Section, Key, undefined).
+
+get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
+    ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
+get(Section, Key, Default) ->
+    case ets:lookup(?MODULE, {Section, Key}) of
+        [] -> Default;
+        [{_, Match}] -> Match
+    end.
+
+set(Section, Key, Value) ->
+    ?MODULE:set(Section, Key, Value, true).
+
+set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key)  ->
+    ?MODULE:set(?b2l(Section), ?b2l(Key), Value, Persist);
+set(Section, Key, Value, Persist) ->
+    gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).
+
+
+delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
+    delete(?b2l(Section), ?b2l(Key));
+delete(Section, Key) ->
+    delete(Section, Key, true).
+
+delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
+    delete(?b2l(Section), ?b2l(Key), Persist);
+delete(Section, Key, Persist) ->
+    gen_server:call(?MODULE, {delete, Section, Key, Persist}).
+
+
+register(Fun) ->
+    ?MODULE:register(Fun, self()).
+
+register(Fun, Pid) ->
+    gen_server:call(?MODULE, {register, Fun, Pid}).
+
+
+init(IniFiles) ->
+    ets:new(?MODULE, [named_table, set, protected]),
+    try
+        lists:foreach(fun(IniFile) ->
+            {ok, ParsedIniValues} = parse_ini_file(IniFile),
+            ets:insert(?MODULE, ParsedIniValues)
+        end, IniFiles),
+        WriteFile = case IniFiles of
+            [_|_] -> lists:last(IniFiles);
+            _ -> undefined
+        end,
+        {ok, #config{write_filename = WriteFile}}
+    catch _Tag:Error ->
+        {stop, Error}
+    end.
+
+
+terminate(_Reason, _State) ->
+    ok.
+
+
+handle_call(all, _From, Config) ->
+    Resp = lists:sort((ets:tab2list(?MODULE))),
+    {reply, Resp, Config};
+handle_call({set, Sec, Key, Val, Persist}, From, Config) ->
+    Result = case {Persist, Config#config.write_filename} of
+        {true, undefined} ->
+            ok;
+        {true, FileName} ->
+            couch_config_writer:save_to_file({{Sec, Key}, Val}, FileName);
+        _ ->
+            ok
+    end,
+    case Result of
+    ok ->
+        true = ets:insert(?MODULE, {{Sec, Key}, Val}),
+        spawn_link(fun() ->
+            [catch F(Sec, Key, Val, Persist) || {_Pid, F} <- Config#config.notify_funs],
+            gen_server:reply(From, ok)
+        end),
+        {noreply, Config};
+    _Error ->
+        {reply, Result, Config}
+    end;
+handle_call({delete, Sec, Key, Persist}, From, Config) ->
+    true = ets:delete(?MODULE, {Sec,Key}),
+    case {Persist, Config#config.write_filename} of
+        {true, undefined} ->
+            ok;
+        {true, FileName} ->
+            couch_config_writer:save_to_file({{Sec, Key}, ""}, FileName);
+        _ ->
+            ok
+    end,
+    spawn_link(fun() ->
+        [catch F(Sec, Key, deleted, Persist) || {_Pid, F} <- Config#config.notify_funs],
+        gen_server:reply(From, ok)
+    end),
+    {noreply, Config};
+handle_call({register, Fun, Pid}, _From, #config{notify_funs=PidFuns}=Config) ->
+    erlang:monitor(process, Pid),
+    % normalize funs of arity 1, 2 or 3 to arity 4
+    Fun2 =
+    case Fun of
+        _ when is_function(Fun, 1) ->
+            fun(Section, _Key, _Value, _Persist) -> Fun(Section) end;
+        _ when is_function(Fun, 2) ->
+            fun(Section, Key, _Value, _Persist) -> Fun(Section, Key) end;
+        _ when is_function(Fun, 3) ->
+            fun(Section, Key, Value, _Persist) -> Fun(Section, Key, Value) end;
+        _ when is_function(Fun, 4) ->
+            Fun
+    end,
+    {reply, ok, Config#config{notify_funs=[{Pid, Fun2} | PidFuns]}}.
+
+
+handle_cast(stop, State) ->
+    {stop, normal, State};
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info({'DOWN', _, _, DownPid, _}, #config{notify_funs=PidFuns}=Config) ->
+    % remove any funs registered by the downed process
+    FilteredPidFuns = [{Pid,Fun} || {Pid,Fun} <- PidFuns, Pid /= DownPid],
+    {noreply, Config#config{notify_funs=FilteredPidFuns}}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+parse_ini_file(IniFile) ->
+    IniFilename = couch_util:abs_pathname(IniFile),
+    IniBin =
+    case file:read_file(IniFilename) of
+        {ok, IniBin0} ->
+            IniBin0;
+        {error, Reason} = Error ->
+            ?LOG_ERROR("Could not read server configuration file ~s: ~s",
+                [IniFilename, file:format_error(Reason)]),
+            throw(Error)
+    end,
+
+    Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
+    {_, ParsedIniValues} =
+    lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
+            case string:strip(Line) of
+            "[" ++ Rest ->
+                case re:split(Rest, "\\]", [{return, list}]) of
+                [NewSectionName, ""] ->
+                    {NewSectionName, AccValues};
+                _Else -> % end bracket not at end, ignore this line
+                    {AccSectionName, AccValues}
+                end;
+            ";" ++ _Comment ->
+                {AccSectionName, AccValues};
+            Line2 ->
+                case re:split(Line2, "\s*=\s*", [{return, list}]) of
+                [Value] ->
+                    MultiLineValuePart = case re:run(Line, "^ \\S", []) of
+                    {match, _} ->
+                        true;
+                    _ ->
+                        false
+                    end,
+                    case {MultiLineValuePart, AccValues} of
+                    {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
+                        % remove comment
+                        case re:split(Value, " ;|\t;", [{return, list}]) of
+                        [[]] ->
+                            % empty line
+                            {AccSectionName, AccValues};
+                        [LineValue | _Rest] ->
+                            E = {{AccSectionName, ValueName},
+                                PrevValue ++ " " ++ LineValue},
+                            {AccSectionName, [E | AccValuesRest]}
+                        end;
+                    _ ->
+                        {AccSectionName, AccValues}
+                    end;
+                [""|_LineValues] -> % line begins with "=", ignore
+                    {AccSectionName, AccValues};
+                [ValueName|LineValues] -> % yeehaw, got a line!
+                    RemainingLine = couch_util:implode(LineValues, "="),
+                    % removes comments
+                    case re:split(RemainingLine, " ;|\t;", [{return, list}]) of
+                    [[]] ->
+                        % empty line means delete this key
+                        ets:delete(?MODULE, {AccSectionName, ValueName}),
+                        {AccSectionName, AccValues};
+                    [LineValue | _Rest] ->
+                        {AccSectionName,
+                            [{{AccSectionName, ValueName}, LineValue} | AccValues]}
+                    end
+                end
+            end
+        end, {"", []}, Lines),
+    {ok, ParsedIniValues}.
+
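
In practice the table is read and updated through get and set. A sketch against
a hypothetical ini file (note that all values are plain strings, so callers do
their own conversions):

    %% given an ini file containing:
    %%   [httpd]
    %%   port = 5984
    Port = list_to_integer(couch_config:get("httpd", "port", "5984")),
    ok = couch_config:set("httpd", "port", "8000", false),  % in-memory only
    ok = couch_config:set("httpd", "port", "8000").         % also persisted to
                                                            % the last ini file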

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_config_writer.erl
----------------------------------------------------------------------
diff --git a/src/couch_config_writer.erl b/src/couch_config_writer.erl
new file mode 100644
index 0000000..21f1c3f
--- /dev/null
+++ b/src/couch_config_writer.erl
@@ -0,0 +1,88 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% @doc Saves a Key/Value pair to a ini file. The Key consists of a Section
+%%      and Option combination. If that combination is found in the ini file
+%%      the new value replaces the old value. If only the Section is found the
+%%      Option and value combination is appended to the Section. If the Section
+%%      does not yet exist in the ini file, it is added and the Option/Value
+%%      pair is appended.
+%% @see couch_config
+
+-module(couch_config_writer).
+
+-export([save_to_file/2]).
+
+-include("couch_db.hrl").
+
+%% @spec save_to_file(
+%%           Config::{{Section::string(), Option::string()}, Value::string()},
+%%           File::filename()) -> ok
+%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
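+%%
+%%      A hypothetical call (section, option, value and file are illustrative):
+%%
+%%        ok = couch_config_writer:save_to_file(
+%%            {{"couchdb", "max_dbs_open"}, "500"}, "local.ini")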
+save_to_file({{Section, Key}, Value}, File) ->
+    {ok, OldFileContents} = file:read_file(File),
+    Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),
+
+    SectionLine = "[" ++ Section ++ "]",
+    {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),
+
+    NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
+    NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
+    case file:write_file(File, NewFileContents) of
+    ok ->
+        ok;
+    {error, Reason} = Error ->
+        ?LOG_ERROR("Could not write config file ~s: ~s",
+            [File, file:format_error(Reason)]),
+        Error
+    end.
+
+
+process_file_lines([Section|Rest], SeenLines, Section, Pattern, Key, Value) ->
+    process_section_lines(Rest, [Section|SeenLines], Pattern, Key, Value);
+
+process_file_lines([Line|Rest], SeenLines, Section, Pattern, Key, Value) ->
+    process_file_lines(Rest, [Line|SeenLines], Section, Pattern, Key, Value);
+
+process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
+    % Section wasn't found.  Append it with the option here.
+    [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].
+
+
+process_section_lines([Line|Rest], SeenLines, Pattern, Key, Value) ->
+    case re:run(Line, Pattern, [{capture, all_but_first}]) of
+    nomatch -> % Found nothing interesting. Move on.
+        process_section_lines(Rest, [Line|SeenLines], Pattern, Key, Value);
+    {match, []} -> % Found another section. Append the option here.
+        lists:reverse(Rest) ++
+        [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
+    {match, _} -> % Found the option itself. Replace it.
+        lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
+    end;
+
+process_section_lines([], SeenLines, _Pattern, Key, Value) ->
+    % Found end of file within the section. Append the option here.
+    [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
+
+
+reverse_and_add_newline([Line|Rest], Content) ->
+    reverse_and_add_newline(Rest, [Line, "\n", Content]);
+
+reverse_and_add_newline([], Content) ->
+    Content.
+
+
+strip_empty_lines(["" | Rest]) ->
+    strip_empty_lines(Rest);
+
+strip_empty_lines(All) ->
+    All.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_db.erl b/src/couch_db.erl
new file mode 100644
index 0000000..11ea0fd
--- /dev/null
+++ b/src/couch_db.erl
@@ -0,0 +1,1358 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db).
+-behaviour(gen_server).
+
+-export([open/2,open_int/2,close/1,create/2,get_db_info/1,get_design_docs/1]).
+-export([start_compact/1, cancel_compact/1]).
+-export([open_ref_counted/2,is_idle/1,monitor/1,count_changes_since/2]).
+-export([update_doc/3,update_doc/4,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
+-export([get_doc_info/2,get_full_doc_info/2,get_full_doc_infos/2]).
+-export([open_doc/2,open_doc/3,open_doc_revs/4]).
+-export([set_revs_limit/2,get_revs_limit/1]).
+-export([get_missing_revs/2,name/1,get_update_seq/1,get_committed_update_seq/1]).
+-export([enum_docs/4,enum_docs_since/5]).
+-export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
+-export([increment_update_seq/1,get_purge_seq/1,purge_docs/2,get_last_purged/1]).
+-export([start_link/3,open_doc_int/3,ensure_full_commit/1]).
+-export([set_security/2,get_security/1]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+-export([changes_since/4,changes_since/5,read_doc/2,new_revid/1]).
+-export([check_is_admin/1, check_is_member/1]).
+-export([reopen/1, is_system_db/1, compression/1]).
+
+-include("couch_db.hrl").
+
+
+start_link(DbName, Filepath, Options) ->
+    case open_db_file(Filepath, Options) of
+    {ok, Fd} ->
+        StartResult = gen_server:start_link(couch_db, {DbName, Filepath, Fd, Options}, []),
+        unlink(Fd),
+        StartResult;
+    Else ->
+        Else
+    end.
+
+open_db_file(Filepath, Options) ->
+    case couch_file:open(Filepath, Options) of
+    {ok, Fd} ->
+        {ok, Fd};
+    {error, enoent} ->
+        % Couldn't find the file. Is there a compact version? This can
+        % happen if the server crashed during the compaction file switch.
+        case couch_file:open(Filepath ++ ".compact", [nologifmissing]) of
+        {ok, Fd} ->
+            ?LOG_INFO("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
+            ok = file:rename(Filepath ++ ".compact", Filepath),
+            ok = couch_file:sync(Fd),
+            {ok, Fd};
+        {error, enoent} ->
+            {not_found, no_db_file}
+        end;
+    Error ->
+        Error
+    end.
+
+
+create(DbName, Options) ->
+    couch_server:create(DbName, Options).
+
+% this is for opening a database for internal purposes like the replicator
+% or the view indexer. it never throws a reader error.
+open_int(DbName, Options) ->
+    couch_server:open(DbName, Options).
+
+% this should be called any time an HTTP request opens the database.
+% it ensures that the HTTP userCtx is a valid reader.
+open(DbName, Options) ->
+    case couch_server:open(DbName, Options) of
+        {ok, Db} ->
+            try
+                check_is_member(Db),
+                {ok, Db}
+            catch
+                throw:Error ->
+                    close(Db),
+                    throw(Error)
+            end;
+        Else -> Else
+    end.
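+
+% A hypothetical open with an explicit user context (db and user names are
+% illustrative):
+%
+%   {ok, Db} = couch_db:open(<<"mydb">>,
+%       [{user_ctx, #user_ctx{name = <<"alice">>, roles = []}}])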
+
+reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr, user_ctx = UserCtx}) ->
+    {ok, #db{fd_ref_counter = NewRefCntr} = NewDb} =
+        gen_server:call(Pid, get_db, infinity),
+    case NewRefCntr =:= OldRefCntr of
+    true ->
+        ok;
+    false ->
+        couch_ref_counter:add(NewRefCntr),
+        catch couch_ref_counter:drop(OldRefCntr)
+    end,
+    {ok, NewDb#db{user_ctx = UserCtx}}.
+
+is_system_db(#db{options = Options}) ->
+    lists:member(sys_db, Options).
+
+ensure_full_commit(#db{update_pid=UpdatePid,instance_start_time=StartTime}) ->
+    ok = gen_server:call(UpdatePid, full_commit, infinity),
+    {ok, StartTime}.
+
+close(#db{fd_ref_counter=RefCntr}) ->
+    couch_ref_counter:drop(RefCntr).
+
+open_ref_counted(MainPid, OpenedPid) ->
+    gen_server:call(MainPid, {open_ref_count, OpenedPid}).
+
+is_idle(#db{main_pid = MainPid}) ->
+    is_idle(MainPid);
+is_idle(MainPid) ->
+    gen_server:call(MainPid, is_idle).
+
+monitor(#db{main_pid=MainPid}) ->
+    erlang:monitor(process, MainPid).
+
+start_compact(#db{update_pid=Pid}) ->
+    gen_server:call(Pid, start_compact).
+
+cancel_compact(#db{update_pid=Pid}) ->
+    gen_server:call(Pid, cancel_compact).
+
+delete_doc(Db, Id, Revisions) ->
+    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
+    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
+    {ok, Result}.
+
+open_doc(Db, IdOrDocInfo) ->
+    open_doc(Db, IdOrDocInfo, []).
+
+open_doc(Db, Id, Options) ->
+    increment_stat(Db, {couchdb, database_reads}),
+    case open_doc_int(Db, Id, Options) of
+    {ok, #doc{deleted=true}=Doc} ->
+        case lists:member(deleted, Options) of
+        true ->
+            apply_open_options({ok, Doc},Options);
+        false ->
+            {not_found, deleted}
+        end;
+    Else ->
+        apply_open_options(Else,Options)
+    end.
+
+apply_open_options({ok, Doc},Options) ->
+    apply_open_options2(Doc,Options);
+apply_open_options(Else,_Options) ->
+    Else.
+
+apply_open_options2(Doc,[]) ->
+    {ok, Doc};
+apply_open_options2(#doc{atts=Atts,revs=Revs}=Doc,
+        [{atts_since, PossibleAncestors}|Rest]) ->
+    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
+    apply_open_options2(Doc#doc{atts=[A#att{data=
+        if AttPos>RevPos -> Data; true -> stub end}
+        || #att{revpos=AttPos,data=Data}=A <- Atts]}, Rest);
+apply_open_options2(Doc, [ejson_body | Rest]) ->
+    apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
+apply_open_options2(Doc,[_|Rest]) ->
+    apply_open_options2(Doc,Rest).
+
+
+find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
+    0;
+find_ancestor_rev_pos(_DocRevs, []) ->
+    0;
+find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
+    case lists:member({RevPos, RevId}, AttsSinceRevs) of
+    true ->
+        RevPos;
+    false ->
+        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
+    end.
+
+open_doc_revs(Db, Id, Revs, Options) ->
+    increment_stat(Db, {couchdb, database_reads}),
+    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
+    {ok, [apply_open_options(Result, Options) || Result <- Results]}.
+
+% Each returned result is a tuple of the form
+% {Id, MissingRevs, PossibleAncestors}.
+% If no revs are missing for an id, that id is omitted from the results.
+get_missing_revs(Db, IdRevsList) ->
+    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
+    {ok, find_missing(IdRevsList, Results)}.
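+
+% For example (hypothetical ids and revs): given
+%   IdRevsList = [{<<"doc1">>, [{3, <<"b...">>}]}]
+% a reply could be
+%   {ok, [{<<"doc1">>, [{3, <<"b...">>}], [{2, <<"a...">>}]}]}
+% meaning rev 3-b... is missing and 2-a... is a possible ancestor.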
+
+find_missing([], []) ->
+    [];
+find_missing([{Id, Revs}|RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
+    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
+    [] ->
+        find_missing(RestIdRevs, RestLookupInfo);
+    MissingRevs ->
+        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
+        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
+        % Find the revs that are possible parents of this rev
+        PossibleAncestors =
+        lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
+            % this leaf is a "possible ancestor" of the missing
+            % revs if this LeafPos is less than any of the missing revs
+            case lists:any(fun({MissingPos, _}) ->
+                    LeafPos < MissingPos end, MissingRevs) of
+            true ->
+                [{LeafPos, LeafRevId} | Acc];
+            false ->
+                Acc
+            end
+        end, [], LeafRevs),
+        [{Id, MissingRevs, PossibleAncestors} |
+                find_missing(RestIdRevs, RestLookupInfo)]
+    end;
+find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
+    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
+
+get_doc_info(Db, Id) ->
+    case get_full_doc_info(Db, Id) of
+    {ok, DocInfo} ->
+        {ok, couch_doc:to_doc_info(DocInfo)};
+    Else ->
+        Else
+    end.
+
+% returns {ok, #full_doc_info{}} or not_found
+get_full_doc_info(Db, Id) ->
+    [Result] = get_full_doc_infos(Db, [Id]),
+    Result.
+
+get_full_doc_infos(Db, Ids) ->
+    couch_btree:lookup(by_id_btree(Db), Ids).
+
+increment_update_seq(#db{update_pid=UpdatePid}) ->
+    gen_server:call(UpdatePid, increment_update_seq).
+
+purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
+    gen_server:call(UpdatePid, {purge_docs, IdsRevs}).
+
+get_committed_update_seq(#db{committed_update_seq=Seq}) ->
+    Seq.
+
+get_update_seq(#db{update_seq=Seq})->
+    Seq.
+
+get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
+    PurgeSeq.
+
+get_last_purged(#db{header=#db_header{purged_docs=nil}}) ->
+    {ok, []};
+get_last_purged(#db{fd=Fd, header=#db_header{purged_docs=PurgedPointer}}) ->
+    couch_file:pread_term(Fd, PurgedPointer).
+
+get_db_info(Db) ->
+    #db{fd=Fd,
+        header=#db_header{disk_version=DiskVersion},
+        compactor_pid=Compactor,
+        update_seq=SeqNum,
+        name=Name,
+        instance_start_time=StartTime,
+        committed_update_seq=CommittedUpdateSeq,
+        fulldocinfo_by_id_btree = IdBtree,
+        docinfo_by_seq_btree = SeqBtree,
+        local_docs_btree = LocalBtree
+    } = Db,
+    {ok, Size} = couch_file:bytes(Fd),
+    {ok, DbReduction} = couch_btree:full_reduce(by_id_btree(Db)),
+    InfoList = [
+        {db_name, Name},
+        {doc_count, element(1, DbReduction)},
+        {doc_del_count, element(2, DbReduction)},
+        {update_seq, SeqNum},
+        {purge_seq, couch_db:get_purge_seq(Db)},
+        {compact_running, Compactor/=nil},
+        {disk_size, Size},
+        {data_size, db_data_size(DbReduction, [SeqBtree, IdBtree, LocalBtree])},
+        {instance_start_time, StartTime},
+        {disk_format_version, DiskVersion},
+        {committed_update_seq, CommittedUpdateSeq}
+        ],
+    {ok, InfoList}.
+
+db_data_size({_Count, _DelCount}, _Trees) ->
+    % pre 1.2 format, upgraded on compaction
+    null;
+db_data_size({_Count, _DelCount, nil}, _Trees) ->
+    null;
+db_data_size({_Count, _DelCount, DocAndAttsSize}, Trees) ->
+    sum_tree_sizes(DocAndAttsSize, Trees).
+
+sum_tree_sizes(Acc, []) ->
+    Acc;
+sum_tree_sizes(Acc, [T | Rest]) ->
+    case couch_btree:size(T) of
+    nil ->
+        null;
+    Sz ->
+        sum_tree_sizes(Acc + Sz, Rest)
+    end.
+
+get_design_docs(Db) ->
+    FoldFun = skip_deleted(fun
+        (#full_doc_info{deleted = true}, _Reds, Acc) ->
+            {ok, Acc};
+        (#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, Acc) ->
+            {ok, [FullDocInfo | Acc]};
+        (_, _Reds, Acc) ->
+            {stop, Acc}
+    end),
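+    % <<"_design0">> is the first key that sorts after every <<"_design/...">>
+    % id, so this range spans exactly the design documents.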
+    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
+    {ok, _, Docs} = couch_btree:fold(by_id_btree(Db), FoldFun, [], KeyOpts),
+    Docs.
+
+check_is_admin(#db{user_ctx=#user_ctx{name=Name,roles=Roles}}=Db) ->
+    {Admins} = get_admins(Db),
+    AdminRoles = [<<"_admin">> | couch_util:get_value(<<"roles">>, Admins, [])],
+    AdminNames = couch_util:get_value(<<"names">>, Admins,[]),
+    case AdminRoles -- Roles of
+    AdminRoles -> % same list, not an admin role
+        case AdminNames -- [Name] of
+        AdminNames -> % same names, not an admin
+            throw({unauthorized, <<"You are not a db or server admin.">>});
+        _ ->
+            ok
+        end;
+    _ ->
+        ok
+    end.
+
+check_is_member(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
+    case (catch check_is_admin(Db)) of
+    ok -> ok;
+    _ ->
+        {Members} = get_members(Db),
+        ReaderRoles = couch_util:get_value(<<"roles">>, Members,[]),
+        WithAdminRoles = [<<"_admin">> | ReaderRoles],
+        ReaderNames = couch_util:get_value(<<"names">>, Members,[]),
+        case ReaderRoles ++ ReaderNames of
+        [] -> ok; % no readers == public access
+        _Else ->
+            case WithAdminRoles -- Roles of
+            WithAdminRoles -> % same list, not a reader role
+                case ReaderNames -- [Name] of
+                ReaderNames -> % same names, not a reader
+                    ?LOG_DEBUG("Not a reader: UserCtx ~p vs Names ~p Roles ~p",[UserCtx, ReaderNames, WithAdminRoles]),
+                    throw({unauthorized, <<"You are not authorized to access this db.">>});
+                _ ->
+                    ok
+                end;
+            _ ->
+                ok
+            end
+        end
+    end.
+
+get_admins(#db{security=SecProps}) ->
+    couch_util:get_value(<<"admins">>, SecProps, {[]}).
+
+get_members(#db{security=SecProps}) ->
+    % we fall back to readers here for backwards compatibility
+    couch_util:get_value(<<"members">>, SecProps,
+        couch_util:get_value(<<"readers">>, SecProps, {[]})).
+
+get_security(#db{security=SecProps}) ->
+    {SecProps}.
+
+set_security(#db{update_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
+    check_is_admin(Db),
+    ok = validate_security_object(NewSecProps),
+    ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
+    {ok, _} = ensure_full_commit(Db),
+    ok;
+set_security(_, _) ->
+    throw(bad_request).
+
+validate_security_object(SecProps) ->
+    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
+    % we fall back to readers here for backwards compatibility
+    Members = couch_util:get_value(<<"members">>, SecProps,
+        couch_util:get_value(<<"readers">>, SecProps, {[]})),
+    ok = validate_names_and_roles(Admins),
+    ok = validate_names_and_roles(Members),
+    ok.
+
+% validate user input
+validate_names_and_roles({Props}) when is_list(Props) ->
+    case couch_util:get_value(<<"names">>,Props,[]) of
+    Ns when is_list(Ns) ->
+            [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
+            Ns;
+    _ -> throw("names must be a JSON list of strings")
+    end,
+    case couch_util:get_value(<<"roles">>,Props,[]) of
+    Rs when is_list(Rs) ->
+        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
+        Rs;
+    _ -> throw("roles must be a JSON list of strings")
+    end,
+    ok.
+
+get_revs_limit(#db{revs_limit=Limit}) ->
+    Limit.
+
+set_revs_limit(#db{update_pid=Pid}=Db, Limit) when Limit > 0 ->
+    check_is_admin(Db),
+    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
+set_revs_limit(_Db, _Limit) ->
+    throw(invalid_revs_limit).
+
+name(#db{name=Name}) ->
+    Name.
+
+compression(#db{compression=Compression}) ->
+    Compression.
+
+update_doc(Db, Doc, Options) ->
+    update_doc(Db, Doc, Options, interactive_edit).
+
+update_doc(Db, Doc, Options, UpdateType) ->
+    case update_docs(Db, [Doc], Options, UpdateType) of
+    {ok, [{ok, NewRev}]} ->
+        {ok, NewRev};
+    {ok, [{{_Id, _Rev}, Error}]} ->
+        throw(Error);
+    {ok, [Error]} ->
+        throw(Error);
+    {ok, []} ->
+        % replication success
+        {Pos, [RevId | _]} = Doc#doc.revs,
+        {ok, {Pos, RevId}}
+    end.
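+
+% A hypothetical interactive update of a new document:
+%
+%   {ok, {1, _RevId}} = couch_db:update_doc(Db,
+%       #doc{id = <<"doc1">>, body = {[]}}, [])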
+
+update_docs(Db, Docs) ->
+    update_docs(Db, Docs, []).
+
+% group_alike_docs groups the sorted documents into sublist buckets, by id.
+% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
+group_alike_docs(Docs) ->
+    Sorted = lists:sort(fun({#doc{id=A},_},{#doc{id=B},_})-> A < B end, Docs),
+    group_alike_docs(Sorted, []).
+
+group_alike_docs([], Buckets) ->
+    lists:reverse(lists:map(fun lists:reverse/1, Buckets));
+group_alike_docs([Doc|Rest], []) ->
+    group_alike_docs(Rest, [[Doc]]);
+group_alike_docs([{Doc,Ref}|Rest], [Bucket|RestBuckets]) ->
+    [{#doc{id=BucketId},_Ref}|_] = Bucket,
+    case Doc#doc.id == BucketId of
+    true ->
+        % add to existing bucket
+        group_alike_docs(Rest, [[{Doc,Ref}|Bucket]|RestBuckets]);
+    false ->
+        % add to new bucket
+       group_alike_docs(Rest, [[{Doc,Ref}]|[Bucket|RestBuckets]])
+    end.
+
+validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
+    catch check_is_admin(Db);
+validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
+    ok;
+validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
+    ok;
+validate_doc_update(Db, Doc, GetDiskDocFun) ->
+    DiskDoc = GetDiskDocFun(),
+    JsonCtx = couch_util:json_user_ctx(Db),
+    SecObj = get_security(Db),
+    try [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
+            ok -> ok;
+            Error -> throw(Error)
+        end || Fun <- Db#db.validate_doc_funs],
+        ok
+    catch
+        throw:Error ->
+            Error
+    end.
+
+
+prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
+        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
+    case Revs of
+    [PrevRev|_] ->
+        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
+        {ok, {Deleted, DiskSp, DiskRevs}} ->
+            case couch_doc:has_stubs(Doc) of
+            true ->
+                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
+                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
+            false ->
+                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
+                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
+            end;
+        error when AllowConflict ->
+            % will generate an error if there are stubs
+            couch_doc:merge_stubs(Doc, #doc{}),
+            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+        error ->
+            {conflict, Doc}
+        end;
+    [] ->
+        % new doc, and we have existing revs.
+        % reuse existing deleted doc
+        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
+            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+        true ->
+            {conflict, Doc}
+        end
+    end.
+
+
+
+prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
+        AccFatalErrors) ->
+   {AccPrepped, AccFatalErrors};
+prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
+        AllowConflict, AccPrepped, AccErrors) ->
+    {PreppedBucket, AccErrors3} = lists:foldl(
+        fun({#doc{revs=Revs}=Doc,Ref}, {AccBucket, AccErrors2}) ->
+            case couch_doc:has_stubs(Doc) of
+            true ->
+                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
+            false -> ok
+            end,
+            case Revs of
+            {0, []} ->
+                case validate_doc_update(Db, Doc, fun() -> nil end) of
+                ok ->
+                    {[{Doc, Ref} | AccBucket], AccErrors2};
+                Error ->
+                    {AccBucket, [{Ref, Error} | AccErrors2]}
+                end;
+            _ ->
+                % old revs specified but none exist, a conflict
+                {AccBucket, [{Ref, conflict} | AccErrors2]}
+            end
+        end,
+        {[], AccErrors}, DocBucket),
+
+    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
+            [lists:reverse(PreppedBucket) | AccPrepped], AccErrors3);
+prep_and_validate_updates(Db, [DocBucket|RestBuckets],
+        [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
+        AllowConflict, AccPrepped, AccErrors) ->
+    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
+    LeafRevsDict = dict:from_list([
+        begin
+            Deleted = element(1, LeafVal),
+            Sp = element(2, LeafVal),
+            {{Start, RevId}, {Deleted, Sp, Revs}}
+        end ||
+        {LeafVal, {Start, [RevId | _]} = Revs} <- Leafs
+    ]),
+    {PreppedBucket, AccErrors3} = lists:foldl(
+        fun({Doc, Ref}, {Docs2Acc, AccErrors2}) ->
+            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
+                    LeafRevsDict, AllowConflict) of
+            {ok, Doc2} ->
+                {[{Doc2, Ref} | Docs2Acc], AccErrors2};
+            {Error, #doc{}} ->
+                % Record the error
+                {Docs2Acc, [{Ref, Error} |AccErrors2]}
+            end
+        end,
+        {[], AccErrors}, DocBucket),
+    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
+            [PreppedBucket | AccPrepped], AccErrors3).
+
+
+update_docs(Db, Docs, Options) ->
+    update_docs(Db, Docs, Options, interactive_edit).
+
+
+prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
+    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
+            {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
+    {lists:reverse(AccPrepped), lists:reverse(Errors2)};
+prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
+    case OldInfo of
+    not_found ->
+        {ValidatedBucket, AccErrors3} = lists:foldl(
+            fun({Doc, Ref}, {AccPrepped2, AccErrors2}) ->
+                case couch_doc:has_stubs(Doc) of
+                true ->
+                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
+                false -> ok
+                end,
+                case validate_doc_update(Db, Doc, fun() -> nil end) of
+                ok ->
+                    {[{Doc, Ref} | AccPrepped2], AccErrors2};
+                Error ->
+                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
+                end
+            end,
+            {[], AccErrors}, Bucket),
+        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
+    {ok, #full_doc_info{rev_tree=OldTree}} ->
+        NewRevTree = lists:foldl(
+            fun({NewDoc, _Ref}, AccTree) ->
+                {NewTree, _} = couch_key_tree:merge(AccTree,
+                    couch_doc:to_path(NewDoc), Db#db.revs_limit),
+                NewTree
+            end,
+            OldTree, Bucket),
+        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
+        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
+        {ValidatedBucket, AccErrors3} =
+        lists:foldl(
+            fun({#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, Ref}, {AccValidated, AccErrors2}) ->
+                case dict:find({Pos, RevId}, LeafRevsFullDict) of
+                {ok, {Start, Path}} ->
+                    % our unflushed doc is a leaf node. Go back on the path
+                    % to find the previous rev that's on disk.
+
+                    LoadPrevRevFun = fun() ->
+                                make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
+                            end,
+
+                    case couch_doc:has_stubs(Doc) of
+                    true ->
+                        DiskDoc = LoadPrevRevFun(),
+                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+                        GetDiskDocFun = fun() -> DiskDoc end;
+                    false ->
+                        Doc2 = Doc,
+                        GetDiskDocFun = LoadPrevRevFun
+                    end,
+
+                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
+                    ok ->
+                        {[{Doc2, Ref} | AccValidated], AccErrors2};
+                    Error ->
+                        {AccValidated, [{Doc, Error} | AccErrors2]}
+                    end;
+                _ ->
+                    % this doc either isn't a leaf, or it already exists in
+                    % the tree. Ignore it, but consider it a success.
+                    {AccValidated, AccErrors2}
+                end
+            end,
+            {[], AccErrors}, Bucket),
+        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
+                [ValidatedBucket | AccPrepped], AccErrors3)
+    end.
+
+
+
+new_revid(#doc{body=Body,revs={OldStart,OldRevs},
+        atts=Atts,deleted=Deleted}) ->
+    case [{N, T, M} || #att{name=N,type=T,md5=M} <- Atts, M =/= <<>>] of
+    Atts2 when length(Atts) =/= length(Atts2) ->
+        % We must have old style non-md5 attachments
+        ?l2b(integer_to_list(couch_util:rand32()));
+    Atts2 ->
+        OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
+        couch_util:md5(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2]))
+    end.
+
+new_revs([], OutBuckets, IdRevsAcc) ->
+    {lists:reverse(OutBuckets), IdRevsAcc};
+new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
+    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
+        fun({#doc{revs={Start, RevIds}}=Doc, Ref}, IdRevsAcc2)->
+        NewRevId = new_revid(Doc),
+        {{Doc#doc{revs={Start+1, [NewRevId | RevIds]}}, Ref},
+            [{Ref, {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
+    end, IdRevsAcc, Bucket),
+    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
+
+check_dup_atts(#doc{atts=Atts}=Doc) ->
+    Atts2 = lists:sort(fun(#att{name=N1}, #att{name=N2}) -> N1 < N2 end, Atts),
+    check_dup_atts2(Atts2),
+    Doc.
+
+check_dup_atts2([#att{name=N}, #att{name=N} | _]) ->
+    throw({bad_request, <<"Duplicate attachments">>});
+check_dup_atts2([_ | Rest]) ->
+    check_dup_atts2(Rest);
+check_dup_atts2(_) ->
+    ok.
+
+
+update_docs(Db, Docs, Options, replicated_changes) ->
+    increment_stat(Db, {couchdb, database_writes}),
+    % associate reference with each doc in order to track duplicates
+    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end, Docs),
+    DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)),
+    case (Db#db.validate_doc_funs /= []) orelse
+        lists:any(
+            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) -> true;
+            ({#doc{atts=Atts}, _Ref}) ->
+                Atts /= []
+            end, Docs2) of
+    true ->
+        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
+        ExistingDocs = get_full_doc_infos(Db, Ids),
+
+        {DocBuckets2, DocErrors} =
+                prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
+        DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
+    false ->
+        DocErrors = [],
+        DocBuckets3 = DocBuckets
+    end,
+    DocBuckets4 = [[{doc_flush_atts(check_dup_atts(Doc), Db#db.updater_fd), Ref}
+            || {Doc, Ref} <- Bucket] || Bucket <- DocBuckets3],
+    {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
+    {ok, DocErrors};
+
+update_docs(Db, Docs, Options, interactive_edit) ->
+    increment_stat(Db, {couchdb, database_writes}),
+    AllOrNothing = lists:member(all_or_nothing, Options),
+    % go ahead and generate the new revision ids for the documents.
+    % separate out the NonRep documents from the rest of the documents
+
+    % associate reference with each doc in order to track duplicates
+    Docs2 = lists:map(fun(Doc) -> {Doc, make_ref()} end,Docs),
+    {Docs3, NonRepDocs} = lists:foldl(
+         fun({#doc{id=Id},_Ref}=Doc, {DocsAcc, NonRepDocsAcc}) ->
+            case Id of
+            <<?LOCAL_DOC_PREFIX, _/binary>> ->
+                {DocsAcc, [Doc | NonRepDocsAcc]};
+            Id->
+                {[Doc | DocsAcc], NonRepDocsAcc}
+            end
+        end, {[], []}, Docs2),
+
+    DocBuckets = before_docs_update(Db, group_alike_docs(Docs3)),
+
+    case (Db#db.validate_doc_funs /= []) orelse
+        lists:any(
+            fun({#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}, _Ref}) ->
+                true;
+            ({#doc{atts=Atts}, _Ref}) ->
+                Atts /= []
+            end, Docs3) of
+    true ->
+        % lookup the doc by id and get the most recent
+        Ids = [Id || [{#doc{id=Id}, _Ref}|_] <- DocBuckets],
+        ExistingDocInfos = get_full_doc_infos(Db, Ids),
+
+        {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
+                DocBuckets, ExistingDocInfos, AllOrNothing, [], []),
+
+        % strip out any empty buckets
+        DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
+    false ->
+        PreCommitFailures = [],
+        DocBuckets2 = DocBuckets
+    end,
+
+    if (AllOrNothing) and (PreCommitFailures /= []) ->
+        {aborted,
+         lists:foldl(fun({#doc{id=Id,revs=Revs}, Ref},Acc) ->
+                         case lists:keyfind(Ref,1,PreCommitFailures) of
+                         {Ref, Error} ->
+                             case Revs of
+                             {Pos, [RevId|_]} ->
+                                 [{{Id,{Pos, RevId}}, Error} | Acc];
+                             {0, []} ->
+                                 [{{Id,{0, <<>>}}, Error} | Acc]
+                             end;
+                         false ->
+                             Acc
+                         end
+                     end,[],Docs3)};
+
+    true ->
+        Options2 = if AllOrNothing -> [merge_conflicts];
+                true -> [] end ++ Options,
+        DocBuckets3 = [[
+                {doc_flush_atts(set_new_att_revpos(
+                        check_dup_atts(Doc)), Db#db.updater_fd), Ref}
+                || {Doc, Ref} <- B] || B <- DocBuckets2],
+        {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
+
+        {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),
+
+        ResultsDict = dict:from_list(IdRevs ++ CommitResults ++ PreCommitFailures),
+        {ok, lists:map(
+            fun({#doc{}, Ref}) ->
+                {ok, Result} = dict:find(Ref, ResultsDict),
+                Result
+            end, Docs2)}
+    end.
+
+% Returns the first available document on disk. Input list is a full rev path
+% for the doc.
+make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
+    nil;
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
+    make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
+    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, RevValue} |_]=DocPath) ->
+    IsDel = element(1, RevValue),
+    Sp = element(2, RevValue),
+    Revs = [Rev || {Rev, _} <- DocPath],
+    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
+
+set_commit_option(Options) ->
+    CommitSettings = {
+        [true || O <- Options, O==full_commit orelse O==delay_commit],
+        couch_config:get("couchdb", "delayed_commits", "false")
+    },
+    case CommitSettings of
+    {[true], _} ->
+        Options; % user requested explicit commit setting, do not change it
+    {_, "true"} ->
+        Options; % delayed commits are enabled, do nothing
+    {_, "false"} ->
+        [full_commit|Options];
+    {_, Else} ->
+        ?LOG_ERROR("[couchdb] delayed_commits setting must be true/false, not ~p",
+            [Else]),
+        [full_commit|Options]
+    end.
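+
+% For example: with no explicit commit option and the (hypothetical) config
+% value delayed_commits = "false", set_commit_option([]) returns
+% [full_commit].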
+
+collect_results(UpdatePid, MRef, ResultsAcc) ->
+    receive
+    {result, UpdatePid, Result} ->
+        collect_results(UpdatePid, MRef, [Result | ResultsAcc]);
+    {done, UpdatePid} ->
+        {ok, ResultsAcc};
+    {retry, UpdatePid} ->
+        retry;
+    {'DOWN', MRef, _, _, Reason} ->
+        exit(Reason)
+    end.
+
+write_and_commit(#db{update_pid=UpdatePid}=Db, DocBuckets1,
+        NonRepDocs, Options0) ->
+    DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
+    Options = set_commit_option(Options0),
+    MergeConflicts = lists:member(merge_conflicts, Options),
+    FullCommit = lists:member(full_commit, Options),
+    MRef = erlang:monitor(process, UpdatePid),
+    try
+        UpdatePid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts, FullCommit},
+        case collect_results(UpdatePid, MRef, []) of
+        {ok, Results} -> {ok, Results};
+        retry ->
+            % This can happen if the db file we wrote to was swapped out by
+            % compaction. Retry by reopening the db and writing to the
+            % current file.
+            {ok, Db2} = open_ref_counted(Db#db.main_pid, self()),
+            DocBuckets2 = [
+                [{doc_flush_atts(Doc, Db2#db.updater_fd), Ref} || {Doc, Ref} <- Bucket] ||
+                Bucket <- DocBuckets1
+            ],
+            % We only retry once
+            DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
+            close(Db2),
+            UpdatePid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts, FullCommit},
+            case collect_results(UpdatePid, MRef, []) of
+            {ok, Results} -> {ok, Results};
+            retry -> throw({update_error, compaction_retry})
+            end
+        end
+    after
+        erlang:demonitor(MRef, [flush])
+    end.
+
+
+prepare_doc_summaries(Db, BucketList) ->
+    [lists:map(
+        fun({#doc{body = Body, atts = Atts} = Doc, Ref}) ->
+            DiskAtts = [{N, T, P, AL, DL, R, M, E} ||
+                #att{name = N, type = T, data = {_, P}, md5 = M, revpos = R,
+                    att_len = AL, disk_len = DL, encoding = E} <- Atts],
+            AttsFd = case Atts of
+            [#att{data = {Fd, _}} | _] ->
+                Fd;
+            [] ->
+                nil
+            end,
+            SummaryChunk = couch_db_updater:make_doc_summary(Db, {Body, DiskAtts}),
+            {Doc#doc{body = {summary, SummaryChunk, AttsFd}}, Ref}
+        end,
+        Bucket) || Bucket <- BucketList].
+
+
+before_docs_update(#db{before_doc_update = nil}, BucketList) ->
+    BucketList;
+before_docs_update(#db{before_doc_update = Fun} = Db, BucketList) ->
+    [lists:map(
+        fun({Doc, Ref}) ->
+            NewDoc = Fun(couch_doc:with_ejson_body(Doc), Db),
+            {NewDoc, Ref}
+        end,
+        Bucket) || Bucket <- BucketList].
+
+
+set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts}=Doc) ->
+    Doc#doc{atts= lists:map(fun(#att{data={_Fd,_Sp}}=Att) ->
+            % already committed to disk, do not set a new rev
+            Att;
+        (Att) ->
+            Att#att{revpos=RevPos+1}
+        end, Atts)}.
+
+
+doc_flush_atts(Doc, Fd) ->
+    Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}.
+
+check_md5(_NewSig, <<>>) -> ok;
+check_md5(Sig, Sig) -> ok;
+check_md5(_, _) -> throw(md5_mismatch).
+
+flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd ->
+    % already written to our file, nothing to write
+    Att;
+
+flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5,
+    disk_len=InDiskLen} = Att) ->
+    {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
+            couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
+    check_md5(IdentityMd5, InMd5),
+    Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=InDiskLen};
+
+flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) ->
+    with_stream(Fd, Att, fun(OutputStream) ->
+        couch_stream:write(OutputStream, Data)
+    end);
+
+flush_att(Fd, #att{data=Fun,att_len=undefined}=Att) when is_function(Fun) ->
+    MaxChunkSize = list_to_integer(
+        couch_config:get("couchdb", "attachment_stream_buffer_size", "4096")),
+    with_stream(Fd, Att, fun(OutputStream) ->
+        % Fun(MaxChunkSize, WriterFun) must call WriterFun
+        % once for each chunk of the attachment.
+        Fun(MaxChunkSize,
+            % WriterFun({Length, Binary}, State)
+            % WriterFun({0, _Footers}, State)
+            % Called with Length == 0 on the last time.
+            % WriterFun returns NewState.
+            fun({0, Footers}, _) ->
+                F = mochiweb_headers:from_binary(Footers),
+                case mochiweb_headers:get_value("Content-MD5", F) of
+                undefined ->
+                    ok;
+                Md5 ->
+                    {md5, base64:decode(Md5)}
+                end;
+            ({_Length, Chunk}, _) ->
+                couch_stream:write(OutputStream, Chunk)
+            end, ok)
+    end);
+
+flush_att(Fd, #att{data=Fun,att_len=AttLen}=Att) when is_function(Fun) ->
+    with_stream(Fd, Att, fun(OutputStream) ->
+        write_streamed_attachment(OutputStream, Fun, AttLen)
+    end).
+
+
+compressible_att_type(MimeType) when is_binary(MimeType) ->
+    compressible_att_type(?b2l(MimeType));
+compressible_att_type(MimeType) ->
+    TypeExpList = re:split(
+        couch_config:get("attachments", "compressible_types", ""),
+        "\\s*,\\s*",
+        [{return, list}]
+    ),
+    lists:any(
+        fun(TypeExp) ->
+            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+                "(?:\\s*;.*?)?\\s*", $$],
+            re:run(MimeType, Regexp, [caseless]) =/= nomatch
+        end,
+        [T || T <- TypeExpList, T /= []]
+    ).
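+
+% For illustration, with a hypothetical setting
+%   [attachments] compressible_types = text/*, application/json
+% compressible_att_type(<<"text/plain; charset=utf-8">>) returns true.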
+
+% From RFC 2616 3.6.1 - Chunked Transfer Coding
+%
+%   In other words, the origin server is willing to accept
+%   the possibility that the trailer fields might be silently
+%   discarded along the path to the client.
+%
+% I take this to mean that if "Trailers: Content-MD5\r\n"
+% is present in the request, but there is no Content-MD5
+% trailer, we're free to ignore this inconsistency and
+% pretend that no Content-MD5 exists.
+with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
+    BufferSize = list_to_integer(
+        couch_config:get("couchdb", "attachment_stream_buffer_size", "4096")),
+    {ok, OutputStream} = case (Enc =:= identity) andalso
+        compressible_att_type(Type) of
+    true ->
+        CompLevel = list_to_integer(
+            couch_config:get("attachments", "compression_level", "0")
+        ),
+        couch_stream:open(Fd, [{buffer_size, BufferSize},
+            {encoding, gzip}, {compression_level, CompLevel}]);
+    _ ->
+        couch_stream:open(Fd, [{buffer_size, BufferSize}])
+    end,
+    ReqMd5 = case Fun(OutputStream) of
+        {md5, FooterMd5} ->
+            case InMd5 of
+                md5_in_footer -> FooterMd5;
+                _ -> InMd5
+            end;
+        _ ->
+            InMd5
+    end,
+    {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
+        couch_stream:close(OutputStream),
+    check_md5(IdentityMd5, ReqMd5),
+    {AttLen, DiskLen, NewEnc} = case Enc of
+    identity ->
+        case {Md5, IdentityMd5} of
+        {Same, Same} ->
+            {Len, IdentityLen, identity};
+        _ ->
+            {Len, IdentityLen, gzip}
+        end;
+    gzip ->
+        case {Att#att.att_len, Att#att.disk_len} of
+        {AL, DL} when AL =:= undefined orelse DL =:= undefined ->
+            % Compressed attachment uploaded through the standalone API.
+            {Len, Len, gzip};
+        {AL, DL} ->
+            % This case is used for efficient push-replication, where a
+            % compressed attachment is located in the body of multipart
+            % content-type request.
+            {AL, DL, gzip}
+        end
+    end,
+    Att#att{
+        data={Fd,StreamInfo},
+        att_len=AttLen,
+        disk_len=DiskLen,
+        md5=Md5,
+        encoding=NewEnc
+    }.
+
+
+write_streamed_attachment(_Stream, _F, 0) ->
+    ok;
+write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
+    Bin = read_next_chunk(F, LenLeft),
+    ok = couch_stream:write(Stream, Bin),
+    write_streamed_attachment(Stream, F, LenLeft - size(Bin)).
+
+read_next_chunk(F, _) when is_function(F, 0) ->
+    F();
+read_next_chunk(F, LenLeft) when is_function(F, 1) ->
+    F(lists:min([LenLeft, 16#2000])).
+
+enum_docs_since_reduce_to_count(Reds) ->
+    couch_btree:final_reduce(
+            fun couch_db_updater:btree_by_seq_reduce/2, Reds).
+
+enum_docs_reduce_to_count(Reds) ->
+    FinalRed = couch_btree:final_reduce(
+            fun couch_db_updater:btree_by_id_reduce/2, Reds),
+    element(1, FinalRed).
+
+changes_since(Db, StartSeq, Fun, Acc) ->
+    changes_since(Db, StartSeq, Fun, [], Acc).
+
+changes_since(Db, StartSeq, Fun, Options, Acc) ->
+    Wrapper = fun(DocInfo, _Offset, Acc2) -> Fun(DocInfo, Acc2) end,
+    {ok, _LastReduction, AccOut} = couch_btree:fold(by_seq_btree(Db),
+        Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
+    {ok, AccOut}.
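+
+% A hypothetical fold collecting every #doc_info{} since sequence 0:
+%
+%   {ok, Infos} = couch_db:changes_since(Db, 0,
+%       fun(DocInfo, Acc) -> {ok, [DocInfo | Acc]} end, [])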
+
+count_changes_since(Db, SinceSeq) ->
+    BTree = by_seq_btree(Db),
+    {ok, Changes} =
+    couch_btree:fold_reduce(BTree,
+        fun(_SeqStart, PartialReds, 0) ->
+            {ok, couch_btree:final_reduce(BTree, PartialReds)}
+        end,
+        0, [{start_key, SinceSeq + 1}]),
+    Changes.
+
+enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
+    {ok, LastReduction, AccOut} = couch_btree:fold(
+        by_seq_btree(Db), InFun, Acc, [{start_key, SinceSeq + 1} | Options]),
+    {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
+
+enum_docs(Db, InFun, InAcc, Options) ->
+    FoldFun = skip_deleted(InFun),
+    {ok, LastReduce, OutAcc} = couch_btree:fold(
+        by_id_btree(Db), FoldFun, InAcc, Options),
+    {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
+
+% server functions
+
+init({DbName, Filepath, Fd, Options}) ->
+    {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {self(), DbName, Filepath, Fd, Options}, []),
+    {ok, #db{fd_ref_counter=RefCntr}=Db} = gen_server:call(UpdaterPid, get_db),
+    couch_ref_counter:add(RefCntr),
+    case lists:member(sys_db, Options) of
+    true ->
+        ok;
+    false ->
+        couch_stats_collector:track_process_count({couchdb, open_databases})
+    end,
+    process_flag(trap_exit, true),
+    {ok, Db}.
+
+terminate(_Reason, Db) ->
+    couch_util:shutdown_sync(Db#db.update_pid),
+    ok.
+
+handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
+    ok = couch_ref_counter:add(RefCntr, OpenerPid),
+    {reply, {ok, Db}, Db};
+handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
+            waiting_delayed_commit=Delay}=Db) ->
+    % Idle means no referrers. Unless in the middle of a compaction file switch,
+    % there are always at least 2 referrers, couch_db_updater and us.
+    {reply, (Delay == nil) andalso (Compact == nil) andalso (couch_ref_counter:count(RefCntr) == 2), Db};
+handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
+    #db{fd_ref_counter=NewRefCntr}=NewDb,
+    case NewRefCntr =:= OldRefCntr of
+    true -> ok;
+    false ->
+        couch_ref_counter:add(NewRefCntr),
+        couch_ref_counter:drop(OldRefCntr)
+    end,
+    {reply, ok, NewDb};
+handle_call(get_db, _From, Db) ->
+    {reply, {ok, Db}, Db}.
+
+
+handle_cast(Msg, Db) ->
+    ?LOG_ERROR("Bad cast message received for db ~s: ~p", [Db#db.name, Msg]),
+    exit({error, Msg}).
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info({'EXIT', _Pid, normal}, Db) ->
+    {noreply, Db};
+handle_info({'EXIT', _Pid, Reason}, Server) ->
+    {stop, Reason, Server};
+handle_info(Msg, Db) ->
+    ?LOG_ERROR("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
+    exit({error, Msg}).
+
+
+%%% Internal functions %%%
+open_doc_revs_int(Db, IdRevs, Options) ->
+    Ids = [Id || {Id, _Revs} <- IdRevs],
+    LookupResults = get_full_doc_infos(Db, Ids),
+    lists:zipwith(
+        fun({Id, Revs}, Lookup) ->
+            case Lookup of
+            {ok, #full_doc_info{rev_tree=RevTree}} ->
+                {FoundRevs, MissingRevs} =
+                case Revs of
+                all ->
+                    {couch_key_tree:get_all_leafs(RevTree), []};
+                _ ->
+                    case lists:member(latest, Options) of
+                    true ->
+                        couch_key_tree:get_key_leafs(RevTree, Revs);
+                    false ->
+                        couch_key_tree:get(RevTree, Revs)
+                    end
+                end,
+                FoundResults =
+                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
+                    case Value of
+                    ?REV_MISSING ->
+                        % we have the rev in our list but know nothing about it
+                        {{not_found, missing}, {Pos, Rev}};
+                    RevValue ->
+                        IsDeleted = element(1, RevValue),
+                        SummaryPtr = element(2, RevValue),
+                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
+                    end
+                end, FoundRevs),
+                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
+                {ok, Results};
+            not_found when Revs == all ->
+                {ok, []};
+            not_found ->
+                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
+            end
+        end,
+        IdRevs, LookupResults).
+
+open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
+    case couch_btree:lookup(local_btree(Db), [Id]) of
+    [{ok, {_, {Rev, BodyData}}}] ->
+        Doc = #doc{id=Id, revs={0, [?l2b(integer_to_list(Rev))]}, body=BodyData},
+        apply_open_options({ok, Doc}, Options);
+    [not_found] ->
+        {not_found, missing}
+    end;
+open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
+    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
+    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
+    apply_open_options(
+       {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
+open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
+    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
+        DocInfo = couch_doc:to_doc_info(FullDocInfo),
+    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
+    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
+    apply_open_options(
+        {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
+open_doc_int(Db, Id, Options) ->
+    case get_full_doc_info(Db, Id) of
+    {ok, FullDocInfo} ->
+        open_doc_int(Db, FullDocInfo, Options);
+    not_found ->
+        {not_found, missing}
+    end.
+
+doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
+    case lists:member(revs_info, Options) of
+    false -> [];
+    true ->
+        {[{Pos, RevPath}],[]} =
+            couch_key_tree:get_full_key_paths(RevTree, [Rev]),
+
+        [{revs_info, Pos, lists:map(
+            fun({Rev1, ?REV_MISSING}) ->
+                {Rev1, missing};
+            ({Rev1, RevValue}) ->
+                case element(1, RevValue) of
+                true ->
+                    {Rev1, deleted};
+                false ->
+                    {Rev1, available}
+                end
+            end, RevPath)}]
+    end ++
+    case lists:member(conflicts, Options) of
+    false -> [];
+    true ->
+        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
+        [] -> [];
+        ConflictRevs -> [{conflicts, ConflictRevs}]
+        end
+    end ++
+    case lists:member(deleted_conflicts, Options) of
+    false -> [];
+    true ->
+        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
+        [] -> [];
+        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
+        end
+    end ++
+    case lists:member(local_seq, Options) of
+    false -> [];
+    true -> [{local_seq, Seq}]
+    end.
+
+read_doc(#db{fd=Fd}, Pos) ->
+    couch_file:pread_term(Fd, Pos).
+
+
+make_doc(#db{updater_fd = Fd} = Db, Id, Deleted, Bp, RevisionPath) ->
+    {BodyData, Atts} =
+    case Bp of
+    nil ->
+        {[], []};
+    _ ->
+        {ok, {BodyData0, Atts00}} = read_doc(Db, Bp),
+        Atts0 = case Atts00 of
+        _ when is_binary(Atts00) ->
+            couch_compress:decompress(Atts00);
+        _ when is_list(Atts00) ->
+            % pre 1.2 format
+            Atts00
+        end,
+        {BodyData0,
+            lists:map(
+                fun({Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
+                    #att{name=Name,
+                        type=Type,
+                        att_len=AttLen,
+                        disk_len=DiskLen,
+                        md5=Md5,
+                        revpos=RevPos,
+                        data={Fd,Sp},
+                        encoding=
+                            case Enc of
+                            true ->
+                                % 0110 UPGRADE CODE
+                                gzip;
+                            false ->
+                                % 0110 UPGRADE CODE
+                                identity;
+                            _ ->
+                                Enc
+                            end
+                    };
+                ({Name,Type,Sp,AttLen,RevPos,Md5}) ->
+                    #att{name=Name,
+                        type=Type,
+                        att_len=AttLen,
+                        disk_len=AttLen,
+                        md5=Md5,
+                        revpos=RevPos,
+                        data={Fd,Sp}};
+                ({Name,{Type,Sp,AttLen}}) ->
+                    #att{name=Name,
+                        type=Type,
+                        att_len=AttLen,
+                        disk_len=AttLen,
+                        md5= <<>>,
+                        revpos=0,
+                        data={Fd,Sp}}
+                end, Atts0)}
+    end,
+    Doc = #doc{
+        id = Id,
+        revs = RevisionPath,
+        body = BodyData,
+        atts = Atts,
+        deleted = Deleted
+    },
+    after_doc_read(Db, Doc).
+
+
+after_doc_read(#db{after_doc_read = nil}, Doc) ->
+    Doc;
+after_doc_read(#db{after_doc_read = Fun} = Db, Doc) ->
+    Fun(couch_doc:with_ejson_body(Doc), Db).
+
+
+increment_stat(#db{options = Options}, Stat) ->
+    case lists:member(sys_db, Options) of
+    true ->
+        ok;
+    false ->
+        couch_stats_collector:increment(Stat)
+    end.
+
+local_btree(#db{local_docs_btree = BTree, fd = ReaderFd}) ->
+    BTree#btree{fd = ReaderFd}.
+
+by_seq_btree(#db{docinfo_by_seq_btree = BTree, fd = ReaderFd}) ->
+    BTree#btree{fd = ReaderFd}.
+
+by_id_btree(#db{fulldocinfo_by_id_btree = BTree, fd = ReaderFd}) ->
+    BTree#btree{fd = ReaderFd}.
+
+skip_deleted(FoldFun) ->
+    fun
+        (visit, KV, Reds, Acc) ->
+            FoldFun(KV, Reds, Acc);
+        (traverse, _LK, {Undeleted, _Del, _Size}, Acc) when Undeleted == 0 ->
+            {skip, Acc};
+        (traverse, _, _, Acc) ->
+            {ok, Acc}
+    end.
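+
+% Note: the by-id reduction is a {NotDeleted, Deleted, Size} tuple, so
+% subtrees that contain only deleted docs are skipped in a single step.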

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_db_update_notifier.erl
----------------------------------------------------------------------
diff --git a/src/couch_db_update_notifier.erl b/src/couch_db_update_notifier.erl
new file mode 100644
index 0000000..bfa770a
--- /dev/null
+++ b/src/couch_db_update_notifier.erl
@@ -0,0 +1,82 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This causes an OS process to be spawned, and that process is notified every
+% time a database is updated.
+%
+% The notifications take the form of the database name sent as a line of
+% text to the OS process.
+%
+
+-module(couch_db_update_notifier).
+
+-behaviour(gen_event).
+
+-export([start_link/1, notify/1]).
+-export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
+
+-include("couch_db.hrl").
+
+start_link(Exec) ->
+    couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
+
+notify(Event) ->
+    gen_event:notify(couch_db_update, Event).
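+
+% A hypothetical event, as broadcast when a database is updated:
+%
+%   couch_db_update_notifier:notify({updated, <<"mydb">>})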
+
+stop(Pid) ->
+    couch_event_sup:stop(Pid).
+
+init(Exec) when is_list(Exec) -> % an exe
+    couch_os_process:start_link(Exec, []);
+init(Else) ->
+    {ok, Else}.
+
+terminate(_Reason, Pid) when is_pid(Pid) ->
+    couch_os_process:stop(Pid),
+    ok;
+terminate(_Reason, _State) ->
+    ok.
+
+handle_event(Event, Fun) when is_function(Fun, 1) ->
+    Fun(Event),
+    {ok, Fun};
+handle_event(Event, {Fun, FunAcc}) ->
+    FunAcc2 = Fun(Event, FunAcc),
+    {ok, {Fun, FunAcc2}};
+handle_event({EventType, EventDesc}, Pid) ->
+    Obj = encode_event(EventType, EventDesc),
+    ok = couch_os_process:send(Pid, Obj),
+    {ok, Pid}.
+
+handle_call(_Request, State) ->
+    {reply, ok, State}.
+
+handle_info({'EXIT', Pid, Reason}, Pid) ->
+    ?LOG_ERROR("Update notification process ~p died: ~p", [Pid, Reason]),
+    remove_handler;
+handle_info({'EXIT', _, _}, Pid) ->
+    %% the db_update event manager traps exits and forwards this message to all
+    %% its handlers. Just ignore as it wasn't our os_process that exited.
+    {ok, Pid}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+encode_event(EventType, EventDesc) when is_atom(EventType) ->
+    encode_event(atom_to_list(EventType), EventDesc);
+encode_event(EventType, EventDesc) when is_list(EventType) ->
+    encode_event(?l2b(EventType), EventDesc);
+encode_event(EventType, {DbName, DocId}) ->
+    {[{type, EventType}, {db, DbName}, {id, DocId}]};
+encode_event(EventType, DbName) ->
+    {[{type, EventType}, {db, DbName}]}.
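+
+% For example, encode_event(updated, <<"db1">>) yields
+% {[{type, <<"updated">>}, {db, <<"db1">>}]}, and
+% encode_event(updated, {<<"db1">>, <<"doc1">>}) yields
+% {[{type, <<"updated">>}, {db, <<"db1">>}, {id, <<"doc1">>}]}.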

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_db_update_notifier_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_db_update_notifier_sup.erl b/src/couch_db_update_notifier_sup.erl
new file mode 100644
index 0000000..e7cc16c
--- /dev/null
+++ b/src/couch_db_update_notifier_sup.erl
@@ -0,0 +1,61 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This supervisor starts one couch_db_update_notifier child per configured
+% update_notification entry. Each child causes an OS process to be spawned,
+% and it is notified every time a database is updated.
+%
+% The notifications are in the form of the database name sent as a line of
+% text to the OS process's stdout.
+%
+
+-module(couch_db_update_notifier_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, init/1, config_change/3]).
+
+start_link() ->
+    supervisor:start_link({local, couch_db_update_notifier_sup},
+        couch_db_update_notifier_sup, []).
+
+init([]) ->
+    ok = couch_config:register(fun ?MODULE:config_change/3),
+
+    UpdateNotifierExes = couch_config:get("update_notification"),
+
+    {ok,
+        {{one_for_one, 10, 3600},
+            lists:map(fun({Name, UpdateNotifierExe}) ->
+                {Name,
+                {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
+                    permanent,
+                    1000,
+                    supervisor,
+                    [couch_db_update_notifier]}
+                end, UpdateNotifierExes)}}.
+
+%% @doc when update_notification configuration changes, terminate the process
+%%      for that notifier and start a new one with the updated config
+config_change("update_notification", Id, Exe) ->
+    ChildSpec = {
+        Id,
+        {couch_db_update_notifier, start_link, [Exe]},
+        permanent,
+        1000,
+        supervisor,
+        [couch_db_update_notifier]
+    },
+    supervisor:terminate_child(couch_db_update_notifier_sup, Id),
+    supervisor:delete_child(couch_db_update_notifier_sup, Id),
+    supervisor:start_child(couch_db_update_notifier_sup, ChildSpec).
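+
+% For example, an [update_notification] ini entry such as (name and
+% path illustrative)
+%
+%   index_updater = /usr/local/bin/couch-index-updater
+%
+% arrives here as Id = "index_updater" and
+% Exe = "/usr/local/bin/couch-index-updater".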
+


[06/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_file.erl
----------------------------------------------------------------------
diff --git a/src/couch_file.erl b/src/couch_file.erl
new file mode 100644
index 0000000..ee5dafb
--- /dev/null
+++ b/src/couch_file.erl
@@ -0,0 +1,532 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_file).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+-define(SIZE_BLOCK, 4096).
+
+-record(file, {
+    fd,
+    eof = 0
+}).
+
+% public API
+-export([open/1, open/2, close/1, bytes/1, sync/1, truncate/2]).
+-export([pread_term/2, pread_iolist/2, pread_binary/2]).
+-export([append_binary/2, append_binary_md5/2]).
+-export([append_raw_chunk/2, assemble_file_chunk/1, assemble_file_chunk/2]).
+-export([append_term/2, append_term/3, append_term_md5/2, append_term_md5/3]).
+-export([write_header/2, read_header/1]).
+-export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
+
+% gen_server callbacks
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+%%----------------------------------------------------------------------
+%% Args:   Valid options are [create] and [create, overwrite].
+%%  Files are opened in read/write mode.
+%% Returns: On success, {ok, Fd}
+%%  or {error, Reason} if the file could not be opened.
+%%----------------------------------------------------------------------
+
+open(Filepath) ->
+    open(Filepath, []).
+
+open(Filepath, Options) ->
+    case gen_server:start_link(couch_file,
+            {Filepath, Options, self(), Ref = make_ref()}, []) of
+    {ok, Fd} ->
+        {ok, Fd};
+    ignore ->
+        % get the error
+        receive
+        {Ref, Pid, {error, Reason} = Error} ->
+            case process_info(self(), trap_exit) of
+            {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
+            {trap_exit, false} -> ok
+            end,
+            case {lists:member(nologifmissing, Options), Reason} of
+            {true, enoent} -> ok;
+            _ ->
+            ?LOG_ERROR("Could not open file ~s: ~s",
+                [Filepath, file:format_error(Reason)])
+            end,
+            Error
+        end;
+    Error ->
+        % We can't say much here, because it could be any kind of error.
+        % Just let it bubble up; an encapsulating subcomponent can perhaps
+        % be more informative. It will likely appear in the SASL log, anyway.
+        Error
+    end.
+
+
+%%----------------------------------------------------------------------
+%% Purpose: To append an Erlang term to the end of the file.
+%% Args:    Erlang term to serialize and append to the file.
+%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to
+%%  the beginning of the serialized term. Use pread_term to read the term
+%%  back.
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+append_term(Fd, Term) ->
+    append_term(Fd, Term, []).
+
+append_term(Fd, Term, Options) ->
+    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
+    append_binary(Fd, couch_compress:compress(Term, Comp)).
+
+append_term_md5(Fd, Term) ->
+    append_term_md5(Fd, Term, []).
+
+append_term_md5(Fd, Term, Options) ->
+    Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
+    append_binary_md5(Fd, couch_compress:compress(Term, Comp)).
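+
+% Round-trip sketch (the file name is illustrative): a term appended
+% with append_term/2 can be read back from the returned position:
+%
+%   {ok, Fd} = couch_file:open("/tmp/t.couch", [create, overwrite]),
+%   {ok, Pos, _Len} = couch_file:append_term(Fd, {[{<<"k">>, 1}]}),
+%   {ok, {[{<<"k">>, 1}]}} = couch_file:pread_term(Fd, Pos),
+%   ok = couch_file:close(Fd).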
+
+%%----------------------------------------------------------------------
+%% Purpose: To append an Erlang binary to the end of the file.
+%% Args:    Erlang binary to append to the file.
+%% Returns: {ok, Pos, NumBytesWritten} where Pos is the file offset to the
+%%  beginning of the binary. Use pread_binary to read it back.
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+append_binary(Fd, Bin) ->
+    gen_server:call(Fd, {append_bin, assemble_file_chunk(Bin)}, infinity).
+    
+append_binary_md5(Fd, Bin) ->
+    gen_server:call(Fd,
+        {append_bin, assemble_file_chunk(Bin, couch_util:md5(Bin))}, infinity).
+
+append_raw_chunk(Fd, Chunk) ->
+    gen_server:call(Fd, {append_bin, Chunk}, infinity).
+
+
+assemble_file_chunk(Bin) ->
+    [<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
+
+assemble_file_chunk(Bin, Md5) ->
+    [<<1:1/integer, (iolist_size(Bin)):31/integer>>, Md5, Bin].
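+
+% Chunk layout: a 32-bit prefix whose high bit flags an MD5 checksum and
+% whose low 31 bits carry the body length. For example:
+%
+%   assemble_file_chunk(<<"abc">>)      => [<<0:1, 3:31>>, <<"abc">>]
+%   assemble_file_chunk(<<"abc">>, Md5) => [<<1:1, 3:31>>, Md5, <<"abc">>]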
+
+%%----------------------------------------------------------------------
+%% Purpose: Reads a term from a file that was written with append_term
+%% Args:    Pos, the offset into the file where the term is serialized.
+%% Returns: {ok, Term}
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+
+pread_term(Fd, Pos) ->
+    {ok, Bin} = pread_binary(Fd, Pos),
+    {ok, couch_compress:decompress(Bin)}.
+
+
+%%----------------------------------------------------------------------
+%% Purpose: Reads a binary from a file that was written with append_binary
+%% Args:    Pos, the offset into the file where the binary is serialized.
+%% Returns: {ok, Bin}
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+pread_binary(Fd, Pos) ->
+    {ok, L} = pread_iolist(Fd, Pos),
+    {ok, iolist_to_binary(L)}.
+
+
+pread_iolist(Fd, Pos) ->
+    case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of
+    {ok, IoList, <<>>} ->
+        {ok, IoList};
+    {ok, IoList, Md5} ->
+        case couch_util:md5(IoList) of
+        Md5 ->
+            {ok, IoList};
+        _ ->
+            exit({file_corruption, <<"file corruption">>})
+        end;
+    Error ->
+        Error
+    end.
+
+%%----------------------------------------------------------------------
+%% Purpose: The length of a file, in bytes.
+%% Returns: {ok, Bytes}
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+% length in bytes
+bytes(Fd) ->
+    gen_server:call(Fd, bytes, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Truncate a file to the given number of bytes.
+%% Returns: ok
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+truncate(Fd, Pos) ->
+    gen_server:call(Fd, {truncate, Pos}, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Ensure all bytes written to the file are flushed to disk.
+%% Returns: ok
+%%  or {error, Reason}.
+%%----------------------------------------------------------------------
+
+sync(Filepath) when is_list(Filepath) ->
+    {ok, Fd} = file:open(Filepath, [append, raw]),
+    try ok = file:sync(Fd) after ok = file:close(Fd) end;
+sync(Fd) ->
+    gen_server:call(Fd, sync, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Close the file.
+%% Returns: ok
+%%----------------------------------------------------------------------
+close(Fd) ->
+    couch_util:shutdown_sync(Fd).
+
+
+delete(RootDir, Filepath) ->
+    delete(RootDir, Filepath, true).
+
+
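+% Deleting first renames the file into RootDir/.delete under a random
+% UUID name; when Async is true the actual file:delete/1 runs in a
+% spawned process so the caller does not block on disk I/O.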
+delete(RootDir, Filepath, Async) ->
+    DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
+    case file:rename(Filepath, DelFile) of
+    ok ->
+        if (Async) ->
+            spawn(file, delete, [DelFile]),
+            ok;
+        true ->
+            file:delete(DelFile)
+        end;
+    Error ->
+        Error
+    end.
+
+
+nuke_dir(RootDelDir, Dir) ->
+    FoldFun = fun(File) ->
+        Path = Dir ++ "/" ++ File,
+        case filelib:is_dir(Path) of
+            true ->
+                ok = nuke_dir(RootDelDir, Path),
+                file:del_dir(Path);
+            false ->
+                delete(RootDelDir, Path, false)
+        end
+    end,
+    case file:list_dir(Dir) of
+        {ok, Files} ->
+            lists:foreach(FoldFun, Files),
+            ok = file:del_dir(Dir);
+        {error, enoent} ->
+            ok
+    end.
+
+
+init_delete_dir(RootDir) ->
+    Dir = filename:join(RootDir,".delete"),
+    % note: ensure_dir requires an actual filename component, which is the
+    % reason for "foo".
+    filelib:ensure_dir(filename:join(Dir,"foo")),
+    filelib:fold_files(Dir, ".*", true,
+        fun(Filename, _) ->
+            ok = file:delete(Filename)
+        end, ok).
+
+
+read_header(Fd) ->
+    case gen_server:call(Fd, find_header, infinity) of
+    {ok, Bin} ->
+        {ok, binary_to_term(Bin)};
+    Else ->
+        Else
+    end.
+
+write_header(Fd, Data) ->
+    Bin = term_to_binary(Data),
+    Md5 = couch_util:md5(Bin),
+    % now we assemble the final header binary and write to disk
+    FinalBin = <<Md5/binary, Bin/binary>>,
+    gen_server:call(Fd, {write_header, FinalBin}, infinity).
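+
+% Headers round-trip through term_to_binary/binary_to_term; e.g. for an
+% illustrative header term:
+%
+%   ok = couch_file:write_header(Fd, {Seq, PurgeSeq}),
+%   {ok, {Seq, PurgeSeq}} = couch_file:read_header(Fd).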
+
+
+
+
+init_status_error(ReturnPid, Ref, Error) ->
+    ReturnPid ! {Ref, self(), Error},
+    ignore.
+
+% server functions
+
+init({Filepath, Options, ReturnPid, Ref}) ->
+    process_flag(trap_exit, true),
+    OpenOptions = file_open_options(Options),
+    case lists:member(create, Options) of
+    true ->
+        filelib:ensure_dir(Filepath),
+        case file:open(Filepath, OpenOptions) of
+        {ok, Fd} ->
+            {ok, Length} = file:position(Fd, eof),
+            case Length > 0 of
+            true ->
+                % this means the file already exists and has data.
+                % FYI: We don't differentiate between empty files and non-existent
+                % files here.
+                case lists:member(overwrite, Options) of
+                true ->
+                    {ok, 0} = file:position(Fd, 0),
+                    ok = file:truncate(Fd),
+                    ok = file:sync(Fd),
+                    maybe_track_open_os_files(Options),
+                    {ok, #file{fd=Fd}};
+                false ->
+                    ok = file:close(Fd),
+                    init_status_error(ReturnPid, Ref, {error, eexist})
+                end;
+            false ->
+                maybe_track_open_os_files(Options),
+                {ok, #file{fd=Fd}}
+            end;
+        Error ->
+            init_status_error(ReturnPid, Ref, Error)
+        end;
+    false ->
+        % open in read mode first, so we don't create the file if it doesn't exist.
+        case file:open(Filepath, [read, raw]) of
+        {ok, Fd_Read} ->
+            {ok, Fd} = file:open(Filepath, OpenOptions),
+            ok = file:close(Fd_Read),
+            maybe_track_open_os_files(Options),
+            {ok, Eof} = file:position(Fd, eof),
+            {ok, #file{fd=Fd, eof=Eof}};
+        Error ->
+            init_status_error(ReturnPid, Ref, Error)
+        end
+    end.
+
+file_open_options(Options) ->
+    [read, raw, binary] ++ case lists:member(read_only, Options) of
+    true ->
+        [];
+    false ->
+        [append]
+    end.
+
+maybe_track_open_os_files(FileOptions) ->
+    case lists:member(sys_db, FileOptions) of
+    true ->
+        ok;
+    false ->
+        couch_stats_collector:track_process_count({couchdb, open_os_files})
+    end.
+
+terminate(_Reason, #file{fd = Fd}) ->
+    ok = file:close(Fd).
+
+
+handle_call({pread_iolist, Pos}, _From, File) ->
+    {RawData, NextPos} = try
+        % up to 8KB of read-ahead
+        read_raw_iolist_int(File, Pos, 2 * ?SIZE_BLOCK - (Pos rem ?SIZE_BLOCK))
+    catch
+    _:_ ->
+        read_raw_iolist_int(File, Pos, 4)
+    end,
+    <<Prefix:1/integer, Len:31/integer, RestRawData/binary>> =
+        iolist_to_binary(RawData),
+    case Prefix of
+    1 ->
+        {Md5, IoList} = extract_md5(
+            maybe_read_more_iolist(RestRawData, 16 + Len, NextPos, File)),
+        {reply, {ok, IoList, Md5}, File};
+    0 ->
+        IoList = maybe_read_more_iolist(RestRawData, Len, NextPos, File),
+        {reply, {ok, IoList, <<>>}, File}
+    end;
+
+handle_call(bytes, _From, #file{fd = Fd} = File) ->
+    {reply, file:position(Fd, eof), File};
+
+handle_call(sync, _From, #file{fd=Fd}=File) ->
+    {reply, file:sync(Fd), File};
+
+handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
+    {ok, Pos} = file:position(Fd, Pos),
+    case file:truncate(Fd) of
+    ok ->
+        {reply, ok, File#file{eof = Pos}};
+    Error ->
+        {reply, Error, File}
+    end;
+
+handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
+    Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
+    Size = iolist_size(Blocks),
+    case file:write(Fd, Blocks) of
+    ok ->
+        {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
+    Error ->
+        {reply, Error, File}
+    end;
+
+handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
+    BinSize = byte_size(Bin),
+    case Pos rem ?SIZE_BLOCK of
+    0 ->
+        Padding = <<>>;
+    BlockOffset ->
+        Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
+    end,
+    FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
+    case file:write(Fd, FinalBin) of
+    ok ->
+        {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
+    Error ->
+        {reply, Error, File}
+    end;
+
+handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
+    {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
+
+handle_cast(close, Fd) ->
+    {stop,normal,Fd}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info({'EXIT', _, normal}, Fd) ->
+    {noreply, Fd};
+handle_info({'EXIT', _, Reason}, Fd) ->
+    {stop, Reason, Fd}.
+
+
+find_header(_Fd, -1) ->
+    no_valid_header;
+find_header(Fd, Block) ->
+    case (catch load_header(Fd, Block)) of
+    {ok, Bin} ->
+        {ok, Bin};
+    _Error ->
+        find_header(Fd, Block -1)
+    end.
+
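+% A header starts at a 4096-byte block boundary with the marker byte
+% <<1>>, then a 32-bit big-endian length; the body that follows is a
+% 16-byte MD5 signature plus the term_to_binary data, with one block
+% prefix byte stripped at each subsequent block boundary.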
+load_header(Fd, Block) ->
+    {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
+        file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
+    TotalBytes = calculate_total_read_len(5, HeaderLen),
+    case TotalBytes > byte_size(RestBlock) of
+    false ->
+        <<RawBin:TotalBytes/binary, _/binary>> = RestBlock;
+    true ->
+        {ok, Missing} = file:pread(
+            Fd, (Block * ?SIZE_BLOCK) + 5 + byte_size(RestBlock),
+            TotalBytes - byte_size(RestBlock)),
+        RawBin = <<RestBlock/binary, Missing/binary>>
+    end,
+    <<Md5Sig:16/binary, HeaderBin/binary>> =
+        iolist_to_binary(remove_block_prefixes(5, RawBin)),
+    Md5Sig = couch_util:md5(HeaderBin),
+    {ok, HeaderBin}.
+
+maybe_read_more_iolist(Buffer, DataSize, _, _)
+    when DataSize =< byte_size(Buffer) ->
+    <<Data:DataSize/binary, _/binary>> = Buffer,
+    [Data];
+maybe_read_more_iolist(Buffer, DataSize, NextPos, File) ->
+    {Missing, _} =
+        read_raw_iolist_int(File, NextPos, DataSize - byte_size(Buffer)),
+    [Buffer, Missing].
+
+-spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
+    {Data::iolist(), CurPos::non_neg_integer()}.
+read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
+    read_raw_iolist_int(Fd, Pos, Len);
+read_raw_iolist_int(#file{fd = Fd}, Pos, Len) ->
+    BlockOffset = Pos rem ?SIZE_BLOCK,
+    TotalBytes = calculate_total_read_len(BlockOffset, Len),
+    {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
+    {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes}.
+
+-spec extract_md5(iolist()) -> {binary(), iolist()}.
+extract_md5(FullIoList) ->
+    {Md5List, IoList} = split_iolist(FullIoList, 16, []),
+    {iolist_to_binary(Md5List), IoList}.
+
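+% Every 4096-byte block starts with a one-byte prefix, so reading Len
+% data bytes may span extra prefix bytes. Worked example: Len = 10 at
+% block offset 4090 leaves BlockLeft = 6 in the current block, crossing
+% one block boundary, so the total read length is 10 + 0 + 1 = 11.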
+calculate_total_read_len(0, FinalLen) ->
+    calculate_total_read_len(1, FinalLen) + 1;
+calculate_total_read_len(BlockOffset, FinalLen) ->
+    case ?SIZE_BLOCK - BlockOffset of
+    BlockLeft when BlockLeft >= FinalLen ->
+        FinalLen;
+    BlockLeft ->
+        FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
+            if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
+                true -> 1 end
+    end.
+
+remove_block_prefixes(_BlockOffset, <<>>) ->
+    [];
+remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
+    remove_block_prefixes(1, Rest);
+remove_block_prefixes(BlockOffset, Bin) ->
+    BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
+    case size(Bin) of
+    Size when Size > BlockBytesAvailable ->
+        <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
+        [DataBlock | remove_block_prefixes(0, Rest)];
+    _Size ->
+        [Bin]
+    end.
+
+make_blocks(_BlockOffset, []) ->
+    [];
+make_blocks(0, IoList) ->
+    [<<0>> | make_blocks(1, IoList)];
+make_blocks(BlockOffset, IoList) ->
+    case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
+    {Begin, End} ->
+        [Begin | make_blocks(0, End)];
+    _SplitRemaining ->
+        IoList
+    end.
+
+%% @doc Returns a tuple where the first element contains the leading SplitAt
+%% bytes of the original iolist and the second element is the tail. If SplitAt
+%% is larger than byte_size(IoList), the remaining number of bytes
+%% (SplitAt - byte_size(IoList)) is returned instead.
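+%% For example, split_iolist([<<"abcd">>], 2, []) returns
+%% {[<<"ab">>], [<<"cd">>]}, and split_iolist([<<"ab">>], 5, [])
+%% returns 3.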
+-spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
+    {iolist(), iolist()} | non_neg_integer().
+split_iolist(List, 0, BeginAcc) ->
+    {lists:reverse(BeginAcc), List};
+split_iolist([], SplitAt, _BeginAcc) ->
+    SplitAt;
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
+    split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
+    <<Begin:SplitAt/binary,End/binary>> = Bin,
+    split_iolist([End | Rest], 0, [Begin | BeginAcc]);
+split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
+    case split_iolist(Sublist, SplitAt, BeginAcc) of
+    {Begin, End} ->
+        {Begin, [End | Rest]};
+    SplitRemaining ->
+        split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
+    end;
+split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
+    split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd.erl b/src/couch_httpd.erl
new file mode 100644
index 0000000..28932ba
--- /dev/null
+++ b/src/couch_httpd.erl
@@ -0,0 +1,1114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd).
+-include("couch_db.hrl").
+
+-export([start_link/0, start_link/1, stop/0, config_change/2,
+        handle_request/5]).
+
+-export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
+-export([path/1,absolute_uri/2,body_length/1]).
+-export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
+-export([make_fun_spec_strs/1]).
+-export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
+-export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
+-export([doc_etag/1, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
+-export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
+-export([start_chunked_response/3,send_chunk/2,log_request/2]).
+-export([start_response_length/4, start_response/3, send/2]).
+-export([start_json_response/2, start_json_response/3, end_json_response/1]).
+-export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]).
+-export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
+-export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
+-export([http_1_0_keep_alive/2]).
+
+start_link() ->
+    start_link(http).
+start_link(http) ->
+    Port = couch_config:get("httpd", "port", "5984"),
+    start_link(?MODULE, [{port, Port}]);
+start_link(https) ->
+    Port = couch_config:get("ssl", "port", "6984"),
+    CertFile = couch_config:get("ssl", "cert_file", nil),
+    KeyFile = couch_config:get("ssl", "key_file", nil),
+    Options = case CertFile /= nil andalso KeyFile /= nil of
+        true ->
+            SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
+
+            %% set password if one is needed for the cert
+            SslOpts1 = case couch_config:get("ssl", "password", nil) of
+                nil -> SslOpts;
+                Password ->
+                    SslOpts ++ [{password, Password}]
+            end,
+            % do we verify certificates ?
+            FinalSslOpts = case couch_config:get("ssl",
+                    "verify_ssl_certificates", "false") of
+                "false" -> SslOpts1;
+                "true" ->
+                    case couch_config:get("ssl",
+                            "cacert_file", nil) of
+                        nil ->
+                            io:format("Verify SSL certificate "
+                                ++"enabled but file containing "
+                                ++"PEM encoded CA certificates is "
+                                ++"missing", []),
+                            throw({error, missing_cacerts});
+                        CaCertFile ->
+                            Depth = list_to_integer(couch_config:get("ssl",
+                                    "ssl_certificate_max_depth",
+                                    "1")),
+                            FinalOpts = [
+                                {cacertfile, CaCertFile},
+                                {depth, Depth},
+                                {verify, verify_peer}],
+                            % allows custom verify fun.
+                            case couch_config:get("ssl",
+                                    "verify_fun", nil) of
+                                nil -> FinalOpts;
+                                SpecStr ->
+                                    FinalOpts
+                                    ++ [{verify_fun, make_arity_3_fun(SpecStr)}]
+                            end
+                    end
+            end,
+
+            [{port, Port},
+                {ssl, true},
+                {ssl_opts, FinalSslOpts}];
+        false ->
+            io:format("SSL enabled but PEM certificates are missing.", []),
+            throw({error, missing_certs})
+    end,
+    start_link(https, Options).
+start_link(Name, Options) ->
+    % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+    % will restart us, and then we will pick up the new settings.
+
+    BindAddress = couch_config:get("httpd", "bind_address", any),
+    validate_bind_address(BindAddress),
+    DefaultSpec = "{couch_httpd_db, handle_request}",
+    DefaultFun = make_arity_1_fun(
+        couch_config:get("httpd", "default_handler", DefaultSpec)
+    ),
+
+    UrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
+        end, couch_config:get("httpd_global_handlers")),
+
+    DbUrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
+        end, couch_config:get("httpd_db_handlers")),
+
+    DesignUrlHandlersList = lists:map(
+        fun({UrlKey, SpecStr}) ->
+            {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
+        end, couch_config:get("httpd_design_handlers")),
+
+    UrlHandlers = dict:from_list(UrlHandlersList),
+    DbUrlHandlers = dict:from_list(DbUrlHandlersList),
+    DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
+    {ok, ServerOptions} = couch_util:parse_term(
+        couch_config:get("httpd", "server_options", "[]")),
+    {ok, SocketOptions} = couch_util:parse_term(
+        couch_config:get("httpd", "socket_options", "[]")),
+
+    set_auth_handlers(),
+
+    % ensure uuid is set so that concurrent replications
+    % get the same value.
+    couch_server:get_uuid(),
+
+    Loop = fun(Req)->
+        case SocketOptions of
+        [] ->
+            ok;
+        _ ->
+            ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
+        end,
+        apply(?MODULE, handle_request, [
+            Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
+        ])
+    end,
+
+    % set mochiweb options
+    FinalOptions = lists:append([Options, ServerOptions, [
+            {loop, Loop},
+            {name, Name},
+            {ip, BindAddress}]]),
+
+    % launch mochiweb
+    {ok, Pid} = case mochiweb_http:start(FinalOptions) of
+        {ok, MochiPid} ->
+            {ok, MochiPid};
+        {error, Reason} ->
+            io:format("Failure to start Mochiweb: ~s~n",[Reason]),
+            throw({error, Reason})
+    end,
+
+    ok = couch_config:register(fun ?MODULE:config_change/2, Pid),
+    {ok, Pid}.
+
+
+stop() ->
+    mochiweb_http:stop(couch_httpd),
+    mochiweb_http:stop(https).
+
+config_change("httpd", "bind_address") ->
+    ?MODULE:stop();
+config_change("httpd", "port") ->
+    ?MODULE:stop();
+config_change("httpd", "default_handler") ->
+    ?MODULE:stop();
+config_change("httpd", "server_options") ->
+    ?MODULE:stop();
+config_change("httpd", "socket_options") ->
+    ?MODULE:stop();
+config_change("httpd", "authentication_handlers") ->
+    set_auth_handlers();
+config_change("httpd_global_handlers", _) ->
+    ?MODULE:stop();
+config_change("httpd_db_handlers", _) ->
+    ?MODULE:stop();
+config_change("ssl", _) ->
+    ?MODULE:stop().
+
+set_auth_handlers() ->
+    AuthenticationSrcs = make_fun_spec_strs(
+        couch_config:get("httpd", "authentication_handlers", "")),
+    AuthHandlers = lists:map(
+        fun(A) -> {make_arity_1_fun(A), ?l2b(A)} end, AuthenticationSrcs),
+    ok = application:set_env(couch, auth_handlers, AuthHandlers).
+
+% SpecStr is a string like "{my_module, my_fun}"
+%  or "{my_module, my_fun, <<"my_arg">>}"
+make_arity_1_fun(SpecStr) ->
+    case couch_util:parse_term(SpecStr) of
+    {ok, {Mod, Fun, SpecArg}} ->
+        fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
+    {ok, {Mod, Fun}} ->
+        fun(Arg) -> Mod:Fun(Arg) end
+    end.
+
+make_arity_2_fun(SpecStr) ->
+    case couch_util:parse_term(SpecStr) of
+    {ok, {Mod, Fun, SpecArg}} ->
+        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
+    {ok, {Mod, Fun}} ->
+        fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
+    end.
+
+make_arity_3_fun(SpecStr) ->
+    case couch_util:parse_term(SpecStr) of
+    {ok, {Mod, Fun, SpecArg}} ->
+        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
+    {ok, {Mod, Fun}} ->
+        fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
+    end.
+
+% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
+make_fun_spec_strs(SpecStr) ->
+    re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
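+
+% For example (module and function names illustrative):
+%
+%   make_fun_spec_strs("{mod_a, fun_a}, {mod_b, fun_b}")
+%       => ["{mod_a, fun_a}", "{mod_b, fun_b}"]
+%
+% and (make_arity_1_fun("{mod_a, fun_a}"))(Req) calls mod_a:fun_a(Req).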
+
+handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
+    DesignUrlHandlers) ->
+    %% reset rewrite count for new request
+    erlang:put(?REWRITE_COUNT, 0),
+
+    MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
+
+    handle_request_int(MochiReq1, DefaultFun,
+                UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
+
+handle_request_int(MochiReq, DefaultFun,
+            UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
+    Begin = now(),
+    % for the path, use the raw path with the query string and fragment
+    % removed, but URL quoting left intact
+    RawUri = MochiReq:get(raw_path),
+    {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+
+    Headers = MochiReq:get(headers),
+
+    % get requested path
+    RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+        undefined ->
+            case MochiReq:get_header_value("x-couchdb-requested-path") of
+                undefined -> RawUri;
+                R -> R
+            end;
+        P -> P
+    end,
+
+    HandlerKey =
+    case mochiweb_util:partition(Path, "/") of
+    {"", "", ""} ->
+        <<"/">>; % Special case the root url handler
+    {FirstPart, _, _} ->
+        list_to_binary(FirstPart)
+    end,
+    ?LOG_DEBUG("~p ~s ~p from ~p~nHeaders: ~p", [
+        MochiReq:get(method),
+        RawUri,
+        MochiReq:get(version),
+        MochiReq:get(peer),
+        mochiweb_headers:to_list(MochiReq:get(headers))
+    ]),
+
+    Method1 =
+    case MochiReq:get(method) of
+        % already an atom
+        Meth when is_atom(Meth) -> Meth;
+
+        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.), so convert
+        % when possible (if any module references the atom, it already exists).
+        Meth -> couch_util:to_existing_atom(Meth)
+    end,
+    increment_method_stats(Method1),
+
+    % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
+    MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
+    Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST",
+                                                 "PUT", "DELETE",
+                                                 "TRACE", "CONNECT",
+                                                 "COPY"]) of
+    true ->
+        ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
+        case Method1 of
+        'POST' -> couch_util:to_existing_atom(MethodOverride);
+        _ ->
+            % Ignore X-HTTP-Method-Override when the original verb isn't POST.
+            % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
+            % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
+            Method1
+        end;
+    _ -> Method1
+    end,
+
+    % alias HEAD to GET as mochiweb takes care of stripping the body
+    Method = case Method2 of
+        'HEAD' -> 'GET';
+        Other -> Other
+    end,
+
+    HttpReq = #httpd{
+        mochi_req = MochiReq,
+        peer = MochiReq:get(peer),
+        method = Method,
+        requested_path_parts =
+            [?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
+        path_parts = [?l2b(unquote(Part)) || Part <- string:tokens(Path, "/")],
+        db_url_handlers = DbUrlHandlers,
+        design_url_handlers = DesignUrlHandlers,
+        default_fun = DefaultFun,
+        url_handlers = UrlHandlers,
+        user_ctx = erlang:erase(pre_rewrite_user_ctx),
+        auth = erlang:erase(pre_rewrite_auth)
+    },
+
+    HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
+    {ok, AuthHandlers} = application:get_env(couch, auth_handlers),
+
+    {ok, Resp} =
+    try
+        case couch_httpd_cors:is_preflight_request(HttpReq) of
+        #httpd{} ->
+            case authenticate_request(HttpReq, AuthHandlers) of
+            #httpd{} = Req ->
+                HandlerFun(Req);
+            Response ->
+                Response
+            end;
+        Response ->
+            Response
+        end
+    catch
+        throw:{http_head_abort, Resp0} ->
+            {ok, Resp0};
+        throw:{invalid_json, S} ->
+            ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []),
+            ?LOG_DEBUG("Invalid JSON: ~p",[S]),
+            send_error(HttpReq, {bad_request, invalid_json});
+        throw:unacceptable_encoding ->
+            ?LOG_ERROR("unsupported encoding method for the response", []),
+            send_error(HttpReq, {not_acceptable, "unsupported encoding"});
+        throw:bad_accept_encoding_value ->
+            ?LOG_ERROR("received invalid Accept-Encoding header", []),
+            send_error(HttpReq, bad_request);
+        exit:normal ->
+            exit(normal);
+        exit:snappy_nif_not_loaded ->
+            ErrorReason = "To access the database or view index, Apache CouchDB"
+                " must be built with Erlang OTP R13B04 or higher.",
+            ?LOG_ERROR("~s", [ErrorReason]),
+            send_error(HttpReq, {bad_otp_release, ErrorReason});
+        exit:{body_too_large, _} ->
+            send_error(HttpReq, request_entity_too_large);
+        throw:Error ->
+            Stack = erlang:get_stacktrace(),
+            ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]),
+            ?LOG_DEBUG("Stacktrace: ~p",[Stack]),
+            send_error(HttpReq, Error);
+        error:badarg ->
+            Stack = erlang:get_stacktrace(),
+            ?LOG_ERROR("Badarg error in HTTP request",[]),
+            ?LOG_INFO("Stacktrace: ~p",[Stack]),
+            send_error(HttpReq, badarg);
+        error:function_clause ->
+            Stack = erlang:get_stacktrace(),
+            ?LOG_ERROR("function_clause error in HTTP request",[]),
+            ?LOG_INFO("Stacktrace: ~p",[Stack]),
+            send_error(HttpReq, function_clause);
+        Tag:Error ->
+            Stack = erlang:get_stacktrace(),
+            ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
+            ?LOG_INFO("Stacktrace: ~p",[Stack]),
+            send_error(HttpReq, Error)
+    end,
+    RequestTime = round(timer:now_diff(now(), Begin)/1000),
+    couch_stats_collector:record({couchdb, request_time}, RequestTime),
+    couch_stats_collector:increment({httpd, requests}),
+    {ok, Resp}.
+
+% Try the authentication handlers in order until one sets a user_ctx.
+% The auth funs also have the option of returning a response.
+% Move this to couch_httpd_auth?
+authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthHandlers) ->
+    Req;
+authenticate_request(#httpd{} = Req, []) ->
+    case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+    "true" ->
+        throw({unauthorized, <<"Authentication required.">>});
+    "false" ->
+        Req#httpd{user_ctx=#user_ctx{}}
+    end;
+authenticate_request(#httpd{} = Req, [{AuthFun, AuthSrc} | RestAuthHandlers]) ->
+    R = case AuthFun(Req) of
+        #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 ->
+            Req2#httpd{user_ctx=UserCtx#user_ctx{handler=AuthSrc}};
+        Else -> Else
+    end,
+    authenticate_request(R, RestAuthHandlers);
+authenticate_request(Response, _AuthSrcs) ->
+    Response.
+
+increment_method_stats(Method) ->
+    couch_stats_collector:increment({httpd_request_methods, Method}).
+
+validate_referer(Req) ->
+    Host = host_for_request(Req),
+    Referer = header_value(Req, "Referer", fail),
+    case Referer of
+    fail ->
+        throw({bad_request, <<"Referer header required.">>});
+    Referer ->
+        {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
+        if
+            RefererHost =:= Host -> ok;
+            true -> throw({bad_request, <<"Referer header must match host.">>})
+        end
+    end.
+
+validate_ctype(Req, Ctype) ->
+    case header_value(Req, "Content-Type") of
+    undefined ->
+        throw({bad_ctype, "Content-Type must be "++Ctype});
+    ReqCtype ->
+        case string:tokens(ReqCtype, ";") of
+        [Ctype] -> ok;
+        [Ctype, _Rest] -> ok;
+        _Else ->
+            throw({bad_ctype, "Content-Type must be "++Ctype})
+        end
+    end.
+
+% Utilities
+
+partition(Path) ->
+    mochiweb_util:partition(Path, "/").
+
+header_value(#httpd{mochi_req=MochiReq}, Key) ->
+    MochiReq:get_header_value(Key).
+
+header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+    case MochiReq:get_header_value(Key) of
+    undefined -> Default;
+    Value -> Value
+    end.
+
+primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+    MochiReq:get_primary_header_value(Key).
+
+accepted_encodings(#httpd{mochi_req=MochiReq}) ->
+    case MochiReq:accepted_encodings(["gzip", "identity"]) of
+    bad_accept_encoding_value ->
+        throw(bad_accept_encoding_value);
+    [] ->
+        throw(unacceptable_encoding);
+    EncList ->
+        EncList
+    end.
+
+serve_file(Req, RelativePath, DocumentRoot) ->
+    serve_file(Req, RelativePath, DocumentRoot, []).
+
+serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot,
+           ExtraHeaders) ->
+    log_request(Req, 200),
+    ResponseHeaders = server_header()
+        ++ couch_httpd_auth:cookie_auth_header(Req, [])
+        ++ ExtraHeaders,
+    {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
+            couch_httpd_cors:cors_headers(Req, ResponseHeaders))}.
+
+qs_value(Req, Key) ->
+    qs_value(Req, Key, undefined).
+
+qs_value(Req, Key, Default) ->
+    couch_util:get_value(Key, qs(Req), Default).
+
+qs_json_value(Req, Key, Default) ->
+    case qs_value(Req, Key, Default) of
+    Default ->
+        Default;
+    Result ->
+        ?JSON_DECODE(Result)
+    end.
+
+qs(#httpd{mochi_req=MochiReq}) ->
+    MochiReq:parse_qs().
+
+path(#httpd{mochi_req=MochiReq}) ->
+    MochiReq:get(path).
+
+host_for_request(#httpd{mochi_req=MochiReq}) ->
+    XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
+    case MochiReq:get_header_value(XHost) of
+        undefined ->
+            case MochiReq:get_header_value("Host") of
+                undefined ->
+                    {ok, {Address, Port}} = case MochiReq:get(socket) of
+                        {ssl, SslSocket} -> ssl:sockname(SslSocket);
+                        Socket -> inet:sockname(Socket)
+                    end,
+                    inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
+                Value1 ->
+                    Value1
+            end;
+        Value -> Value
+    end.
+
+absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
+    Host = host_for_request(Req),
+    XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
+    Scheme = case MochiReq:get_header_value(XSsl) of
+                 "on" -> "https";
+                 _ ->
+                     XProto = couch_config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"),
+                     case MochiReq:get_header_value(XProto) of
+                         %% Restrict to "https" and "http" schemes only
+                         "https" -> "https";
+                         _ -> case MochiReq:get(scheme) of
+                                  https -> "https";
+                                  http -> "http"
+                              end
+                     end
+             end,
+    Scheme ++ "://" ++ Host ++ Path.
+
+unquote(UrlEncodedString) ->
+    mochiweb_util:unquote(UrlEncodedString).
+
+quote(UrlDecodedString) ->
+    mochiweb_util:quote_plus(UrlDecodedString).
+
+parse_form(#httpd{mochi_req=MochiReq}) ->
+    mochiweb_multipart:parse_form(MochiReq).
+
+recv(#httpd{mochi_req=MochiReq}, Len) ->
+    MochiReq:recv(Len).
+
+recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+    % ChunkFun is called once for each chunk:
+    %     ChunkFun({Length, Binary}, State)
+    % It is called with Length == 0 for the final chunk.
+    MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
+
+body_length(#httpd{mochi_req=MochiReq}) ->
+    MochiReq:get(body_length).
+
+body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
+    MaxSize = list_to_integer(
+        couch_config:get("couchdb", "max_document_size", "4294967296")),
+    MochiReq:recv_body(MaxSize);
+body(#httpd{req_body=ReqBody}) ->
+    ReqBody.
+
+json_body(Httpd) ->
+    ?JSON_DECODE(body(Httpd)).
+
+json_body_obj(Httpd) ->
+    case json_body(Httpd) of
+        {Props} -> {Props};
+        _Else ->
+            throw({bad_request, "Request body must be a JSON object"})
+    end.
+
+
+
+doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
+    "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
+
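+% Hash term_to_binary(Term) with MD5 and render the 128-bit value in
+% base 36, wrapped in double quotes, so equal terms yield equal ETags.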
+make_etag(Term) ->
+    <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)),
+    iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
+
+etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
+    etag_match(Req, binary_to_list(CurrentEtag));
+
+etag_match(Req, CurrentEtag) ->
+    EtagsToMatch = string:tokens(
+        header_value(Req, "If-None-Match", ""), ", "),
+    lists:member(CurrentEtag, EtagsToMatch).
+
+etag_respond(Req, CurrentEtag, RespFun) ->
+    case etag_match(Req, CurrentEtag) of
+    true ->
+        % the client has this in their cache.
+        send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
+    false ->
+        % Run the function.
+        RespFun()
+    end.
+
+etag_maybe(Req, RespFun) ->
+    try
+        RespFun()
+    catch
+        throw:{etag_match, ETag} ->
+            send_response(Req, 304, [{"ETag", ETag}], <<>>)
+    end.
+
+verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
+    verify_is_server_admin(UserCtx);
+verify_is_server_admin(#user_ctx{roles=Roles}) ->
+    case lists:member(<<"_admin">>, Roles) of
+    true -> ok;
+    false -> throw({unauthorized, <<"You are not a server admin.">>})
+    end.
+
+log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) ->
+    ?LOG_INFO("~s - - ~s ~s ~B", [
+        Peer,
+        MochiReq:get(method),
+        MochiReq:get(raw_path),
+        Code
+    ]),
+    gen_event:notify(couch_plugin, {log_request, Req, Code}).
+
+
+start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
+    log_request(Req, Code),
+    couch_stats_collector:increment({httpd_status_codes, Code}),
+    Headers1 = Headers ++ server_header() ++
+               couch_httpd_auth:cookie_auth_header(Req, Headers),
+    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
+    Resp = MochiReq:start_response_length({Code, Headers2, Length}),
+    case MochiReq:get(method) of
+    'HEAD' -> throw({http_head_abort, Resp});
+    _ -> ok
+    end,
+    {ok, Resp}.
+
+start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
+    log_request(Req, Code),
+    couch_stats_collector:increment({httpd_status_codes, Code}),
+    CookieHeader = couch_httpd_auth:cookie_auth_header(Req, Headers),
+    Headers1 = Headers ++ server_header() ++ CookieHeader,
+    Headers2 = couch_httpd_cors:cors_headers(Req, Headers1),
+    Resp = MochiReq:start_response({Code, Headers2}),
+    case MochiReq:get(method) of
+        'HEAD' -> throw({http_head_abort, Resp});
+        _ -> ok
+    end,
+    {ok, Resp}.
+
+send(Resp, Data) ->
+    Resp:send(Data),
+    {ok, Resp}.
+
+no_resp_conn_header([]) ->
+    true;
+no_resp_conn_header([{Hdr, _}|Rest]) ->
+    case string:to_lower(Hdr) of
+        "connection" -> false;
+        _ -> no_resp_conn_header(Rest)
+    end.
+
+http_1_0_keep_alive(Req, Headers) ->
+    KeepOpen = Req:should_close() == false,
+    IsHttp10 = Req:get(version) == {1, 0},
+    NoRespHeader = no_resp_conn_header(Headers),
+    case KeepOpen andalso IsHttp10 andalso NoRespHeader of
+        true -> [{"Connection", "Keep-Alive"} | Headers];
+        false -> Headers
+    end.
+
+start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
+    log_request(Req, Code),
+    couch_stats_collector:increment({httpd_status_codes, Code}),
+    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
+    Headers2 = Headers1 ++ server_header() ++
+               couch_httpd_auth:cookie_auth_header(Req, Headers1),
+    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
+    Resp = MochiReq:respond({Code, Headers3, chunked}),
+    case MochiReq:get(method) of
+    'HEAD' -> throw({http_head_abort, Resp});
+    _ -> ok
+    end,
+    {ok, Resp}.
+
+send_chunk(Resp, Data) ->
+    case iolist_size(Data) of
+    0 -> ok; % do nothing
+    _ -> Resp:write_chunk(Data)
+    end,
+    {ok, Resp}.
+
+last_chunk(Resp) ->
+    Resp:write_chunk([]),
+    {ok, Resp}.
+
+send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
+    log_request(Req, Code),
+    couch_stats_collector:increment({httpd_status_codes, Code}),
+    Headers1 = http_1_0_keep_alive(MochiReq, Headers),
+    if Code >= 500 ->
+        ?LOG_ERROR("httpd ~p error response:~n ~s", [Code, Body]);
+    Code >= 400 ->
+        ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
+    true -> ok
+    end,
+    Headers2 = Headers1 ++ server_header() ++
+               couch_httpd_auth:cookie_auth_header(Req, Headers1),
+    Headers3 = couch_httpd_cors:cors_headers(Req, Headers2),
+
+    {ok, MochiReq:respond({Code, Headers3, Body})}.
+
+send_method_not_allowed(Req, Methods) ->
+    send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
+
+send_json(Req, Value) ->
+    send_json(Req, 200, Value).
+
+send_json(Req, Code, Value) ->
+    send_json(Req, Code, [], Value).
+
+send_json(Req, Code, Headers, Value) ->
+    initialize_jsonp(Req),
+    DefaultHeaders = [
+        {"Content-Type", negotiate_content_type(Req)},
+        {"Cache-Control", "must-revalidate"}
+    ],
+    Body = [start_jsonp(), ?JSON_ENCODE(Value), end_jsonp(), $\n],
+    send_response(Req, Code, DefaultHeaders ++ Headers, Body).
+
+start_json_response(Req, Code) ->
+    start_json_response(Req, Code, []).
+
+start_json_response(Req, Code, Headers) ->
+    initialize_jsonp(Req),
+    DefaultHeaders = [
+        {"Content-Type", negotiate_content_type(Req)},
+        {"Cache-Control", "must-revalidate"}
+    ],
+    {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
+    case start_jsonp() of
+        [] -> ok;
+        Start -> send_chunk(Resp, Start)
+    end,
+    {ok, Resp}.
+
+end_json_response(Resp) ->
+    send_chunk(Resp, end_jsonp() ++ [$\n]),
+    last_chunk(Resp).
+
+initialize_jsonp(Req) ->
+    case get(jsonp) of
+        undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
+        _ -> ok
+    end,
+    case get(jsonp) of
+        no_jsonp -> [];
+        [] -> [];
+        CallBack ->
+            try
+                % make sure jsonp is configured on (default off)
+                case couch_config:get("httpd", "allow_jsonp", "false") of
+                "true" ->
+                    validate_callback(CallBack);
+                _Else ->
+                    put(jsonp, no_jsonp)
+                end
+            catch
+                Error ->
+                    put(jsonp, no_jsonp),
+                    throw(Error)
+            end
+    end.
+
+start_jsonp() ->
+    case get(jsonp) of
+        no_jsonp -> [];
+        [] -> [];
+        CallBack -> ["/* CouchDB */", CallBack, "("]
+    end.
+
+end_jsonp() ->
+    case erlang:erase(jsonp) of
+        no_jsonp -> [];
+        [] -> [];
+        _ -> ");"
+    end.
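+
+% With ?callback=cb in the query string (and allow_jsonp enabled), a
+% JSON body such as {"ok":true} is sent as:
+%
+%   /* CouchDB */cb({"ok":true});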
+
+validate_callback(CallBack) when is_binary(CallBack) ->
+    validate_callback(binary_to_list(CallBack));
+validate_callback([]) ->
+    ok;
+validate_callback([Char | Rest]) ->
+    case Char of
+        _ when Char >= $a andalso Char =< $z -> ok;
+        _ when Char >= $A andalso Char =< $Z -> ok;
+        _ when Char >= $0 andalso Char =< $9 -> ok;
+        _ when Char == $. -> ok;
+        _ when Char == $_ -> ok;
+        _ when Char == $[ -> ok;
+        _ when Char == $] -> ok;
+        _ ->
+            throw({bad_request, invalid_callback})
+    end,
+    validate_callback(Rest).
+
+
+error_info({Error, Reason}) when is_list(Reason) ->
+    error_info({Error, ?l2b(Reason)});
+error_info(bad_request) ->
+    {400, <<"bad_request">>, <<>>};
+error_info({bad_request, Reason}) ->
+    {400, <<"bad_request">>, Reason};
+error_info({query_parse_error, Reason}) ->
+    {400, <<"query_parse_error">>, Reason};
+% Prior art for md5 mismatch resulting in a 400 is from AWS S3
+error_info(md5_mismatch) ->
+    {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
+error_info(not_found) ->
+    {404, <<"not_found">>, <<"missing">>};
+error_info({not_found, Reason}) ->
+    {404, <<"not_found">>, Reason};
+error_info({not_acceptable, Reason}) ->
+    {406, <<"not_acceptable">>, Reason};
+error_info(conflict) ->
+    {409, <<"conflict">>, <<"Document update conflict.">>};
+error_info({forbidden, Msg}) ->
+    {403, <<"forbidden">>, Msg};
+error_info({unauthorized, Msg}) ->
+    {401, <<"unauthorized">>, Msg};
+error_info(file_exists) ->
+    {412, <<"file_exists">>, <<"The database could not be "
+        "created, the file already exists.">>};
+error_info(request_entity_too_large) ->
+    {413, <<"too_large">>, <<"the request entity is too large">>};
+error_info({bad_ctype, Reason}) ->
+    {415, <<"bad_content_type">>, Reason};
+error_info(requested_range_not_satisfiable) ->
+    {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
+error_info({error, illegal_database_name, Name}) ->
+    Message = "Name: '" ++ Name ++ "'. Only lowercase characters (a-z), "
+        ++ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
+        ++ "are allowed. Must begin with a letter.",
+    {400, <<"illegal_database_name">>, couch_util:to_binary(Message)};
+error_info({missing_stub, Reason}) ->
+    {412, <<"missing_stub">>, Reason};
+error_info({Error, Reason}) ->
+    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
+error_info(Error) ->
+    {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
+
+error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
+    if Code == 401 ->
+        % this is where the basic auth popup is triggered
+        case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
+        undefined ->
+            case couch_config:get("httpd", "WWW-Authenticate", nil) of
+            nil ->
+                % If the client is a browser and the basic auth popup isn't
+                % turned on, redirect to the session page.
+                case ErrorStr of
+                <<"unauthorized">> ->
+                    case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of
+                    nil -> {Code, []};
+                    AuthRedirect ->
+                        case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+                        "true" ->
+                            % always send the browser popup header when
+                            % require_valid_user is set
+                            {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
+                        _False ->
+                            case MochiReq:accepts_content_type("application/json") of
+                            true ->
+                                {Code, []};
+                            false ->
+                                case MochiReq:accepts_content_type("text/html") of
+                                true ->
+                                    % Redirect to the path the user requested, not
+                                    % the one that is used internally.
+                                    UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+                                    undefined ->
+                                        MochiReq:get(path);
+                                    VHostPath ->
+                                        VHostPath
+                                    end,
+                                    RedirectLocation = lists:flatten([
+                                        AuthRedirect,
+                                        "?return=", couch_util:url_encode(UrlReturnRaw),
+                                        "&reason=", couch_util:url_encode(ReasonStr)
+                                    ]),
+                                    {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
+                                false ->
+                                    {Code, []}
+                                end
+                            end
+                        end
+                    end;
+                _Else ->
+                    {Code, []}
+                end;
+            Type ->
+                {Code, [{"WWW-Authenticate", Type}]}
+            end;
+        Type ->
+           {Code, [{"WWW-Authenticate", Type}]}
+        end;
+    true ->
+        {Code, []}
+    end.
+
+send_error(_Req, {already_sent, Resp, _Error}) ->
+    {ok, Resp};
+
+send_error(Req, Error) ->
+    {Code, ErrorStr, ReasonStr} = error_info(Error),
+    {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
+    send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
+
+send_error(Req, Code, ErrorStr, ReasonStr) ->
+    send_error(Req, Code, [], ErrorStr, ReasonStr).
+
+send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
+    send_json(Req, Code, Headers,
+        {[{<<"error">>,  ErrorStr},
+         {<<"reason">>, ReasonStr}]}).
+
+% give the option for list functions to output html or other raw errors
+send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
+    send_chunk(Resp, Reason),
+    last_chunk(Resp);
+
+send_chunked_error(Resp, Error) ->
+    {Code, ErrorStr, ReasonStr} = error_info(Error),
+    JsonError = {[{<<"code">>, Code},
+        {<<"error">>,  ErrorStr},
+        {<<"reason">>, ReasonStr}]},
+    send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+    last_chunk(Resp).
+
+send_redirect(Req, Path) ->
+     send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
+
+negotiate_content_type(Req) ->
+    case get(jsonp) of
+        no_jsonp -> negotiate_content_type1(Req);
+        [] -> negotiate_content_type1(Req);
+        _Callback -> "text/javascript"
+    end.
+
+negotiate_content_type1(#httpd{mochi_req=MochiReq}) ->
+    %% Determine the appropriate Content-Type header for a JSON response
+    %% depending on the Accept header in the request. A request that
+    %% explicitly lists the correct JSON MIME type will get that type;
+    %% otherwise the response will have the generic MIME type
+    %% "text/plain; charset=utf-8".
+    AcceptedTypes = case MochiReq:get_header_value("Accept") of
+        undefined       -> [];
+        AcceptHeader    -> string:tokens(AcceptHeader, ", ")
+    end,
+    case lists:member("application/json", AcceptedTypes) of
+        true  -> "application/json";
+        false -> "text/plain; charset=utf-8"
+    end.
+
+server_header() ->
+    [{"Server", "CouchDB/" ++ couch_server:get_version() ++
+                " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
+
+
+-record(mp, {boundary, buffer, data_fun, callback}).
+
+
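+% Streaming multipart parser. DataFun yields {Data, NextDataFun} chunks of
+% the request body. Callback is continuation-style: it is called with
+% {headers, Headers} for each part, then {body, Data} for each body chunk,
+% then body_end after each part, and finally eof; each call must return the
+% callback to use for the next event.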
+parse_multipart_request(ContentType, DataFun, Callback) ->
+    Boundary0 = iolist_to_binary(get_boundary(ContentType)),
+    Boundary = <<"\r\n--", Boundary0/binary>>,
+    Mp = #mp{boundary= Boundary,
+            buffer= <<>>,
+            data_fun=DataFun,
+            callback=Callback},
+    {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
+        fun nil_callback/1),
+    #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
+            parse_part_header(Mp2),
+    {Buffer, DataFun2, Callback2}.
+
+nil_callback(_Data)->
+    fun nil_callback/1.
+
+get_boundary({"multipart/" ++ _, Opts}) ->
+    case couch_util:get_value("boundary", Opts) of
+        S when is_list(S) ->
+            S
+    end;
+get_boundary(ContentType) ->
+    {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
+    get_boundary({"multipart/", Opts}).
+
+
+
+split_header(<<>>) ->
+    [];
+split_header(Line) ->
+    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
+                                           binary_to_list(Line)),
+    [{string:to_lower(string:strip(Name)),
+     mochiweb_util:parse_header(Value)}].
+
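+% Feeds Buffer/DataFun data through Callback until Pattern is seen. All data
+% preceding the pattern is passed to Callback (continuation-style, as above);
+% returns {Mp, LastCallback} with the buffer positioned just past the pattern.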
+read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
+    case find_in_binary(Pattern, Buffer) of
+    not_found ->
+        Callback2 = Callback(Buffer),
+        {Buffer2, DataFun2} = DataFun(),
+        Buffer3 = iolist_to_binary(Buffer2),
+        read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
+    {partial, 0} ->
+        {NewData, DataFun2} = DataFun(),
+        read_until(Mp#mp{data_fun=DataFun2,
+                buffer= iolist_to_binary([Buffer,NewData])},
+                Pattern, Callback);
+    {partial, Skip} ->
+        <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
+        Callback2 = Callback(DataChunk),
+        {NewData, DataFun2} = DataFun(),
+        read_until(Mp#mp{data_fun=DataFun2,
+                buffer= iolist_to_binary([Rest | NewData])},
+                Pattern, Callback2);
+    {exact, 0} ->
+        PatternLen = size(Pattern),
+        <<_:PatternLen/binary, Rest/binary>> = Buffer,
+        {Mp#mp{buffer= Rest}, Callback};
+    {exact, Skip} ->
+        PatternLen = size(Pattern),
+        <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
+        Callback2 = Callback(DataChunk),
+        {Mp#mp{buffer= Rest}, Callback2}
+    end.
+
+
+parse_part_header(#mp{callback=UserCallBack}=Mp) ->
+    {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
+            fun(Next) -> acc_callback(Next, []) end),
+    HeaderData = AccCallback(get_data),
+
+    Headers =
+    lists:foldl(fun(Line, Acc) ->
+            split_header(Line) ++ Acc
+        end, [], re:split(HeaderData,<<"\r\n">>, [])),
+    NextCallback = UserCallBack({headers, Headers}),
+    parse_part_body(Mp2#mp{callback=NextCallback}).
+
+parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
+    {Mp2, WrappedCallback} = read_until(Mp, Prefix,
+            fun(Data) -> body_callback_wrapper(Data, Callback) end),
+    Callback2 = WrappedCallback(get_callback),
+    Callback3 = Callback2(body_end),
+    case check_for_last(Mp2#mp{callback=Callback3}) of
+    {last, #mp{callback=Callback3}=Mp3} ->
+        Mp3#mp{callback=Callback3(eof)};
+    {more, Mp3} ->
+        parse_part_header(Mp3)
+    end.
+
+acc_callback(get_data, Acc)->
+    iolist_to_binary(lists:reverse(Acc));
+acc_callback(Data, Acc)->
+    fun(Next) -> acc_callback(Next, [Data | Acc]) end.
+
+body_callback_wrapper(get_callback, Callback) ->
+    Callback;
+body_callback_wrapper(Data, Callback) ->
+    Callback2 = Callback({body, Data}),
+    fun(Next) -> body_callback_wrapper(Next, Callback2) end.
+
+
+check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
+    case Buffer of
+    <<"--",_/binary>> -> {last, Mp};
+    <<_, _, _/binary>> -> {more, Mp};
+    _ -> % not long enough
+        {Data, DataFun2} = DataFun(),
+        check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
+                data_fun = DataFun2})
+    end.
+
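+% Searches Data for the pattern B. Returns {exact, Skip} when the whole
+% pattern occurs Skip bytes in, {partial, Skip} when only a prefix of the
+% pattern reaches the end of Data (more data is needed to decide), or
+% not_found. e.g. find_in_binary(<<"--b">>, <<"data--">>) =:= {partial, 4}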
+find_in_binary(_B, <<>>) ->
+    not_found;
+
+find_in_binary(B, Data) ->
+    case binary:match(Data, [B], []) of
+    nomatch ->
+        partial_find(binary:part(B, {0, byte_size(B) - 1}),
+                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), 1);
+    {Pos, _Len} ->
+        {exact, Pos}
+    end.
+
+partial_find(<<>>, _Data, _Pos) ->
+    not_found;
+
+partial_find(B, Data, N) when byte_size(Data) > 0 ->
+    case binary:match(Data, [B], []) of
+    nomatch ->
+        partial_find(binary:part(B, {0, byte_size(B) - 1}),
+                     binary:part(Data, {byte_size(Data), -byte_size(Data) + 1}), N + 1);
+    {Pos, _Len} ->
+        {partial, N + Pos}
+    end;
+
+partial_find(_B, _Data, _N) ->
+    not_found.
+
+
+validate_bind_address(Address) ->
+    case inet_parse:address(Address) of
+        {ok, _} -> ok;
+        _ -> throw({error, invalid_bind_address})
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_auth.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_auth.erl b/src/couch_httpd_auth.erl
new file mode 100644
index 0000000..b8c4e26
--- /dev/null
+++ b/src/couch_httpd_auth.erl
@@ -0,0 +1,380 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_auth).
+-include("couch_db.hrl").
+
+-export([default_authentication_handler/1,special_test_authentication_handler/1]).
+-export([cookie_authentication_handler/1]).
+-export([null_authentication_handler/1]).
+-export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
+-export([cookie_auth_header/2]).
+-export([handle_session_req/1]).
+
+-import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
+
+special_test_authentication_handler(Req) ->
+    case header_value(Req, "WWW-Authenticate") of
+    "X-Couch-Test-Auth " ++ NamePass ->
+        % NamePass is a colon separated string: "joe schmoe:a password".
+        [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
+        case {Name, Pass} of
+        {"Jan Lehnardt", "apple"} -> ok;
+        {"Christopher Lenz", "dog food"} -> ok;
+        {"Noah Slater", "biggiesmalls endian"} -> ok;
+        {"Chris Anderson", "mp3"} -> ok;
+        {"Damien Katz", "pecan pie"} -> ok;
+        {_, _} ->
+            throw({unauthorized, <<"Name or password is incorrect.">>})
+        end,
+        Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
+    _ ->
+        % No X-Couch-Test-Auth credentials sent, give admin access so the
+        % previous authentication can be restored after the test
+        Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
+    end.
+
+basic_name_pw(Req) ->
+    AuthorizationHeader = header_value(Req, "Authorization"),
+    case AuthorizationHeader of
+    "Basic " ++ Base64Value ->
+        case re:split(base64:decode(Base64Value), ":",
+                      [{return, list}, {parts, 2}]) of
+        ["_", "_"] ->
+            % special name and pass to be logged out
+            nil;
+        [User, Pass] ->
+            {User, Pass};
+        _ ->
+            nil
+        end;
+    _ ->
+        nil
+    end.
+
+default_authentication_handler(Req) ->
+    case basic_name_pw(Req) of
+    {User, Pass} ->
+        case couch_auth_cache:get_user_creds(User) of
+            nil ->
+                throw({unauthorized, <<"Name or password is incorrect.">>});
+            UserProps ->
+                case authenticate(?l2b(Pass), UserProps) of
+                    true ->
+                        Req#httpd{user_ctx=#user_ctx{
+                            name=?l2b(User),
+                            roles=couch_util:get_value(<<"roles">>, UserProps, [])
+                        }};
+                    _Else ->
+                        throw({unauthorized, <<"Name or password is incorrect.">>})
+                end
+        end;
+    nil ->
+        case couch_server:has_admins() of
+        true ->
+            Req;
+        false ->
+            case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+                "true" -> Req;
+                % If no admins, and no user required, then everyone is admin!
+                % Yay, admin party!
+                _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
+            end
+        end
+    end.
+
+null_authentication_handler(Req) ->
+    Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
+
+%% @doc proxy auth handler.
+%
+% This handler allows creation of a userCtx object from a user authenticated
+% remotely. The client just passes specific headers to CouchDB and the
+% handler creates the userCtx. Header names can be defined in local.ini. By
+% default they are:
+%
+%   * X-Auth-CouchDB-UserName : contains the username (x_auth_username in
+%   the couch_httpd_auth section)
+%   * X-Auth-CouchDB-Roles : contains the user roles, a list of roles
+%   separated by commas (x_auth_roles in the couch_httpd_auth section)
+%   * X-Auth-CouchDB-Token : token to authenticate the authorization
+%   (x_auth_token in the couch_httpd_auth section). This token is an
+%   HMAC-SHA1 of the username, keyed with the secret key, which must be the
+%   same on the client and the CouchDB node; it is the "secret" key in the
+%   couch_httpd_auth section of the ini. This token is optional unless the
+%   proxy_use_secret key in the couch_httpd_auth section is set to true.
+%
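+% A hedged sketch of how a client could compute the token (this mirrors the
+% ExpectedToken computation below; Secret and UserName are illustrative):
+%
+%   Token = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
+%   %% sent as:  X-Auth-CouchDB-UserName: UserName
+%   %%           X-Auth-CouchDB-Token:    Token
+%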
+proxy_authentication_handler(Req) ->
+    case proxy_auth_user(Req) of
+        nil -> Req;
+        Req2 -> Req2
+    end.
+
+%% @deprecated
+proxy_authentification_handler(Req) ->
+    proxy_authentication_handler(Req).
+
+proxy_auth_user(Req) ->
+    XHeaderUserName = couch_config:get("couch_httpd_auth", "x_auth_username",
+                                "X-Auth-CouchDB-UserName"),
+    XHeaderRoles = couch_config:get("couch_httpd_auth", "x_auth_roles",
+                                "X-Auth-CouchDB-Roles"),
+    XHeaderToken = couch_config:get("couch_httpd_auth", "x_auth_token",
+                                "X-Auth-CouchDB-Token"),
+    case header_value(Req, XHeaderUserName) of
+        undefined -> nil;
+        UserName ->
+            Roles = case header_value(Req, XHeaderRoles) of
+                undefined -> [];
+                Else ->
+                    [?l2b(R) || R <- string:tokens(Else, ",")]
+            end,
+            case couch_config:get("couch_httpd_auth", "proxy_use_secret", "false") of
+                "true" ->
+                    case couch_config:get("couch_httpd_auth", "secret", nil) of
+                        nil ->
+                            Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
+                        Secret ->
+                            ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
+                            case header_value(Req, XHeaderToken) of
+                                Token when Token == ExpectedToken ->
+                                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
+                                                            roles=Roles}};
+                                _ -> nil
+                            end
+                    end;
+                _ ->
+                    Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
+            end
+    end.
+
+
+cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
+    case MochiReq:get_cookie_value("AuthSession") of
+    undefined -> Req;
+    [] -> Req;
+    Cookie ->
+        [User, TimeStr, HashStr] = try
+            AuthSession = couch_util:decodeBase64Url(Cookie),
+            [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
+                                     [{return, list}, {parts, 3}])
+        catch
+            _:_Error ->
+                Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
+                throw({bad_request, Reason})
+        end,
+        % Verify expiry and hash
+        CurrentTime = make_cookie_time(),
+        case couch_config:get("couch_httpd_auth", "secret", nil) of
+        nil ->
+            ?LOG_DEBUG("cookie auth secret is not set",[]),
+            Req;
+        SecretStr ->
+            Secret = ?l2b(SecretStr),
+            case couch_auth_cache:get_user_creds(User) of
+            nil -> Req;
+            UserProps ->
+                UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
+                FullSecret = <<Secret/binary, UserSalt/binary>>,
+                ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
+                Hash = ?l2b(HashStr),
+                Timeout = list_to_integer(
+                    couch_config:get("couch_httpd_auth", "timeout", "600")),
+                ?LOG_DEBUG("timeout ~p", [Timeout]),
+                case (catch erlang:list_to_integer(TimeStr, 16)) of
+                    TimeStamp when CurrentTime < TimeStamp + Timeout ->
+                        case couch_passwords:verify(ExpectedHash, Hash) of
+                            true ->
+                                TimeLeft = TimeStamp + Timeout - CurrentTime,
+                                ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
+                                Req#httpd{user_ctx=#user_ctx{
+                                    name=?l2b(User),
+                                    roles=couch_util:get_value(<<"roles">>, UserProps, [])
+                                }, auth={FullSecret, TimeLeft < Timeout*0.9}};
+                            _Else ->
+                                Req
+                        end;
+                    _Else ->
+                        Req
+                end
+            end
+        end
+    end.
+
+cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
+cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
+    % Note: we only set the AuthSession cookie if:
+    %  * a valid AuthSession cookie has been received
+    %  * we are outside a 10% timeout window
+    %  * and if an AuthSession cookie hasn't already been set e.g. by a login
+    %    or logout handler.
+    % The login and logout handlers need to set the AuthSession cookie
+    % themselves.
+    CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
+    Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
+    AuthSession = couch_util:get_value("AuthSession", Cookies),
+    if AuthSession == undefined ->
+        TimeStamp = make_cookie_time(),
+        [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
+    true ->
+        []
+    end;
+cookie_auth_header(_Req, _Headers) -> [].
+
+cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
+    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
+    Hash = crypto:sha_mac(Secret, SessionData),
+    mochiweb_cookies:cookie("AuthSession",
+        couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
+        [{path, "/"}] ++ cookie_scheme(Req) ++ max_age()).
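+% The resulting cookie value is couch_util:encodeBase64Url("Name:HexTime:Hash"),
+% i.e. the same three colon-separated fields that
+% cookie_authentication_handler/1 splits apart above.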
+
+ensure_cookie_auth_secret() ->
+    case couch_config:get("couch_httpd_auth", "secret", nil) of
+        nil ->
+            NewSecret = ?b2l(couch_uuids:random()),
+            couch_config:set("couch_httpd_auth", "secret", NewSecret),
+            NewSecret;
+        Secret -> Secret
+    end.
+
+% session handlers
+% Login handler with user db
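+%
+% A hedged usage sketch (the form field names are the ones parsed below;
+% the credentials are illustrative):
+%
+%   POST /_session HTTP/1.1
+%   Content-Type: application/x-www-form-urlencoded
+%
+%   name=joe&password=secret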
+handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
+    ReqBody = MochiReq:recv_body(),
+    Form = case MochiReq:get_primary_header_value("content-type") of
+        % accept either form-encoded or JSON request bodies
+        "application/x-www-form-urlencoded" ++ _ ->
+            mochiweb_util:parse_qs(ReqBody);
+        "application/json" ++ _ ->
+            {Pairs} = ?JSON_DECODE(ReqBody),
+            lists:map(fun({Key, Value}) ->
+              {?b2l(Key), ?b2l(Value)}
+            end, Pairs);
+        _ ->
+            []
+    end,
+    UserName = ?l2b(couch_util:get_value("name", Form, "")),
+    Password = ?l2b(couch_util:get_value("password", Form, "")),
+    ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
+    User = case couch_auth_cache:get_user_creds(UserName) of
+        nil -> [];
+        Result -> Result
+    end,
+    UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
+    case authenticate(Password, User) of
+        true ->
+            % setup the session cookie
+            Secret = ?l2b(ensure_cookie_auth_secret()),
+            CurrentTime = make_cookie_time(),
+            Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
+            % TODO document the "next" feature in Futon
+            {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
+                nil ->
+                    {200, [Cookie]};
+                Redirect ->
+                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+            end,
+            send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
+                {[
+                    {ok, true},
+                    {name, couch_util:get_value(<<"name">>, User, null)},
+                    {roles, couch_util:get_value(<<"roles">>, User, [])}
+                ]});
+        _Else ->
+            % clear the session
+            Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
+            {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
+                nil ->
+                    {401, [Cookie]};
+                Redirect ->
+                    {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+            end,
+            send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
+    end;
+% get user info
+% GET /_session
+handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
+    Name = UserCtx#user_ctx.name,
+    ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
+    case {Name, ForceLogin} of
+        {null, "true"} ->
+            throw({unauthorized, <<"Please login.">>});
+        {Name, _} ->
+            send_json(Req, {[
+                % remove this ok
+                {ok, true},
+                {<<"userCtx">>, {[
+                    {name, Name},
+                    {roles, UserCtx#user_ctx.roles}
+                ]}},
+                {info, {[
+                    {authentication_db, ?l2b(couch_config:get("couch_httpd_auth", "authentication_db"))},
+                    {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
+                            couch_config:get("httpd", "authentication_handlers"))]}
+                ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
+                        auth_name(?b2l(Handler))
+                    end)}}
+            ]})
+    end;
+% logout by deleting the session
+handle_session_req(#httpd{method='DELETE'}=Req) ->
+    Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
+    {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
+        nil ->
+            {200, [Cookie]};
+        Redirect ->
+            {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+    end,
+    send_json(Req, Code, Headers, {[{ok, true}]});
+handle_session_req(Req) ->
+    send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
+
+maybe_value(_Key, undefined, _Fun) -> [];
+maybe_value(Key, Else, Fun) ->
+    [{Key, Fun(Else)}].
+
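+% Verifies Pass against the stored user doc. A "simple" scheme doc carries
+% "password_sha" and "salt"; a "pbkdf2" doc carries "derived_key", "salt"
+% and "iterations" (field names as read below).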
+authenticate(Pass, UserProps) ->
+    UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
+    {PasswordHash, ExpectedHash} =
+        case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
+        <<"simple">> ->
+            {couch_passwords:simple(Pass, UserSalt),
+            couch_util:get_value(<<"password_sha">>, UserProps, nil)};
+        <<"pbkdf2">> ->
+            Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
+            {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
+             couch_util:get_value(<<"derived_key">>, UserProps, nil)}
+    end,
+    couch_passwords:verify(PasswordHash, ExpectedHash).
+
+auth_name(String) when is_list(String) ->
+    [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
+    ?l2b(Name).
+
+make_cookie_time() ->
+    {NowMS, NowS, _} = erlang:now(),
+    NowMS * 1000000 + NowS.
+
+cookie_scheme(#httpd{mochi_req=MochiReq}) ->
+    [{http_only, true}] ++
+    case MochiReq:get(scheme) of
+        http -> [];
+        https -> [{secure, true}]
+    end.
+
+max_age() ->
+    case couch_config:get("couch_httpd_auth", "allow_persistent_cookies", "false") of
+        "false" ->
+            [];
+        "true" ->
+            Timeout = list_to_integer(
+                couch_config:get("couch_httpd_auth", "timeout", "600")),
+            [{max_age, Timeout}]
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/src/couch_httpd_cors.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_cors.erl b/src/couch_httpd_cors.erl
new file mode 100644
index 0000000..d9462d1
--- /dev/null
+++ b/src/couch_httpd_cors.erl
@@ -0,0 +1,351 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% @doc module to handle Cross-Origin Resource Sharing
+%%
+%% This module handles CORS requests and preflight requests for
+%% CouchDB. The configuration is done in the ini file.
+%%
+%% This implements http://www.w3.org/TR/cors/
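+%%
+%% A hedged configuration sketch (section and key names are taken from the
+%% code below; the host "example.com" is illustrative):
+%%
+%%   [httpd]
+%%   enable_cors = true
+%%
+%%   [cors]
+%%   origins = http://example.com
+%%   credentials = true
+%%   max_age = 3600
+%%
+%%   [cors:example.com]
+%%   origins = *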
+
+
+-module(couch_httpd_cors).
+
+-include("couch_db.hrl").
+
+-export([is_preflight_request/1, cors_headers/2]).
+
+-define(SUPPORTED_HEADERS, "Accept, Accept-Language, Content-Type," ++
+        "Expires, Last-Modified, Pragma, Origin, Content-Length," ++
+        "If-Match, Destination, X-Requested-With, " ++
+        "X-Http-Method-Override, Content-Range").
+
+-define(SUPPORTED_METHODS, "GET, HEAD, POST, PUT, DELETE," ++
+        "TRACE, CONNECT, COPY, OPTIONS").
+
+% as defined in http://www.w3.org/TR/cors/#terminology
+-define(SIMPLE_HEADERS, ["Cache-Control", "Content-Language",
+        "Content-Type", "Expires", "Last-Modified", "Pragma"]).
+-define(ALLOWED_HEADERS, lists:sort(["Server", "Etag",
+        "Accept-Ranges" | ?SIMPLE_HEADERS])).
+-define(SIMPLE_CONTENT_TYPE_VALUES, ["application/x-www-form-urlencoded",
+        "multipart/form-data", "text/plain"]).
+
+% TODO: - pick a sane default
+-define(CORS_DEFAULT_MAX_AGE, 12345).
+
+%% is_preflight_request/1
+
+% http://www.w3.org/TR/cors/#resource-preflight-requests
+
+is_preflight_request(#httpd{method=Method}=Req) when Method /= 'OPTIONS' ->
+    Req;
+is_preflight_request(Req) ->
+    EnableCors = enable_cors(),
+    is_preflight_request(Req, EnableCors).
+
+is_preflight_request(Req, false) ->
+    Req;
+is_preflight_request(#httpd{mochi_req=MochiReq}=Req, true) ->
+    case preflight_request(MochiReq) of
+    {ok, PreflightHeaders} ->
+        send_preflight_response(Req, PreflightHeaders);
+    _ ->
+        Req
+    end.
+
+
+preflight_request(MochiReq) ->
+    Origin = MochiReq:get_header_value("Origin"),
+    preflight_request(MochiReq, Origin).
+
+preflight_request(MochiReq, undefined) ->
+    % If the Origin header is not present terminate this set of
+    % steps. The request is outside the scope of this specification.
+    % http://www.w3.org/TR/cors/#resource-preflight-requests
+    MochiReq;
+preflight_request(MochiReq, Origin) ->
+    Host = couch_httpd_vhost:host(MochiReq),
+    AcceptedOrigins = get_accepted_origins(Host),
+    AcceptAll = lists:member("*", AcceptedOrigins),
+
+    HandlerFun = fun() ->
+        OriginList = couch_util:to_list(Origin),
+        handle_preflight_request(OriginList, Host, MochiReq)
+    end,
+
+    case AcceptAll of
+    true ->
+        % Always matching is acceptable since the list of
+        % origins can be unbounded.
+        % http://www.w3.org/TR/cors/#resource-preflight-requests
+        HandlerFun();
+    false ->
+        case lists:member(Origin, AcceptedOrigins) of
+        % The Origin header can only contain a single origin as
+        % the user agent will not follow redirects.
+        % http://www.w3.org/TR/cors/#resource-preflight-requests
+        % TODO: Square against multi origin thinger in Security Considerations
+        true ->
+            HandlerFun();
+        false ->
+            % If the value of the Origin header is not a
+            % case-sensitive match for any of the values
+            % in list of origins do not set any additional
+            % headers and terminate this set of steps.
+            % http://www.w3.org/TR/cors/#resource-preflight-requests
+            false
+        end
+    end.
+
+
+handle_preflight_request(Origin, Host, MochiReq) ->
+    %% get supported methods
+    SupportedMethods = split_list(cors_config(Host, "methods",
+                                              ?SUPPORTED_METHODS)),
+
+    % get supported headers
+    AllSupportedHeaders = split_list(cors_config(Host, "headers",
+                                                 ?SUPPORTED_HEADERS)),
+
+    SupportedHeaders = [string:to_lower(H) || H <- AllSupportedHeaders],
+
+    % get max age
+    MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
+
+    PreflightHeaders0 = maybe_add_credentials(Origin, Host, [
+        {"Access-Control-Allow-Origin", Origin},
+        {"Access-Control-Max-Age", MaxAge},
+        {"Access-Control-Allow-Methods",
+            string:join(SupportedMethods, ", ")}]),
+
+    case MochiReq:get_header_value("Access-Control-Request-Method") of
+    undefined ->
+        % If there is no Access-Control-Request-Method header
+        % or if parsing failed, do not set any additional headers
+        % and terminate this set of steps. The request is outside
+        % the scope of this specification.
+        % http://www.w3.org/TR/cors/#resource-preflight-requests
+        {ok, PreflightHeaders0};
+    Method ->
+        case lists:member(Method, SupportedMethods) of
+        true ->
+            % method ok, check headers
+            AccessHeaders = MochiReq:get_header_value(
+                    "Access-Control-Request-Headers"),
+            {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
+                undefined -> {"", []};
+                Headers ->
+                    % transform the header list into something we
+                    % can check; make sure everything is a list
+                    RH = [string:to_lower(H)
+                          || H <- split_headers(Headers)],
+                    {Headers, RH}
+            end,
+            % check if headers are supported
+            case ReqHeaders -- SupportedHeaders of
+            [] ->
+                PreflightHeaders = PreflightHeaders0 ++
+                                   [{"Access-Control-Allow-Headers",
+                                     FinalReqHeaders}],
+                {ok, PreflightHeaders};
+            _ ->
+                false
+            end;
+        false ->
+            % If method is not a case-sensitive match for any of
+            % the values in list of methods do not set any additional
+            % headers and terminate this set of steps.
+            % http://www.w3.org/TR/cors/#resource-preflight-requests
+            false
+        end
+    end.
+
+
+send_preflight_response(#httpd{mochi_req=MochiReq}=Req, Headers) ->
+    couch_httpd:log_request(Req, 204),
+    couch_stats_collector:increment({httpd_status_codes, 204}),
+    Headers1 = couch_httpd:http_1_0_keep_alive(MochiReq, Headers),
+    Headers2 = Headers1 ++ couch_httpd:server_header() ++
+               couch_httpd_auth:cookie_auth_header(Req, Headers1),
+    {ok, MochiReq:respond({204, Headers2, <<>>})}.
+
+
+% cors_headers/2
+
+cors_headers(MochiReq, RequestHeaders) ->
+    EnableCors = enable_cors(),
+    CorsHeaders = do_cors_headers(MochiReq, EnableCors),
+    maybe_apply_cors_headers(CorsHeaders, RequestHeaders).
+
+do_cors_headers(#httpd{mochi_req=MochiReq}, true) ->
+    Host = couch_httpd_vhost:host(MochiReq),
+    AcceptedOrigins = get_accepted_origins(Host),
+    case MochiReq:get_header_value("Origin") of
+    undefined ->
+        % If the Origin header is not present terminate
+        % this set of steps. The request is outside the scope
+        % of this specification.
+        % http://www.w3.org/TR/cors/#resource-processing-model
+        [];
+    Origin ->
+        handle_cors_headers(couch_util:to_list(Origin),
+                            Host, AcceptedOrigins)
+    end;
+do_cors_headers(_MochiReq, false) ->
+    [].
+
+maybe_apply_cors_headers([], RequestHeaders) ->
+    RequestHeaders;
+maybe_apply_cors_headers(CorsHeaders, RequestHeaders0) ->
+    % for each RequestHeader that isn't in SimpleHeaders,
+    % (or Content-Type with SIMPLE_CONTENT_TYPE_VALUES)
+    % append to Access-Control-Expose-Headers
+    % return: RequestHeaders ++ CorsHeaders ++ ACEH
+
+    RequestHeaders = [K || {K,_V} <- RequestHeaders0],
+    ExposedHeaders0 = reduce_headers(RequestHeaders, ?ALLOWED_HEADERS),
+
+    % we may not have moved Content-Type into ExposedHeaders above,
+    % so check whether the Content-Type value is in
+    % ?SIMPLE_CONTENT_TYPE_VALUES and, if it isn't, add Content-Type
+    % to ExposedHeaders
+    ContentType =  proplists:get_value("Content-Type", RequestHeaders0),
+    IncludeContentType = case ContentType of
+    undefined ->
+        false;
+    _ ->
+        ContentType_ = string:to_lower(ContentType),
+        lists:member(ContentType_, ?SIMPLE_CONTENT_TYPE_VALUES)
+    end,
+    ExposedHeaders = case IncludeContentType of
+    false ->
+        lists:umerge(ExposedHeaders0, ["Content-Type"]);
+    true ->
+        ExposedHeaders0
+    end,
+    CorsHeaders
+    ++ RequestHeaders0
+    ++ [{"Access-Control-Expose-Headers",
+            string:join(ExposedHeaders, ", ")}].
+
+
+reduce_headers(A, B) ->
+    reduce_headers0(A, B, []).
+
+reduce_headers0([], _B, Result) ->
+    lists:sort(Result);
+reduce_headers0([ElmA|RestA], B, Result) ->
+    R = case member_nocase(ElmA, B) of
+    false -> Result;
+    _Else -> [ElmA | Result]
+    end,
+    reduce_headers0(RestA, B, R).
+
+member_nocase(ElmA, List) ->
+    lists:any(fun(ElmB) ->
+        string:to_lower(ElmA) =:= string:to_lower(ElmB)
+    end, List).
+
+handle_cors_headers(_Origin, _Host, []) ->
+    [];
+handle_cors_headers(Origin, Host, AcceptedOrigins) ->
+    AcceptAll = lists:member("*", AcceptedOrigins),
+    case {AcceptAll, lists:member(Origin, AcceptedOrigins)} of
+    {true, _} ->
+        make_cors_header(Origin, Host);
+    {false, true}  ->
+        make_cors_header(Origin, Host);
+    _ ->
+        % If the value of the Origin header is not a
+        % case-sensitive match for any of the values
+        % in list of origins, do not set any additional
+        % headers and terminate this set of steps.
+        % http://www.w3.org/TR/cors/#resource-requests
+        []
+    end.
+
+
+make_cors_header(Origin, Host) ->
+    Headers = [{"Access-Control-Allow-Origin", Origin}],
+    maybe_add_credentials(Origin, Host, Headers).
+
+
+%% util
+
+maybe_add_credentials(Origin, Host, Headers) ->
+    maybe_add_credentials(Headers, allow_credentials(Origin, Host)).
+
+maybe_add_credentials(Headers, false) ->
+    Headers;
+maybe_add_credentials(Headers, true) ->
+    Headers ++ [{"Access-Control-Allow-Credentials", "true"}].
+
+
+allow_credentials("*", _Host) ->
+    false;
+allow_credentials(_Origin, Host) ->
+    Default = get_bool_config("cors", "credentials", false),
+    get_bool_config(cors_section(Host), "credentials", Default).
+
+
+
+cors_config(Host, Key, Default) ->
+    couch_config:get(cors_section(Host), Key,
+                     couch_config:get("cors", Key, Default)).
+
+cors_section(Host0) ->
+    {Host, _Port} = split_host_port(Host0),
+    "cors:" ++ Host.
+
+enable_cors() ->
+    get_bool_config("httpd", "enable_cors", false).
+
+get_bool_config(Section, Key, Default) ->
+    case couch_config:get(Section, Key) of
+    undefined ->
+        Default;
+    "true" ->
+        true;
+    "false" ->
+        false
+    end.
+
+get_accepted_origins(Host) ->
+    split_list(cors_config(Host, "origins", [])).
+
+split_list(S) ->
+    re:split(S, "\\s*,\\s*", [trim, {return, list}]).
+
+split_headers(H) ->
+    re:split(H, ",\\s*", [{return,list}, trim]).
+
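+% e.g. split_host_port("example.com:5984") =:= {"example.com", 5984}
+%      split_host_port("example.com")      =:= {"example.com", '*'}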
+split_host_port(HostAsString) ->
+    % split at the last colon ":"
+    Split = string:rchr(HostAsString, $:),
+    split_host_port(HostAsString, Split).
+
+split_host_port(HostAsString, 0) ->
+    % no colon
+    {HostAsString, '*'};
+split_host_port(HostAsString, N) ->
+    HostPart = string:substr(HostAsString, 1, N-1),
+    % parse out port
+    % is there a nicer way?
+    case (catch erlang:list_to_integer(string:substr(HostAsString,
+                    N+1, length(HostAsString)))) of
+    {'EXIT', _} ->
+        {HostAsString, '*'};
+    Port ->
+        {HostPart, Port}
+    end.


[22/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
working build of couchjs

couchjs is now built correctly. It can be built against the libraries
already installed on the system, or statically.

By default it sets the CFLAGS to /usr/include/js and the LIBS to -lmozjs185.

These values can be changed by setting the environment variables
JS_CFLAGS and JS_LIBS.
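
For example, to point the build at a custom SpiderMonkey install (a hedged
sketch; the paths are illustrative):

    JS_CFLAGS="-I/opt/js/include/js" JS_LIBS="-L/opt/js/lib -lmozjs185" \
        rebar compile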


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/8c6a64d0
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/8c6a64d0
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/8c6a64d0

Branch: refs/heads/import-rcouch
Commit: 8c6a64d0e39631be0da809e5cb72743adcc08bdd
Parents: 75f30db
Author: benoitc <be...@apache.org>
Authored: Mon Jan 6 23:16:06 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/couch_js/help.h |  82 ++++++
 c_src/couch_js/http.c | 696 ++++++++++++++++++++++++++++++++++++++++++++
 c_src/couch_js/http.h |  27 ++
 c_src/couch_js/main.c | 431 ++++++++++++++++++++++++++++
 c_src/couch_js/utf8.c | 288 +++++++++++++++++++
 c_src/couch_js/utf8.h |  19 ++
 c_src/couch_js/util.c | 288 +++++++++++++++++++
 c_src/couch_js/util.h |  35 +++
 priv/couch_js/help.h  |  82 ------
 priv/couch_js/http.c  | 698 ---------------------------------------------
 priv/couch_js/http.h  |  27 --
 priv/couch_js/main.c  |  21 --
 priv/couch_js/sm170.c | 398 --------------------------
 priv/couch_js/sm180.c | 407 --------------------------
 priv/couch_js/sm185.c | 431 ----------------------------
 priv/couch_js/utf8.c  | 294 -------------------
 priv/couch_js/utf8.h  |  19 --
 priv/couch_js/util.c  | 294 -------------------
 priv/couch_js/util.h  |  35 ---
 rebar.config.script   |  93 ++++++
 20 files changed, 1959 insertions(+), 2706 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/help.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/help.h b/c_src/couch_js/help.h
new file mode 100644
index 0000000..f4ddb24
--- /dev/null
+++ b/c_src/couch_js/help.h
@@ -0,0 +1,82 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCHJS_HELP_H
+#define COUCHJS_HELP_H
+
+#include "config.h"
+
+static const char VERSION_TEMPLATE[] =
+    "%s - %s\n"
+    "\n"
+    "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
+        "not use\n"
+    "this file except in compliance with the License. You may obtain a copy of"
+        "the\n"
+    "License at\n"
+    "\n"
+    "  http://www.apache.org/licenses/LICENSE-2.0\n"
+    "\n"
+    "Unless required by applicable law or agreed to in writing, software "
+        "distributed\n"
+    "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
+        "WARRANTIES OR\n"
+    "CONDITIONS OF ANY KIND, either express or implied. See the License "
+        "for the\n"
+    "specific language governing permissions and limitations under the "
+        "License.\n";
+
+static const char USAGE_TEMPLATE[] =
+    "Usage: %s [FILE]\n"
+    "\n"
+    "The %s command runs the %s JavaScript interpreter.\n"
+    "\n"
+    "The exit status is 0 for success or 1 for failure.\n"
+    "\n"
+    "Options:\n"
+    "\n"
+    "  -h          display a short help message and exit\n"
+    "  -V          display version information and exit\n"
+    "  -H          enable %s cURL bindings (only avaiable\n"
+    "              if package was built with cURL available)\n"
+    "  -S SIZE     specify that the runtime should allow at\n"
+    "              most SIZE bytes of memory to be allocated\n"
+    "  -u FILE     path to a .uri file containing the address\n"
+    "              (or addresses) of one or more servers\n"
+    "\n"
+    "Report bugs at <%s>.\n";
+
+#define BASENAME COUCHJS_NAME
+
+#define couch_version(basename)  \
+    fprintf(                     \
+            stdout,              \
+            VERSION_TEMPLATE,    \
+            basename,            \
+            PACKAGE_STRING)
+
+#define DISPLAY_VERSION couch_version(BASENAME)
+
+
+#define couch_usage(basename) \
+    fprintf(                                    \
+            stdout,                             \
+            USAGE_TEMPLATE,                     \
+            basename,                           \
+            basename,                           \
+            PACKAGE_NAME,                       \
+            basename,                           \
+            PACKAGE_BUGREPORT)
+
+#define DISPLAY_USAGE couch_usage(BASENAME)
+
+#endif // Included help.h

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/http.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/http.c b/c_src/couch_js/http.c
new file mode 100644
index 0000000..3baa59d
--- /dev/null
+++ b/c_src/couch_js/http.c
@@ -0,0 +1,696 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <jsapi.h>
+#include "utf8.h"
+#include "util.h"
+
+// Soft dependency on cURL bindings because they're
+// only used when running the JS tests from the
+// command line which is rare.
+#ifdef WITHOUT_CURL
+
+void
+http_check_enabled()
+{
+    fprintf(stderr, "HTTP API was disabled at compile time.\n");
+    exit(3);
+}
+
+
+JSBool
+http_ctor(JSContext* cx, JSObject* req)
+{
+    return JS_FALSE;
+}
+
+
+JSBool
+http_dtor(JSContext* cx, JSObject* req)
+{
+    return JS_FALSE;
+}
+
+
+JSBool
+http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
+{
+    return JS_FALSE;
+}
+
+
+JSBool
+http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
+{
+    return JS_FALSE;
+}
+
+
+JSBool
+http_send(JSContext* cx, JSObject* req, jsval body)
+{
+    return JS_FALSE;
+}
+
+
+int
+http_status(JSContext* cx, JSObject* req, jsval body)
+{
+    return -1;
+}
+
+JSBool
+http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
+{
+    return JS_FALSE;
+}
+
+
+#else
+#include <curl/curl.h>
+
+void
+http_check_enabled()
+{
+    return;
+}
+
+
+// Map some of the string function names to things which exist on Windows
+#ifdef XP_WIN
+#define strcasecmp _strcmpi
+#define strncasecmp _strnicmp
+#define snprintf _snprintf
+#endif
+
+
+typedef struct curl_slist CurlHeaders;
+
+
+typedef struct {
+    int             method;
+    char*           url;
+    CurlHeaders*    req_headers;
+    jsint           last_status;
+} HTTPData;
+
+
+char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", "OPTIONS", NULL};
+
+
+#define GET     0
+#define HEAD    1
+#define POST    2
+#define PUT     3
+#define DELETE  4
+#define COPY    5
+#define OPTIONS 6
+
+
+static JSBool
+go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t blen);
+
+
+static JSString*
+str_from_binary(JSContext* cx, char* data, size_t length);
+
+
+JSBool
+http_ctor(JSContext* cx, JSObject* req)
+{
+    HTTPData* http = NULL;
+    JSBool ret = JS_FALSE;
+
+    http = (HTTPData*) malloc(sizeof(HTTPData));
+    if(!http)
+    {
+        JS_ReportError(cx, "Failed to create CouchHTTP instance.");
+        goto error;
+    }
+
+    http->method = -1;
+    http->url = NULL;
+    http->req_headers = NULL;
+    http->last_status = -1;
+
+    if(!JS_SetPrivate(cx, req, http))
+    {
+        JS_ReportError(cx, "Failed to set private CouchHTTP data.");
+        goto error;
+    }
+
+    ret = JS_TRUE;
+    goto success;
+
+error:
+    if(http) free(http);
+
+success:
+    return ret;
+}
+
+
+void
+http_dtor(JSContext* cx, JSObject* obj)
+{
+    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+    if(http) {
+        if(http->url) free(http->url);
+        if(http->req_headers) curl_slist_free_all(http->req_headers);
+        free(http);
+    }
+}
+
+
+JSBool
+http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
+{
+    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
+    char* method = NULL;
+    int methid;
+    JSBool ret = JS_FALSE;
+
+    if(!http) {
+        JS_ReportError(cx, "Invalid CouchHTTP instance.");
+        goto done;
+    }
+
+    if(JSVAL_IS_VOID(mth)) {
+        JS_ReportError(cx, "You must specify a method.");
+        goto done;
+    }
+
+    method = enc_string(cx, mth, NULL);
+    if(!method) {
+        JS_ReportError(cx, "Failed to encode method.");
+        goto done;
+    }
+
+    for(methid = 0; METHODS[methid] != NULL; methid++) {
+        if(strcasecmp(METHODS[methid], method) == 0) break;
+    }
+
+    if(methid > OPTIONS) {
+        JS_ReportError(cx, "Invalid method specified.");
+        goto done;
+    }
+
+    http->method = methid;
+
+    if(JSVAL_IS_VOID(url)) {
+        JS_ReportError(cx, "You must specify a URL.");
+        goto done;
+    }
+
+    if(http->url != NULL) {
+        free(http->url);
+        http->url = NULL;
+    }
+
+    http->url = enc_string(cx, url, NULL);
+    if(http->url == NULL) {
+        JS_ReportError(cx, "Failed to encode URL.");
+        goto done;
+    }
+
+    if(JSVAL_IS_BOOLEAN(snc) && JSVAL_TO_BOOLEAN(snc)) {
+        JS_ReportError(cx, "Synchronous flag must be false.");
+        goto done;
+    }
+
+    if(http->req_headers) {
+        curl_slist_free_all(http->req_headers);
+        http->req_headers = NULL;
+    }
+
+    // Disable Expect: 100-continue
+    http->req_headers = curl_slist_append(http->req_headers, "Expect:");
+
+    ret = JS_TRUE;
+
+done:
+    if(method) free(method);
+    return ret;
+}
+
+
+JSBool
+http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
+{
+    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
+    char* keystr = NULL;
+    char* valstr = NULL;
+    char* hdrbuf = NULL;
+    size_t hdrlen = -1;
+    JSBool ret = JS_FALSE;
+
+    if(!http) {
+        JS_ReportError(cx, "Invalid CouchHTTP instance.");
+        goto done;
+    }
+
+    if(JSVAL_IS_VOID(name))
+    {
+        JS_ReportError(cx, "You must speciy a header name.");
+        goto done;
+    }
+
+    keystr = enc_string(cx, name, NULL);
+    if(!keystr)
+    {
+        JS_ReportError(cx, "Failed to encode header name.");
+        goto done;
+    }
+
+    if(JSVAL_IS_VOID(val))
+    {
+        JS_ReportError(cx, "You must specify a header value.");
+        goto done;
+    }
+
+    valstr = enc_string(cx, val, NULL);
+    if(!valstr)
+    {
+        JS_ReportError(cx, "Failed to encode header value.");
+        goto done;
+    }
+
+    hdrlen = strlen(keystr) + strlen(valstr) + 3;
+    hdrbuf = (char*) malloc(hdrlen * sizeof(char));
+    if(!hdrbuf) {
+        JS_ReportError(cx, "Failed to allocate header buffer.");
+        goto done;
+    }
+
+    snprintf(hdrbuf, hdrlen, "%s: %s", keystr, valstr);
+    http->req_headers = curl_slist_append(http->req_headers, hdrbuf);
+
+    ret = JS_TRUE;
+
+done:
+    if(keystr) free(keystr);
+    if(valstr) free(valstr);
+    if(hdrbuf) free(hdrbuf);
+    return ret;
+}
+
+JSBool
+http_send(JSContext* cx, JSObject* req, jsval body)
+{
+    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
+    char* bodystr = NULL;
+    size_t bodylen = 0;
+    JSBool ret = JS_FALSE;
+
+    if(!http) {
+        JS_ReportError(cx, "Invalid CouchHTTP instance.");
+        goto done;
+    }
+
+    if(!JSVAL_IS_VOID(body)) {
+        bodystr = enc_string(cx, body, &bodylen);
+        if(!bodystr) {
+            JS_ReportError(cx, "Failed to encode body.");
+            goto done;
+        }
+    }
+
+    ret = go(cx, req, http, bodystr, bodylen);
+
+done:
+    if(bodystr) free(bodystr);
+    return ret;
+}
+
+int
+http_status(JSContext* cx, JSObject* req)
+{
+    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
+
+    if(!http) {
+        JS_ReportError(cx, "Invalid CouchHTTP instance.");
+        return JS_FALSE;
+    }
+
+    return http->last_status;
+}
+
+JSBool
+http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
+{
+    FILE* uri_fp = NULL;
+    JSString* uri_str;
+
+    // Default is http://localhost:5984/ when no uri file is specified
+    if (!args->uri_file) {
+        uri_str = JS_InternString(cx, "http://localhost:5984/");
+        *uri_val = STRING_TO_JSVAL(uri_str);
+        return JS_TRUE;
+    }
+
+    // Else check to see if the base url is cached in a reserved slot
+    if (JS_GetReservedSlot(cx, req, 0, uri_val) && !JSVAL_IS_VOID(*uri_val)) {
+        return JS_TRUE;
+    }
+
+    // Read the first line of the couch.uri file.
+    if(!((uri_fp = fopen(args->uri_file, "r")) &&
+         (uri_str = couch_readline(cx, uri_fp)))) {
+        JS_ReportError(cx, "Failed to read couch.uri file.");
+        goto error;
+    }
+
+    fclose(uri_fp);
+    *uri_val = STRING_TO_JSVAL(uri_str);
+    JS_SetReservedSlot(cx, req, 0, *uri_val);
+    return JS_TRUE;
+
+error:
+    if(uri_fp) fclose(uri_fp);
+    return JS_FALSE;
+}
+
+
+// Curl Helpers
+
+typedef struct {
+    HTTPData*   http;
+    JSContext*  cx;
+    JSObject*   resp_headers;
+    char*       sendbuf;
+    size_t      sendlen;
+    size_t      sent;
+    int         sent_once;
+    char*       recvbuf;
+    size_t      recvlen;
+    size_t      read;
+} CurlState;
+
+/*
+ * I really hate doing this but this doesn't have to be
+ * uber awesome, it just has to work.
+ */
+CURL*       HTTP_HANDLE = NULL;
+char        ERRBUF[CURL_ERROR_SIZE];
+
+static size_t send_body(void *ptr, size_t size, size_t nmem, void *data);
+static int seek_body(void *ptr, curl_off_t offset, int origin);
+static size_t recv_body(void *ptr, size_t size, size_t nmem, void *data);
+static size_t recv_header(void *ptr, size_t size, size_t nmem, void *data);
+
+static JSBool
+go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen)
+{
+    CurlState state;
+    char* referer;
+    JSString* jsbody;
+    JSBool ret = JS_FALSE;
+    jsval tmp;
+
+    state.cx = cx;
+    state.http = http;
+
+    state.sendbuf = body;
+    state.sendlen = bodylen;
+    state.sent = 0;
+    state.sent_once = 0;
+
+    // resp_headers is only set by recv_header; initialize it so the
+    // NULL check after curl_easy_perform is well-defined.
+    state.resp_headers = NULL;
+    state.recvbuf = NULL;
+    state.recvlen = 0;
+    state.read = 0;
+
+    if(HTTP_HANDLE == NULL) {
+        HTTP_HANDLE = curl_easy_init();
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_READFUNCTION, send_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKFUNCTION,
+                                        (curl_seek_callback) seek_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_HEADERFUNCTION, recv_header);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEFUNCTION, recv_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOPROGRESS, 1);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_ERRORBUFFER, ERRBUF);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_COOKIEFILE, "");
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_USERAGENT,
+                                            "CouchHTTP Client - Relax");
+    }
+
+    if(!HTTP_HANDLE) {
+        JS_ReportError(cx, "Failed to initialize cURL handle.");
+        goto done;
+    }
+
+    if(!JS_GetReservedSlot(cx, obj, 0, &tmp)) {
+      JS_ReportError(cx, "Failed to readreserved slot.");
+      goto done;
+    }
+
+    if(!(referer = enc_string(cx, tmp, NULL))) {
+      JS_ReportError(cx, "Failed to encode referer.");
+      goto done;
+    }
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_REFERER, referer);
+    free(referer);
+
+    if(http->method < 0 || http->method > OPTIONS) {
+        JS_ReportError(cx, "INTERNAL: Unknown method.");
+        goto done;
+    }
+
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_CUSTOMREQUEST, METHODS[http->method]);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 0);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 1);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 0);
+
+    if(http->method == HEAD) {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 1);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
+    } else if(http->method == POST || http->method == PUT) {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 1);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
+    }
+
+    if(body && bodylen) {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, bodylen);
+    } else {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, 0);
+    }
+
+    // curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1);
+
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_URL, http->url);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_HTTPHEADER, http->req_headers);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_READDATA, &state);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKDATA, &state);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEHEADER, &state);
+    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEDATA, &state);
+
+    if(curl_easy_perform(HTTP_HANDLE) != 0) {
+        JS_ReportError(cx, "Failed to execute HTTP request: %s", ERRBUF);
+        goto done;
+    }
+
+    if(!state.resp_headers) {
+        JS_ReportError(cx, "Failed to recieve HTTP headers.");
+        goto done;
+    }
+
+    tmp = OBJECT_TO_JSVAL(state.resp_headers);
+    if(!JS_DefineProperty(
+        cx, obj,
+        "_headers",
+        tmp,
+        NULL, NULL,
+        JSPROP_READONLY
+    )) {
+        JS_ReportError(cx, "INTERNAL: Failed to set response headers.");
+        goto done;
+    }
+
+    if(state.recvbuf) {
+        state.recvbuf[state.read] = '\0';
+        jsbody = dec_string(cx, state.recvbuf, state.read+1);
+        if(!jsbody) {
+            // If we can't decode the body as UTF-8 we forcefully
+            // convert it to a string by just forcing each byte
+            // to a jschar.
+            jsbody = str_from_binary(cx, state.recvbuf, state.read);
+            if(!jsbody) {
+                if(!JS_IsExceptionPending(cx)) {
+                    JS_ReportError(cx, "INTERNAL: Failed to decode body.");
+                }
+                goto done;
+            }
+        }
+        tmp = STRING_TO_JSVAL(jsbody);
+    } else {
+        tmp = JS_GetEmptyStringValue(cx);
+    }
+
+    if(!JS_DefineProperty(
+        cx, obj,
+        "responseText",
+        tmp,
+        NULL, NULL,
+        JSPROP_READONLY
+    )) {
+        JS_ReportError(cx, "INTERNAL: Failed to set responseText.");
+        goto done;
+    }
+
+    ret = JS_TRUE;
+
+done:
+    if(state.recvbuf) JS_free(cx, state.recvbuf);
+    return ret;
+}
+
+static size_t
+send_body(void *ptr, size_t size, size_t nmem, void *data)
+{
+    CurlState* state = (CurlState*) data;
+    size_t length = size * nmem;
+    size_t towrite = state->sendlen - state->sent;
+
+    // Assume this is cURL trying to resend a request that
+    // failed.
+    if(towrite == 0 && state->sent_once == 0) {
+        state->sent_once = 1;
+        return 0;
+    } else if(towrite == 0) {
+        state->sent = 0;
+        state->sent_once = 0;
+        towrite = state->sendlen;
+    }
+
+    if(length < towrite) towrite = length;
+
+    memcpy(ptr, state->sendbuf + state->sent, towrite);
+    state->sent += towrite;
+
+    return towrite;
+}
+
+static int
+seek_body(void* ptr, curl_off_t offset, int origin)
+{
+    CurlState* state = (CurlState*) ptr;
+    if(origin != SEEK_SET) return -1;
+
+    state->sent = (size_t) offset;
+    return (int) state->sent;
+}
+
+static size_t
+recv_header(void *ptr, size_t size, size_t nmem, void *data)
+{
+    CurlState* state = (CurlState*) data;
+    char code[4];
+    char* header = (char*) ptr;
+    size_t length = size * nmem;
+    JSString* hdr = NULL;
+    jsuint hdrlen;
+    jsval hdrval;
+
+    if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0) {
+        if(length < 12) {
+            return CURLE_WRITE_ERROR;
+        }
+
+        memcpy(code, header+9, 3*sizeof(char));
+        code[3] = '\0';
+        state->http->last_status = atoi(code);
+
+        state->resp_headers = JS_NewArrayObject(state->cx, 0, NULL);
+        if(!state->resp_headers) {
+            return CURLE_WRITE_ERROR;
+        }
+
+        return length;
+    }
+
+    // We get a notice at the \r\n\r\n after headers.
+    if(length <= 2) {
+        return length;
+    }
+
+    // Append the new header to our array.
+    hdr = dec_string(state->cx, header, length);
+    if(!hdr) {
+        return CURLE_WRITE_ERROR;
+    }
+
+    if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen)) {
+        return CURLE_WRITE_ERROR;
+    }
+
+    hdrval = STRING_TO_JSVAL(hdr);
+    if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval)) {
+        return CURLE_WRITE_ERROR;
+    }
+
+    return length;
+}
+
+static size_t
+recv_body(void *ptr, size_t size, size_t nmem, void *data)
+{
+    CurlState* state = (CurlState*) data;
+    size_t length = size * nmem;
+    char* tmp = NULL;
+
+    if(!state->recvbuf) {
+        state->recvlen = 4096;
+        state->read = 0;
+        state->recvbuf = JS_malloc(state->cx, state->recvlen);
+    }
+
+    if(!state->recvbuf) {
+        return CURLE_WRITE_ERROR;
+    }
+
+    // +1 so we can add '\0' back up in the go function.
+    while(length+1 > state->recvlen - state->read) state->recvlen *= 2;
+    tmp = JS_realloc(state->cx, state->recvbuf, state->recvlen);
+    if(!tmp) return CURLE_WRITE_ERROR;
+    state->recvbuf = tmp;
+
+    memcpy(state->recvbuf + state->read, ptr, length);
+    state->read += length;
+    return length;
+}
+
+JSString*
+str_from_binary(JSContext* cx, char* data, size_t length)
+{
+    jschar* conv = (jschar*) JS_malloc(cx, length * sizeof(jschar));
+    JSString* ret = NULL;
+    size_t i;
+
+    if(!conv) return NULL;
+
+    for(i = 0; i < length; i++) {
+        conv[i] = (jschar) data[i];
+    }
+
+    ret = JS_NewUCString(cx, conv, length);
+    if(!ret) JS_free(cx, conv);
+
+    return ret;
+}
+
+#endif /* HAVE_CURL */
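
recv_body above accumulates the response by doubling a heap buffer, keeping
one byte spare so go() can NUL-terminate it. Below is a minimal standalone
sketch of the same growth strategy using plain malloc/realloc instead of the
JS allocator; the GrowBuf type and names are illustrative, not part of the
patch.

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        char*  buf;
        size_t cap;   /* bytes allocated */
        size_t len;   /* bytes used */
    } GrowBuf;

    /* cURL-style write callback: append size*nmemb bytes, doubling
     * capacity as needed and reserving one byte for a trailing '\0'. */
    static size_t
    growbuf_write(void* ptr, size_t size, size_t nmemb, void* data)
    {
        GrowBuf* gb = (GrowBuf*) data;
        size_t length = size * nmemb;
        char* tmp;

        if(gb->buf == NULL) {
            gb->cap = 4096;
            gb->len = 0;
            gb->buf = malloc(gb->cap);
            if(gb->buf == NULL) return 0;  /* short write aborts transfer */
        }

        while(length + 1 > gb->cap - gb->len) gb->cap *= 2;
        tmp = realloc(gb->buf, gb->cap);
        if(tmp == NULL) return 0;
        gb->buf = tmp;

        memcpy(gb->buf + gb->len, ptr, length);
        gb->len += length;
        return length;
    }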

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/http.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/http.h b/c_src/couch_js/http.h
new file mode 100644
index 0000000..63d45bd
--- /dev/null
+++ b/c_src/couch_js/http.h
@@ -0,0 +1,27 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCH_JS_HTTP_H
+#define COUCH_JS_HTTP_H
+
+#include "util.h"
+
+void http_check_enabled();
+JSBool http_ctor(JSContext* cx, JSObject* req);
+void http_dtor(JSContext* cx, JSObject* req);
+JSBool http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc);
+JSBool http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val);
+JSBool http_send(JSContext* cx, JSObject* req, jsval body);
+int http_status(JSContext* cx, JSObject* req);
+JSBool http_uri(JSContext* cx, JSObject *req, couch_args* args, jsval* uri);
+
+#endif
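
The functions declared here are called in a fixed order: construct, open,
optionally set headers, send, then read the status. A hypothetical C driver
is sketched below, assuming an initialized JSContext* and a CouchHTTP object
that already went through http_ctor; error paths are collapsed for brevity
and do_get is an illustrative name.

    #include <jsapi.h>
    #include "http.h"

    static JSBool
    do_get(JSContext* cx, JSObject* req, const char* url)
    {
        jsval mth = STRING_TO_JSVAL(JS_InternString(cx, "GET"));
        jsval loc = STRING_TO_JSVAL(JS_InternString(cx, url));
        jsval key = STRING_TO_JSVAL(JS_InternString(cx, "Accept"));
        jsval val = STRING_TO_JSVAL(JS_InternString(cx, "application/json"));

        if(!http_open(cx, req, mth, loc, JSVAL_FALSE)) return JS_FALSE;
        if(!http_set_hdr(cx, req, key, val)) return JS_FALSE;
        if(!http_send(cx, req, JSVAL_VOID)) return JS_FALSE;

        /* http_status returns the last HTTP status, or < 0 on error. */
        return http_status(cx, req) >= 0 ? JS_TRUE : JS_FALSE;
    }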

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/main.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/main.c b/c_src/couch_js/main.c
new file mode 100644
index 0000000..a0fc143
--- /dev/null
+++ b/c_src/couch_js/main.c
@@ -0,0 +1,431 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <jsapi.h>
+#include "http.h"
+#include "utf8.h"
+#include "util.h"
+
+
+#define SETUP_REQUEST(cx) \
+    JS_SetContextThread(cx); \
+    JS_BeginRequest(cx);
+#define FINISH_REQUEST(cx) \
+    JS_EndRequest(cx); \
+    JS_ClearContextThread(cx);
+
+
+static JSClass global_class = {
+    "GlobalClass",
+    JSCLASS_GLOBAL_FLAGS,
+    JS_PropertyStub,
+    JS_PropertyStub,
+    JS_PropertyStub,
+    JS_StrictPropertyStub,
+    JS_EnumerateStub,
+    JS_ResolveStub,
+    JS_ConvertStub,
+    JS_FinalizeStub,
+    JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+
+static JSBool
+req_ctor(JSContext* cx, uintN argc, jsval* vp)
+{
+    JSBool ret;
+    JSObject* obj = JS_NewObjectForConstructor(cx, vp);
+    if(!obj) {
+        JS_ReportError(cx, "Failed to create CouchHTTP instance.\n");
+        return JS_FALSE;
+    }
+    ret = http_ctor(cx, obj);
+    JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(obj));
+    return ret;
+}
+
+
+static void
+req_dtor(JSContext* cx, JSObject* obj)
+{
+    http_dtor(cx, obj);
+}
+
+
+static JSBool
+req_open(JSContext* cx, uintN argc, jsval* vp)
+{
+    JSObject* obj = JS_THIS_OBJECT(cx, vp);
+    jsval* argv = JS_ARGV(cx, vp);
+    JSBool ret = JS_FALSE;
+
+    if(argc == 2) {
+        ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE);
+    } else if(argc == 3) {
+        ret = http_open(cx, obj, argv[0], argv[1], argv[2]);
+    } else {
+        JS_ReportError(cx, "Invalid call to CouchHTTP.open");
+    }
+
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return ret;
+}
+
+
+static JSBool
+req_set_hdr(JSContext* cx, uintN argc, jsval* vp)
+{
+    JSObject* obj = JS_THIS_OBJECT(cx, vp);
+    jsval* argv = JS_ARGV(cx, vp);
+    JSBool ret = JS_FALSE;
+
+    if(argc == 2) {
+        ret = http_set_hdr(cx, obj, argv[0], argv[1]);
+    } else {
+        JS_ReportError(cx, "Invalid call to CouchHTTP.set_header");
+    }
+
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return ret;
+}
+
+
+static JSBool
+req_send(JSContext* cx, uintN argc, jsval* vp)
+{
+    JSObject* obj = JS_THIS_OBJECT(cx, vp);
+    jsval* argv = JS_ARGV(cx, vp);
+    JSBool ret = JS_FALSE;
+
+    if(argc == 1) {
+        ret = http_send(cx, obj, argv[0]);
+    } else {
+        JS_ReportError(cx, "Invalid call to CouchHTTP.send");
+    }
+
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return ret;
+}
+
+
+static JSBool
+req_status(JSContext* cx, JSObject* obj, jsid pid, jsval* vp)
+{
+    int status = http_status(cx, obj);
+    if(status < 0)
+        return JS_FALSE;
+
+    JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status));
+    return JS_TRUE;
+}
+
+
+static JSBool
+base_url(JSContext *cx, JSObject* obj, jsid pid, jsval* vp)
+{
+    couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
+    return http_uri(cx, obj, args, &JS_RVAL(cx, vp));
+}
+
+
+static JSBool
+evalcx(JSContext *cx, uintN argc, jsval* vp)
+{
+    jsval* argv = JS_ARGV(cx, vp);
+    JSString* str;
+    JSObject* sandbox;
+    JSObject* global;
+    JSContext* subcx;
+    JSCrossCompartmentCall* call = NULL;
+    const jschar* src;
+    size_t srclen;
+    jsval rval;
+    JSBool ret = JS_FALSE;
+    char *name = NULL;
+
+    sandbox = NULL;
+    if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
+        return JS_FALSE;
+    }
+
+    subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
+    if(!subcx) {
+        JS_ReportOutOfMemory(cx);
+        return JS_FALSE;
+    }
+
+    SETUP_REQUEST(subcx);
+
+    src = JS_GetStringCharsAndLength(cx, str, &srclen);
+
+    // Re-use the compartment associated with the main context,
+    // rather than creating a new compartment.
+    global = JS_GetGlobalObject(cx);
+    if(global == NULL) goto done;
+    call = JS_EnterCrossCompartmentCall(subcx, global);
+
+    if(!sandbox) {
+        sandbox = JS_NewGlobalObject(subcx, &global_class);
+        if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
+            goto done;
+        }
+    }
+
+    if(argc > 2) {
+        name = enc_string(cx, argv[2], NULL);
+    }
+
+    if(srclen == 0) {
+        JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox));
+    } else {
+        JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, &rval);
+        JS_SET_RVAL(cx, vp, rval);
+    }
+
+    ret = JS_TRUE;
+
+done:
+    if(name) JS_free(cx, name);
+    if(call) JS_LeaveCrossCompartmentCall(call);
+    FINISH_REQUEST(subcx);
+    JS_DestroyContext(subcx);
+    return ret;
+}
+
+
+static JSBool
+gc(JSContext* cx, uintN argc, jsval* vp)
+{
+    JS_GC(cx);
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return JS_TRUE;
+}
+
+
+static JSBool
+print(JSContext* cx, uintN argc, jsval* vp)
+{
+    jsval* argv = JS_ARGV(cx, vp);
+    couch_print(cx, argc, argv);
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return JS_TRUE;
+}
+
+
+static JSBool
+quit(JSContext* cx, uintN argc, jsval* vp)
+{
+    jsval* argv = JS_ARGV(cx, vp);
+    int exit_code = 0;
+    JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
+    exit(exit_code);
+}
+
+
+static JSBool
+readline(JSContext* cx, uintN argc, jsval* vp)
+{
+    JSString* line;
+
+    /* GC Occasionally */
+    JS_MaybeGC(cx);
+
+    line = couch_readline(cx, stdin);
+    if(line == NULL) return JS_FALSE;
+
+    JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line));
+    return JS_TRUE;
+}
+
+
+static JSBool
+seal(JSContext* cx, uintN argc, jsval* vp)
+{
+    jsval* argv = JS_ARGV(cx, vp);
+    JSObject *target;
+    JSBool deep = JS_FALSE;
+    JSBool ret;
+
+    if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
+        return JS_FALSE;
+
+    if(!target) {
+        JS_SET_RVAL(cx, vp, JSVAL_VOID);
+        return JS_TRUE;
+    }
+
+
+    ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
+    return ret;
+}
+
+
+JSClass CouchHTTPClass = {
+    "CouchHTTP",
+    JSCLASS_HAS_PRIVATE
+        | JSCLASS_CONSTRUCT_PROTOTYPE
+        | JSCLASS_HAS_RESERVED_SLOTS(2),
+    JS_PropertyStub,
+    JS_PropertyStub,
+    JS_PropertyStub,
+    JS_StrictPropertyStub,
+    JS_EnumerateStub,
+    JS_ResolveStub,
+    JS_ConvertStub,
+    req_dtor,
+    JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+
+JSPropertySpec CouchHTTPProperties[] = {
+    {"status", 0, JSPROP_READONLY, req_status, NULL},
+    {"base_url", 0, JSPROP_READONLY | JSPROP_SHARED, base_url, NULL},
+    {0, 0, 0, 0, 0}
+};
+
+
+JSFunctionSpec CouchHTTPFunctions[] = {
+    JS_FS("_open", req_open, 3, 0),
+    JS_FS("_setRequestHeader", req_set_hdr, 2, 0),
+    JS_FS("_send", req_send, 1, 0),
+    JS_FS_END
+};
+
+
+static JSFunctionSpec global_functions[] = {
+    JS_FS("evalcx", evalcx, 0, 0),
+    JS_FS("gc", gc, 0, 0),
+    JS_FS("print", print, 0, 0),
+    JS_FS("quit", quit, 0, 0),
+    JS_FS("readline", readline, 0, 0),
+    JS_FS("seal", seal, 0, 0),
+    JS_FS_END
+};
+
+
+int
+main(int argc, const char* argv[])
+{
+    JSRuntime* rt = NULL;
+    JSContext* cx = NULL;
+    JSObject* global = NULL;
+    JSCrossCompartmentCall *call = NULL;
+    JSObject* klass = NULL;
+    JSObject* script;
+    JSString* scriptsrc;
+    const jschar* schars;
+    size_t slen;
+    jsval sroot;
+    jsval result;
+    int i;
+
+    couch_args* args = couch_parse_args(argc, argv);
+
+    rt = JS_NewRuntime(args->stack_size);
+    if(rt == NULL)
+        return 1;
+
+    cx = JS_NewContext(rt, 8L * 1024L);
+    if(cx == NULL)
+        return 1;
+
+    JS_SetErrorReporter(cx, couch_error);
+    JS_ToggleOptions(cx, JSOPTION_XML);
+    JS_SetOptions(cx, JSOPTION_METHODJIT);
+#ifdef JSOPTION_TYPE_INFERENCE
+    JS_SetOptions(cx, JSOPTION_TYPE_INFERENCE);
+#endif
+    JS_SetContextPrivate(cx, args);
+
+    SETUP_REQUEST(cx);
+
+    global = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL);
+    if(global == NULL)
+        return 1;
+
+    call = JS_EnterCrossCompartmentCall(cx, global);
+
+    JS_SetGlobalObject(cx, global);
+
+    if(!JS_InitStandardClasses(cx, global))
+        return 1;
+
+    if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
+        return 1;
+
+    if(args->use_http) {
+        http_check_enabled();
+
+        klass = JS_InitClass(
+            cx, global,
+            NULL,
+            &CouchHTTPClass, req_ctor,
+            0,
+            CouchHTTPProperties, CouchHTTPFunctions,
+            NULL, NULL
+        );
+
+        if(!klass)
+        {
+            fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
+            exit(2);
+        }
+    }
+
+    for(i = 0 ; args->scripts[i] ; i++) {
+        // Convert script source to jschars.
+        scriptsrc = couch_readfile(cx, args->scripts[i]);
+        if(!scriptsrc)
+            return 1;
+
+        schars = JS_GetStringCharsAndLength(cx, scriptsrc, &slen);
+
+        // Root it so GC doesn't collect it.
+        sroot = STRING_TO_JSVAL(scriptsrc);
+        if(JS_AddValueRoot(cx, &sroot) != JS_TRUE) {
+            fprintf(stderr, "Internal root error.\n");
+            return 1;
+        }
+
+        // Compile and run
+        script = JS_CompileUCScript(cx, global, schars, slen,
+                                    args->scripts[i], 1);
+        if(!script) {
+            fprintf(stderr, "Failed to compile script.\n");
+            return 1;
+        }
+
+        if(JS_ExecuteScript(cx, global, script, &result) != JS_TRUE) {
+            fprintf(stderr, "Failed to execute script.\n");
+            return 1;
+        }
+
+        // Unroot the source string; leftover roots trigger a warning at shutdown.
+        JS_RemoveValueRoot(cx, &sroot);
+
+        // Give the GC a chance to run.
+        JS_MaybeGC(cx);
+    }
+
+    JS_LeaveCrossCompartmentCall(call);
+    FINISH_REQUEST(cx);
+    JS_DestroyContext(cx);
+    JS_DestroyRuntime(rt);
+    JS_ShutDown();
+
+    return 0;
+}
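
The script loop in main() roots the source string before compiling so a GC
triggered during compilation cannot collect the jschars it is reading, then
unroots it before moving on. A condensed sketch of that discipline, assuming
cx and global are already set up as above; run_source is an illustrative
name.

    static int
    run_source(JSContext* cx, JSObject* global, JSString* src, const char* name)
    {
        const jschar* chars;
        size_t len;
        jsval root, result;
        JSObject* script;
        int ok = 0;

        chars = JS_GetStringCharsAndLength(cx, src, &len);
        if(chars == NULL) return 0;

        root = STRING_TO_JSVAL(src);            /* pin src across the compile */
        if(JS_AddValueRoot(cx, &root) != JS_TRUE) return 0;

        script = JS_CompileUCScript(cx, global, chars, len, name, 1);
        if(script && JS_ExecuteScript(cx, global, script, &result) == JS_TRUE)
            ok = 1;

        JS_RemoveValueRoot(cx, &root);          /* unroot before returning */
        return ok;
    }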

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/utf8.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/utf8.c b/c_src/couch_js/utf8.c
new file mode 100644
index 0000000..2b3735a
--- /dev/null
+++ b/c_src/couch_js/utf8.c
@@ -0,0 +1,288 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <jsapi.h>
+
+static int
+enc_char(uint8 *utf8Buffer, uint32 ucs4Char)
+{
+    int utf8Length = 1;
+
+    if (ucs4Char < 0x80)
+    {
+        *utf8Buffer = (uint8)ucs4Char;
+    }
+    else
+    {
+        int i;
+        uint32 a = ucs4Char >> 11;
+        utf8Length = 2;
+        while(a)
+        {
+            a >>= 5;
+            utf8Length++;
+        }
+        i = utf8Length;
+        while(--i)
+        {
+            utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
+            ucs4Char >>= 6;
+        }
+        *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
+    }
+
+    return utf8Length;
+}
+
+static JSBool
+enc_charbuf(const jschar* src, size_t srclen, char* dst, size_t* dstlenp)
+{
+    size_t i;
+    size_t utf8Len;
+    size_t dstlen = *dstlenp;
+    size_t origDstlen = dstlen;
+    jschar c;
+    jschar c2;
+    uint32 v;
+    uint8 utf8buf[6];
+
+    if(!dst)
+    {
+        dstlen = origDstlen = (size_t) -1;
+    }
+
+    while(srclen)
+    {
+        c = *src++;
+        srclen--;
+
+        if(c <= 0xD7FF || c >= 0xE000)
+        {
+            v = (uint32) c;
+        }
+        else if(c >= 0xD800 && c <= 0xDBFF)
+        {
+            if(srclen < 1) goto buffer_too_small;
+            c2 = *src++;
+            srclen--;
+            if(c2 >= 0xDC00 && c2 <= 0xDFFF)
+            {
+                v = (uint32) (((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000);
+            }
+            else
+            {
+                // Invalid second half of surrogate pair
+                v = (uint32) 0xFFFD;
+            }
+        }
+        else
+        {
+            // Invalid first half of surrogate pair
+            v = (uint32) 0xFFFD;
+        }
+
+        if(v < 0x0080)
+        {
+            /* no encoding necessary - performance hack */
+            if(!dstlen) goto buffer_too_small;
+            if(dst) *dst++ = (char) v;
+            utf8Len = 1;
+        }
+        else
+        {
+            utf8Len = enc_char(utf8buf, v);
+            if(utf8Len > dstlen) goto buffer_too_small;
+            if(dst)
+            {
+                for (i = 0; i < utf8Len; i++)
+                {
+                    *dst++ = (char) utf8buf[i];
+                }
+            }
+        }
+        dstlen -= utf8Len;
+    }
+
+    *dstlenp = (origDstlen - dstlen);
+    return JS_TRUE;
+
+buffer_too_small:
+    *dstlenp = (origDstlen - dstlen);
+    return JS_FALSE;
+}
+
+char*
+enc_string(JSContext* cx, jsval arg, size_t* buflen)
+{
+    JSString* str = NULL;
+    const jschar* src = NULL;
+    char* bytes = NULL;
+    size_t srclen = 0;
+    size_t byteslen = 0;
+
+    str = JS_ValueToString(cx, arg);
+    if(!str) goto error;
+
+    src = JS_GetStringCharsAndLength(cx, str, &srclen);
+
+    if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error;
+
+    bytes = JS_malloc(cx, byteslen + 1);
+    if(!bytes) goto error;
+    bytes[byteslen] = 0;
+
+    if(!enc_charbuf(src, srclen, bytes, &byteslen)) goto error;
+
+    if(buflen) *buflen = byteslen;
+    goto success;
+
+error:
+    if(bytes != NULL) JS_free(cx, bytes);
+    bytes = NULL;
+
+success:
+    return bytes;
+}
+
+static uint32
+dec_char(const uint8 *utf8Buffer, int utf8Length)
+{
+    uint32 ucs4Char;
+    uint32 minucs4Char;
+
+    /* from Unicode 3.1, non-shortest form is illegal */
+    static const uint32 minucs4Table[] = {
+        0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000
+    };
+
+    if (utf8Length == 1)
+    {
+        ucs4Char = *utf8Buffer;
+    }
+    else
+    {
+        ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
+        minucs4Char = minucs4Table[utf8Length-2];
+        while(--utf8Length)
+        {
+            ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
+        }
+        if(ucs4Char < minucs4Char || ucs4Char == 0xFFFE || ucs4Char == 0xFFFF)
+        {
+            ucs4Char = 0xFFFD;
+        }
+    }
+
+    return ucs4Char;
+}
+
+static JSBool
+dec_charbuf(const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
+{
+    uint32 v;
+    size_t offset = 0;
+    size_t j;
+    size_t n;
+    size_t dstlen = *dstlenp;
+    size_t origDstlen = dstlen;
+
+    if(!dst) dstlen = origDstlen = (size_t) -1;
+
+    while(srclen)
+    {
+        v = (uint8) *src;
+        n = 1;
+
+        if(v & 0x80)
+        {
+            while(v & (0x80 >> n))
+            {
+                n++;
+            }
+
+            if(n > srclen) goto buffer_too_small;
+            if(n == 1 || n > 6) goto bad_character;
+
+            for(j = 1; j < n; j++)
+            {
+                if((src[j] & 0xC0) != 0x80) goto bad_character;
+            }
+
+            v = dec_char((const uint8 *) src, n);
+            if(v >= 0x10000)
+            {
+                v -= 0x10000;
+
+                if(v > 0xFFFFF || dstlen < 2)
+                {
+                    *dstlenp = (origDstlen - dstlen);
+                    return JS_FALSE;
+                }
+
+                if(dstlen < 2) goto buffer_too_small;
+
+                if(dst)
+                {
+                    *dst++ = (jschar)((v >> 10) + 0xD800);
+                    v = (jschar)((v & 0x3FF) + 0xDC00);
+                }
+                dstlen--;
+            }
+        }
+
+        if(!dstlen) goto buffer_too_small;
+        if(dst) *dst++ = (jschar) v;
+
+        dstlen--;
+        offset += n;
+        src += n;
+        srclen -= n;
+    }
+
+    *dstlenp = (origDstlen - dstlen);
+    return JS_TRUE;
+
+bad_character:
+    *dstlenp = (origDstlen - dstlen);
+    return JS_FALSE;
+
+buffer_too_small:
+    *dstlenp = (origDstlen - dstlen);
+    return JS_FALSE;
+}
+
+JSString*
+dec_string(JSContext* cx, const char* bytes, size_t byteslen)
+{
+    JSString* str = NULL;
+    jschar* chars = NULL;
+    size_t charslen;
+
+    if(!dec_charbuf(bytes, byteslen, NULL, &charslen)) goto error;
+
+    chars = JS_malloc(cx, (charslen + 1) * sizeof(jschar));
+    if(!chars) return NULL;
+    chars[charslen] = 0;
+
+    if(!dec_charbuf(bytes, byteslen, chars, &charslen)) goto error;
+
+    str = JS_NewUCString(cx, chars, charslen - 1);
+    if(!str) goto error;
+
+    goto success;
+
+error:
+    if(chars != NULL) JS_free(cx, chars);
+    str = NULL;
+
+success:
+    return str;
+}
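
The surrogate handling in enc_charbuf and dec_charbuf is the standard UTF-16
pairing: a supplementary code point is split across a high surrogate
(D800-DBFF) and a low surrogate (DC00-DFFF). A self-checking example of the
arithmetic, using U+10437 as the test value:

    #include <assert.h>

    static void
    surrogate_roundtrip(void)
    {
        unsigned c = 0xD801, c2 = 0xDC37;   /* UTF-16 pair for U+10437 */

        /* combine, as enc_charbuf does */
        unsigned v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
        assert(v == 0x10437);

        /* split, as dec_charbuf does after subtracting 0x10000 */
        unsigned w = v - 0x10000;
        assert((w >> 10)   + 0xD800 == c);
        assert((w & 0x3FF) + 0xDC00 == c2);
    }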

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/utf8.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/utf8.h b/c_src/couch_js/utf8.h
new file mode 100644
index 0000000..c5cb86c
--- /dev/null
+++ b/c_src/couch_js/utf8.h
@@ -0,0 +1,19 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCH_JS_UTF_8_H
+#define COUCH_JS_UTF_8_H
+
+char* enc_string(JSContext* cx, jsval arg, size_t* buflen);
+JSString* dec_string(JSContext* cx, const char* buf, size_t buflen);
+
+#endif
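
Both converters behind this header follow a two-pass contract: called with a
NULL destination they only compute the required length, so callers can size a
buffer before converting. A toy converter with the same contract, to make the
idiom concrete; upcase and upcase_string are illustrative, not part of the
patch.

    #include <ctype.h>
    #include <stdlib.h>

    /* NULL dst means "just count"; mirrors enc_charbuf/dec_charbuf. */
    static int
    upcase(const char* src, size_t srclen, char* dst, size_t* dstlenp)
    {
        size_t i;
        for(i = 0; i < srclen; i++)
            if(dst) dst[i] = (char) toupper((unsigned char) src[i]);
        *dstlenp = srclen;
        return 1;
    }

    static char*
    upcase_string(const char* src, size_t srclen)
    {
        size_t outlen = 0;
        char* out;

        if(!upcase(src, srclen, NULL, &outlen)) return NULL;  /* measure  */
        out = malloc(outlen + 1);                             /* allocate */
        if(out == NULL) return NULL;
        if(!upcase(src, srclen, out, &outlen)) {              /* convert  */
            free(out);
            return NULL;
        }
        out[outlen] = '\0';
        return out;
    }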

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/util.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.c b/c_src/couch_js/util.c
new file mode 100644
index 0000000..9b46ceb
--- /dev/null
+++ b/c_src/couch_js/util.c
@@ -0,0 +1,288 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <jsapi.h>
+
+#include "help.h"
+#include "util.h"
+#include "utf8.h"
+
+
+size_t
+slurp_file(const char* file, char** outbuf_p)
+{
+    FILE* fp;
+    char fbuf[16384];
+    char *buf = NULL;
+    char* tmp;
+    size_t nread = 0;
+    size_t buflen = 0;
+
+    if(strcmp(file, "-") == 0) {
+        fp = stdin;
+    } else {
+        fp = fopen(file, "r");
+        if(fp == NULL) {
+            fprintf(stderr, "Failed to read file: %s\n", file);
+            exit(3);
+        }
+    }
+
+    while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
+        if(buf == NULL) {
+            buf = (char*) malloc(nread + 1);
+            if(buf == NULL) {
+                fprintf(stderr, "Out of memory.\n");
+                exit(3);
+            }
+            memcpy(buf, fbuf, nread);
+        } else {
+            tmp = (char*) malloc(buflen + nread + 1);
+            if(tmp == NULL) {
+                fprintf(stderr, "Out of memory.\n");
+                exit(3);
+            }
+            memcpy(tmp, buf, buflen);
+            memcpy(tmp+buflen, fbuf, nread);
+            free(buf);
+            buf = tmp;
+        }
+        buflen += nread;
+        buf[buflen] = '\0';
+    }
+    *outbuf_p = buf;
+    return buflen + 1;
+}
+
+couch_args*
+couch_parse_args(int argc, const char* argv[])
+{
+    couch_args* args;
+    int i = 1;
+
+    args = (couch_args*) malloc(sizeof(couch_args));
+    if(args == NULL)
+        return NULL;
+
+    memset(args, '\0', sizeof(couch_args));
+    args->stack_size = 64L * 1024L * 1024L;
+
+    while(i < argc) {
+        if(strcmp("-h", argv[i]) == 0) {
+            DISPLAY_USAGE;
+            exit(0);
+        } else if(strcmp("-V", argv[i]) == 0) {
+            DISPLAY_VERSION;
+            exit(0);
+        } else if(strcmp("-H", argv[i]) == 0) {
+            args->use_http = 1;
+        } else if(strcmp("-S", argv[i]) == 0) {
+            args->stack_size = atoi(argv[++i]);
+            if(args->stack_size <= 0) {
+                fprintf(stderr, "Invalid stack size.\n");
+                exit(2);
+            }
+        } else if(strcmp("-u", argv[i]) == 0) {
+            args->uri_file = argv[++i];
+        } else if(strcmp("--", argv[i]) == 0) {
+            i++;
+            break;
+        } else {
+            break;
+        }
+        i++;
+    }
+
+    if(i >= argc) {
+        DISPLAY_USAGE;
+        exit(3);
+    }
+    args->scripts = argv + i;
+
+    return args;
+}
+
+
+int
+couch_fgets(char* buf, int size, FILE* fp)
+{
+    int n, i, c;
+
+    if(size <= 0) return -1;
+    n = size - 1;
+
+    for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
+        buf[i] = c;
+        if(c == '\n') {
+            i++;
+            break;
+        }
+    }
+
+    buf[i] = '\0';
+    return i;
+}
+
+
+JSString*
+couch_readline(JSContext* cx, FILE* fp)
+{
+    JSString* str;
+    char* bytes = NULL;
+    char* tmp = NULL;
+    size_t used = 0;
+    size_t byteslen = 256;
+    size_t readlen = 0;
+
+    bytes = JS_malloc(cx, byteslen);
+    if(bytes == NULL) return NULL;
+
+    while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
+        used += readlen;
+
+        if(bytes[used-1] == '\n') {
+            bytes[used-1] = '\0';
+            break;
+        }
+
+        // Double our buffer and read more.
+        byteslen *= 2;
+        tmp = JS_realloc(cx, bytes, byteslen);
+        if(!tmp) {
+            JS_free(cx, bytes);
+            return NULL;
+        }
+
+        bytes = tmp;
+    }
+
+    // Treat empty strings specially
+    if(used == 0) {
+        JS_free(cx, bytes);
+        return JSVAL_TO_STRING(JS_GetEmptyStringValue(cx));
+    }
+
+    // Shrink the buffer to the actual data size
+    tmp = JS_realloc(cx, bytes, used);
+    if(!tmp) {
+        JS_free(cx, bytes);
+        return NULL;
+    }
+    bytes = tmp;
+    byteslen = used;
+
+    str = dec_string(cx, bytes, byteslen);
+    JS_free(cx, bytes);
+    return str;
+}
+
+
+JSString*
+couch_readfile(JSContext* cx, const char* filename)
+{
+    JSString *string;
+    size_t byteslen;
+    char *bytes;
+
+    if((byteslen = slurp_file(filename, &bytes))) {
+        string = dec_string(cx, bytes, byteslen);
+
+        free(bytes);
+        return string;
+    }
+    return NULL;
+}
+
+
+void
+couch_print(JSContext* cx, uintN argc, jsval* argv)
+{
+    char *bytes = NULL;
+    FILE *stream = stdout;
+
+    if (argc) {
+        if (argc > 1 && argv[1] == JSVAL_TRUE) {
+          stream = stderr;
+        }
+        bytes = enc_string(cx, argv[0], NULL);
+        if(!bytes) return;
+        fprintf(stream, "%s", bytes);
+        JS_free(cx, bytes);
+    }
+
+    fputc('\n', stream);
+    fflush(stream);
+}
+
+
+void
+couch_error(JSContext* cx, const char* mesg, JSErrorReport* report)
+{
+    jsval v, replace;
+    char* bytes;
+    JSObject* regexp, *stack;
+    jsval re_args[2];
+
+    if(!report || !JSREPORT_IS_WARNING(report->flags))
+    {
+        fprintf(stderr, "%s\n", mesg);
+
+        // Print a stack trace, if available.
+        if (JSREPORT_IS_EXCEPTION(report->flags) &&
+            JS_GetPendingException(cx, &v))
+        {
+            // Clear the exception before calling into any JS methods, or the
+            // result is infinite, recursive error report generation.
+            JS_ClearPendingException(cx);
+
+            // Use JS regexp to indent the stack trace.
+            // If the regexp can't be created, don't JS_ReportError since it is
+            // probably not productive to wind up here again.
+            if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
+               (regexp = JS_NewRegExpObjectNoStatics(
+                   cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
+            {
+                // Set up the arguments to ``String.replace()``
+                re_args[0] = OBJECT_TO_JSVAL(regexp);
+                re_args[1] = STRING_TO_JSVAL(JS_InternString(cx, "\t"));
+
+                // Perform the replacement
+                if(JS_ValueToObject(cx, v, &stack) &&
+                   JS_GetProperty(cx, stack, "replace", &replace) &&
+                   JS_CallFunctionValue(cx, stack, replace, 2, re_args, &v))
+                {
+                    // Print the result, skipping output if encoding failed.
+                    bytes = enc_string(cx, v, NULL);
+                    if(bytes) {
+                        fprintf(stderr, "Stacktrace:\n%s", bytes);
+                        JS_free(cx, bytes);
+                    }
+                }
+            }
+        }
+    }
+}
+
+
+JSBool
+couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs)
+{
+    JSFunctionSpec* f;
+    for(f = funcs; f->name != NULL; f++) {
+        if(!JS_DefineFunction(cx, obj, f->name, f->call, f->nargs, f->flags)) {
+            fprintf(stderr, "Failed to create function: %s\n", f->name);
+            return JS_FALSE;
+        }
+    }
+    return JS_TRUE;
+}
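
couch_load_funcs walks a JS_FS_END-terminated table and fails fast on the
first registration error. The same sentinel-terminated-table pattern, reduced
to plain C so it compiles in isolation; FuncSpec, twice, square, and lookup
are illustrative names.

    #include <string.h>

    typedef struct {
        const char* name;
        int (*fn)(int);
    } FuncSpec;

    static int twice(int x)  { return 2 * x; }
    static int square(int x) { return x * x; }

    static const FuncSpec table[] = {
        {"twice",  twice},
        {"square", square},
        {NULL, NULL}                /* sentinel, like JS_FS_END */
    };

    /* Walk the table until the sentinel, as couch_load_funcs does. */
    static int (*lookup(const char* name))(int)
    {
        const FuncSpec* f;
        for(f = table; f->name != NULL; f++)
            if(strcmp(f->name, name) == 0)
                return f->fn;
        return NULL;
    }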

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/c_src/couch_js/util.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.h b/c_src/couch_js/util.h
new file mode 100644
index 0000000..65a2a06
--- /dev/null
+++ b/c_src/couch_js/util.h
@@ -0,0 +1,35 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCHJS_UTIL_H
+#define COUCHJS_UTIL_H
+
+#include <jsapi.h>
+
+typedef struct {
+    int          use_http;
+    int          stack_size;
+    const char** scripts;
+    const char*  uri_file;
+    JSString*    uri;
+} couch_args;
+
+couch_args* couch_parse_args(int argc, const char* argv[]);
+int couch_fgets(char* buf, int size, FILE* fp);
+JSString* couch_readline(JSContext* cx, FILE* fp);
+JSString* couch_readfile(JSContext* cx, const char* filename);
+void couch_print(JSContext* cx, uintN argc, jsval* argv);
+void couch_error(JSContext* cx, const char* mesg, JSErrorReport* report);
+JSBool couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs);
+
+
+#endif // Included util.h
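
couch_parse_args consumes flags left to right, pulls values for flags like -S
and -u out of the next argv slot, and stops at "--" or the first non-flag so
the remainder becomes the script list. The skeleton of that loop as a
standalone program, with illustrative flags:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char* argv[])
    {
        int i = 1, verbose = 0;
        long size = 0;

        while(i < argc) {
            if(strcmp("-v", argv[i]) == 0) {
                verbose = 1;
            } else if(strcmp("-S", argv[i]) == 0 && i + 1 < argc) {
                size = atol(argv[++i]);     /* consume the value slot */
            } else if(strcmp("--", argv[i]) == 0) {
                i++;                        /* operands follow -- */
                break;
            } else {
                break;                      /* first non-flag ends parsing */
            }
            i++;
        }

        printf("verbose=%d size=%ld operands start at argv[%d]\n",
               verbose, size, i);
        return 0;
    }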

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/help.h
----------------------------------------------------------------------
diff --git a/priv/couch_js/help.h b/priv/couch_js/help.h
deleted file mode 100644
index f4ddb24..0000000
--- a/priv/couch_js/help.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_HELP_H
-#define COUCHJS_HELP_H
-
-#include "config.h"
-
-static const char VERSION_TEMPLATE[] =
-    "%s - %s\n"
-    "\n"
-    "Licensed under the Apache License, Version 2.0 (the \"License\"); you may "
-        "not use\n"
-    "this file except in compliance with the License. You may obtain a copy of"
-        "the\n"
-    "License at\n"
-    "\n"
-    "  http://www.apache.org/licenses/LICENSE-2.0\n"
-    "\n"
-    "Unless required by applicable law or agreed to in writing, software "
-        "distributed\n"
-    "under the License is distributed on an \"AS IS\" BASIS, WITHOUT "
-        "WARRANTIES OR\n"
-    "CONDITIONS OF ANY KIND, either express or implied. See the License "
-        "for the\n"
-    "specific language governing permissions and limitations under the "
-        "License.\n";
-
-static const char USAGE_TEMPLATE[] =
-    "Usage: %s [FILE]\n"
-    "\n"
-    "The %s command runs the %s JavaScript interpreter.\n"
-    "\n"
-    "The exit status is 0 for success or 1 for failure.\n"
-    "\n"
-    "Options:\n"
-    "\n"
-    "  -h          display a short help message and exit\n"
-    "  -V          display version information and exit\n"
-    "  -H          enable %s cURL bindings (only avaiable\n"
-    "              if package was built with cURL available)\n"
-    "  -S SIZE     specify that the runtime should allow at\n"
-    "              most SIZE bytes of memory to be allocated\n"
-    "  -u FILE     path to a .uri file containing the address\n"
-    "              (or addresses) of one or more servers\n"
-    "\n"
-    "Report bugs at <%s>.\n";
-
-#define BASENAME COUCHJS_NAME
-
-#define couch_version(basename)  \
-    fprintf(                     \
-            stdout,              \
-            VERSION_TEMPLATE,    \
-            basename,            \
-            PACKAGE_STRING)
-
-#define DISPLAY_VERSION couch_version(BASENAME)
-
-
-#define couch_usage(basename) \
-    fprintf(                                    \
-            stdout,                             \
-            USAGE_TEMPLATE,                     \
-            basename,                           \
-            basename,                           \
-            PACKAGE_NAME,                       \
-            basename,                           \
-            PACKAGE_BUGREPORT)
-
-#define DISPLAY_USAGE couch_usage(BASENAME)
-
-#endif // Included help.h
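
help.h builds its -h and -V output from single format strings assembled by
compile-time literal concatenation, substituting the basename several times.
The trick stripped to its essentials; the couchjs/CouchDB values below are
placeholders.

    #include <stdio.h>

    static const char USAGE_TEMPLATE[] =
        "Usage: %s [FILE]\n"
        "\n"
        "The %s command runs the %s JavaScript interpreter.\n";

    int main(void)
    {
        /* adjacent string literals concatenate at compile time */
        fprintf(stdout, USAGE_TEMPLATE, "couchjs", "couchjs", "CouchDB");
        return 0;
    }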

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/http.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/http.c b/priv/couch_js/http.c
deleted file mode 100644
index c66b5da..0000000
--- a/priv/couch_js/http.c
+++ /dev/null
@@ -1,698 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <jsapi.h>
-#include "config.h"
-#include "utf8.h"
-#include "util.h"
-
-// Soft dependency on cURL bindings because they're
-// only used when running the JS tests from the
-// command line which is rare.
-#ifndef HAVE_CURL
-
-void
-http_check_enabled()
-{
-    fprintf(stderr, "HTTP API was disabled at compile time.\n");
-    exit(3);
-}
-
-
-JSBool
-http_ctor(JSContext* cx, JSObject* req)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_dtor(JSContext* cx, JSObject* req)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
-{
-    return JS_FALSE;
-}
-
-
-JSBool
-http_send(JSContext* cx, JSObject* req, jsval body)
-{
-    return JS_FALSE;
-}
-
-
-int
-http_status(JSContext* cx, JSObject* req, jsval body)
-{
-    return -1;
-}
-
-JSBool
-http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
-{
-    return JS_FALSE;
-}
-
-
-#else
-#include <curl/curl.h>
-#include <unistd.h>
-
-
-void
-http_check_enabled()
-{
-    return;
-}
-
-
-// Map some of the string function names to things which exist on Windows
-#ifdef XP_WIN
-#define strcasecmp _strcmpi
-#define strncasecmp _strnicmp
-#define snprintf _snprintf
-#endif
-
-
-typedef struct curl_slist CurlHeaders;
-
-
-typedef struct {
-    int             method;
-    char*           url;
-    CurlHeaders*    req_headers;
-    jsint           last_status;
-} HTTPData;
-
-
-char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", "OPTIONS", NULL};
-
-
-#define GET     0
-#define HEAD    1
-#define POST    2
-#define PUT     3
-#define DELETE  4
-#define COPY    5
-#define OPTIONS 6
-
-
-static JSBool
-go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t blen);
-
-
-static JSString*
-str_from_binary(JSContext* cx, char* data, size_t length);
-
-
-JSBool
-http_ctor(JSContext* cx, JSObject* req)
-{
-    HTTPData* http = NULL;
-    JSBool ret = JS_FALSE;
-
-    http = (HTTPData*) malloc(sizeof(HTTPData));
-    if(!http)
-    {
-        JS_ReportError(cx, "Failed to create CouchHTTP instance.");
-        goto error;
-    }
-
-    http->method = -1;
-    http->url = NULL;
-    http->req_headers = NULL;
-    http->last_status = -1;
-
-    if(!JS_SetPrivate(cx, req, http))
-    {
-        JS_ReportError(cx, "Failed to set private CouchHTTP data.");
-        goto error;
-    }
-
-    ret = JS_TRUE;
-    goto success;
-
-error:
-    if(http) free(http);
-
-success:
-    return ret;
-}
-
-
-void
-http_dtor(JSContext* cx, JSObject* obj)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
-    if(http) { 
-        if(http->url) free(http->url);
-        if(http->req_headers) curl_slist_free_all(http->req_headers);
-        free(http);
-    }
-}
-
-
-JSBool
-http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* method = NULL;
-    int methid;
-    JSBool ret = JS_FALSE;
-
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(JSVAL_IS_VOID(mth)) {
-        JS_ReportError(cx, "You must specify a method.");
-        goto done;
-    }
-
-    method = enc_string(cx, mth, NULL);
-    if(!method) {
-        JS_ReportError(cx, "Failed to encode method.");
-        goto done;
-    }
-    
-    for(methid = 0; METHODS[methid] != NULL; methid++) {
-        if(strcasecmp(METHODS[methid], method) == 0) break;
-    }
-    
-    if(methid > OPTIONS) {
-        JS_ReportError(cx, "Invalid method specified.");
-        goto done;
-    }
-
-    http->method = methid;
-
-    if(JSVAL_IS_VOID(url)) {
-        JS_ReportError(cx, "You must specify a URL.");
-        goto done;
-    }
-
-    if(http->url != NULL) {
-        free(http->url);
-        http->url = NULL;
-    }
-
-    http->url = enc_string(cx, url, NULL);
-    if(http->url == NULL) {
-        JS_ReportError(cx, "Failed to encode URL.");
-        goto done;
-    }
-    
-    if(JSVAL_IS_BOOLEAN(snc) && JSVAL_TO_BOOLEAN(snc)) {
-        JS_ReportError(cx, "Synchronous flag must be false.");
-        goto done;
-    }
-    
-    if(http->req_headers) {
-        curl_slist_free_all(http->req_headers);
-        http->req_headers = NULL;
-    }
-    
-    // Disable Expect: 100-continue
-    http->req_headers = curl_slist_append(http->req_headers, "Expect:");
-
-    ret = JS_TRUE;
-
-done:
-    if(method) free(method);
-    return ret;
-}
-
-
-JSBool
-http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* keystr = NULL;
-    char* valstr = NULL;
-    char* hdrbuf = NULL;
-    size_t hdrlen = -1;
-    JSBool ret = JS_FALSE;
-
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(JSVAL_IS_VOID(name))
-    {
-        JS_ReportError(cx, "You must speciy a header name.");
-        goto done;
-    }
-
-    keystr = enc_string(cx, name, NULL);
-    if(!keystr)
-    {
-        JS_ReportError(cx, "Failed to encode header name.");
-        goto done;
-    }
-    
-    if(JSVAL_IS_VOID(val))
-    {
-        JS_ReportError(cx, "You must specify a header value.");
-        goto done;
-    }
-    
-    valstr = enc_string(cx, val, NULL);
-    if(!valstr)
-    {
-        JS_ReportError(cx, "Failed to encode header value.");
-        goto done;
-    }
-    
-    hdrlen = strlen(keystr) + strlen(valstr) + 3;
-    hdrbuf = (char*) malloc(hdrlen * sizeof(char));
-    if(!hdrbuf) {
-        JS_ReportError(cx, "Failed to allocate header buffer.");
-        goto done;
-    }
-    
-    snprintf(hdrbuf, hdrlen, "%s: %s", keystr, valstr);
-    http->req_headers = curl_slist_append(http->req_headers, hdrbuf);
-
-    ret = JS_TRUE;
-
-done:
-    if(keystr) free(keystr);
-    if(valstr) free(valstr);
-    if(hdrbuf) free(hdrbuf);
-    return ret;
-}
-
-JSBool
-http_send(JSContext* cx, JSObject* req, jsval body)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    char* bodystr = NULL;
-    size_t bodylen = 0;
-    JSBool ret = JS_FALSE;
-    
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        goto done;
-    }
-
-    if(!JSVAL_IS_VOID(body)) {
-        bodystr = enc_string(cx, body, &bodylen);
-        if(!bodystr) {
-            JS_ReportError(cx, "Failed to encode body.");
-            goto done;
-        }
-    }
-
-    ret = go(cx, req, http, bodystr, bodylen);
-
-done:
-    if(bodystr) free(bodystr);
-    return ret;
-}
-
-int
-http_status(JSContext* cx, JSObject* req)
-{
-    HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req);
-    
-    if(!http) {
-        JS_ReportError(cx, "Invalid CouchHTTP instance.");
-        return JS_FALSE;
-    }
-
-    return http->last_status;
-}
-
-JSBool
-http_uri(JSContext* cx, JSObject* req, couch_args* args, jsval* uri_val)
-{
-    FILE* uri_fp = NULL;
-    JSString* uri_str;
-
-    // Default is http://localhost:5984/ when no uri file is specified
-    if (!args->uri_file) {
-        uri_str = JS_InternString(cx, "http://localhost:5984/");
-        *uri_val = STRING_TO_JSVAL(uri_str);
-        return JS_TRUE;
-    }
-
-    // Else check to see if the base url is cached in a reserved slot
-    if (JS_GetReservedSlot(cx, req, 0, uri_val) && !JSVAL_IS_VOID(*uri_val)) {
-        return JS_TRUE;
-    }
-
-    // Read the first line of the couch.uri file.
-    if(!((uri_fp = fopen(args->uri_file, "r")) &&
-         (uri_str = couch_readline(cx, uri_fp)))) {
-        JS_ReportError(cx, "Failed to read couch.uri file.");
-        goto error;
-    }
-
-    fclose(uri_fp);
-    *uri_val = STRING_TO_JSVAL(uri_str);
-    JS_SetReservedSlot(cx, req, 0, *uri_val);
-    return JS_TRUE;
-
-error:
-    if(uri_fp) fclose(uri_fp);
-    return JS_FALSE;
-}
-
-
-// Curl Helpers
-
-typedef struct {
-    HTTPData*   http;
-    JSContext*  cx;
-    JSObject*   resp_headers;
-    char*       sendbuf;
-    size_t      sendlen;
-    size_t      sent;
-    int         sent_once;
-    char*       recvbuf;
-    size_t      recvlen;
-    size_t      read;
-} CurlState;
-
-/*
- * I really hate doing this but this doesn't have to be
- * uber awesome, it just has to work.
- */
-CURL*       HTTP_HANDLE = NULL;
-char        ERRBUF[CURL_ERROR_SIZE];
-
-static size_t send_body(void *ptr, size_t size, size_t nmem, void *data);
-static int seek_body(void *ptr, curl_off_t offset, int origin);
-static size_t recv_body(void *ptr, size_t size, size_t nmem, void *data);
-static size_t recv_header(void *ptr, size_t size, size_t nmem, void *data);
-
-static JSBool
-go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen)
-{
-    CurlState state;
-    char* referer;
-    JSString* jsbody;
-    JSBool ret = JS_FALSE;
-    jsval tmp;
-    
-    state.cx = cx;
-    state.http = http;
-    
-    state.sendbuf = body;
-    state.sendlen = bodylen;
-    state.sent = 0;
-    state.sent_once = 0;
-
-    state.recvbuf = NULL;
-    state.recvlen = 0;
-    state.read = 0;
-
-    if(HTTP_HANDLE == NULL) {
-        HTTP_HANDLE = curl_easy_init();
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_READFUNCTION, send_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKFUNCTION,
-                                        (curl_seek_callback) seek_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_HEADERFUNCTION, recv_header);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEFUNCTION, recv_body);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOPROGRESS, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_ERRORBUFFER, ERRBUF);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_COOKIEFILE, "");
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_USERAGENT,
-                                            "CouchHTTP Client - Relax");
-    }
-    
-    if(!HTTP_HANDLE) {
-        JS_ReportError(cx, "Failed to initialize cURL handle.");
-        goto done;
-    }
-
-    if(!JS_GetReservedSlot(cx, obj, 0, &tmp)) {
-      JS_ReportError(cx, "Failed to readreserved slot.");
-      goto done;
-    }
-
-    if(!(referer = enc_string(cx, tmp, NULL))) {
-      JS_ReportError(cx, "Failed to encode referer.");
-      goto done;
-    }
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_REFERER, referer);
-    free(referer);
-
-    if(http->method < 0 || http->method > OPTIONS) {
-        JS_ReportError(cx, "INTERNAL: Unknown method.");
-        goto done;
-    }
-
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_CUSTOMREQUEST, METHODS[http->method]);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 0);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 1);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 0);
-    
-    if(http->method == HEAD) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
-    } else if(http->method == POST || http->method == PUT) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 1);
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
-    }
-    
-    if(body && bodylen) {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, bodylen);        
-    } else {
-        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, 0);
-    }
-
-    // curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1);
-
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_URL, http->url);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_HTTPHEADER, http->req_headers);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_READDATA, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKDATA, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEHEADER, &state);
-    curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEDATA, &state);
-
-    if(curl_easy_perform(HTTP_HANDLE) != 0) {
-        JS_ReportError(cx, "Failed to execute HTTP request: %s", ERRBUF);
-        goto done;
-    }
-    
-    if(!state.resp_headers) {
-        JS_ReportError(cx, "Failed to recieve HTTP headers.");
-        goto done;
-    }
-
-    tmp = OBJECT_TO_JSVAL(state.resp_headers);
-    if(!JS_DefineProperty(
-        cx, obj,
-        "_headers",
-        tmp,
-        NULL, NULL,
-        JSPROP_READONLY
-    )) {
-        JS_ReportError(cx, "INTERNAL: Failed to set response headers.");
-        goto done;
-    }
-    
-    if(state.recvbuf) {
-        state.recvbuf[state.read] = '\0';
-        jsbody = dec_string(cx, state.recvbuf, state.read+1);
-        if(!jsbody) {
-            // If we can't decode the body as UTF-8 we forcefully
-            // convert it to a string by just forcing each byte
-            // to a jschar.
-            jsbody = str_from_binary(cx, state.recvbuf, state.read);
-            if(!jsbody) {
-                if(!JS_IsExceptionPending(cx)) {
-                    JS_ReportError(cx, "INTERNAL: Failed to decode body.");
-                }
-                goto done;
-            }
-        }
-        tmp = STRING_TO_JSVAL(jsbody);
-    } else {
-        tmp = JS_GetEmptyStringValue(cx);
-    }
-    
-    if(!JS_DefineProperty(
-        cx, obj,
-        "responseText",
-        tmp,
-        NULL, NULL,
-        JSPROP_READONLY
-    )) {
-        JS_ReportError(cx, "INTERNAL: Failed to set responseText.");
-        goto done;
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(state.recvbuf) JS_free(cx, state.recvbuf);
-    return ret;
-}
-
-static size_t
-send_body(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    size_t length = size * nmem;
-    size_t towrite = state->sendlen - state->sent;
-
-    // Assume this is cURL trying to resend a request that
-    // failed.
-    if(towrite == 0 && state->sent_once == 0) {
-        state->sent_once = 1;
-        return 0;
-    } else if(towrite == 0) {
-        state->sent = 0;
-        state->sent_once = 0;
-        towrite = state->sendlen;
-    }
-
-    if(length < towrite) towrite = length;
-
-    memcpy(ptr, state->sendbuf + state->sent, towrite);
-    state->sent += towrite;
-
-    return towrite;
-}
-
-static int
-seek_body(void* ptr, curl_off_t offset, int origin)
-{
-    CurlState* state = (CurlState*) ptr;
-    if(origin != SEEK_SET) return -1;
-
-    state->sent = (size_t) offset;
-    return (int) state->sent;
-}
-
-static size_t
-recv_header(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    char code[4];
-    char* header = (char*) ptr;
-    size_t length = size * nmem;
-    JSString* hdr = NULL;
-    jsuint hdrlen;
-    jsval hdrval;
-    
-    if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0) {
-        if(length < 12) {
-            return CURLE_WRITE_ERROR;
-        }
-
-        memcpy(code, header+9, 3*sizeof(char));
-        code[3] = '\0';
-        state->http->last_status = atoi(code);
-
-        state->resp_headers = JS_NewArrayObject(state->cx, 0, NULL);
-        if(!state->resp_headers) {
-            return CURLE_WRITE_ERROR;
-        }
-
-        return length;
-    }
-
-    // We get a notice at the \r\n\r\n after headers.
-    if(length <= 2) {
-        return length;
-    }
-
-    // Append the new header to our array.
-    hdr = dec_string(state->cx, header, length);
-    if(!hdr) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen)) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    hdrval = STRING_TO_JSVAL(hdr);
-    if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval)) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    return length;
-}
-
-static size_t
-recv_body(void *ptr, size_t size, size_t nmem, void *data)
-{
-    CurlState* state = (CurlState*) data;
-    size_t length = size * nmem;
-    char* tmp = NULL;
-    
-    if(!state->recvbuf) {
-        state->recvlen = 4096;
-        state->read = 0;
-        state->recvbuf = JS_malloc(state->cx, state->recvlen);
-    }
-    
-    if(!state->recvbuf) {
-        return CURLE_WRITE_ERROR;
-    }
-
-    // +1 so we can add '\0' back up in the go function.
-    while(length+1 > state->recvlen - state->read) state->recvlen *= 2;
-    tmp = JS_realloc(state->cx, state->recvbuf, state->recvlen);
-    if(!tmp) return CURLE_WRITE_ERROR;
-    state->recvbuf = tmp;
-   
-    memcpy(state->recvbuf + state->read, ptr, length);
-    state->read += length;
-    return length;
-}
-
-JSString*
-str_from_binary(JSContext* cx, char* data, size_t length)
-{
-    jschar* conv = (jschar*) JS_malloc(cx, length * sizeof(jschar));
-    JSString* ret = NULL;
-    size_t i;
-
-    if(!conv) return NULL;
-
-    for(i = 0; i < length; i++) {
-        conv[i] = (jschar) data[i];
-    }
-
-    ret = JS_NewUCString(cx, conv, length);
-    if(!ret) JS_free(cx, conv);
-
-    return ret;
-}
-
-#endif /* HAVE_CURL */
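
The HTTP_HANDLE/ERRBUF globals above implement a lazily initialized,
process-wide cURL handle: invariant options are set once at first use,
per-request options on every call. A minimal sketch of that shape; get_handle
and fetch are illustrative names.

    #include <curl/curl.h>

    static CURL* HANDLE = NULL;
    static char  ERRS[CURL_ERROR_SIZE];

    static CURL*
    get_handle(void)
    {
        if(HANDLE == NULL) {
            HANDLE = curl_easy_init();
            if(HANDLE) {
                /* invariant options, set once */
                curl_easy_setopt(HANDLE, CURLOPT_NOPROGRESS, 1L);
                curl_easy_setopt(HANDLE, CURLOPT_ERRORBUFFER, ERRS);
            }
        }
        return HANDLE;
    }

    static int
    fetch(const char* url)
    {
        CURL* h = get_handle();
        if(h == NULL) return -1;
        curl_easy_setopt(h, CURLOPT_URL, url);   /* per-request option */
        return curl_easy_perform(h) == CURLE_OK ? 0 : -1;
    }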

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/http.h
----------------------------------------------------------------------
diff --git a/priv/couch_js/http.h b/priv/couch_js/http.h
deleted file mode 100644
index 63d45bd..0000000
--- a/priv/couch_js/http.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCH_JS_HTTP_H
-#define COUCH_JS_HTTP_H
-
-#include "util.h"
-
-void http_check_enabled();
-JSBool http_ctor(JSContext* cx, JSObject* req);
-void http_dtor(JSContext* cx, JSObject* req);
-JSBool http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc);
-JSBool http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val);
-JSBool http_send(JSContext* cx, JSObject* req, jsval body);
-int http_status(JSContext* cx, JSObject* req);
-JSBool http_uri(JSContext* cx, JSObject *req, couch_args* args, jsval* uri);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/main.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/main.c b/priv/couch_js/main.c
deleted file mode 100644
index 209bb02..0000000
--- a/priv/couch_js/main.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include "config.h"
-
-#if defined(SM185)
-#include "sm185.c"
-#elif defined(SM180)
-#include "sm180.c"
-#else
-#include "sm170.c"
-#endif
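
The deleted main.c selected one engine-specific translation unit per
SpiderMonkey release at compile time; the rebar build replaces this with the
single c_src/couch_js/main.c added above. The dispatch idiom itself, reduced
to a printable example:

    #include <stdio.h>

    #if defined(SM185)
    #  define ENGINE "SpiderMonkey 1.8.5"
    #elif defined(SM180)
    #  define ENGINE "SpiderMonkey 1.8.0"
    #else
    #  define ENGINE "SpiderMonkey 1.7.0"
    #endif

    int main(void)
    {
        printf("compiled against %s\n", ENGINE);
        return 0;
    }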

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/sm170.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/sm170.c b/priv/couch_js/sm170.c
deleted file mode 100644
index 51e4f4d..0000000
--- a/priv/couch_js/sm170.c
+++ /dev/null
@@ -1,398 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <jsapi.h>
-#include "http.h"
-#include "utf8.h"
-#include "util.h"
-
-
-#ifdef JS_THREADSAFE
-#define SETUP_REQUEST(cx) \
-    JS_SetContextThread(cx); \
-    JS_BeginRequest(cx);
-#define FINISH_REQUEST(cx) \
-    JS_EndRequest(cx); \
-    JS_ClearContextThread(cx);
-#else
-#define SETUP_REQUEST(cx)
-#define FINISH_REQUEST(cx)
-#endif
-
-
-static JSBool
-req_ctor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    return http_ctor(cx, obj);
-}
-
-
-static void 
-req_dtor(JSContext* cx, JSObject* obj)
-{
-    http_dtor(cx, obj);
-}
-
-
-static JSBool
-req_open(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE);
-    } else if(argc == 3) {
-        ret = http_open(cx, obj, argv[0], argv[1], argv[2]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.open");
-    }
-
-    *rval = JSVAL_VOID;
-    return ret;
-}
-
-
-static JSBool
-req_set_hdr(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JSBool ret = JS_FALSE;
-    if(argc == 2) {
-        ret = http_set_hdr(cx, obj, argv[0], argv[1]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.set_header");
-    }
-
-    *rval = JSVAL_VOID;
-    return ret;
-}
-
-
-static JSBool
-req_send(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JSBool ret = JS_FALSE;
-    if(argc == 1) {
-        ret = http_send(cx, obj, argv[0]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.send");
-    }
-
-    *rval = JSVAL_VOID;
-    return ret;
-}
-
-
-static JSBool
-req_status(JSContext* cx, JSObject* obj, jsval idval, jsval* rval)
-{
-    int status = http_status(cx, obj);
-    if(status < 0)
-        return JS_FALSE;
-
-    if(INT_FITS_IN_JSVAL(status)) {
-        *rval = INT_TO_JSVAL(status);
-        return JS_TRUE;
-    } else {
-        JS_ReportError(cx, "Invalid HTTP status.");
-        return JS_FALSE;
-    }
-}
-
-
-static JSBool
-base_url(JSContext *cx, JSObject* obj, jsval idval, jsval* rval)
-{
-    couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
-    return http_uri(cx, obj, args, rval);
-}
-
-
-static JSBool
-evalcx(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
-{
-    JSString *str;
-    JSObject *sandbox;
-    JSContext *subcx;
-    const jschar *src;
-    size_t srclen;
-    JSBool ret = JS_FALSE;
-    char *name = NULL;
-
-    sandbox = NULL;
-    if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
-        return JS_FALSE;
-    }
-
-    subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
-    if(!subcx) {
-        JS_ReportOutOfMemory(cx);
-        return JS_FALSE;
-    }
-
-    SETUP_REQUEST(subcx);
-
-    src = JS_GetStringChars(str);
-    srclen = JS_GetStringLength(str);
-
-    if(!sandbox) {
-        sandbox = JS_NewObject(subcx, NULL, NULL, NULL);
-        if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
-            goto done;
-        }
-    }
-
-    if(argc > 2) {
-      name = enc_string(cx, argv[2], NULL);
-    }
-
-    if(srclen == 0) {
-        *rval = OBJECT_TO_JSVAL(sandbox);
-    } else {
-        JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, rval);
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(name) JS_free(cx, name);
-    FINISH_REQUEST(subcx);
-    JS_DestroyContext(subcx);
-    return ret;
-}
-
-
-static JSBool
-gc(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JS_GC(cx);
-    *rval = JSVAL_VOID;
-    return JS_TRUE;
-}
-
-
-static JSBool
-print(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    couch_print(cx, argc, argv);
-    *rval = JSVAL_VOID;
-    return JS_TRUE;
-}
-
-
-static JSBool
-quit(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    int exit_code = 0;
-    JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
-    exit(exit_code);
-}
-
-
-static JSBool
-readline(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JSString* line;
-
-    /* GC Occasionally */
-    JS_MaybeGC(cx);
-
-    line = couch_readline(cx, stdin);
-    if(line == NULL) return JS_FALSE;
-
-    *rval = STRING_TO_JSVAL(line);
-    return JS_TRUE;
-}
-
-
-static JSBool
-seal(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    JSObject *target;
-    JSBool deep = JS_FALSE;
-
-    if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
-        return JS_FALSE;
-
-    if(!target) {
-        *rval = JSVAL_VOID;
-        return JS_TRUE;
-    }
-
-    if(JS_SealObject(cx, target, deep) != JS_TRUE)
-        return JS_FALSE;
-
-    *rval = JSVAL_VOID;
-    return JS_TRUE;
-}
-
-
-JSClass CouchHTTPClass = {
-    "CouchHTTP",
-    JSCLASS_HAS_PRIVATE
-        | JSCLASS_CONSTRUCT_PROTOTYPE
-        | JSCLASS_HAS_RESERVED_SLOTS(2),
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    req_dtor,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-JSPropertySpec CouchHTTPProperties[] = {
-    {"status", 0, JSPROP_READONLY, req_status, NULL},
-    {"base_url", 0, JSPROP_READONLY | JSPROP_SHARED, base_url, NULL},
-    {0, 0, 0, 0, 0}
-};
-
-
-JSFunctionSpec CouchHTTPFunctions[] = {
-    {"_open", req_open, 3, 0, 0},
-    {"_setRequestHeader", req_set_hdr, 2, 0, 0},
-    {"_send", req_send, 1, 0, 0},
-    {0, 0, 0, 0, 0}
-};
-
-
-static JSClass global_class = {
-    "GlobalClass",
-    JSCLASS_GLOBAL_FLAGS,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    JS_FinalizeStub,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-static JSFunctionSpec global_functions[] = {
-    {"evalcx", evalcx, 0, 0, 0},
-    {"gc", gc, 0, 0, 0},
-    {"print", print, 0, 0, 0},
-    {"quit", quit, 0, 0, 0},
-    {"readline", readline, 0, 0, 0},
-    {"seal", seal, 0, 0, 0},
-    {0, 0, 0, 0, 0}
-};
-
-
-int
-main(int argc, const char* argv[])
-{
-    JSRuntime* rt = NULL;
-    JSContext* cx = NULL;
-    JSObject* global = NULL;
-    JSObject* klass = NULL;
-    JSScript* script;
-    JSString* scriptsrc;
-    jschar* schars;
-    size_t slen;
-    jsval sroot;
-    jsval result;
-    int i;
-
-    couch_args* args = couch_parse_args(argc, argv);
-
-    rt = JS_NewRuntime(args->stack_size);
-    if(rt == NULL)
-        return 1;
-
-    cx = JS_NewContext(rt, 8L * 1024L);
-    if(cx == NULL)
-        return 1;
-
-    JS_SetErrorReporter(cx, couch_error);
-    JS_ToggleOptions(cx, JSOPTION_XML);
-    JS_SetContextPrivate(cx, args);
-    
-    SETUP_REQUEST(cx);
-
-    global = JS_NewObject(cx, &global_class, NULL, NULL);
-    if(global == NULL)
-        return 1;
-
-    JS_SetGlobalObject(cx, global);
-    
-    if(!JS_InitStandardClasses(cx, global))
-        return 1;
-
-    if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
-        return 1;
- 
-    if(args->use_http) {
-        http_check_enabled();
-
-        klass = JS_InitClass(
-            cx, global,
-            NULL,
-            &CouchHTTPClass, req_ctor,
-            0,
-            CouchHTTPProperties, CouchHTTPFunctions,
-            NULL, NULL
-        );
-
-        if(!klass)
-        {
-            fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
-            exit(2);
-        }
-    } 
-
-    for (i = 0 ; args->scripts[i] ; i++) {
-        // Convert script source to jschars.
-        scriptsrc = couch_readfile(cx, args->scripts[i]);
-        if(!scriptsrc)
-            return 1;
-
-        schars = JS_GetStringChars(scriptsrc);
-        slen = JS_GetStringLength(scriptsrc);
-
-        // Root it so GC doesn't collect it.
-        sroot = STRING_TO_JSVAL(scriptsrc);
-        if(JS_AddRoot(cx, &sroot) != JS_TRUE) {
-            fprintf(stderr, "Internal root error.\n");
-            return 1;
-        }
-
-        // Compile and run
-        script = JS_CompileUCScript(cx, global, schars, slen,
-                                    args->scripts[i], 1);
-        if(!script) {
-            fprintf(stderr, "Failed to compile script.\n");
-            return 1;
-        }
-
-        JS_ExecuteScript(cx, global, script, &result);
-
-        // Warning message if we don't remove it.
-        JS_RemoveRoot(cx, &sroot);
-    }
-
-    FINISH_REQUEST(cx);
-    JS_DestroyContext(cx);
-    JS_DestroyRuntime(rt);
-    JS_ShutDown();
-
-    return 0;
-}
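
For context: the main() loop above reads one line at a time from stdin via
readline() and answers over stdout via print(), which is how CouchDB drives
couchjs as an external port program. A minimal sketch of that interaction from
the Erlang side (the option list and paths are assumptions, and the "reset"
command follows the query-server protocol; the real server does this through
couch_os_process):

    %% Spawn the couchjs binary and speak line-oriented JSON over stdio.
    Port = open_port({spawn_executable, "priv/couchjs"},
                     [{args, ["share/server/main.js"]},
                      binary, stream, use_stdio, exit_status]),
    true = port_command(Port, <<"[\"reset\"]\n">>),
    receive
        {Port, {data, Resp}} ->
            io:format("couchjs replied: ~p~n", [Resp])
    after 5000 ->
        erlang:error(couchjs_timeout)
    end.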


[28/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
Revert "fix build on macosx"

This reverts commit 94fe661233f69faa8cfbcb71d0202eb9bca4600f.


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/5ead9289
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/5ead9289
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/5ead9289

Branch: refs/heads/import-rcouch
Commit: 5ead928916dcb2f083f9b5cddd43d0bad7f7da43
Parents: 370ab88
Author: benoitc <be...@apache.org>
Authored: Wed Jan 8 02:28:42 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 c_src/couch_js/http.c | 2 +-
 c_src/couch_js/main.c | 2 +-
 c_src/couch_js/utf8.c | 2 +-
 c_src/couch_js/util.c | 2 +-
 c_src/couch_js/util.h | 2 +-
 rebar.config.script   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/c_src/couch_js/http.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/http.c b/c_src/couch_js/http.c
index da29b5f..3baa59d 100644
--- a/c_src/couch_js/http.c
+++ b/c_src/couch_js/http.c
@@ -16,7 +16,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
-#include <js/jsapi.h>
+#include <jsapi.h>
 #include "utf8.h"
 #include "util.h"
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/c_src/couch_js/main.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/main.c b/c_src/couch_js/main.c
index d4f4ef0..a0fc143 100644
--- a/c_src/couch_js/main.c
+++ b/c_src/couch_js/main.c
@@ -14,7 +14,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#include <js/jsapi.h>
+#include <jsapi.h>
 #include "http.h"
 #include "utf8.h"
 #include "util.h"

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/c_src/couch_js/utf8.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/utf8.c b/c_src/couch_js/utf8.c
index 75a4315..2b3735a 100644
--- a/c_src/couch_js/utf8.c
+++ b/c_src/couch_js/utf8.c
@@ -10,7 +10,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include <js/jsapi.h>
+#include <jsapi.h>
 
 static int
 enc_char(uint8 *utf8Buffer, uint32 ucs4Char)

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/c_src/couch_js/util.c
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.c b/c_src/couch_js/util.c
index ef984d1..9b46ceb 100644
--- a/c_src/couch_js/util.c
+++ b/c_src/couch_js/util.c
@@ -13,7 +13,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <js/jsapi.h>
+#include <jsapi.h>
 
 #include "help.h"
 #include "util.h"

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/c_src/couch_js/util.h
----------------------------------------------------------------------
diff --git a/c_src/couch_js/util.h b/c_src/couch_js/util.h
index a1e2b6c..65a2a06 100644
--- a/c_src/couch_js/util.h
+++ b/c_src/couch_js/util.h
@@ -13,7 +13,7 @@
 #ifndef COUCHJS_UTIL_H
 #define COUCHJS_UTIL_H
 
-#include <js/jsapi.h>
+#include <jsapi.h>
 
 typedef struct {
     int          use_http;

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/5ead9289/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index ab4501d..ca79b39 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -69,7 +69,7 @@ GetFlag = fun(Name, Default) ->
 
 
 JSLIBS = GetFlag("JS_LIBS", "-lmozjs185"),
-JSCFLAGS = GetFlag("JS_CFLAGS", "$CFLAGS"),
+JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
 
 {CFLAGS, LDFLAGS}  = case os:type() of
     {unix, darwin} ->


[40/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
Speed up and move couch_httpd:find_in_binary.

See https://issues.apache.org/jira/browse/COUCHDB-1953


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/f07bbfcc
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/f07bbfcc
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/f07bbfcc

Branch: refs/heads/import-rcouch
Commit: f07bbfcc0bbcd726b886394f86acc506db593fb0
Parents: 7f9c06d
Author: NickNorth <No...@gmail.com>
Authored: Tue Dec 3 20:58:53 2013 +0000
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch_util.erl | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/f07bbfcc/src/couch_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_util.erl b/src/couch_util.erl
index f4d66ef..40401e2 100644
--- a/src/couch_util.erl
+++ b/src/couch_util.erl
@@ -31,6 +31,7 @@
 -export([encode_doc_id/1]).
 -export([with_db/2]).
 -export([rfc1123_date/0, rfc1123_date/1]).
+-export([find_in_binary/2]).
 
 -include("couch_db.hrl").
 
@@ -486,3 +487,34 @@ month(9) -> "Sep";
 month(10) -> "Oct";
 month(11) -> "Nov";
 month(12) -> "Dec".
+
+
+find_in_binary(_B, <<>>) ->
+    not_found;
+
+find_in_binary(B, Data) ->
+    case binary:match(Data, [B], []) of
+    nomatch ->
+        MatchLength = erlang:min(byte_size(B), byte_size(Data)),
+        match_prefix_at_end(binary:part(B, {0, MatchLength}),
+                            binary:part(Data, {byte_size(Data), -MatchLength}),
+                            MatchLength, byte_size(Data) - MatchLength);
+    {Pos, _Len} ->
+        {exact, Pos}
+    end.
+
+match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
+    FirstCharMatches = binary:matches(Data, [binary:part(Prefix, {0, 1})], []),
+    match_rest_of_prefix(FirstCharMatches, Prefix, Data, PrefixLength, N).
+
+match_rest_of_prefix([], _Prefix, _Data, _PrefixLength, _N) ->
+    not_found;
+
+match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
+    case binary:match(binary:part(Data, {PrefixLength, Pos - PrefixLength}),
+                      [binary:part(Prefix, {0, PrefixLength - Pos})], []) of
+        nomatch ->
+            match_rest_of_prefix(Rest, Prefix, Data, PrefixLength, N);
+        {_Pos, _Len1} ->
+            {partial, N + Pos}
+    end.
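
The function returns {exact, Pos} when the pattern occurs in full,
{partial, Pos} when only a prefix of the pattern matches at the very end of
the data, and not_found otherwise. The partial case is what makes it useful
for scanning a multipart body chunk by chunk: the rest of the pattern may
simply not have arrived yet. A small illustration (a sketch, assuming the
export shown in the diff above):

    %% Pattern fully present at offset 3:
    {exact, 3} = couch_util:find_in_binary(<<"--b">>, <<"xyz--bqq">>),
    %% Only the first two bytes of the pattern fit at the end of the data;
    %% the caller can retry once the next chunk arrives:
    {partial, 6} = couch_util:find_in_binary(<<"--b">>, <<"xyzqqq--">>),
    not_found = couch_util:find_in_binary(<<"--b">>, <<"xyzqqq">>).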


[21/41] working build of couchjs

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/sm180.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/sm180.c b/priv/couch_js/sm180.c
deleted file mode 100644
index 5fb8ce0..0000000
--- a/priv/couch_js/sm180.c
+++ /dev/null
@@ -1,407 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <jsapi.h>
-#include "http.h"
-#include "utf8.h"
-#include "util.h"
-
-
-#define SETUP_REQUEST(cx) \
-    JS_SetContextThread(cx); \
-    JS_BeginRequest(cx);
-#define FINISH_REQUEST(cx) \
-    JS_EndRequest(cx); \
-    JS_ClearContextThread(cx);
-
-
-static JSBool
-req_ctor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
-{
-    return http_ctor(cx, obj);
-}
-
-
-static void 
-req_dtor(JSContext* cx, JSObject* obj)
-{
-    http_dtor(cx, obj);
-}
-
-
-static JSBool
-req_open(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE);
-    } else if(argc == 3) {
-        ret = http_open(cx, obj, argv[0], argv[1], argv[2]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.open");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_set_hdr(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_set_hdr(cx, obj, argv[0], argv[1]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.set_header");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_send(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 1) {
-        ret = http_send(cx, obj, argv[0]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.send");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_status(JSContext* cx, JSObject* obj, jsval idval, jsval* vp)
-{
-    int status = http_status(cx, obj);
-    if(status < 0)
-        return JS_FALSE;
-
-    if(INT_FITS_IN_JSVAL(status)) {
-        JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status));
-        return JS_TRUE;
-    } else {
-        JS_ReportError(cx, "Invalid HTTP status.");
-        return JS_FALSE;
-    }
-}
-
-
-static JSBool
-base_url(JSContext *cx, JSObject* obj, jsid pid, jsval* vp)
-{
-    couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
-    return http_uri(cx, obj, args, &JS_RVAL(cx, vp));
-}
-
-
-static JSBool
-evalcx(JSContext *cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSString *str;
-    JSObject *sandbox;
-    JSContext *subcx;
-    const jschar *src;
-    size_t srclen;
-    jsval rval;
-    JSBool ret = JS_FALSE;
-    char *name = NULL;
-
-    sandbox = NULL;
-    if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
-        return JS_FALSE;
-    }
-
-    subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
-    if(!subcx) {
-        JS_ReportOutOfMemory(cx);
-        return JS_FALSE;
-    }
-
-    SETUP_REQUEST(subcx);
-
-    src = JS_GetStringChars(str);
-    srclen = JS_GetStringLength(str);
-
-    if(!sandbox) {
-        sandbox = JS_NewObject(subcx, NULL, NULL, NULL);
-        if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
-            goto done;
-        }
-    }
-
-    if(argc > 2) {
-      name = enc_string(cx, argv[2], NULL);
-    }
-
-    if(srclen == 0) {
-        JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox));
-    } else {
-        JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, &rval);
-        JS_SET_RVAL(cx, vp, rval);
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(name) JS_free(cx, name);
-    FINISH_REQUEST(subcx);
-    JS_DestroyContext(subcx);
-    return ret;
-}
-
-
-static JSBool
-gc(JSContext* cx, uintN argc, jsval* vp)
-{
-    JS_GC(cx);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-print(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    couch_print(cx, argc, argv);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-quit(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    int exit_code = 0;
-    JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
-    exit(exit_code);
-}
-
-
-static JSBool
-readline(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSString* line;
-
-    /* GC Occasionally */
-    JS_MaybeGC(cx);
-
-    line = couch_readline(cx, stdin);
-    if(line == NULL) return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line));
-    return JS_TRUE;
-}
-
-
-static JSBool
-seal(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSObject *target;
-    JSBool deep = JS_FALSE;
-
-    if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
-        return JS_FALSE;
-
-    if(!target) {
-        JS_SET_RVAL(cx, vp, JSVAL_VOID);
-        return JS_TRUE;
-    }
-
-    if(JS_SealObject(cx, target, deep) != JS_TRUE)
-        return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-JSClass CouchHTTPClass = {
-    "CouchHTTP",
-    JSCLASS_HAS_PRIVATE
-        | JSCLASS_CONSTRUCT_PROTOTYPE
-        | JSCLASS_HAS_RESERVED_SLOTS(2),
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    req_dtor,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-JSPropertySpec CouchHTTPProperties[] = {
-    {"status", 0, JSPROP_READONLY, req_status, NULL},
-    {"base_url", 0, JSPROP_READONLY | JSPROP_SHARED, base_url, NULL},
-    {0, 0, 0, 0, 0}
-};
-
-
-JSFunctionSpec CouchHTTPFunctions[] = {
-    JS_FS("_open", (JSNative) req_open, 3, JSFUN_FAST_NATIVE, 0),
-    JS_FS("_setRequestHeader", (JSNative) req_set_hdr, 2, JSFUN_FAST_NATIVE, 0),
-    JS_FS("_send", (JSNative) req_send, 1, JSFUN_FAST_NATIVE, 0),
-    JS_FS_END
-};
-
-
-static JSClass global_class = {
-    "GlobalClass",
-    JSCLASS_GLOBAL_FLAGS,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    JS_FinalizeStub,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-static JSFunctionSpec global_functions[] = {
-    JS_FS("evalcx", (JSNative) evalcx, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS("gc", (JSNative) gc, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS("print", (JSNative) print, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS("quit", (JSNative) quit, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS("readline", (JSNative) readline, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS("seal", (JSNative) seal, 0, JSFUN_FAST_NATIVE, 0),
-    JS_FS_END
-};
-
-
-int
-main(int argc, const char* argv[])
-{
-    JSRuntime* rt = NULL;
-    JSContext* cx = NULL;
-    JSObject* global = NULL;
-    JSObject* klass = NULL;
-    JSScript* script;
-    JSString* scriptsrc;
-    jschar* schars;
-    size_t slen;
-    jsval sroot;
-    jsval result;
-    int i;
-
-    couch_args* args = couch_parse_args(argc, argv);
-
-    rt = JS_NewRuntime(args->stack_size);
-    if(rt == NULL)
-        return 1;
-
-    cx = JS_NewContext(rt, 8L * 1024L);
-    if(cx == NULL)
-        return 1;
-
-    JS_SetErrorReporter(cx, couch_error);
-    JS_ToggleOptions(cx, JSOPTION_XML);
-    JS_SetContextPrivate(cx, args);
-    
-    SETUP_REQUEST(cx);
-
-    global = JS_NewObject(cx, &global_class, NULL, NULL);
-    if(global == NULL)
-        return 1;
-
-    JS_SetGlobalObject(cx, global);
-    
-    if(!JS_InitStandardClasses(cx, global))
-        return 1;
-
-    if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
-        return 1;
- 
-    if(args->use_http) {
-        http_check_enabled();
-
-        klass = JS_InitClass(
-            cx, global,
-            NULL,
-            &CouchHTTPClass, req_ctor,
-            0,
-            CouchHTTPProperties, CouchHTTPFunctions,
-            NULL, NULL
-        );
-
-        if(!klass)
-        {
-            fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
-            exit(2);
-        }
-    } 
-
-    for (i = 0 ; args->scripts[i] ; i++) {
-        // Convert script source to jschars.
-        scriptsrc = couch_readfile(cx, args->scripts[i]);
-        if(!scriptsrc)
-            return 1;
-
-        schars = JS_GetStringChars(scriptsrc);
-        slen = JS_GetStringLength(scriptsrc);
-
-        // Root it so GC doesn't collect it.
-        sroot = STRING_TO_JSVAL(scriptsrc);
-        if(JS_AddRoot(cx, &sroot) != JS_TRUE) {
-            fprintf(stderr, "Internal root error.\n");
-            return 1;
-        }
-
-        // Compile and run
-        script = JS_CompileUCScript(cx, global, schars, slen,
-                                    args->scripts[i], 1);
-        if(!script) {
-            fprintf(stderr, "Failed to compile script.\n");
-            return 1;
-        }
-
-        JS_ExecuteScript(cx, global, script, &result);
-
-        // Warning message if we don't remove it.
-        JS_RemoveRoot(cx, &sroot);
-    }
-
-    FINISH_REQUEST(cx);
-    JS_DestroyContext(cx);
-    JS_DestroyRuntime(rt);
-    JS_ShutDown();
-
-    return 0;
-}

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/sm185.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/sm185.c b/priv/couch_js/sm185.c
deleted file mode 100644
index c378d4a..0000000
--- a/priv/couch_js/sm185.c
+++ /dev/null
@@ -1,431 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <jsapi.h>
-#include "http.h"
-#include "utf8.h"
-#include "util.h"
-
-
-#define SETUP_REQUEST(cx) \
-    JS_SetContextThread(cx); \
-    JS_BeginRequest(cx);
-#define FINISH_REQUEST(cx) \
-    JS_EndRequest(cx); \
-    JS_ClearContextThread(cx);
-
-
-static JSClass global_class = {
-    "GlobalClass",
-    JSCLASS_GLOBAL_FLAGS,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_StrictPropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    JS_FinalizeStub,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-static JSBool
-req_ctor(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSBool ret;
-    JSObject* obj = JS_NewObjectForConstructor(cx, vp);
-    if(!obj) {
-        JS_ReportError(cx, "Failed to create CouchHTTP instance.\n");
-        return JS_FALSE;
-    }
-    ret = http_ctor(cx, obj);
-    JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(obj));
-    return ret;
-}
-
-
-static void 
-req_dtor(JSContext* cx, JSObject* obj)
-{
-    http_dtor(cx, obj);
-}
-
-
-static JSBool
-req_open(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE);
-    } else if(argc == 3) {
-        ret = http_open(cx, obj, argv[0], argv[1], argv[2]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.open");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_set_hdr(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 2) {
-        ret = http_set_hdr(cx, obj, argv[0], argv[1]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.set_header");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_send(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSObject* obj = JS_THIS_OBJECT(cx, vp);
-    jsval* argv = JS_ARGV(cx, vp);
-    JSBool ret = JS_FALSE;
-
-    if(argc == 1) {
-        ret = http_send(cx, obj, argv[0]);
-    } else {
-        JS_ReportError(cx, "Invalid call to CouchHTTP.send");
-    }
-
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-static JSBool
-req_status(JSContext* cx, JSObject* obj, jsid pid, jsval* vp)
-{
-    int status = http_status(cx, obj);
-    if(status < 0)
-        return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status));
-    return JS_TRUE;
-}
-
-
-static JSBool
-base_url(JSContext *cx, JSObject* obj, jsid pid, jsval* vp)
-{
-    couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
-    return http_uri(cx, obj, args, &JS_RVAL(cx, vp));
-}
-
-
-static JSBool
-evalcx(JSContext *cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSString* str;
-    JSObject* sandbox;
-    JSObject* global;
-    JSContext* subcx;
-    JSCrossCompartmentCall* call = NULL;
-    const jschar* src;
-    size_t srclen;
-    jsval rval;
-    JSBool ret = JS_FALSE;
-    char *name = NULL;
-
-    sandbox = NULL;
-    if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) {
-        return JS_FALSE;
-    }
-
-    subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
-    if(!subcx) {
-        JS_ReportOutOfMemory(cx);
-        return JS_FALSE;
-    }
-
-    SETUP_REQUEST(subcx);
-
-    src = JS_GetStringCharsAndLength(cx, str, &srclen);
-
-    // Re-use the compartment associated with the main context,
-    // rather than creating a new compartment.
-    global = JS_GetGlobalObject(cx);
-    if(global == NULL) goto done;
-    call = JS_EnterCrossCompartmentCall(subcx, global);
-
-    if(!sandbox) {
-        sandbox = JS_NewGlobalObject(subcx, &global_class);
-        if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) {
-            goto done;
-        }
-    }
-
-    if(argc > 2) {
-        name = enc_string(cx, argv[2], NULL);
-    }
-
-    if(srclen == 0) {
-        JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox));
-    } else {
-        JS_EvaluateUCScript(subcx, sandbox, src, srclen, name, 1, &rval);
-        JS_SET_RVAL(cx, vp, rval);
-    }
-    
-    ret = JS_TRUE;
-
-done:
-    if(name) JS_free(cx, name);
-    JS_LeaveCrossCompartmentCall(call);
-    FINISH_REQUEST(subcx);
-    JS_DestroyContext(subcx);
-    return ret;
-}
-
-
-static JSBool
-gc(JSContext* cx, uintN argc, jsval* vp)
-{
-    JS_GC(cx);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-print(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    couch_print(cx, argc, argv);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return JS_TRUE;
-}
-
-
-static JSBool
-quit(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    int exit_code = 0;
-    JS_ConvertArguments(cx, argc, argv, "/i", &exit_code);
-    exit(exit_code);
-}
-
-
-static JSBool
-readline(JSContext* cx, uintN argc, jsval* vp)
-{
-    JSString* line;
-
-    /* GC Occasionally */
-    JS_MaybeGC(cx);
-
-    line = couch_readline(cx, stdin);
-    if(line == NULL) return JS_FALSE;
-
-    JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line));
-    return JS_TRUE;
-}
-
-
-static JSBool
-seal(JSContext* cx, uintN argc, jsval* vp)
-{
-    jsval* argv = JS_ARGV(cx, vp);
-    JSObject *target;
-    JSBool deep = JS_FALSE;
-    JSBool ret;
-
-    if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
-        return JS_FALSE;
-
-    if(!target) {
-        JS_SET_RVAL(cx, vp, JSVAL_VOID);
-        return JS_TRUE;
-    }
-
-    
-    ret = deep ? JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target);
-    JS_SET_RVAL(cx, vp, JSVAL_VOID);
-    return ret;
-}
-
-
-JSClass CouchHTTPClass = {
-    "CouchHTTP",
-    JSCLASS_HAS_PRIVATE
-        | JSCLASS_CONSTRUCT_PROTOTYPE
-        | JSCLASS_HAS_RESERVED_SLOTS(2),
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_PropertyStub,
-    JS_StrictPropertyStub,
-    JS_EnumerateStub,
-    JS_ResolveStub,
-    JS_ConvertStub,
-    req_dtor,
-    JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-
-JSPropertySpec CouchHTTPProperties[] = {
-    {"status", 0, JSPROP_READONLY, req_status, NULL},
-    {"base_url", 0, JSPROP_READONLY | JSPROP_SHARED, base_url, NULL},
-    {0, 0, 0, 0, 0}
-};
-
-
-JSFunctionSpec CouchHTTPFunctions[] = {
-    JS_FS("_open", req_open, 3, 0),
-    JS_FS("_setRequestHeader", req_set_hdr, 2, 0),
-    JS_FS("_send", req_send, 1, 0),
-    JS_FS_END
-};
-
-
-static JSFunctionSpec global_functions[] = {
-    JS_FS("evalcx", evalcx, 0, 0),
-    JS_FS("gc", gc, 0, 0),
-    JS_FS("print", print, 0, 0),
-    JS_FS("quit", quit, 0, 0),
-    JS_FS("readline", readline, 0, 0),
-    JS_FS("seal", seal, 0, 0),
-    JS_FS_END
-};
-
-
-int
-main(int argc, const char* argv[])
-{
-    JSRuntime* rt = NULL;
-    JSContext* cx = NULL;
-    JSObject* global = NULL;
-    JSCrossCompartmentCall *call = NULL;
-    JSObject* klass = NULL;
-    JSSCRIPT_TYPE script;
-    JSString* scriptsrc;
-    const jschar* schars;
-    size_t slen;
-    jsval sroot;
-    jsval result;
-    int i;
-
-    couch_args* args = couch_parse_args(argc, argv);
-
-    rt = JS_NewRuntime(args->stack_size);
-    if(rt == NULL)
-        return 1;
-
-    cx = JS_NewContext(rt, 8L * 1024L);
-    if(cx == NULL)
-        return 1;
-
-    JS_SetErrorReporter(cx, couch_error);
-    JS_ToggleOptions(cx, JSOPTION_XML);
-    JS_SetOptions(cx, JSOPTION_METHODJIT);
-#ifdef JSOPTION_TYPE_INFERENCE
-    JS_SetOptions(cx, JSOPTION_TYPE_INFERENCE);
-#endif
-    JS_SetContextPrivate(cx, args);
-    
-    SETUP_REQUEST(cx);
-
-    global = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL);
-    if(global == NULL)
-        return 1;
-
-    call = JS_EnterCrossCompartmentCall(cx, global);
-
-    JS_SetGlobalObject(cx, global);
-    
-    if(!JS_InitStandardClasses(cx, global))
-        return 1;
-
-    if(couch_load_funcs(cx, global, global_functions) != JS_TRUE)
-        return 1;
- 
-    if(args->use_http) {
-        http_check_enabled();
-
-        klass = JS_InitClass(
-            cx, global,
-            NULL,
-            &CouchHTTPClass, req_ctor,
-            0,
-            CouchHTTPProperties, CouchHTTPFunctions,
-            NULL, NULL
-        );
-
-        if(!klass)
-        {
-            fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
-            exit(2);
-        }
-    } 
-
-    for(i = 0 ; args->scripts[i] ; i++) {
-        // Convert script source to jschars.
-        scriptsrc = couch_readfile(cx, args->scripts[i]);
-        if(!scriptsrc)
-            return 1;
-
-        schars = JS_GetStringCharsAndLength(cx, scriptsrc, &slen);
-
-        // Root it so GC doesn't collect it.
-        sroot = STRING_TO_JSVAL(scriptsrc);
-        if(JS_AddValueRoot(cx, &sroot) != JS_TRUE) {
-            fprintf(stderr, "Internal root error.\n");
-            return 1;
-        }
-
-        // Compile and run
-        script = JS_CompileUCScript(cx, global, schars, slen,
-                                    args->scripts[i], 1);
-        if(!script) {
-            fprintf(stderr, "Failed to compile script.\n");
-            return 1;
-        }
-
-        if(JS_ExecuteScript(cx, global, script, &result) != JS_TRUE) {
-            fprintf(stderr, "Failed to execute script.\n");
-            return 1;
-        }
-
-        // Warning message if we don't remove it.
-        JS_RemoveValueRoot(cx, &sroot);
-
-        // Give the GC a chance to run.
-        JS_MaybeGC(cx);
-    }
-
-    JS_LeaveCrossCompartmentCall(call);
-    FINISH_REQUEST(cx);
-    JS_DestroyContext(cx);
-    JS_DestroyRuntime(rt);
-    JS_ShutDown();
-
-    return 0;
-}

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/utf8.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/utf8.c b/priv/couch_js/utf8.c
deleted file mode 100644
index 2d23cc2..0000000
--- a/priv/couch_js/utf8.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <jsapi.h>
-#include "config.h"
-
-static int
-enc_char(uint8 *utf8Buffer, uint32 ucs4Char)
-{
-    int utf8Length = 1;
-
-    if (ucs4Char < 0x80)
-    {
-        *utf8Buffer = (uint8)ucs4Char;
-    }
-    else
-    {
-        int i;
-        uint32 a = ucs4Char >> 11;
-        utf8Length = 2;
-        while(a)
-        {
-            a >>= 5;
-            utf8Length++;
-        }
-        i = utf8Length;
-        while(--i)
-        {
-            utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
-            ucs4Char >>= 6;
-        }
-        *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
-    }
-
-    return utf8Length;
-}
-
-static JSBool
-enc_charbuf(const jschar* src, size_t srclen, char* dst, size_t* dstlenp)
-{
-    size_t i;
-    size_t utf8Len;
-    size_t dstlen = *dstlenp;
-    size_t origDstlen = dstlen;
-    jschar c;
-    jschar c2;
-    uint32 v;
-    uint8 utf8buf[6];
-
-    if(!dst)
-    {
-        dstlen = origDstlen = (size_t) -1;
-    }
-
-    while(srclen)
-    {
-        c = *src++;
-        srclen--;
-
-        if(c <= 0xD7FF || c >= 0xE000)
-        {
-            v = (uint32) c;
-        }
-        else if(c >= 0xD800 && c <= 0xDBFF)
-        {
-            if(srclen < 1) goto buffer_too_small;
-            c2 = *src++;
-            srclen--;
-            if(c2 >= 0xDC00 && c2 <= 0xDFFF)
-            {
-                v = (uint32) (((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000);
-            }
-            else
-            {
-                // Invalid second half of surrogate pair
-                v = (uint32) 0xFFFD;
-            }
-        }
-        else
-        {
-            // Invalid first half of surrogate pair
-            v = (uint32) 0xFFFD;
-        }
-
-        if(v < 0x0080)
-        {
-            /* no encoding necessary - performance hack */
-            if(!dstlen) goto buffer_too_small;
-            if(dst) *dst++ = (char) v;
-            utf8Len = 1;
-        }
-        else
-        {
-            utf8Len = enc_char(utf8buf, v);
-            if(utf8Len > dstlen) goto buffer_too_small;
-            if(dst)
-            {
-                for (i = 0; i < utf8Len; i++)
-                {
-                    *dst++ = (char) utf8buf[i];
-                }
-            }
-        }
-        dstlen -= utf8Len;
-    }
-    
-    *dstlenp = (origDstlen - dstlen);
-    return JS_TRUE;
-
-buffer_too_small:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-}
-
-char*
-enc_string(JSContext* cx, jsval arg, size_t* buflen)
-{
-    JSString* str = NULL;
-    const jschar* src = NULL;
-    char* bytes = NULL;
-    size_t srclen = 0;
-    size_t byteslen = 0;
-    
-    str = JS_ValueToString(cx, arg);
-    if(!str) goto error;
-
-#ifdef HAVE_JS_GET_STRING_CHARS_AND_LENGTH
-    src = JS_GetStringCharsAndLength(cx, str, &srclen);
-#else
-    src = JS_GetStringChars(str);
-    srclen = JS_GetStringLength(str);
-#endif
-
-    if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error;
-    
-    bytes = JS_malloc(cx, (byteslen) + 1);
-    if(!bytes) goto error;
-    bytes[byteslen] = 0;
-    
-    if(!enc_charbuf(src, srclen, bytes, &byteslen)) goto error;
-
-    if(buflen) *buflen = byteslen;
-    goto success;
-
-error:
-    if(bytes != NULL) JS_free(cx, bytes);
-    bytes = NULL;
-
-success:
-    return bytes;
-}
-
-static uint32
-dec_char(const uint8 *utf8Buffer, int utf8Length)
-{
-    uint32 ucs4Char;
-    uint32 minucs4Char;
-
-    /* from Unicode 3.1, non-shortest form is illegal */
-    static const uint32 minucs4Table[] = {
-        0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000
-    };
-
-    if (utf8Length == 1)
-    {
-        ucs4Char = *utf8Buffer;
-    }
-    else
-    {
-        ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
-        minucs4Char = minucs4Table[utf8Length-2];
-        while(--utf8Length)
-        {
-            ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
-        }
-        if(ucs4Char < minucs4Char || ucs4Char == 0xFFFE || ucs4Char == 0xFFFF)
-        {
-            ucs4Char = 0xFFFD;
-        }
-    }
-
-    return ucs4Char;
-}
-
-static JSBool
-dec_charbuf(const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
-{
-    uint32 v;
-    size_t offset = 0;
-    size_t j;
-    size_t n;
-    size_t dstlen = *dstlenp;
-    size_t origDstlen = dstlen;
-
-    if(!dst) dstlen = origDstlen = (size_t) -1;
-
-    while(srclen)
-    {
-        v = (uint8) *src;
-        n = 1;
-        
-        if(v & 0x80)
-        {
-            while(v & (0x80 >> n))
-            {
-                n++;
-            }
-            
-            if(n > srclen) goto buffer_too_small;
-            if(n == 1 || n > 6) goto bad_character;
-            
-            for(j = 1; j < n; j++)
-            {
-                if((src[j] & 0xC0) != 0x80) goto bad_character;
-            }
-
-            v = dec_char((const uint8 *) src, n);
-            if(v >= 0x10000)
-            {
-                v -= 0x10000;
-                
-                if(v > 0xFFFFF || dstlen < 2)
-                {
-                    *dstlenp = (origDstlen - dstlen);
-                    return JS_FALSE;
-                }
-                
-                if(dstlen < 2) goto buffer_too_small;
-
-                if(dst)
-                {
-                    *dst++ = (jschar)((v >> 10) + 0xD800);
-                    v = (jschar)((v & 0x3FF) + 0xDC00);
-                }
-                dstlen--;
-            }
-        }
-
-        if(!dstlen) goto buffer_too_small;
-        if(dst) *dst++ = (jschar) v;
-
-        dstlen--;
-        offset += n;
-        src += n;
-        srclen -= n;
-    }
-
-    *dstlenp = (origDstlen - dstlen);
-    return JS_TRUE;
-
-bad_character:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-
-buffer_too_small:
-    *dstlenp = (origDstlen - dstlen);
-    return JS_FALSE;
-}
-
-JSString*
-dec_string(JSContext* cx, const char* bytes, size_t byteslen)
-{
-    JSString* str = NULL;
-    jschar* chars = NULL;
-    size_t charslen;
-    
-    if(!dec_charbuf(bytes, byteslen, NULL, &charslen)) goto error;
-
-    chars = JS_malloc(cx, (charslen + 1) * sizeof(jschar));
-    if(!chars) return NULL;
-    chars[charslen] = 0;
-
-    if(!dec_charbuf(bytes, byteslen, chars, &charslen)) goto error;
-
-    str = JS_NewUCString(cx, chars, charslen - 1);
-    if(!str) goto error;
-
-    goto success;
-
-error:
-    if(chars != NULL) JS_free(cx, chars);
-    str = NULL;
-
-success:
-    return str;
-}
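
Taken together, enc_charbuf() folds UTF-16 surrogate pairs from SpiderMonkey's
jschar strings into single code points and emits the shortest UTF-8 form,
while dec_charbuf() reverses the conversion, mapping non-shortest encodings to
U+FFFD and rejecting malformed byte sequences. The same round trip expressed
with Erlang's unicode module, for comparison only (couchjs itself never takes
this path):

    %% U+1F600 arrives from SpiderMonkey as the surrogate pair D83D DE00;
    %% encoding it yields the 4-byte UTF-8 sequence F0 9F 98 80.
    <<240,159,152,128>> =
        unicode:characters_to_binary(<<16#D83D:16, 16#DE00:16>>, utf16, utf8),
    %% And back to UTF-16 (big-endian) code units:
    <<16#D83D:16, 16#DE00:16>> =
        unicode:characters_to_binary(<<240,159,152,128>>, utf8, utf16).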

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/utf8.h
----------------------------------------------------------------------
diff --git a/priv/couch_js/utf8.h b/priv/couch_js/utf8.h
deleted file mode 100644
index c5cb86c..0000000
--- a/priv/couch_js/utf8.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCH_JS_UTF_8_H
-#define COUCH_JS_UTF_8_H
-
-char* enc_string(JSContext* cx, jsval arg, size_t* buflen);
-JSString* dec_string(JSContext* cx, const char* buf, size_t buflen);
-
-#endif

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/util.c
----------------------------------------------------------------------
diff --git a/priv/couch_js/util.c b/priv/couch_js/util.c
deleted file mode 100644
index b4700de..0000000
--- a/priv/couch_js/util.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <jsapi.h>
-
-#include "help.h"
-#include "util.h"
-#include "utf8.h"
-
-
-size_t
-slurp_file(const char* file, char** outbuf_p)
-{
-    FILE* fp;
-    char fbuf[16384];
-    char *buf = NULL;
-    char* tmp;
-    size_t nread = 0;
-    size_t buflen = 0;
-
-    if(strcmp(file, "-") == 0) {
-        fp = stdin;
-    } else {
-        fp = fopen(file, "r");
-        if(fp == NULL) {
-            fprintf(stderr, "Failed to read file: %s\n", file);
-            exit(3);
-        }
-    }
-
-    while((nread = fread(fbuf, 1, 16384, fp)) > 0) {
-        if(buf == NULL) {
-            buf = (char*) malloc(nread + 1);
-            if(buf == NULL) {
-                fprintf(stderr, "Out of memory.\n");
-                exit(3);
-            }
-            memcpy(buf, fbuf, nread);
-        } else {
-            tmp = (char*) malloc(buflen + nread + 1);
-            if(tmp == NULL) {
-                fprintf(stderr, "Out of memory.\n");
-                exit(3);
-            }
-            memcpy(tmp, buf, buflen);
-            memcpy(tmp+buflen, fbuf, nread);
-            free(buf);
-            buf = tmp;
-        }
-        buflen += nread;
-        buf[buflen] = '\0';
-    }
-    *outbuf_p = buf;
-    return buflen + 1;
-}
-
-couch_args*
-couch_parse_args(int argc, const char* argv[])
-{
-    couch_args* args;
-    int i = 1;
-
-    args = (couch_args*) malloc(sizeof(couch_args));
-    if(args == NULL)
-        return NULL;
-
-    memset(args, '\0', sizeof(couch_args));
-    args->stack_size = 64L * 1024L * 1024L;
-
-    while(i < argc) {
-        if(strcmp("-h", argv[i]) == 0) {
-            DISPLAY_USAGE;
-            exit(0);
-        } else if(strcmp("-V", argv[i]) == 0) {
-            DISPLAY_VERSION;
-            exit(0);
-        } else if(strcmp("-H", argv[i]) == 0) {
-            args->use_http = 1;
-        } else if(strcmp("-S", argv[i]) == 0) {
-            args->stack_size = atoi(argv[++i]);
-            if(args->stack_size <= 0) {
-                fprintf(stderr, "Invalid stack size.\n");
-                exit(2);
-            }
-        } else if(strcmp("-u", argv[i]) == 0) {
-            args->uri_file = argv[++i];
-        } else if(strcmp("--", argv[i]) == 0) {
-            i++;
-            break;
-        } else {
-            break;
-        }
-        i++;
-    }
-
-    if(i >= argc) {
-        DISPLAY_USAGE;
-        exit(3);
-    }
-    args->scripts = argv + i;
-
-    return args;
-}
-
-
-int
-couch_fgets(char* buf, int size, FILE* fp)
-{
-    int n, i, c;
-
-    if(size <= 0) return -1;
-    n = size - 1;
-
-    for(i = 0; i < n && (c = getc(fp)) != EOF; i++) {
-        buf[i] = c;
-        if(c == '\n') {
-            i++;
-            break;
-        }
-    }
-
-    buf[i] = '\0';
-    return i;
-}
-
-
-JSString*
-couch_readline(JSContext* cx, FILE* fp)
-{
-    JSString* str;
-    char* bytes = NULL;
-    char* tmp = NULL;
-    size_t used = 0;
-    size_t byteslen = 256;
-    size_t readlen = 0;
-
-    bytes = JS_malloc(cx, byteslen);
-    if(bytes == NULL) return NULL;
-    
-    while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) {
-        used += readlen;
-        
-        if(bytes[used-1] == '\n') {
-            bytes[used-1] = '\0';
-            break;
-        }
-        
-        // Double our buffer and read more.
-        byteslen *= 2;
-        tmp = JS_realloc(cx, bytes, byteslen);
-        if(!tmp) {
-            JS_free(cx, bytes);
-            return NULL;
-        }
-        
-        bytes = tmp;
-    }
-
-    // Treat empty strings specially
-    if(used == 0) {
-        JS_free(cx, bytes);
-        return JSVAL_TO_STRING(JS_GetEmptyStringValue(cx));
-    }
-
-    // Shrink the buffer to the actual data size
-    tmp = JS_realloc(cx, bytes, used);
-    if(!tmp) {
-        JS_free(cx, bytes);
-        return NULL;
-    }
-    bytes = tmp;
-    byteslen = used;
-
-    str = dec_string(cx, bytes, byteslen);
-    JS_free(cx, bytes);
-    return str;
-}
-
-
-JSString*
-couch_readfile(JSContext* cx, const char* filename)
-{
-    JSString *string;
-    size_t byteslen;
-    char *bytes;
-
-    if((byteslen = slurp_file(filename, &bytes))) {
-        string = dec_string(cx, bytes, byteslen);
-
-        free(bytes);
-        return string;
-    }
-    return NULL;    
-}
-
-
-void
-couch_print(JSContext* cx, uintN argc, jsval* argv)
-{
-    char *bytes = NULL;
-    FILE *stream = stdout;
-
-    if (argc) {
-        if (argc > 1 && argv[1] == JSVAL_TRUE) {
-          stream = stderr;
-        }
-        bytes = enc_string(cx, argv[0], NULL);
-        if(!bytes) return;
-        fprintf(stream, "%s", bytes);
-        JS_free(cx, bytes);
-    }
-
-    fputc('\n', stream);
-    fflush(stream);
-}
-
-
-void
-couch_error(JSContext* cx, const char* mesg, JSErrorReport* report)
-{
-    jsval v, replace;
-    char* bytes;
-    JSObject* regexp, *stack;
-    jsval re_args[2];
-
-    if(!report || !JSREPORT_IS_WARNING(report->flags))
-    {
-        fprintf(stderr, "%s\n", mesg);
-
-        // Print a stack trace, if available.
-        if (JSREPORT_IS_EXCEPTION(report->flags) &&
-            JS_GetPendingException(cx, &v))
-        {
-            // Clear the exception before any JS method calls, or the result is
-            // infinite, recursive error report generation.
-            JS_ClearPendingException(cx);
-
-            // Use JS regexp to indent the stack trace.
-            // If the regexp can't be created, don't JS_ReportError since it is
-            // probably not productive to wind up here again.
-#ifdef SM185
-            if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
-               (regexp = JS_NewRegExpObjectNoStatics(
-                   cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#else
-            if(JS_GetProperty(cx, JSVAL_TO_OBJECT(v), "stack", &v) &&
-               (regexp = JS_NewRegExpObject(
-                   cx, "^(?=.)", 6, JSREG_GLOB | JSREG_MULTILINE)))
-#endif
-            {
-                // Set up the arguments to ``String.replace()``
-                re_args[0] = OBJECT_TO_JSVAL(regexp);
-                re_args[1] = STRING_TO_JSVAL(JS_InternString(cx, "\t"));
-
-                // Perform the replacement
-                if(JS_ValueToObject(cx, v, &stack) &&
-                   JS_GetProperty(cx, stack, "replace", &replace) &&
-                   JS_CallFunctionValue(cx, stack, replace, 2, re_args, &v))
-                {
-                    // Print the result
-                    bytes = enc_string(cx, v, NULL);
-                    fprintf(stderr, "Stacktrace:\n%s", bytes);
-                    JS_free(cx, bytes);
-                }
-            }
-        }
-    }
-}
-
-
-JSBool
-couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs)
-{
-    JSFunctionSpec* f;
-    for(f = funcs; f->name != NULL; f++) {
-        if(!JS_DefineFunction(cx, obj, f->name, f->call, f->nargs, f->flags)) {
-            fprintf(stderr, "Failed to create function: %s\n", f->name);
-            return JS_FALSE;
-        }
-    }
-    return JS_TRUE;
-}

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/priv/couch_js/util.h
----------------------------------------------------------------------
diff --git a/priv/couch_js/util.h b/priv/couch_js/util.h
deleted file mode 100644
index 65a2a06..0000000
--- a/priv/couch_js/util.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#ifndef COUCHJS_UTIL_H
-#define COUCHJS_UTIL_H
-
-#include <jsapi.h>
-
-typedef struct {
-    int          use_http;
-    int          stack_size;
-    const char** scripts;
-    const char*  uri_file;
-    JSString*    uri;
-} couch_args;
-
-couch_args* couch_parse_args(int argc, const char* argv[]);
-int couch_fgets(char* buf, int size, FILE* fp);
-JSString* couch_readline(JSContext* cx, FILE* fp);
-JSString* couch_readfile(JSContext* cx, const char* filename);
-void couch_print(JSContext* cx, uintN argc, jsval* argv);
-void couch_error(JSContext* cx, const char* mesg, JSErrorReport* report);
-JSBool couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs);
-
-
-#endif // Included util.h

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/8c6a64d0/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
new file mode 100644
index 0000000..df8c48e
--- /dev/null
+++ b/rebar.config.script
@@ -0,0 +1,93 @@
+%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ft=erlang ts=4 sw=4 et
+
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+
+Cfg = case file:consult("../../vars.config") of
+          {ok, Terms} ->
+              Terms;
+          _Err ->
+              []
+      end,
+
+
+%% get version infos
+MajorVersion = integer_to_list(proplists:get_value(version_major, Cfg, 0)),
+MinorVersion = integer_to_list(proplists:get_value(version_minor, Cfg, 0)),
+RevVersion = integer_to_list(proplists:get_value(version_revision, Cfg, 0)),
+StageVersion = proplists:get_value(version_stage, Cfg, ""),
+RelVersion = proplists:get_value(version_release, Cfg, ""),
+
+%% build the version
+BaseVersion = MajorVersion ++ "." ++ MinorVersion ++ "." ++ RevVersion,
+SecondaryVersion = StageVersion ++ RelVersion,
+Version = case os:getenv("RELEASE") of
+    "1" ->
+        BaseVersion;
+    _ ->
+        BaseVersion ++ SecondaryVersion
+end,
+
+Cfg1 = [{package_string, proplists:get_value(package_name, Cfg, "")
+                         ++ " " ++ Version },
+        {package_version, Version}] ++ Cfg,
+
+%% write config.h
+CfgStr = lists:foldl(fun({K, V}, Acc) ->
+            K1 = string:to_upper(atom_to_list(K)),
+            case K1 of
+                "VERSION_" ++ _ -> Acc;
+                _ ->
+                    ["#define ", K1, " ", $", V, $", $\n | Acc]
+            end
+        end, [], Cfg1),
+ok = file:write_file("c_src/couch_js/config.h", iolist_to_binary(CfgStr)),
+
+
+
+GetFlag = fun(Name, Default) ->
+        case os:getenv(Name) of
+            false -> Default;
+            Val -> Val
+        end
+    end,
+
+
+JSLIBS = GetFlag("JS_LIBS", "-lmozjs185"),
+JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
+
+{CFLAGS, LDFLAGS}  = case os:type() of
+    {unix, darwin} ->
+        {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS};
+    {unix, _} ->
+        {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS ++ " -lm"};
+    _ ->
+        {"-DXP_WIN " ++ JSCFLAGS, JSLIBS}
+end,
+
+{CFLAGS1, LDFLAGS1} = case os:getenv("WITHOUT_CURL") of
+    "1" -> {"-DWITHOUT_CURL " ++ CFLAGS, LDFLAGS};
+    _ -> {CFLAGS, LDFLAGS ++ " -lcurl"}
+end,
+
+PortEnv = [{port_env, [
+            {"CFLAGS",  "$CFLAGS -Wall -c -g -O2 " ++ CFLAGS1},
+            {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1}]},
+
+           {port_specs, [
+            {filename:join(["priv", "couchjs"]),
+            ["c_src/couch_js/*.c"]}]}
+],
+
+lists:keymerge(1, lists:keysort(1, PortEnv), lists:keysort(1, CONFIG)).
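
The final expression is how a rebar config script returns its result: rebar
binds the current configuration to CONFIG, evaluates the file, and takes the
value of the last expression as the new configuration. Note that
lists:keymerge/3 keeps tuples from both lists; when two keys compare equal,
the tuple from the first list sorts in front, so proplists lookups see the
new value while the old tuple remains behind it. A minimal sketch of that
semantics, with a hypothetical module name and toy values:

    -module(keymerge_demo).
    -export([main/0]).

    main() ->
        Old = [{erl_opts, [debug_info]}, {port_env, old}],
        New = [{port_env, new},
               {port_specs, [{"priv/couchjs", ["c_src/couch_js/*.c"]}]}],
        %% Both lists must be key-sorted before keymerge/3 is called.
        Merged = lists:keymerge(1, lists:keysort(1, New), lists:keysort(1, Old)),
        %% Equal keys: the tuple from the first argument comes first, so a
        %% proplists lookup now returns the new value.
        new = proplists:get_value(port_env, Merged),
        io:format("~p~n", [Merged]).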

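A worked result for the config.h fold above, assuming ../../vars.config is
absent so every proplists:get_value/3 falls back to its default: Version
becomes "0.0.0", package_name is "", and the generated
c_src/couch_js/config.h holds only the two derived entries. Because the fold
prepends each line to the accumulator, they come out in reverse order:

    #define PACKAGE_VERSION "0.0.0"
    #define PACKAGE_STRING " 0.0.0"

Any other key placed in vars.config must map to a string (or iolist), since
its value is spliced directly between the quotes of a #define line.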

[25/41] support static build

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/patches/js/patch-configure
----------------------------------------------------------------------
diff --git a/patches/js/patch-configure b/patches/js/patch-configure
new file mode 100644
index 0000000..5c87deb
--- /dev/null
+++ b/patches/js/patch-configure
@@ -0,0 +1,3508 @@
+--- configure.orig	2012-06-13 21:31:31.000000000 +0200
++++ configure	2012-06-13 21:39:23.000000000 +0200
+@@ -4312,21 +4312,10 @@
+   fi
+ 
+ 
+-    case "$PBBUILD" in
+-      *xcodebuild*)
+-        
+-        XCODEBUILD_VERSION=`$PBBUILD -version 2>/dev/null | xargs | sed -e 's/.*DevToolsCore-\([0-9]*\).*/\1/'`
+-        
+-        if test -n "$XCODEBUILD_VERSION" && test "$XCODEBUILD_VERSION" -ge 620 ; then
+-          HAS_XCODE_2_1=1;
+-        fi
+-      ;;
+-    esac
+-
+              # Extract the first word of "sdp", so it can be a program name with args.
+ set dummy sdp; ac_word=$2
+ echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+-echo "configure:4330: checking for $ac_word" >&5
++echo "configure:4319: checking for $ac_word" >&5
+ if eval "test \"`echo '$''{'ac_cv_path_SDP'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -4380,8 +4369,6 @@
+ 
+ 
+ 
+-
+-
+ # Check whether --enable-macos-target or --disable-macos-target was given.
+ if test "${enable_macos_target+set}" = set; then
+   enableval="$enable_macos_target"
+@@ -4452,7 +4439,7 @@
+ 
+   
+   echo $ac_n "checking for valid compiler/Mac OS X SDK combination""... $ac_c" 1>&6
+-echo "configure:4456: checking for valid compiler/Mac OS X SDK combination" >&5
++echo "configure:4443: checking for valid compiler/Mac OS X SDK combination" >&5
+   ac_ext=C
+ # CXXFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
+ ac_cpp='$CXXCPP $CPPFLAGS'
+@@ -4461,7 +4448,7 @@
+ cross_compiling=$ac_cv_prog_cxx_cross
+ 
+   cat > conftest.$ac_ext <<EOF
+-#line 4465 "configure"
++#line 4452 "configure"
+ #include "confdefs.h"
+ #include <new>
+                  int main() { return 0; }
+@@ -4469,7 +4456,7 @@
+ result=yes
+ ; return 0; }
+ EOF
+-if { (eval echo configure:4473: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:4460: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   result=no
+ else
+@@ -4506,7 +4493,7 @@
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+ echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+-echo "configure:4510: checking for $ac_word" >&5
++echo "configure:4497: checking for $ac_word" >&5
+ if eval "test \"`echo '$''{'ac_cv_path_GMAKE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -4562,7 +4549,7 @@
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+ echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+-echo "configure:4566: checking for $ac_word" >&5
++echo "configure:4553: checking for $ac_word" >&5
+ if eval "test \"`echo '$''{'ac_cv_path_GMAKE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -4625,7 +4612,7 @@
+ # Uses ac_ vars as temps to allow command line to override cache and checks.
+ # --without-x overrides everything else, but does not touch the cache.
+ echo $ac_n "checking for X""... $ac_c" 1>&6
+-echo "configure:4629: checking for X" >&5
++echo "configure:4616: checking for X" >&5
+ 
+ # Check whether --with-x or --without-x was given.
+ if test "${with_x+set}" = set; then
+@@ -4687,12 +4674,12 @@
+ 
+   # First, try using that file with no special directory specified.
+ cat > conftest.$ac_ext <<EOF
+-#line 4691 "configure"
++#line 4678 "configure"
+ #include "confdefs.h"
+ #include <$x_direct_test_include>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:4696: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:4683: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -4761,14 +4748,14 @@
+   ac_save_LIBS="$LIBS"
+   LIBS="-l$x_direct_test_library $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 4765 "configure"
++#line 4752 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ ${x_direct_test_function}()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:4772: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:4759: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   LIBS="$ac_save_LIBS"
+ # We can link X programs with no special library path.
+@@ -4874,17 +4861,17 @@
+     case "`(uname -sr) 2>/dev/null`" in
+     "SunOS 5"*)
+       echo $ac_n "checking whether -R must be followed by a space""... $ac_c" 1>&6
+-echo "configure:4878: checking whether -R must be followed by a space" >&5
++echo "configure:4865: checking whether -R must be followed by a space" >&5
+       ac_xsave_LIBS="$LIBS"; LIBS="$LIBS -R$x_libraries"
+       cat > conftest.$ac_ext <<EOF
+-#line 4881 "configure"
++#line 4868 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:4888: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:4875: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   ac_R_nospace=yes
+ else
+@@ -4900,14 +4887,14 @@
+       else
+ 	LIBS="$ac_xsave_LIBS -R $x_libraries"
+ 	cat > conftest.$ac_ext <<EOF
+-#line 4904 "configure"
++#line 4891 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:4911: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:4898: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   ac_R_space=yes
+ else
+@@ -4939,7 +4926,7 @@
+     # libraries were built with DECnet support.  And karl@cs.umb.edu says
+     # the Alpha needs dnet_stub (dnet does not exist).
+     echo $ac_n "checking for dnet_ntoa in -ldnet""... $ac_c" 1>&6
+-echo "configure:4943: checking for dnet_ntoa in -ldnet" >&5
++echo "configure:4930: checking for dnet_ntoa in -ldnet" >&5
+ ac_lib_var=`echo dnet'_'dnet_ntoa | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -4947,7 +4934,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-ldnet  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 4951 "configure"
++#line 4938 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -4958,7 +4945,7 @@
+ dnet_ntoa()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:4962: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:4949: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -4980,7 +4967,7 @@
+ 
+     if test $ac_cv_lib_dnet_dnet_ntoa = no; then
+       echo $ac_n "checking for dnet_ntoa in -ldnet_stub""... $ac_c" 1>&6
+-echo "configure:4984: checking for dnet_ntoa in -ldnet_stub" >&5
++echo "configure:4971: checking for dnet_ntoa in -ldnet_stub" >&5
+ ac_lib_var=`echo dnet_stub'_'dnet_ntoa | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -4988,7 +4975,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-ldnet_stub  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 4992 "configure"
++#line 4979 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -4999,7 +4986,7 @@
+ dnet_ntoa()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5003: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:4990: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5028,12 +5015,12 @@
+     # The nsl library prevents programs from opening the X display
+     # on Irix 5.2, according to dickey@clark.net.
+     echo $ac_n "checking for gethostbyname""... $ac_c" 1>&6
+-echo "configure:5032: checking for gethostbyname" >&5
++echo "configure:5019: checking for gethostbyname" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_gethostbyname'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 5037 "configure"
++#line 5024 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char gethostbyname(); below.  */
+@@ -5056,7 +5043,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5060: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5047: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_gethostbyname=yes"
+ else
+@@ -5077,7 +5064,7 @@
+ 
+     if test $ac_cv_func_gethostbyname = no; then
+       echo $ac_n "checking for gethostbyname in -lnsl""... $ac_c" 1>&6
+-echo "configure:5081: checking for gethostbyname in -lnsl" >&5
++echo "configure:5068: checking for gethostbyname in -lnsl" >&5
+ ac_lib_var=`echo nsl'_'gethostbyname | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -5085,7 +5072,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lnsl  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 5089 "configure"
++#line 5076 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -5096,7 +5083,7 @@
+ gethostbyname()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5100: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5087: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5126,12 +5113,12 @@
+     # -lsocket must be given before -lnsl if both are needed.
+     # We assume that if connect needs -lnsl, so does gethostbyname.
+     echo $ac_n "checking for connect""... $ac_c" 1>&6
+-echo "configure:5130: checking for connect" >&5
++echo "configure:5117: checking for connect" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_connect'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 5135 "configure"
++#line 5122 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char connect(); below.  */
+@@ -5154,7 +5141,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5158: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5145: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_connect=yes"
+ else
+@@ -5175,7 +5162,7 @@
+ 
+     if test $ac_cv_func_connect = no; then
+       echo $ac_n "checking for connect in -lsocket""... $ac_c" 1>&6
+-echo "configure:5179: checking for connect in -lsocket" >&5
++echo "configure:5166: checking for connect in -lsocket" >&5
+ ac_lib_var=`echo socket'_'connect | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -5183,7 +5170,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lsocket $X_EXTRA_LIBS $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 5187 "configure"
++#line 5174 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -5194,7 +5181,7 @@
+ connect()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5198: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5185: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5218,12 +5205,12 @@
+ 
+     # gomez@mi.uni-erlangen.de says -lposix is necessary on A/UX.
+     echo $ac_n "checking for remove""... $ac_c" 1>&6
+-echo "configure:5222: checking for remove" >&5
++echo "configure:5209: checking for remove" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_remove'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 5227 "configure"
++#line 5214 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char remove(); below.  */
+@@ -5246,7 +5233,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5250: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5237: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_remove=yes"
+ else
+@@ -5267,7 +5254,7 @@
+ 
+     if test $ac_cv_func_remove = no; then
+       echo $ac_n "checking for remove in -lposix""... $ac_c" 1>&6
+-echo "configure:5271: checking for remove in -lposix" >&5
++echo "configure:5258: checking for remove in -lposix" >&5
+ ac_lib_var=`echo posix'_'remove | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -5275,7 +5262,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lposix  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 5279 "configure"
++#line 5266 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -5286,7 +5273,7 @@
+ remove()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5290: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5277: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5310,12 +5297,12 @@
+ 
+     # BSDI BSD/OS 2.1 needs -lipc for XOpenDisplay.
+     echo $ac_n "checking for shmat""... $ac_c" 1>&6
+-echo "configure:5314: checking for shmat" >&5
++echo "configure:5301: checking for shmat" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_shmat'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 5319 "configure"
++#line 5306 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char shmat(); below.  */
+@@ -5338,7 +5325,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5342: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5329: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_shmat=yes"
+ else
+@@ -5359,7 +5346,7 @@
+ 
+     if test $ac_cv_func_shmat = no; then
+       echo $ac_n "checking for shmat in -lipc""... $ac_c" 1>&6
+-echo "configure:5363: checking for shmat in -lipc" >&5
++echo "configure:5350: checking for shmat in -lipc" >&5
+ ac_lib_var=`echo ipc'_'shmat | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -5367,7 +5354,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lipc  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 5371 "configure"
++#line 5358 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -5378,7 +5365,7 @@
+ shmat()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5382: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5369: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5411,7 +5398,7 @@
+   # libraries we check for below, so use a different variable.
+   #  --interran@uluru.Stanford.EDU, kb@cs.umb.edu.
+   echo $ac_n "checking for IceConnectionNumber in -lICE""... $ac_c" 1>&6
+-echo "configure:5415: checking for IceConnectionNumber in -lICE" >&5
++echo "configure:5402: checking for IceConnectionNumber in -lICE" >&5
+ ac_lib_var=`echo ICE'_'IceConnectionNumber | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -5419,7 +5406,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lICE $X_EXTRA_LIBS $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 5423 "configure"
++#line 5410 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -5430,7 +5417,7 @@
+ IceConnectionNumber()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5434: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:5421: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -5913,7 +5900,7 @@
+     _USE_CPP_INCLUDE_FLAG=1
+ 
+     echo $ac_n "checking whether the compiler supports -Wno-invalid-offsetof""... $ac_c" 1>&6
+-echo "configure:5917: checking whether the compiler supports -Wno-invalid-offsetof" >&5
++echo "configure:5904: checking whether the compiler supports -Wno-invalid-offsetof" >&5
+ if eval "test \"`echo '$''{'ac_has_wno_invalid_offsetof'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -5929,14 +5916,14 @@
+             _SAVE_CXXFLAGS="$CXXFLAGS"
+             CXXFLAGS="$CXXFLAGS ${_COMPILER_PREFIX}-Wno-invalid-offsetof"
+             cat > conftest.$ac_ext <<EOF
+-#line 5933 "configure"
++#line 5920 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ return(0);
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5940: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:5927: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_has_wno_invalid_offsetof="yes"
+ else
+@@ -5963,7 +5950,7 @@
+     fi
+ 
+     echo $ac_n "checking whether the compiler supports -Wno-variadic-macros""... $ac_c" 1>&6
+-echo "configure:5967: checking whether the compiler supports -Wno-variadic-macros" >&5
++echo "configure:5954: checking whether the compiler supports -Wno-variadic-macros" >&5
+ if eval "test \"`echo '$''{'ac_has_wno_variadic_macros'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -5979,14 +5966,14 @@
+             _SAVE_CXXFLAGS="$CXXFLAGS"
+             CXXFLAGS="$CXXFLAGS ${_COMPILER_PREFIX}-Wno-variadic-macros"
+             cat > conftest.$ac_ext <<EOF
+-#line 5983 "configure"
++#line 5970 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ return(0);
+ ; return 0; }
+ EOF
+-if { (eval echo configure:5990: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:5977: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_has_wno_variadic_macros="yes"
+ else
+@@ -6013,7 +6000,7 @@
+     fi
+ 
+     echo $ac_n "checking whether the compiler supports -Werror=return-type""... $ac_c" 1>&6
+-echo "configure:6017: checking whether the compiler supports -Werror=return-type" >&5
++echo "configure:6004: checking whether the compiler supports -Werror=return-type" >&5
+ if eval "test \"`echo '$''{'ac_has_werror_return_type'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -6029,14 +6016,14 @@
+             _SAVE_CXXFLAGS="$CXXFLAGS"
+             CXXFLAGS="$CXXFLAGS -Werror=return-type"
+             cat > conftest.$ac_ext <<EOF
+-#line 6033 "configure"
++#line 6020 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ return(0);
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6040: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6027: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_has_werror_return_type="yes"
+ else
+@@ -6072,7 +6059,7 @@
+ if test "$COMPILE_ENVIRONMENT"; then
+ if test "$GNU_CC"; then
+   echo $ac_n "checking whether ld has archive extraction flags""... $ac_c" 1>&6
+-echo "configure:6076: checking whether ld has archive extraction flags" >&5
++echo "configure:6063: checking whether ld has archive extraction flags" >&5
+   if eval "test \"`echo '$''{'ac_cv_mkshlib_force_and_unforce'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -6089,14 +6076,14 @@
+       LDFLAGS=$force
+       LIBS=$unforce
+       cat > conftest.$ac_ext <<EOF
+-#line 6093 "configure"
++#line 6080 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6100: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:6087: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   ac_cv_mkshlib_force_and_unforce=$line; break
+ else
+@@ -6129,7 +6116,7 @@
+ '
+ 
+ echo $ac_n "checking that static assertion macros used in autoconf tests work""... $ac_c" 1>&6
+-echo "configure:6133: checking that static assertion macros used in autoconf tests work" >&5
++echo "configure:6120: checking that static assertion macros used in autoconf tests work" >&5
+ if eval "test \"`echo '$''{'ac_cv_static_assertion_macros_work'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -6143,14 +6130,14 @@
+ 
+   ac_cv_static_assertion_macros_work="yes"
+   cat > conftest.$ac_ext <<EOF
+-#line 6147 "configure"
++#line 6134 "configure"
+ #include "confdefs.h"
+ $configure_static_assert_macros
+ int main() {
+ CONFIGURE_STATIC_ASSERT(1)
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6154: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6141: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   :
+ else
+   echo "configure: failed program was:" >&5
+@@ -6160,14 +6147,14 @@
+ fi
+ rm -f conftest*
+   cat > conftest.$ac_ext <<EOF
+-#line 6164 "configure"
++#line 6151 "configure"
+ #include "confdefs.h"
+ $configure_static_assert_macros
+ int main() {
+ CONFIGURE_STATIC_ASSERT(0)
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6171: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6158: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_static_assertion_macros_work="no"
+ else
+@@ -6183,14 +6170,14 @@
+ cross_compiling=$ac_cv_prog_cxx_cross
+ 
+   cat > conftest.$ac_ext <<EOF
+-#line 6187 "configure"
++#line 6174 "configure"
+ #include "confdefs.h"
+ $configure_static_assert_macros
+ int main() {
+ CONFIGURE_STATIC_ASSERT(1)
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6194: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6181: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   :
+ else
+   echo "configure: failed program was:" >&5
+@@ -6200,14 +6187,14 @@
+ fi
+ rm -f conftest*
+   cat > conftest.$ac_ext <<EOF
+-#line 6204 "configure"
++#line 6191 "configure"
+ #include "confdefs.h"
+ $configure_static_assert_macros
+ int main() {
+ CONFIGURE_STATIC_ASSERT(0)
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6211: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6198: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_static_assertion_macros_work="no"
+ else
+@@ -6241,16 +6228,16 @@
+ cross_compiling=$ac_cv_prog_cc_cross
+ 
+ echo $ac_n "checking for 64-bit OS""... $ac_c" 1>&6
+-echo "configure:6245: checking for 64-bit OS" >&5
++echo "configure:6232: checking for 64-bit OS" >&5
+ cat > conftest.$ac_ext <<EOF
+-#line 6247 "configure"
++#line 6234 "configure"
+ #include "confdefs.h"
+ $configure_static_assert_macros
+ int main() {
+ CONFIGURE_STATIC_ASSERT(sizeof(void*) == 8)
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6254: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6241: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   result="yes"
+ else
+@@ -6389,7 +6376,7 @@
+ esac
+ 
+ echo $ac_n "checking for Python version >= $PYTHON_VERSION but not 3.x""... $ac_c" 1>&6
+-echo "configure:6393: checking for Python version >= $PYTHON_VERSION but not 3.x" >&5
++echo "configure:6380: checking for Python version >= $PYTHON_VERSION but not 3.x" >&5
+ 
+ $PYTHON -c "import sys; sys.exit(sys.version[:3] < sys.argv[1] or sys.version[:2] != '2.')" $PYTHON_VERSION
+ _python_res=$?
+@@ -6437,9 +6424,9 @@
+ cross_compiling=$ac_cv_prog_cxx_cross
+ 
+             echo $ac_n "checking for IBM XLC/C++ compiler version >= 9.0.0.7""... $ac_c" 1>&6
+-echo "configure:6441: checking for IBM XLC/C++ compiler version >= 9.0.0.7" >&5
++echo "configure:6428: checking for IBM XLC/C++ compiler version >= 9.0.0.7" >&5
+             cat > conftest.$ac_ext <<EOF
+-#line 6443 "configure"
++#line 6430 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -6448,7 +6435,7 @@
+                  #endif
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6452: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:6439: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   _BAD_COMPILER=
+ else
+@@ -6486,17 +6473,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:6490: checking for $ac_hdr" >&5
++echo "configure:6477: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 6495 "configure"
++#line 6482 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:6500: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:6487: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -6542,7 +6529,7 @@
+     LIBS="$LIBS -lbe"
+     if test "$COMPILE_ENVIRONMENT"; then
+         echo $ac_n "checking for main in -lbind""... $ac_c" 1>&6
+-echo "configure:6546: checking for main in -lbind" >&5
++echo "configure:6533: checking for main in -lbind" >&5
+ ac_lib_var=`echo bind'_'main | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -6550,14 +6537,14 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lbind  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 6554 "configure"
++#line 6541 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ main()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6561: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:6548: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -6578,7 +6565,7 @@
+ fi
+ 
+         echo $ac_n "checking for main in -lzeta""... $ac_c" 1>&6
+-echo "configure:6582: checking for main in -lzeta" >&5
++echo "configure:6569: checking for main in -lzeta" >&5
+ ac_lib_var=`echo zeta'_'main | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -6586,14 +6573,14 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lzeta  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 6590 "configure"
++#line 6577 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ main()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6597: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:6584: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -6667,18 +6654,18 @@
+         echo "Skipping -dead_strip because DTrace is enabled. See bug 403132."
+     else
+                 echo $ac_n "checking for -dead_strip option to ld""... $ac_c" 1>&6
+-echo "configure:6671: checking for -dead_strip option to ld" >&5
++echo "configure:6658: checking for -dead_strip option to ld" >&5
+         _SAVE_LDFLAGS=$LDFLAGS
+         LDFLAGS="$LDFLAGS -Wl,-dead_strip"
+         cat > conftest.$ac_ext <<EOF
+-#line 6675 "configure"
++#line 6662 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ return 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:6682: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:6669: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   _HAVE_DEAD_STRIP=1
+ else
+@@ -7250,17 +7237,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:7254: checking for $ac_hdr" >&5
++echo "configure:7241: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 7259 "configure"
++#line 7246 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:7264: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:7251: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -7492,19 +7479,19 @@
+     _DEFINES_CXXFLAGS="$_DEFINES_CXXFLAGS -Uunix -U__unix -U__unix__"
+ 
+     echo $ac_n "checking for __declspec(dllexport)""... $ac_c" 1>&6
+-echo "configure:7496: checking for __declspec(dllexport)" >&5
++echo "configure:7483: checking for __declspec(dllexport)" >&5
+ if eval "test \"`echo '$''{'ac_os2_declspec'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 7501 "configure"
++#line 7488 "configure"
+ #include "confdefs.h"
+ __declspec(dllexport) void ac_os2_declspec(void) {}
+ int main() {
+ return 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:7508: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:7495: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_os2_declspec="yes"
+ else
+@@ -7625,14 +7612,14 @@
+            _SAVE_LDFLAGS=$LDFLAGS
+            LDFLAGS="-M /usr/lib/ld/map.noexstk $LDFLAGS" 
+            cat > conftest.$ac_ext <<EOF
+-#line 7629 "configure"
++#line 7616 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+ int main() {
+ printf("Hello World\n");
+ ; return 0; }
+ EOF
+-if { (eval echo configure:7636: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:7623: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   :
+ else
+   echo "configure: failed program was:" >&5
+@@ -7661,7 +7648,7 @@
+        CC_VERSION=`$CC -V 2>&1 | grep '^cc:' 2>/dev/null | $AWK -F\: '{ print $2 }'`
+        CXX_VERSION=`$CXX -V 2>&1 | grep '^CC:' 2>/dev/null | $AWK -F\: '{ print $2 }'`
+        echo $ac_n "checking for Sun C++ compiler version >= 5.9""... $ac_c" 1>&6
+-echo "configure:7665: checking for Sun C++ compiler version >= 5.9" >&5
++echo "configure:7652: checking for Sun C++ compiler version >= 5.9" >&5
+        
+        ac_ext=C
+ # CXXFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
+@@ -7671,7 +7658,7 @@
+ cross_compiling=$ac_cv_prog_cxx_cross
+ 
+        cat > conftest.$ac_ext <<EOF
+-#line 7675 "configure"
++#line 7662 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -7680,7 +7667,7 @@
+            #endif
+ ; return 0; }
+ EOF
+-if { (eval echo configure:7684: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:7671: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   _BAD_COMPILER=
+ else
+@@ -7697,7 +7684,7 @@
+            _res="yes"
+        fi
+        cat > conftest.$ac_ext <<EOF
+-#line 7701 "configure"
++#line 7688 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -7706,7 +7693,7 @@
+            #endif
+ ; return 0; }
+ EOF
+-if { (eval echo configure:7710: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:7697: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   _ABOVE_SS12U1=
+ else
+@@ -8122,12 +8109,12 @@
+ 
+ if test -z "$SKIP_COMPILER_CHECKS"; then
+ echo $ac_n "checking for ANSI C header files""... $ac_c" 1>&6
+-echo "configure:8126: checking for ANSI C header files" >&5
++echo "configure:8113: checking for ANSI C header files" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_stdc'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8131 "configure"
++#line 8118 "configure"
+ #include "confdefs.h"
+ #include <stdlib.h>
+ #include <stdarg.h>
+@@ -8135,7 +8122,7 @@
+ #include <float.h>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:8139: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:8126: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -8152,7 +8139,7 @@
+ if test $ac_cv_header_stdc = yes; then
+   # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat > conftest.$ac_ext <<EOF
+-#line 8156 "configure"
++#line 8143 "configure"
+ #include "confdefs.h"
+ #include <string.h>
+ EOF
+@@ -8170,7 +8157,7 @@
+ if test $ac_cv_header_stdc = yes; then
+   # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat > conftest.$ac_ext <<EOF
+-#line 8174 "configure"
++#line 8161 "configure"
+ #include "confdefs.h"
+ #include <stdlib.h>
+ EOF
+@@ -8191,7 +8178,7 @@
+   :
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8195 "configure"
++#line 8182 "configure"
+ #include "confdefs.h"
+ #include <ctype.h>
+ #define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+@@ -8202,7 +8189,7 @@
+ exit (0); }
+ 
+ EOF
+-if { (eval echo configure:8206: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
++if { (eval echo configure:8193: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+ then
+   :
+ else
+@@ -8226,12 +8213,12 @@
+ fi
+ 
+ echo $ac_n "checking for working const""... $ac_c" 1>&6
+-echo "configure:8230: checking for working const" >&5
++echo "configure:8217: checking for working const" >&5
+ if eval "test \"`echo '$''{'ac_cv_c_const'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8235 "configure"
++#line 8222 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8280,7 +8267,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8284: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8271: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_c_const=yes
+ else
+@@ -8301,12 +8288,12 @@
+ fi
+ 
+ echo $ac_n "checking for mode_t""... $ac_c" 1>&6
+-echo "configure:8305: checking for mode_t" >&5
++echo "configure:8292: checking for mode_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_type_mode_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8310 "configure"
++#line 8297 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #if STDC_HEADERS
+@@ -8334,12 +8321,12 @@
+ fi
+ 
+ echo $ac_n "checking for off_t""... $ac_c" 1>&6
+-echo "configure:8338: checking for off_t" >&5
++echo "configure:8325: checking for off_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_type_off_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8343 "configure"
++#line 8330 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #if STDC_HEADERS
+@@ -8367,12 +8354,12 @@
+ fi
+ 
+ echo $ac_n "checking for pid_t""... $ac_c" 1>&6
+-echo "configure:8371: checking for pid_t" >&5
++echo "configure:8358: checking for pid_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_type_pid_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8376 "configure"
++#line 8363 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #if STDC_HEADERS
+@@ -8400,12 +8387,12 @@
+ fi
+ 
+ echo $ac_n "checking for size_t""... $ac_c" 1>&6
+-echo "configure:8404: checking for size_t" >&5
++echo "configure:8391: checking for size_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_type_size_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8409 "configure"
++#line 8396 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #if STDC_HEADERS
+@@ -8440,12 +8427,12 @@
+ cross_compiling=$ac_cv_prog_cxx_cross
+ 
+ echo $ac_n "checking for __stdcall""... $ac_c" 1>&6
+-echo "configure:8444: checking for __stdcall" >&5
++echo "configure:8431: checking for __stdcall" >&5
+ if eval "test \"`echo '$''{'ac_cv___stdcall'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8449 "configure"
++#line 8436 "configure"
+ #include "confdefs.h"
+ template <typename Method> struct foo;
+                   template <> struct foo<void (*)()> {};
+@@ -8454,7 +8441,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8458: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8445: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv___stdcall=true
+ else
+@@ -8483,12 +8470,12 @@
+ cross_compiling=$ac_cv_prog_cc_cross
+ 
+ echo $ac_n "checking for ssize_t""... $ac_c" 1>&6
+-echo "configure:8487: checking for ssize_t" >&5
++echo "configure:8474: checking for ssize_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_type_ssize_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8492 "configure"
++#line 8479 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -8496,7 +8483,7 @@
+ ssize_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8500: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8487: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_type_ssize_t=true
+ else
+@@ -8518,12 +8505,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for st_blksize in struct stat""... $ac_c" 1>&6
+-echo "configure:8522: checking for st_blksize in struct stat" >&5
++echo "configure:8509: checking for st_blksize in struct stat" >&5
+ if eval "test \"`echo '$''{'ac_cv_struct_st_blksize'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8527 "configure"
++#line 8514 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #include <sys/stat.h>
+@@ -8531,7 +8518,7 @@
+ struct stat s; s.st_blksize;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8535: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8522: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_struct_st_blksize=yes
+ else
+@@ -8552,12 +8539,12 @@
+ fi
+ 
+ echo $ac_n "checking for siginfo_t""... $ac_c" 1>&6
+-echo "configure:8556: checking for siginfo_t" >&5
++echo "configure:8543: checking for siginfo_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_siginfo_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8561 "configure"
++#line 8548 "configure"
+ #include "confdefs.h"
+ #define _POSIX_C_SOURCE 199506L
+                   #include <signal.h>
+@@ -8565,7 +8552,7 @@
+ siginfo_t* info;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8569: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8556: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_siginfo_t=true
+ else
+@@ -8590,17 +8577,17 @@
+ 
+ ac_safe=`echo "stdint.h" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for stdint.h""... $ac_c" 1>&6
+-echo "configure:8594: checking for stdint.h" >&5
++echo "configure:8581: checking for stdint.h" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8599 "configure"
++#line 8586 "configure"
+ #include "confdefs.h"
+ #include <stdint.h>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:8604: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:8591: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -8629,7 +8616,7 @@
+ else
+                     
+ echo $ac_n "checking for a 1-byte type""... $ac_c" 1>&6
+-echo "configure:8633: checking for a 1-byte type" >&5
++echo "configure:8620: checking for a 1-byte type" >&5
+ if eval "test \"`echo '$''{'moz_cv_n_byte_type_JS_INT8_TYPE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8637,7 +8624,7 @@
+   moz_cv_n_byte_type_JS_INT8_TYPE=
+   for type in char; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8641 "configure"
++#line 8628 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8647,7 +8634,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8651: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8638: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_n_byte_type_JS_INT8_TYPE=$type; break
+ else
+@@ -8670,7 +8657,7 @@
+ 
+     
+ echo $ac_n "checking for a 2-byte type""... $ac_c" 1>&6
+-echo "configure:8674: checking for a 2-byte type" >&5
++echo "configure:8661: checking for a 2-byte type" >&5
+ if eval "test \"`echo '$''{'moz_cv_n_byte_type_JS_INT16_TYPE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8678,7 +8665,7 @@
+   moz_cv_n_byte_type_JS_INT16_TYPE=
+   for type in short int long; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8682 "configure"
++#line 8669 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8688,7 +8675,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8692: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8679: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_n_byte_type_JS_INT16_TYPE=$type; break
+ else
+@@ -8711,7 +8698,7 @@
+ 
+     
+ echo $ac_n "checking for a 4-byte type""... $ac_c" 1>&6
+-echo "configure:8715: checking for a 4-byte type" >&5
++echo "configure:8702: checking for a 4-byte type" >&5
+ if eval "test \"`echo '$''{'moz_cv_n_byte_type_JS_INT32_TYPE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8719,7 +8706,7 @@
+   moz_cv_n_byte_type_JS_INT32_TYPE=
+   for type in int long 'long long' short; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8723 "configure"
++#line 8710 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8729,7 +8716,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8733: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8720: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_n_byte_type_JS_INT32_TYPE=$type; break
+ else
+@@ -8752,7 +8739,7 @@
+ 
+     
+ echo $ac_n "checking for a 8-byte type""... $ac_c" 1>&6
+-echo "configure:8756: checking for a 8-byte type" >&5
++echo "configure:8743: checking for a 8-byte type" >&5
+ if eval "test \"`echo '$''{'moz_cv_n_byte_type_JS_INT64_TYPE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8760,7 +8747,7 @@
+   moz_cv_n_byte_type_JS_INT64_TYPE=
+   for type in int long 'long long'; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8764 "configure"
++#line 8751 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8770,7 +8757,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8774: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8761: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_n_byte_type_JS_INT64_TYPE=$type; break
+ else
+@@ -8793,7 +8780,7 @@
+ 
+     
+ echo $ac_n "checking for a sizeof (void *)-byte type""... $ac_c" 1>&6
+-echo "configure:8797: checking for a sizeof (void *)-byte type" >&5
++echo "configure:8784: checking for a sizeof (void *)-byte type" >&5
+ if eval "test \"`echo '$''{'moz_cv_n_byte_type_JS_INTPTR_TYPE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8801,7 +8788,7 @@
+   moz_cv_n_byte_type_JS_INTPTR_TYPE=
+   for type in int long 'long long' short; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8805 "configure"
++#line 8792 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8811,7 +8798,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8815: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8802: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_n_byte_type_JS_INTPTR_TYPE=$type; break
+ else
+@@ -8836,7 +8823,7 @@
+ 
+ 
+ echo $ac_n "checking for the size of void*""... $ac_c" 1>&6
+-echo "configure:8840: checking for the size of void*" >&5
++echo "configure:8827: checking for the size of void*" >&5
+ if eval "test \"`echo '$''{'moz_cv_size_of_JS_BYTES_PER_WORD'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8844,7 +8831,7 @@
+   moz_cv_size_of_JS_BYTES_PER_WORD=
+   for size in 4 8; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8848 "configure"
++#line 8835 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8854,7 +8841,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8858: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8845: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_size_of_JS_BYTES_PER_WORD=$size; break
+ else
+@@ -8891,7 +8878,7 @@
+ 
+ 
+ echo $ac_n "checking for the alignment of void*""... $ac_c" 1>&6
+-echo "configure:8895: checking for the alignment of void*" >&5
++echo "configure:8882: checking for the alignment of void*" >&5
+ if eval "test \"`echo '$''{'moz_cv_align_of_JS_ALIGN_OF_POINTER'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8899,7 +8886,7 @@
+   moz_cv_align_of_JS_ALIGN_OF_POINTER=
+   for align in 2 4 8 16; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8903 "configure"
++#line 8890 "configure"
+ #include "confdefs.h"
+ 
+                      #include <stddef.h>
+@@ -8912,7 +8899,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8916: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8903: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_align_of_JS_ALIGN_OF_POINTER=$align; break
+ else
+@@ -8935,7 +8922,7 @@
+ 
+ 
+ echo $ac_n "checking for the size of double""... $ac_c" 1>&6
+-echo "configure:8939: checking for the size of double" >&5
++echo "configure:8926: checking for the size of double" >&5
+ if eval "test \"`echo '$''{'moz_cv_size_of_JS_BYTES_PER_DOUBLE'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -8943,7 +8930,7 @@
+   moz_cv_size_of_JS_BYTES_PER_DOUBLE=
+   for size in 6 8 10 12 14; do
+     cat > conftest.$ac_ext <<EOF
+-#line 8947 "configure"
++#line 8934 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+@@ -8953,7 +8940,7 @@
+                    
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8957: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8944: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   moz_cv_size_of_JS_BYTES_PER_DOUBLE=$size; break
+ else
+@@ -8976,12 +8963,12 @@
+ 
+ 
+ echo $ac_n "checking for int16_t""... $ac_c" 1>&6
+-echo "configure:8980: checking for int16_t" >&5
++echo "configure:8967: checking for int16_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_int16_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 8985 "configure"
++#line 8972 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -8989,7 +8976,7 @@
+ int16_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:8993: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:8980: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_int16_t=true
+ else
+@@ -9011,12 +8998,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for int32_t""... $ac_c" 1>&6
+-echo "configure:9015: checking for int32_t" >&5
++echo "configure:9002: checking for int32_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_int32_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9020 "configure"
++#line 9007 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9024,7 +9011,7 @@
+ int32_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9028: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9015: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_int32_t=true
+ else
+@@ -9046,12 +9033,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for int64_t""... $ac_c" 1>&6
+-echo "configure:9050: checking for int64_t" >&5
++echo "configure:9037: checking for int64_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_int64_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9055 "configure"
++#line 9042 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9059,7 +9046,7 @@
+ int64_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9063: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9050: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_int64_t=true
+ else
+@@ -9081,12 +9068,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for int64""... $ac_c" 1>&6
+-echo "configure:9085: checking for int64" >&5
++echo "configure:9072: checking for int64" >&5
+ if eval "test \"`echo '$''{'ac_cv_int64'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9090 "configure"
++#line 9077 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9094,7 +9081,7 @@
+ int64 foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9098: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9085: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_int64=true
+ else
+@@ -9116,12 +9103,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for uint""... $ac_c" 1>&6
+-echo "configure:9120: checking for uint" >&5
++echo "configure:9107: checking for uint" >&5
+ if eval "test \"`echo '$''{'ac_cv_uint'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9125 "configure"
++#line 9112 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9129,7 +9116,7 @@
+ uint foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9133: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9120: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_uint=true
+ else
+@@ -9151,12 +9138,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for uint_t""... $ac_c" 1>&6
+-echo "configure:9155: checking for uint_t" >&5
++echo "configure:9142: checking for uint_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_uint_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9160 "configure"
++#line 9147 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9164,7 +9151,7 @@
+ uint_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9168: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9155: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_uint_t=true
+ else
+@@ -9186,12 +9173,12 @@
+   echo "$ac_t""no" 1>&6
+ fi
+ echo $ac_n "checking for uint16_t""... $ac_c" 1>&6
+-echo "configure:9190: checking for uint16_t" >&5
++echo "configure:9177: checking for uint16_t" >&5
+ if eval "test \"`echo '$''{'ac_cv_uint16_t'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9195 "configure"
++#line 9182 "configure"
+ #include "confdefs.h"
+ #include <stdio.h>
+                   #include <sys/types.h>
+@@ -9199,7 +9186,7 @@
+ uint16_t foo = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9203: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9190: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_uint16_t=true
+ else
+@@ -9230,12 +9217,12 @@
+ 
+ 
+ echo $ac_n "checking for uname.domainname""... $ac_c" 1>&6
+-echo "configure:9234: checking for uname.domainname" >&5
++echo "configure:9221: checking for uname.domainname" >&5
+ if eval "test \"`echo '$''{'ac_cv_have_uname_domainname_field'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9239 "configure"
++#line 9226 "configure"
+ #include "confdefs.h"
+ #include <sys/utsname.h>
+ int main() {
+@@ -9243,7 +9230,7 @@
+             (void)uname(res);  if (res != 0) { domain = res->domainname; } 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9247: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9234: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_have_uname_domainname_field=true
+ else
+@@ -9267,12 +9254,12 @@
+ fi
+ 
+ echo $ac_n "checking for uname.__domainname""... $ac_c" 1>&6
+-echo "configure:9271: checking for uname.__domainname" >&5
++echo "configure:9258: checking for uname.__domainname" >&5
+ if eval "test \"`echo '$''{'ac_cv_have_uname_us_domainname_field'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9276 "configure"
++#line 9263 "configure"
+ #include "confdefs.h"
+ #include <sys/utsname.h>
+ int main() {
+@@ -9280,7 +9267,7 @@
+             (void)uname(res);  if (res != 0) { domain = res->__domainname; } 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9284: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9271: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_have_uname_us_domainname_field=true
+ else
+@@ -9313,7 +9300,7 @@
+ 
+ if test "$GNU_CC"; then
+   echo $ac_n "checking for visibility(hidden) attribute""... $ac_c" 1>&6
+-echo "configure:9317: checking for visibility(hidden) attribute" >&5
++echo "configure:9304: checking for visibility(hidden) attribute" >&5
+ if eval "test \"`echo '$''{'ac_cv_visibility_hidden'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -9338,7 +9325,7 @@
+ 
+ 
+     echo $ac_n "checking for visibility(default) attribute""... $ac_c" 1>&6
+-echo "configure:9342: checking for visibility(default) attribute" >&5
++echo "configure:9329: checking for visibility(default) attribute" >&5
+ if eval "test \"`echo '$''{'ac_cv_visibility_default'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -9363,7 +9350,7 @@
+ 
+ 
+       echo $ac_n "checking for visibility pragma support""... $ac_c" 1>&6
+-echo "configure:9367: checking for visibility pragma support" >&5
++echo "configure:9354: checking for visibility pragma support" >&5
+ if eval "test \"`echo '$''{'ac_cv_visibility_pragma'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -9388,7 +9375,7 @@
+ echo "$ac_t""$ac_cv_visibility_pragma" 1>&6
+       if test "$ac_cv_visibility_pragma" = "yes"; then
+         echo $ac_n "checking For gcc visibility bug with class-level attributes (GCC bug 26905)""... $ac_c" 1>&6
+-echo "configure:9392: checking For gcc visibility bug with class-level attributes (GCC bug 26905)" >&5
++echo "configure:9379: checking For gcc visibility bug with class-level attributes (GCC bug 26905)" >&5
+ if eval "test \"`echo '$''{'ac_cv_have_visibility_class_bug'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -9416,7 +9403,7 @@
+ echo "$ac_t""$ac_cv_have_visibility_class_bug" 1>&6
+ 
+         echo $ac_n "checking For x86_64 gcc visibility bug with builtins (GCC bug 20297)""... $ac_c" 1>&6
+-echo "configure:9420: checking For x86_64 gcc visibility bug with builtins (GCC bug 20297)" >&5
++echo "configure:9407: checking For x86_64 gcc visibility bug with builtins (GCC bug 20297)" >&5
+ if eval "test \"`echo '$''{'ac_cv_have_visibility_builtin_bug'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -9470,19 +9457,19 @@
+   CFLAGS_save="${CFLAGS}"
+   CFLAGS="${CFLAGS} -Werror"
+   echo $ac_n "checking for __force_align_arg_pointer__ attribute""... $ac_c" 1>&6
+-echo "configure:9474: checking for __force_align_arg_pointer__ attribute" >&5
++echo "configure:9461: checking for __force_align_arg_pointer__ attribute" >&5
+ if eval "test \"`echo '$''{'ac_cv_force_align_arg_pointer'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9479 "configure"
++#line 9466 "configure"
+ #include "confdefs.h"
+ __attribute__ ((__force_align_arg_pointer__)) void test() {}
+ int main() {
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9486: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9473: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   ac_cv_force_align_arg_pointer="yes"
+ else
+@@ -9509,12 +9496,12 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr that defines DIR""... $ac_c" 1>&6
+-echo "configure:9513: checking for $ac_hdr that defines DIR" >&5
++echo "configure:9500: checking for $ac_hdr that defines DIR" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_dirent_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9518 "configure"
++#line 9505 "configure"
+ #include "confdefs.h"
+ #include <sys/types.h>
+ #include <$ac_hdr>
+@@ -9522,7 +9509,7 @@
+ DIR *dirp = 0;
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9526: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:9513: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   eval "ac_cv_header_dirent_$ac_safe=yes"
+ else
+@@ -9547,7 +9534,7 @@
+ # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix.
+ if test $ac_header_dirent = dirent.h; then
+ echo $ac_n "checking for opendir in -ldir""... $ac_c" 1>&6
+-echo "configure:9551: checking for opendir in -ldir" >&5
++echo "configure:9538: checking for opendir in -ldir" >&5
+ ac_lib_var=`echo dir'_'opendir | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -9555,7 +9542,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-ldir  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 9559 "configure"
++#line 9546 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -9566,7 +9553,7 @@
+ opendir()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9570: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:9557: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -9588,7 +9575,7 @@
+ 
+ else
+ echo $ac_n "checking for opendir in -lx""... $ac_c" 1>&6
+-echo "configure:9592: checking for opendir in -lx" >&5
++echo "configure:9579: checking for opendir in -lx" >&5
+ ac_lib_var=`echo x'_'opendir | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -9596,7 +9583,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lx  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 9600 "configure"
++#line 9587 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -9607,7 +9594,7 @@
+ opendir()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:9611: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:9598: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -9639,17 +9626,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9643: checking for $ac_hdr" >&5
++echo "configure:9630: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9648 "configure"
++#line 9635 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9653: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9640: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9679,17 +9666,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9683: checking for $ac_hdr" >&5
++echo "configure:9670: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9688 "configure"
++#line 9675 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9693: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9680: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9719,17 +9706,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9723: checking for $ac_hdr" >&5
++echo "configure:9710: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9728 "configure"
++#line 9715 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9733: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9720: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9759,17 +9746,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9763: checking for $ac_hdr" >&5
++echo "configure:9750: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9768 "configure"
++#line 9755 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9773: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9760: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9799,17 +9786,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9803: checking for $ac_hdr" >&5
++echo "configure:9790: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9808 "configure"
++#line 9795 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9813: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9800: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9839,17 +9826,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9843: checking for $ac_hdr" >&5
++echo "configure:9830: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9848 "configure"
++#line 9835 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9853: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9840: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9880,17 +9867,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9884: checking for $ac_hdr" >&5
++echo "configure:9871: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9889 "configure"
++#line 9876 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9894: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9881: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9921,17 +9908,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9925: checking for $ac_hdr" >&5
++echo "configure:9912: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9930 "configure"
++#line 9917 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9935: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9922: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -9961,17 +9948,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:9965: checking for $ac_hdr" >&5
++echo "configure:9952: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 9970 "configure"
++#line 9957 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:9975: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:9962: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10002,17 +9989,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:10006: checking for $ac_hdr" >&5
++echo "configure:9993: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10011 "configure"
++#line 9998 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10016: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10003: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10049,17 +10036,17 @@
+ NEW_H=new.h
+ ac_safe=`echo "new" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for new""... $ac_c" 1>&6
+-echo "configure:10053: checking for new" >&5
++echo "configure:10040: checking for new" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10058 "configure"
++#line 10045 "configure"
+ #include "confdefs.h"
+ #include <new>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10063: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10050: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10101,17 +10088,17 @@
+ if test "x$enable_dtrace" = "xyes"; then
+   ac_safe=`echo "sys/sdt.h" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for sys/sdt.h""... $ac_c" 1>&6
+-echo "configure:10105: checking for sys/sdt.h" >&5
++echo "configure:10092: checking for sys/sdt.h" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10110 "configure"
++#line 10097 "configure"
+ #include "confdefs.h"
+ #include <sys/sdt.h>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10115: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10102: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10151,17 +10138,17 @@
+ do
+ ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
+-echo "configure:10155: checking for $ac_hdr" >&5
++echo "configure:10142: checking for $ac_hdr" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10160 "configure"
++#line 10147 "configure"
+ #include "confdefs.h"
+ #include <$ac_hdr>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10165: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10152: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10192,17 +10179,17 @@
+ 
+ ac_safe=`echo "linux/perf_event.h" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for linux/perf_event.h""... $ac_c" 1>&6
+-echo "configure:10196: checking for linux/perf_event.h" >&5
++echo "configure:10183: checking for linux/perf_event.h" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10201 "configure"
++#line 10188 "configure"
+ #include "confdefs.h"
+ #include <linux/perf_event.h>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10206: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10193: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10230,7 +10217,7 @@
+ 	;;
+ *)
+ 	echo $ac_n "checking for gethostbyname_r in -lc_r""... $ac_c" 1>&6
+-echo "configure:10234: checking for gethostbyname_r in -lc_r" >&5
++echo "configure:10221: checking for gethostbyname_r in -lc_r" >&5
+ ac_lib_var=`echo c_r'_'gethostbyname_r | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -10238,7 +10225,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lc_r  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 10242 "configure"
++#line 10229 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10249,7 +10236,7 @@
+ gethostbyname_r()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10253: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10240: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -10289,14 +10276,14 @@
+ *)
+     
+ echo $ac_n "checking for library containing dlopen""... $ac_c" 1>&6
+-echo "configure:10293: checking for library containing dlopen" >&5
++echo "configure:10280: checking for library containing dlopen" >&5
+ if eval "test \"`echo '$''{'ac_cv_search_dlopen'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   ac_func_search_save_LIBS="$LIBS"
+ ac_cv_search_dlopen="no"
+ cat > conftest.$ac_ext <<EOF
+-#line 10300 "configure"
++#line 10287 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10307,7 +10294,7 @@
+ dlopen()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10311: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10298: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   ac_cv_search_dlopen="none required"
+ else
+@@ -10318,7 +10305,7 @@
+ test "$ac_cv_search_dlopen" = "no" && for i in dl; do
+ LIBS="-l$i  $ac_func_search_save_LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 10322 "configure"
++#line 10309 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10329,7 +10316,7 @@
+ dlopen()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10333: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10320: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   ac_cv_search_dlopen="-l$i"
+ break
+@@ -10347,17 +10334,17 @@
+   test "$ac_cv_search_dlopen" = "none required" || LIBS="$ac_cv_search_dlopen $LIBS"
+   ac_safe=`echo "dlfcn.h" | sed 'y%./+-%__p_%'`
+ echo $ac_n "checking for dlfcn.h""... $ac_c" 1>&6
+-echo "configure:10351: checking for dlfcn.h" >&5
++echo "configure:10338: checking for dlfcn.h" >&5
+ if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10356 "configure"
++#line 10343 "configure"
+ #include "confdefs.h"
+ #include <dlfcn.h>
+ EOF
+ ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
+-{ (eval echo configure:10361: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
++{ (eval echo configure:10348: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+ ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
+ if test -z "$ac_err"; then
+   rm -rf conftest*
+@@ -10392,12 +10379,12 @@
+ for ac_func in dladdr
+ do
+ echo $ac_n "checking for $ac_func""... $ac_c" 1>&6
+-echo "configure:10396: checking for $ac_func" >&5
++echo "configure:10383: checking for $ac_func" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10401 "configure"
++#line 10388 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char $ac_func(); below.  */
+@@ -10420,7 +10407,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10424: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10411: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_$ac_func=yes"
+ else
+@@ -10451,7 +10438,7 @@
+     case $target in
+     *-aix*)
+ 	echo $ac_n "checking for demangle in -lC_r""... $ac_c" 1>&6
+-echo "configure:10455: checking for demangle in -lC_r" >&5
++echo "configure:10442: checking for demangle in -lC_r" >&5
+ ac_lib_var=`echo C_r'_'demangle | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -10459,7 +10446,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lC_r  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 10463 "configure"
++#line 10450 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10470,7 +10457,7 @@
+ demangle()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10474: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10461: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -10500,7 +10487,7 @@
+ 	;;
+      *)
+ 	echo $ac_n "checking for demangle in -lC""... $ac_c" 1>&6
+-echo "configure:10504: checking for demangle in -lC" >&5
++echo "configure:10491: checking for demangle in -lC" >&5
+ ac_lib_var=`echo C'_'demangle | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -10508,7 +10495,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lC  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 10512 "configure"
++#line 10499 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10519,7 +10506,7 @@
+ demangle()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10523: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10510: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -10555,7 +10542,7 @@
+     ;;
+ *)
+     echo $ac_n "checking for socket in -lsocket""... $ac_c" 1>&6
+-echo "configure:10559: checking for socket in -lsocket" >&5
++echo "configure:10546: checking for socket in -lsocket" >&5
+ ac_lib_var=`echo socket'_'socket | sed 'y%./+-%__p_%'`
+ if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+@@ -10563,7 +10550,7 @@
+   ac_save_LIBS="$LIBS"
+ LIBS="-lsocket  $LIBS"
+ cat > conftest.$ac_ext <<EOF
+-#line 10567 "configure"
++#line 10554 "configure"
+ #include "confdefs.h"
+ /* Override any gcc2 internal prototype to avoid an error.  */
+ /* We use char because int might match the return type of a gcc2
+@@ -10574,7 +10561,7 @@
+ socket()
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10578: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:10565: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_lib_$ac_lib_var=yes"
+ else
+@@ -10604,16 +10591,16 @@
+ esac
+ 
+ echo $ac_n "checking for ARM SIMD support in compiler""... $ac_c" 1>&6
+-echo "configure:10608: checking for ARM SIMD support in compiler" >&5
++echo "configure:10595: checking for ARM SIMD support in compiler" >&5
+ cat > conftest.$ac_ext <<EOF
+-#line 10610 "configure"
++#line 10597 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ asm("uqadd8 r1, r1, r2");
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10617: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:10604: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   result="yes"
+ else
+@@ -10634,21 +10621,21 @@
+ 
+ 
+ echo $ac_n "checking for ARM NEON support in compiler""... $ac_c" 1>&6
+-echo "configure:10638: checking for ARM NEON support in compiler" >&5
++echo "configure:10625: checking for ARM NEON support in compiler" >&5
+ _SAVE_CFLAGS="$CFLAGS"
+ if test "$GNU_CC"; then
+   # gcc needs -mfpu=neon to recognize NEON instructions
+   CFLAGS="$CFLAGS -mfpu=neon -mfloat-abi=softfp"
+ fi
+ cat > conftest.$ac_ext <<EOF
+-#line 10645 "configure"
++#line 10632 "configure"
+ #include "confdefs.h"
+ 
+ int main() {
+ asm("vadd.i8 d0, d0, d0");
+ ; return 0; }
+ EOF
+-if { (eval echo configure:10652: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
++if { (eval echo configure:10639: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+   rm -rf conftest*
+   result="yes"
+ else
+@@ -10676,7 +10663,7 @@
+ *)
+     
+ echo $ac_n "checking for pthread_create in -lpthreads""... $ac_c" 1>&6
+-echo "configure:10680: checking for pthread_create in -lpthreads" >&5
++echo "configure:10667: checking for pthread_create in -lpthreads" >&5
+ echo "
+     #include <pthread.h>
+     #include <stdlib.h>
+@@ -10699,7 +10686,7 @@
+         echo "$ac_t""no" 1>&6
+         
+ echo $ac_n "checking for pthread_create in -lpthread""... $ac_c" 1>&6
+-echo "configure:10703: checking for pthread_create in -lpthread" >&5
++echo "configure:10690: checking for pthread_create in -lpthread" >&5
+ echo "
+     #include <pthread.h>
+     #include <stdlib.h>
+@@ -10722,7 +10709,7 @@
+         echo "$ac_t""no" 1>&6
+         
+ echo $ac_n "checking for pthread_create in -lc_r""... $ac_c" 1>&6
+-echo "configure:10726: checking for pthread_create in -lc_r" >&5
++echo "configure:10713: checking for pthread_create in -lc_r" >&5
+ echo "
+     #include <pthread.h>
+     #include <stdlib.h>
+@@ -10745,7 +10732,7 @@
+         echo "$ac_t""no" 1>&6
+         
+ echo $ac_n "checking for pthread_create in -lc""... $ac_c" 1>&6
+-echo "configure:10749: checking for pthread_create in -lc" >&5
++echo "configure:10736: checking for pthread_create in -lc" >&5
+ echo "
+     #include <pthread.h>
+     #include <stdlib.h>
+@@ -10804,7 +10791,7 @@
+ 				rm -f conftest*
+ 	ac_cv_have_dash_pthread=no
+ 	echo $ac_n "checking whether ${CC-cc} accepts -pthread""... $ac_c" 1>&6
+-echo "configure:10808: checking whether ${CC-cc} accepts -pthread" >&5
++echo "configure:10795: checking whether ${CC-cc} accepts -pthread" >&5
+ 	echo 'int main() { return 0; }' | cat > conftest.c
+ 	${CC-cc} -pthread -o conftest conftest.c > conftest.out 2>&1
+ 	if test $? -eq 0; then
+@@ -10827,7 +10814,7 @@
+ 			    ac_cv_have_dash_pthreads=no
+     if test "$ac_cv_have_dash_pthread" = "no"; then
+ 	    echo $ac_n "checking whether ${CC-cc} accepts -pthreads""... $ac_c" 1>&6
+-echo "configure:10831: checking whether ${CC-cc} accepts -pthreads" >&5
++echo "configure:10818: checking whether ${CC-cc} accepts -pthreads" >&5
+     	echo 'int main() { return 0; }' | cat > conftest.c
+ 	    ${CC-cc} -pthreads -o conftest conftest.c > conftest.out 2>&1
+     	if test $? -eq 0; then
+@@ -10922,7 +10909,7 @@
+ fi
+ 
+ echo $ac_n "checking whether mmap() sees write()s""... $ac_c" 1>&6
+-echo "configure:10926: checking whether mmap() sees write()s" >&5
++echo "configure:10913: checking whether mmap() sees write()s" >&5
+ 
+ 
+ mmap_test_prog='
+@@ -10961,11 +10948,11 @@
+   result="yes"
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 10965 "configure"
++#line 10952 "configure"
+ #include "confdefs.h"
+ $mmap_test_prog 
+ EOF
+-if { (eval echo configure:10969: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
++if { (eval echo configure:10956: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+ then
+   result="yes"
+ else
+@@ -10990,13 +10977,13 @@
+ 
+ if test $ac_cv_prog_gcc = yes; then
+     echo $ac_n "checking whether ${CC-cc} needs -traditional""... $ac_c" 1>&6
+-echo "configure:10994: checking whether ${CC-cc} needs -traditional" >&5
++echo "configure:10981: checking whether ${CC-cc} needs -traditional" >&5
+ if eval "test \"`echo '$''{'ac_cv_prog_gcc_traditional'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+     ac_pattern="Autoconf.*'x'"
+   cat > conftest.$ac_ext <<EOF
+-#line 11000 "configure"
++#line 10987 "configure"
+ #include "confdefs.h"
+ #include <sgtty.h>
+ Autoconf TIOCGETP
+@@ -11014,7 +11001,7 @@
+ 
+   if test $ac_cv_prog_gcc_traditional = no; then
+     cat > conftest.$ac_ext <<EOF
+-#line 11018 "configure"
++#line 11005 "configure"
+ #include "confdefs.h"
+ #include <termio.h>
+ Autoconf TCGETA
+@@ -11036,7 +11023,7 @@
+ fi
+ 
+ echo $ac_n "checking for 8-bit clean memcmp""... $ac_c" 1>&6
+-echo "configure:11040: checking for 8-bit clean memcmp" >&5
++echo "configure:11027: checking for 8-bit clean memcmp" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_memcmp_clean'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+@@ -11044,7 +11031,7 @@
+   ac_cv_func_memcmp_clean=no
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 11048 "configure"
++#line 11035 "configure"
+ #include "confdefs.h"
+ 
+ main()
+@@ -11054,7 +11041,7 @@
+ }
+ 
+ EOF
+-if { (eval echo configure:11058: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
++if { (eval echo configure:11045: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+ then
+   ac_cv_func_memcmp_clean=yes
+ else
+@@ -11076,12 +11063,12 @@
+                 stat64 statvfs statvfs64 strerror strtok_r truncate64
+ do
+ echo $ac_n "checking for $ac_func""... $ac_c" 1>&6
+-echo "configure:11080: checking for $ac_func" >&5
++echo "configure:11067: checking for $ac_func" >&5
+ if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then
+   echo $ac_n "(cached) $ac_c" 1>&6
+ else
+   cat > conftest.$ac_ext <<EOF
+-#line 11085 "configure"
++#line 11072 "configure"
+ #include "confdefs.h"
+ /* System header to define __stub macros and hopefully few prototypes,
+     which can conflict with char $ac_func(); below.  */
+@@ -11104,7 +11091,7 @@
+ 
+ ; return 0; }
+ EOF
+-if { (eval echo configure:11108: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
++if { (eval echo configure:11095: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+   rm -rf conftest*
+   eval "ac_cv_func_$ac_func=yes"
+ else
+@@ -11132,12 +11119,12 @@
+ _SAVE_LDFLAGS=$LDFLAGS
+ LDFLAGS="$LDFLAGS -lrt"
+ echo $ac_n "checking for clock_gettime(CLOCK_MONOTONIC) and -lrt""... $ac_c" 1>&6
+-echo "configure:11136: checking for clock_gettime(CLOCK_MONOTONIC)

<TRUNCATED>

[15/41] inital move to rebar compilation

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_db.hrl
----------------------------------------------------------------------
diff --git a/couch_db.hrl b/couch_db.hrl
deleted file mode 100644
index 6888f10..0000000
--- a/couch_db.hrl
+++ /dev/null
@@ -1,286 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(LOCAL_DOC_PREFIX, "_local/").
--define(DESIGN_DOC_PREFIX0, "_design").
--define(DESIGN_DOC_PREFIX, "_design/").
--define(DEFAULT_COMPRESSION, snappy).
-
--define(MIN_STR, <<"">>).
--define(MAX_STR, <<255>>). % an illegal UTF-8 string
-
-% the lowest possible database sequence number
--define(LOWEST_SEQ, 0).
-
--define(REWRITE_COUNT, couch_rewrite_count).
-
--define(JSON_ENCODE(V), ejson:encode(V)).
--define(JSON_DECODE(V), ejson:decode(V)).
-
--define(b2l(V), binary_to_list(V)).
--define(l2b(V), list_to_binary(V)).
--define(term_to_bin(T), term_to_binary(T, [{minor_version, 1}])).
--define(term_size(T),
-    try
-        erlang:external_size(T)
-    catch _:_ ->
-        byte_size(?term_to_bin(T))
-    end).
-
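
A note on the ?term_size/1 macro above: erlang:external_size/1 computes
(an upper bound on) the number of bytes term_to_binary/1 would produce,
without actually building the binary, so it is the cheap path; the catch
clause falls back to real serialization on older runtimes where that BIF
is missing. The same pattern as a standalone function; a sketch with
illustrative names, not CouchDB code:

    -module(term_size_demo).
    -export([term_size/1]).

    %% Cheap size estimate via erlang:external_size/1, falling back to
    %% serializing the term when the BIF is unavailable.
    term_size(Term) ->
        try
            erlang:external_size(Term)
        catch
            _:_ ->
                byte_size(term_to_binary(Term, [{minor_version, 1}]))
        end.
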
--define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-
--define(LOG_DEBUG(Format, Args),
-    case couch_log:debug_on(?MODULE) of
-        true ->
-            couch_log:debug(Format, Args);
-        false -> ok
-    end).
-
--define(LOG_INFO(Format, Args),
-    case couch_log:info_on(?MODULE) of
-        true ->
-            couch_log:info(Format, Args);
-        false -> ok
-    end).
-
--define(LOG_WARN(Format, Args),
-    case couch_log:warn_on(?MODULE) of
-        true ->
-            couch_log:warn(Format, Args);
-        false -> ok
-    end).
-
--define(LOG_ERROR(Format, Args), couch_log:error(Format, Args)).
-
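
The ?LOG_DEBUG, ?LOG_INFO and ?LOG_WARN macros above guard each call with
couch_log:debug_on(?MODULE) (and the info/warn equivalents), so when a
level is disabled for a module the call costs only the boolean check;
?LOG_ERROR is unconditional. Typical call sites, inside a module that
does -include("couch_db.hrl"), look like:

    ?LOG_DEBUG("opening ~s", [Filepath]),
    ?LOG_ERROR("update failed: ~p", [Reason])
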
-% Tree::term() is really a tree(), but we don't want to require R13B04 yet
--type branch() :: {Key::term(), Value::term(), Tree::term()}.
--type path() :: {Start::pos_integer(), branch()}.
--type tree() :: [branch()]. % sorted by key
-
--record(rev_info,
-    {
-    rev,
-    seq = 0,
-    deleted = false,
-    body_sp = nil % stream pointer
-    }).
-
--record(doc_info,
-    {
-    id = <<"">>,
-    high_seq = 0,
-    revs = [] % rev_info
-    }).
-
--record(full_doc_info,
-    {id = <<"">>,
-    update_seq = 0,
-    deleted = false,
-    rev_tree = [],
-    leafs_size = 0
-    }).
-
--record(httpd,
-    {mochi_req,
-    peer,
-    method,
-    requested_path_parts,
-    path_parts,
-    db_url_handlers,
-    user_ctx,
-    req_body = undefined,
-    design_url_handlers,
-    auth,
-    default_fun,
-    url_handlers
-    }).
-
-
--record(doc,
-    {
-    id = <<"">>,
-    revs = {0, []},
-
-    % the json body object.
-    body = {[]},
-
-    atts = [], % attachments
-
-    deleted = false,
-
-    % key/value tuple of meta information, provided when using special options:
-    % couch_db:open_doc(Db, Id, Options).
-    meta = []
-    }).
-
-
--record(att,
-    {
-    name,
-    type,
-    att_len,
-    disk_len, % length of the attachment in its identity form
-              % (that is, without a content encoding applied to it)
-              % differs from att_len when encoding /= identity
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity % currently supported values are:
-                      %     identity, gzip
-                      % additional values to support in the future:
-                      %     deflate, compress
-    }).
-
-
--record(user_ctx,
-    {
-    name=null,
-    roles=[],
-    handler
-    }).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long as the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, there is no need to increment
-% the disk revision number.
-%
-% If the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 6).
-
--record(db_header,
-    {disk_version = ?LATEST_DISK_VERSION,
-     update_seq = 0,
-     unused = 0,
-     fulldocinfo_by_id_btree_state = nil,
-     docinfo_by_seq_btree_state = nil,
-     local_docs_btree_state = nil,
-     purge_seq = 0,
-     purged_docs = nil,
-     security_ptr = nil,
-     revs_limit = 1000
-    }).
-
--record(db,
-    {main_pid = nil,
-    update_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microseconds since Jan 1 1970, as a binary string
-    fd,
-    updater_fd,
-    fd_ref_counter,
-    header = #db_header{},
-    committed_update_seq,
-    fulldocinfo_by_id_btree,
-    docinfo_by_seq_btree,
-    local_docs_btree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = [],
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    options = [],
-    compression,
-    before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
-    after_doc_read = nil     % nil | fun(Doc, Db) -> NewDoc
-    }).
-
-
--record(view_query_args, {
-    start_key,
-    end_key,
-    start_docid = ?MIN_STR,
-    end_docid = ?MAX_STR,
-
-    direction = fwd,
-    inclusive_end=true, % aka a closed-interval
-
-    limit = 10000000000, % Huge number to simplify logic
-    skip = 0,
-
-    group_level = 0,
-
-    view_type = nil,
-    include_docs = false,
-    doc_options = [],
-    conflicts = false,
-    stale = false,
-    multi_get = false,
-    callback = nil,
-    list = nil
-}).
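
For context, #view_query_args is (roughly) the parsed form of a view
request's query string; a request carrying ?limit=10&skip=5&include_docs=true
would, ignoring the HTTP parsing layer, end up represented as something
like the record below, with every other field keeping the defaults shown
above:

    #view_query_args{
        limit = 10,
        skip = 5,
        include_docs = true
    }
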
-
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
--record(extern_resp_args, {
-    code = 200,
-    stop = false,
-    data = <<>>,
-    ctype = "application/json",
-    headers = [],
-    json = nil
-}).
-
--record(index_header,
-    {seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-    }).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--record(changes_args, {
-    feed = "normal",
-    dir = fwd,
-    since = 0,
-    limit = 1000000000000000,
-    style = main_only,
-    heartbeat,
-    timeout,
-    filter = "",
-    filter_fun,
-    filter_args = [],
-    include_docs = false,
-    doc_options = [],
-    conflicts = false,
-    db_open_options = []
-}).
-
--record(btree, {
-    fd,
-    root,
-    extract_kv = fun({_Key, _Value} = KV) -> KV end,
-    assemble_kv = fun(Key, Value) -> {Key, Value} end,
-    less = fun(A, B) -> A < B end,
-    reduce = nil,
-    compression = ?DEFAULT_COMPRESSION
-}).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_db_update_notifier.erl
----------------------------------------------------------------------
diff --git a/couch_db_update_notifier.erl b/couch_db_update_notifier.erl
deleted file mode 100644
index bfa770a..0000000
--- a/couch_db_update_notifier.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%
-% This causes an OS process to be spawned, and it is notified every time a
-% database is updated.
-%
-% The notifications are in the form of the database name sent as a line of
-% text to the OS process's stdout.
-%
-
--module(couch_db_update_notifier).
-
--behaviour(gen_event).
-
--export([start_link/1, notify/1]).
--export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
-
--include("couch_db.hrl").
-
-start_link(Exec) ->
-    couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
-
-notify(Event) ->
-    gen_event:notify(couch_db_update, Event).
-
-stop(Pid) ->
-    couch_event_sup:stop(Pid).
-
-init(Exec) when is_list(Exec) -> % an exe
-    couch_os_process:start_link(Exec, []);
-init(Else) ->
-    {ok, Else}.
-
-terminate(_Reason, Pid) when is_pid(Pid) ->
-    couch_os_process:stop(Pid),
-    ok;
-terminate(_Reason, _State) ->
-    ok.
-
-handle_event(Event, Fun) when is_function(Fun, 1) ->
-    Fun(Event),
-    {ok, Fun};
-handle_event(Event, {Fun, FunAcc}) ->
-    FunAcc2 = Fun(Event, FunAcc),
-    {ok, {Fun, FunAcc2}};
-handle_event({EventType, EventDesc}, Pid) ->
-    Obj = encode_event(EventType, EventDesc),
-    ok = couch_os_process:send(Pid, Obj),
-    {ok, Pid}.
-
-handle_call(_Request, State) ->
-    {reply, ok, State}.
-
-handle_info({'EXIT', Pid, Reason}, Pid) ->
-    ?LOG_ERROR("Update notification process ~p died: ~p", [Pid, Reason]),
-    remove_handler;
-handle_info({'EXIT', _, _}, Pid) ->
-    %% the db_update event manager traps exits and forwards this message to all
-%% its handlers. Just ignore it, as it wasn't our os_process that exited.
-    {ok, Pid}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-encode_event(EventType, EventDesc) when is_atom(EventType) ->
-    encode_event(atom_to_list(EventType), EventDesc);
-encode_event(EventType, EventDesc) when is_list(EventType) ->
-    encode_event(?l2b(EventType), EventDesc);
-encode_event(EventType, {DbName, DocId}) ->
-    {[{type, EventType}, {db, DbName}, {id, DocId}]};
-encode_event(EventType, DbName) ->
-    {[{type, EventType}, {db, DbName}]}.
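
encode_event/2 above fixes the wire format: each notification is an EJSON
object such as {"type":"updated","db":"dbname"} (ddoc_updated events also
carry an "id" field), which couch_os_process JSON-encodes and writes to the
spawned executable one object per line. A minimal consumer, sketched as an
escript (illustrative only, not shipped with CouchDB):

    #!/usr/bin/env escript
    %% Read one JSON notification per line from stdin and echo it to
    %% stderr; a real notifier would parse the JSON and act on it.
    main(_Args) ->
        case io:get_line("") of
            eof  -> ok;
            Line ->
                io:format(standard_error, "db event: ~s", [Line]),
                main([])
        end.
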

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_db_update_notifier_sup.erl
----------------------------------------------------------------------
diff --git a/couch_db_update_notifier_sup.erl b/couch_db_update_notifier_sup.erl
deleted file mode 100644
index e7cc16c..0000000
--- a/couch_db_update_notifier_sup.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%
-% This causes an OS process to be spawned, and it is notified every time a
-% database is updated.
-%
-% The notifications are in the form of the database name sent as a line of
-% text to the OS process's stdout.
-%
-
--module(couch_db_update_notifier_sup).
-
--behaviour(supervisor).
-
--export([start_link/0, init/1, config_change/3]).
-
-start_link() ->
-    supervisor:start_link({local, couch_db_update_notifier_sup},
-        couch_db_update_notifier_sup, []).
-
-init([]) ->
-    ok = couch_config:register(fun ?MODULE:config_change/3),
-
-    UpdateNotifierExes = couch_config:get("update_notification"),
-
-    {ok,
-        {{one_for_one, 10, 3600},
-            lists:map(fun({Name, UpdateNotifierExe}) ->
-                {Name,
-                {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
-                    permanent,
-                    1000,
-                    supervisor,
-                    [couch_db_update_notifier]}
-                end, UpdateNotifierExes)}}.
-
-%% @doc when update_notification configuration changes, terminate the process
-%%      for that notifier and start a new one with the updated config
-config_change("update_notification", Id, Exe) ->
-    ChildSpec = {
-        Id,
-        {couch_db_update_notifier, start_link, [Exe]},
-        permanent,
-        1000,
-        supervisor,
-        [couch_db_update_notifier]
-    },
-    supervisor:terminate_child(couch_db_update_notifier_sup, Id),
-    supervisor:delete_child(couch_db_update_notifier_sup, Id),
-    supervisor:start_child(couch_db_update_notifier_sup, ChildSpec).
-
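
The supervisor builds one child per entry in the [update_notification]
section of the server's ini configuration (couch_config:get("update_notification")
returns {Name, Executable} pairs), and config_change/3 above restarts a
notifier whenever its entry changes at runtime. A hypothetical entry, with
a made-up name and path:

    [update_notification]
    ; each database event is delivered to this program as a line of JSON
    index_hook = /usr/local/bin/couch_index_hook
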

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_db_updater.erl
----------------------------------------------------------------------
diff --git a/couch_db_updater.erl b/couch_db_updater.erl
deleted file mode 100644
index af7578e..0000000
--- a/couch_db_updater.erl
+++ /dev/null
@@ -1,1035 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_db_updater).
--behaviour(gen_server).
-
--export([btree_by_id_reduce/2,btree_by_seq_reduce/2]).
--export([make_doc_summary/2]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-
--include("couch_db.hrl").
-
-
-init({MainPid, DbName, Filepath, Fd, Options}) ->
-    process_flag(trap_exit, true),
-    case lists:member(create, Options) of
-    true ->
-        % create a new header and write it to the file
-        Header =  #db_header{},
-        ok = couch_file:write_header(Fd, Header),
-        % delete any old compaction files that might be hanging around
-        RootDir = couch_config:get("couchdb", "database_dir", "."),
-        couch_file:delete(RootDir, Filepath ++ ".compact");
-    false ->
-        case couch_file:read_header(Fd) of
-        {ok, Header} ->
-            ok;
-        no_valid_header ->
-            % create a new header and write it to the file
-            Header =  #db_header{},
-            ok = couch_file:write_header(Fd, Header),
-            % delete any old compaction files that might be hanging around
-            file:delete(Filepath ++ ".compact")
-        end
-    end,
-    ReaderFd = open_reader_fd(Filepath, Options),
-    Db = init_db(DbName, Filepath, Fd, ReaderFd, Header, Options),
-    Db2 = refresh_validate_doc_funs(Db),
-    {ok, Db2#db{main_pid = MainPid}}.
-
-
-terminate(_Reason, Db) ->
-    ok = couch_file:close(Db#db.updater_fd),
-    ok = couch_file:close(Db#db.fd),
-    couch_util:shutdown_sync(Db#db.compactor_pid),
-    couch_util:shutdown_sync(Db#db.fd_ref_counter),
-    ok.
-
-handle_call(get_db, _From, Db) ->
-    {reply, {ok, Db}, Db};
-handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
-    {reply, ok, Db}; % no data waiting, return ok immediately
-handle_call(full_commit, _From,  Db) ->
-    {reply, ok, commit_data(Db)}; % commit the data and return ok
-handle_call(increment_update_seq, _From, Db) ->
-    Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-    couch_db_update_notifier:notify({updated, Db#db.name}),
-    {reply, {ok, Db2#db.update_seq}, Db2};
-
-handle_call({set_security, NewSec}, _From, #db{compression = Comp} = Db) ->
-    {ok, Ptr, _} = couch_file:append_term(
-        Db#db.updater_fd, NewSec, [{compression, Comp}]),
-    Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
-            update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-    {reply, ok, Db2};
-
-handle_call({set_revs_limit, Limit}, _From, Db) ->
-    Db2 = commit_data(Db#db{revs_limit=Limit,
-            update_seq=Db#db.update_seq+1}),
-    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-    {reply, ok, Db2};
-
-handle_call({purge_docs, _IdRevs}, _From,
-        #db{compactor_pid=Pid}=Db) when Pid /= nil ->
-    {reply, {error, purge_during_compaction}, Db};
-handle_call({purge_docs, IdRevs}, _From, Db) ->
-    #db{
-        updater_fd = Fd,
-        fulldocinfo_by_id_btree = DocInfoByIdBTree,
-        docinfo_by_seq_btree = DocInfoBySeqBTree,
-        update_seq = LastSeq,
-        header = Header = #db_header{purge_seq=PurgeSeq},
-        compression = Comp
-        } = Db,
-    DocLookups = couch_btree:lookup(DocInfoByIdBTree,
-            [Id || {Id, _Revs} <- IdRevs]),
-
-    NewDocInfos = lists:zipwith(
-        fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
-            case couch_key_tree:remove_leafs(Tree, Revs) of
-            {_, []=_RemovedRevs} -> % no change
-                nil;
-            {NewTree, RemovedRevs} ->
-                {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs}
-            end;
-        (_, not_found) ->
-            nil
-        end,
-        IdRevs, DocLookups),
-
-    SeqsToRemove = [Seq
-            || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
-
-    FullDocInfoToUpdate = [FullInfo
-            || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
-            <- NewDocInfos, Tree /= []],
-
-    IdRevsPurged = [{Id, Revs}
-            || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
-
-    {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
-        fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
-            Tree2 = couch_key_tree:map_leafs(
-                fun(_RevId, LeafVal) ->
-                    IsDeleted = element(1, LeafVal),
-                    BodyPointer = element(2, LeafVal),
-                    {IsDeleted, BodyPointer, SeqAcc + 1}
-                end, Tree),
-            {couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}),
-                SeqAcc + 1}
-        end, LastSeq, FullDocInfoToUpdate),
-
-    IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
-            <- NewDocInfos],
-
-    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
-            DocInfoToUpdate, SeqsToRemove),
-    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
-            FullDocInfoToUpdate, IdsToRemove),
-    {ok, Pointer, _} = couch_file:append_term(
-            Fd, IdRevsPurged, [{compression, Comp}]),
-
-    Db2 = commit_data(
-        Db#db{
-            fulldocinfo_by_id_btree = DocInfoByIdBTree2,
-            docinfo_by_seq_btree = DocInfoBySeqBTree2,
-            update_seq = NewSeq + 1,
-            header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
-
-    ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-    couch_db_update_notifier:notify({updated, Db#db.name}),
-    {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2};
-handle_call(start_compact, _From, Db) ->
-    case Db#db.compactor_pid of
-    nil ->
-        ?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]),
-        Pid = spawn_link(fun() -> start_copy_compact(Db) end),
-        Db2 = Db#db{compactor_pid=Pid},
-        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
-        {reply, {ok, Pid}, Db2};
-    _ ->
-        % a compaction is already running; this is a no-op
-        {reply, {ok, Db#db.compactor_pid}, Db}
-    end;
-handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
-    {reply, ok, Db};
-handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
-    unlink(Pid),
-    exit(Pid, kill),
-    RootDir = couch_config:get("couchdb", "database_dir", "."),
-    ok = couch_file:delete(RootDir, Db#db.filepath ++ ".compact"),
-    {reply, ok, Db#db{compactor_pid = nil}};
-
-
-handle_call({compact_done, CompactFilepath}, _From, #db{filepath=Filepath}=Db) ->
-    {ok, NewFd} = couch_file:open(CompactFilepath),
-    ReaderFd = open_reader_fd(CompactFilepath, Db#db.options),
-    {ok, NewHeader} = couch_file:read_header(NewFd),
-    #db{update_seq=NewSeq} = NewDb =
-        init_db(Db#db.name, Filepath, NewFd, ReaderFd, NewHeader, Db#db.options),
-    unlink(NewFd),
-    case Db#db.update_seq == NewSeq of
-    true ->
-        % suck up all the local docs into memory and write them to the new db
-        {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
-                fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
-        {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
-
-        NewDb2 = commit_data(NewDb#db{
-            local_docs_btree = NewLocalBtree,
-            main_pid = Db#db.main_pid,
-            filepath = Filepath,
-            instance_start_time = Db#db.instance_start_time,
-            revs_limit = Db#db.revs_limit
-        }),
-
-        ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
-                [Filepath, CompactFilepath]),
-        RootDir = couch_config:get("couchdb", "database_dir", "."),
-        couch_file:delete(RootDir, Filepath),
-        ok = file:rename(CompactFilepath, Filepath),
-        close_db(Db),
-        NewDb3 = refresh_validate_doc_funs(NewDb2),
-        ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity),
-        couch_db_update_notifier:notify({compacted, NewDb3#db.name}),
-        ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
-        {reply, ok, NewDb3#db{compactor_pid=nil}};
-    false ->
-        ?LOG_INFO("Compaction file still behind main file "
-            "(update seq=~p. compact update seq=~p). Retrying.",
-            [Db#db.update_seq, NewSeq]),
-        close_db(NewDb),
-        {reply, {retry, Db}, Db}
-    end.
-
-
-handle_cast(Msg, #db{name = Name} = Db) ->
-    ?LOG_ERROR("Database `~s` updater received unexpected cast: ~p", [Name, Msg]),
-    {stop, Msg, Db}.
-
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
-        FullCommit}, Db) ->
-    GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
-    if NonRepDocs == [] ->
-        {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
-                [Client], MergeConflicts, FullCommit);
-    true ->
-        GroupedDocs3 = GroupedDocs2,
-        FullCommit2 = FullCommit,
-        Clients = [Client]
-    end,
-    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
-    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
-                FullCommit2) of
-    {ok, Db2, UpdatedDDocIds} ->
-        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
-        if Db2#db.update_seq /= Db#db.update_seq ->
-            couch_db_update_notifier:notify({updated, Db2#db.name});
-        true -> ok
-        end,
-        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
-        lists:foreach(fun(DDocId) ->
-            couch_db_update_notifier:notify({ddoc_updated, {Db#db.name, DDocId}})
-        end, UpdatedDDocIds),
-        {noreply, Db2}
-    catch
-        throw: retry ->
-            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
-            {noreply, Db}
-    end;
-handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
-    % no outstanding delayed commits; ignore
-    {noreply, Db};
-handle_info(delayed_commit, Db) ->
-    case commit_data(Db) of
-        Db ->
-            {noreply, Db};
-        Db2 ->
-            ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
-            {noreply, Db2}
-    end;
-handle_info({'EXIT', _Pid, normal}, Db) ->
-    {noreply, Db};
-handle_info({'EXIT', _Pid, Reason}, Db) ->
-    {stop, Reason, Db}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-merge_updates([], RestB, AccOutGroups) ->
-    lists:reverse(AccOutGroups, RestB);
-merge_updates(RestA, [], AccOutGroups) ->
-    lists:reverse(AccOutGroups, RestA);
-merge_updates([[{_, {#doc{id=IdA}, _}}|_]=GroupA | RestA],
-        [[{_, {#doc{id=IdB}, _}}|_]=GroupB | RestB], AccOutGroups) ->
-    if IdA == IdB ->
-        merge_updates(RestA, RestB, [GroupA ++ GroupB | AccOutGroups]);
-    IdA < IdB ->
-        merge_updates(RestA, [GroupB | RestB], [GroupA | AccOutGroups]);
-    true ->
-        merge_updates([GroupA | RestA], RestB, [GroupB | AccOutGroups])
-    end.
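-
-% For illustration (clients c1/c2 and refs r1/r2 below are hypothetical):
-% merge_updates/3 zips two lists of update groups, each sorted by document
-% id, concatenating groups that target the same id so that one rev-tree
-% merge handles them together:
-%
-%   A = [[{c1, {#doc{id = <<"a">>}, r1}}]],
-%   B = [[{c2, {#doc{id = <<"a">>}, r2}}]],
-%   merge_updates(A, B, [])
-%   %=> [[{c1, {#doc{id = <<"a">>}, r1}}, {c2, {#doc{id = <<"a">>}, r2}}]]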
-
-collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
-    receive
-        % Only collect updates with the same MergeConflicts flag and without
-        % local docs. It's easier to just avoid multiple _local doc
-        % updaters than deal with their possible conflicts, and local doc
-        % writes are relatively rare. Can be optimized later if really needed.
-        {update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
-            GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
-                    || DocGroup <- GroupedDocs],
-            GroupedDocsAcc2 =
-                merge_updates(GroupedDocsAcc, GroupedDocs2, []),
-            collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
-                    MergeConflicts, (FullCommit or FullCommit2))
-    after 0 ->
-        {GroupedDocsAcc, ClientsAcc, FullCommit}
-    end.
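-
-% The `after 0` clause above makes the collection non-blocking: it batches
-% whatever matching update_docs messages are already queued and returns as
-% soon as the mailbox holds no more. A minimal sketch of the idiom (drain/1
-% is illustrative, not part of this module):
-%
-%   drain(Acc) ->
-%       receive
-%           {update_docs, _, _, [], _, _} = Msg -> drain([Msg | Acc])
-%       after 0 ->
-%           lists:reverse(Acc)
-%       end.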
-
-
-btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
-    {RevInfos, DeletedRevInfos} = lists:foldl(
-        fun(#rev_info{deleted = false, seq = Seq} = Ri, {Acc, AccDel}) ->
-                {[{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | Acc], AccDel};
-            (#rev_info{deleted = true, seq = Seq} = Ri, {Acc, AccDel}) ->
-                {Acc, [{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | AccDel]}
-        end,
-        {[], []}, Revs),
-    {KeySeq, {Id, lists:reverse(RevInfos), lists:reverse(DeletedRevInfos)}}.
-
-btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
-    #doc_info{
-        id = Id,
-        high_seq=KeySeq,
-        revs =
-            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
-                {Rev, Seq, Bp} <- RevInfos] ++
-            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
-                {Rev, Seq, Bp} <- DeletedRevInfos]}.
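-
-% btree_by_seq_split/1 and btree_by_seq_join/2 form an inverse pair: split
-% flattens a #doc_info{} into the {Key, Value} term stored in the by_seq
-% btree, and join rebuilds the record on read. For any DocInfo produced by
-% couch_doc:to_doc_info/1 (live revs sorted before deleted ones):
-%
-%   {KeySeq, Val} = btree_by_seq_split(DocInfo),
-%   DocInfo = btree_by_seq_join(KeySeq, Val).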
-
-btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
-        deleted=Deleted, rev_tree=Tree}) ->
-    DiskTree =
-    couch_key_tree:map(
-        fun(_RevId, ?REV_MISSING) ->
-            ?REV_MISSING;
-        (_RevId, RevValue) ->
-            IsDeleted = element(1, RevValue),
-            BodyPointer = element(2, RevValue),
-            UpdateSeq = element(3, RevValue),
-            Size = case tuple_size(RevValue) of
-            4 ->
-                element(4, RevValue);
-            3 ->
-                % pre 1.2 format, will be upgraded on compaction
-                nil
-            end,
-            {if IsDeleted -> 1; true -> 0 end, BodyPointer, UpdateSeq, Size}
-        end, Tree),
-    {Id, {Seq, if Deleted -> 1; true -> 0 end, DiskTree}}.
-
-btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
-    {Tree, LeafsSize} =
-    couch_key_tree:mapfold(
-        fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}, leaf, _Acc) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {{IsDeleted == 1, BodyPointer, UpdateSeq, nil}, nil};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq}, branch, Acc) ->
-            {{IsDeleted == 1, BodyPointer, UpdateSeq, nil}, Acc};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, leaf, Acc) ->
-            Acc2 = sum_leaf_sizes(Acc, Size),
-            {{IsDeleted == 1, BodyPointer, UpdateSeq, Size}, Acc2};
-        (_RevId, {IsDeleted, BodyPointer, UpdateSeq, Size}, branch, Acc) ->
-            {{IsDeleted == 1, BodyPointer, UpdateSeq, Size}, Acc};
-        (_RevId, ?REV_MISSING, _Type, Acc) ->
-            {?REV_MISSING, Acc}
-        end, 0, DiskTree),
-    #full_doc_info{
-        id = Id,
-        update_seq = HighSeq,
-        deleted = (Deleted == 1),
-        rev_tree = Tree,
-        leafs_size = LeafsSize
-    }.
-
-btree_by_id_reduce(reduce, FullDocInfos) ->
-    lists:foldl(
-        fun(Info, {NotDeleted, Deleted, Size}) ->
-            Size2 = sum_leaf_sizes(Size, Info#full_doc_info.leafs_size),
-            case Info#full_doc_info.deleted of
-            true ->
-                {NotDeleted, Deleted + 1, Size2};
-            false ->
-                {NotDeleted + 1, Deleted, Size2}
-            end
-        end,
-        {0, 0, 0}, FullDocInfos);
-btree_by_id_reduce(rereduce, Reds) ->
-    lists:foldl(
-        fun({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSize}) ->
-            % pre 1.2 format, will be upgraded on compaction
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
-        ({NotDeleted, Deleted, Size}, {AccNotDeleted, AccDeleted, AccSize}) ->
-            AccSize2 = sum_leaf_sizes(AccSize, Size),
-            {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSize2}
-        end,
-        {0, 0, 0}, Reds).
-
-sum_leaf_sizes(nil, _) ->
-    nil;
-sum_leaf_sizes(_, nil) ->
-    nil;
-sum_leaf_sizes(Size1, Size2) ->
-    Size1 + Size2.
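-
-% nil means "size unknown" (pre-1.2 data) and is absorbing, so a single
-% unknown leaf makes the whole aggregate unknown until compaction rewrites
-% the sizes:
-%
-%   sum_leaf_sizes(10, 32)   %=> 42
-%   sum_leaf_sizes(nil, 32)  %=> nil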
-
-btree_by_seq_reduce(reduce, DocInfos) ->
-    % count the number of documents
-    length(DocInfos);
-btree_by_seq_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) ->
-    OldSz = tuple_size(Old),
-    NewValuesTail =
-        lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz),
-    list_to_tuple(tuple_to_list(Old) ++ NewValuesTail);
-simple_upgrade_record(Old, _New) ->
-    Old.
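-
-% simple_upgrade_record/2 pads a record tuple written by an older release
-% with the defaults of the current definition's trailing fields. For
-% example, with small tuples standing in for records:
-%
-%   simple_upgrade_record({db_header, 5}, {db_header, 6, nil, 0})
-%   %=> {db_header, 5, nil, 0}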
-
--define(OLD_DISK_VERSION_ERROR,
-    "Database files from versions older than 0.10.0 are no longer supported").
-
-init_db(DbName, Filepath, Fd, ReaderFd, Header0, Options) ->
-    Header1 = simple_upgrade_record(Header0, #db_header{}),
-    Header =
-    case element(2, Header1) of
-    1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
-    4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11
-    5 -> Header1; % pre 1.2
-    ?LATEST_DISK_VERSION -> Header1;
-    _ -> throw({database_disk_version_error, "Incorrect disk header version"})
-    end,
-
-    {ok, FsyncOptions} = couch_util:parse_term(
-            couch_config:get("couchdb", "fsync_options",
-                    "[before_header, after_header, on_file_open]")),
-
-    case lists:member(on_file_open, FsyncOptions) of
-    true -> ok = couch_file:sync(Fd);
-    _ -> ok
-    end,
-
-    Compression = couch_compress:get_compression_method(),
-
-    {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
-        [{split, fun(X) -> btree_by_id_split(X) end},
-        {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
-        {reduce, fun(X,Y) -> btree_by_id_reduce(X,Y) end},
-        {compression, Compression}]),
-    {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
-            [{split, fun(X) -> btree_by_seq_split(X) end},
-            {join, fun(X,Y) -> btree_by_seq_join(X,Y) end},
-            {reduce, fun(X,Y) -> btree_by_seq_reduce(X,Y) end},
-            {compression, Compression}]),
-    {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd,
-        [{compression, Compression}]),
-    case Header#db_header.security_ptr of
-    nil ->
-        Security = [],
-        SecurityPtr = nil;
-    SecurityPtr ->
-        {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
-    end,
-    % convert start time tuple to microsecs and store as a binary string
-    {MegaSecs, Secs, MicroSecs} = now(),
-    StartTime = ?l2b(io_lib:format("~p",
-            [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
-    {ok, RefCntr} = couch_ref_counter:start([Fd, ReaderFd]),
-    #db{
-        update_pid=self(),
-        fd = ReaderFd,
-        updater_fd = Fd,
-        fd_ref_counter = RefCntr,
-        header=Header,
-        fulldocinfo_by_id_btree = IdBtree,
-        docinfo_by_seq_btree = SeqBtree,
-        local_docs_btree = LocalDocsBtree,
-        committed_update_seq = Header#db_header.update_seq,
-        update_seq = Header#db_header.update_seq,
-        name = DbName,
-        filepath = Filepath,
-        security = Security,
-        security_ptr = SecurityPtr,
-        instance_start_time = StartTime,
-        revs_limit = Header#db_header.revs_limit,
-        fsync_options = FsyncOptions,
-        options = Options,
-        compression = Compression,
-        before_doc_update = couch_util:get_value(before_doc_update, Options, nil),
-        after_doc_read = couch_util:get_value(after_doc_read, Options, nil)
-        }.
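-
-% The fsync options consumed above are read verbatim as an Erlang term from
-% the ini file; the default used by init_db/6 is equivalent to this
-% local.ini entry:
-%
-%   [couchdb]
-%   fsync_options = [before_header, after_header, on_file_open]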
-
-open_reader_fd(Filepath, Options) ->
-    {ok, Fd} = case lists:member(sys_db, Options) of
-    true ->
-        couch_file:open(Filepath, [read_only, sys_db]);
-    false ->
-        couch_file:open(Filepath, [read_only])
-    end,
-    unlink(Fd),
-    Fd.
-
-close_db(#db{fd_ref_counter = RefCntr}) ->
-    couch_ref_counter:drop(RefCntr).
-
-
-refresh_validate_doc_funs(Db0) ->
-    Db = Db0#db{user_ctx = #user_ctx{roles=[<<"_admin">>]}},
-    DesignDocs = couch_db:get_design_docs(Db),
-    ProcessDocFuns = lists:flatmap(
-        fun(DesignDocInfo) ->
-            {ok, DesignDoc} = couch_db:open_doc_int(
-                Db, DesignDocInfo, [ejson_body]),
-            case couch_doc:get_validate_doc_fun(DesignDoc) of
-            nil -> [];
-            Fun -> [Fun]
-            end
-        end, DesignDocs),
-    Db0#db{validate_doc_funs=ProcessDocFuns}.
-
-% rev tree functions
-
-flush_trees(_Db, [], AccFlushedTrees) ->
-    {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{updater_fd = Fd} = Db,
-        [InfoUnflushed | RestUnflushed], AccFlushed) ->
-    #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
-    {Flushed, LeafsSize} = couch_key_tree:mapfold(
-        fun(_Rev, Value, Type, Acc) ->
-            case Value of
-            #doc{deleted = IsDeleted, body = {summary, Summary, AttsFd}} ->
-                % this node value is actually an unwritten document summary,
-                % so write it to disk.
-                % Make sure the Fd in the written bins is the same Fd we are
-                % using, and convert the bins, removing the Fd.
-                % All bins should have been written to disk already.
-                case {AttsFd, Fd} of
-                {nil, _} ->
-                    ok;
-                {SameFd, SameFd} ->
-                    ok;
-                _ ->
-                    % Fd where the attachments were written to is not the same
-                    % as our Fd. This can happen when a database is being
-                    % switched out during a compaction.
-                    ?LOG_DEBUG("File where the attachments are written has"
-                            " changed. Possibly retrying.", []),
-                    throw(retry)
-                end,
-                {ok, NewSummaryPointer, SummarySize} =
-                    couch_file:append_raw_chunk(Fd, Summary),
-                TotalSize = lists:foldl(
-                    fun(#att{att_len = L}, A) -> A + L end,
-                    SummarySize, Value#doc.atts),
-                NewValue = {IsDeleted, NewSummaryPointer, UpdateSeq, TotalSize},
-                case Type of
-                leaf ->
-                    {NewValue, Acc + TotalSize};
-                branch ->
-                    {NewValue, Acc}
-                end;
-             {_, _, _, LeafSize} when Type =:= leaf, LeafSize =/= nil ->
-                {Value, Acc + LeafSize};
-             _ ->
-                {Value, Acc}
-            end
-        end, 0, Unflushed),
-    InfoFlushed = InfoUnflushed#full_doc_info{
-        rev_tree = Flushed,
-        leafs_size = LeafsSize
-    },
-    flush_trees(Db, RestUnflushed, [InfoFlushed | AccFlushed]).
-
-
-send_result(Client, Ref, NewResult) ->
-    % used to send a result to the client
-    catch(Client ! {result, self(), {Ref, NewResult}}).
-
-merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
-    {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
-merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
-        [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
-    #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted0,update_seq=OldSeq}
-            = OldDocInfo,
-    {NewRevTree, _} = lists:foldl(
-        fun({Client, {#doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc, Ref}}, {AccTree, OldDeleted}) ->
-            if not MergeConflicts ->
-                case couch_key_tree:merge(AccTree, couch_doc:to_path(NewDoc),
-                    Limit) of
-                {_NewTree, conflicts} when (not OldDeleted) ->
-                    send_result(Client, Ref, conflict),
-                    {AccTree, OldDeleted};
-                {NewTree, conflicts} when PrevRevs /= [] ->
-                    % Check that, if a previous revision was specified,
-                    % it is a leaf node in the tree
-                    Leafs = couch_key_tree:get_all_leafs(AccTree),
-                    IsPrevLeaf = lists:any(fun({_, {LeafPos, [LeafRevId|_]}}) ->
-                            {LeafPos, LeafRevId} == {Pos-1, hd(PrevRevs)}
-                        end, Leafs),
-                    if IsPrevLeaf ->
-                        {NewTree, OldDeleted};
-                    true ->
-                        send_result(Client, Ref, conflict),
-                        {AccTree, OldDeleted}
-                    end;
-                {NewTree, no_conflicts} when  AccTree == NewTree ->
-                    % the tree didn't change at all,
-                    % meaning we are saving a rev that has already
-                    % been edited again.
-                    if (Pos == 1) and OldDeleted ->
-                        % this means we are recreating the document in a
-                        % state that existed before it was deleted; record
-                        % the rev as a subsequent edit of the deletion
-                        #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
-                                couch_doc:to_doc_info(OldDocInfo),
-                        NewRevId = couch_db:new_revid(
-                                NewDoc#doc{revs={OldPos, [OldRev]}}),
-                        NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
-                        {NewTree2, _} = couch_key_tree:merge(AccTree,
-                                couch_doc:to_path(NewDoc2), Limit),
-                        % we changed the rev id, so tell the caller the new one
-                        send_result(Client, Ref, {ok, {OldPos + 1, NewRevId}}),
-                        {NewTree2, OldDeleted};
-                    true ->
-                        send_result(Client, Ref, conflict),
-                        {AccTree, OldDeleted}
-                    end;
-                {NewTree, _} ->
-                    {NewTree, NewDoc#doc.deleted}
-                end;
-            true ->
-                {NewTree, _} = couch_key_tree:merge(AccTree,
-                            couch_doc:to_path(NewDoc), Limit),
-                {NewTree, OldDeleted}
-            end
-        end,
-        {OldTree, OldDeleted0}, NewDocs),
-    if NewRevTree == OldTree ->
-        % nothing changed
-        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
-            AccNewInfos, AccRemoveSeqs, AccSeq);
-    true ->
-        % we have updated the document, give it a new seq #
-        NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
-        RemoveSeqs = case OldSeq of
-            0 -> AccRemoveSeqs;
-            _ -> [OldSeq | AccRemoveSeqs]
-        end,
-        merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
-            [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
-    end.
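-
-% Worked example of the deleted-document branch above (rev ids are
-% hypothetical): recreating a deleted doc hits the (Pos == 1) and OldDeleted
-% case, so instead of replying conflict the new edit is chained onto the
-% deletion:
-%
-%   old winner:  rev {2, RevDel} with deleted = true
-%   client doc:  revs = {1, [RevNew]}   (a fresh edit, no ancestors given)
-%   stored doc:  revs = {3, [NewRevId, RevDel]}
-%   client gets: {ok, {3, NewRevId}}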
-
-
-
-new_index_entries([], AccById, AccBySeq, AccDDocIds) ->
-    {AccById, AccBySeq, AccDDocIds};
-new_index_entries([FullDocInfo|RestInfos], AccById, AccBySeq, AccDDocIds) ->
-    #doc_info{revs=[#rev_info{deleted=Deleted}|_], id=Id} = DocInfo =
-            couch_doc:to_doc_info(FullDocInfo),
-    AccDDocIds2 = case Id of
-    <<?DESIGN_DOC_PREFIX, _/binary>> ->
-        [Id | AccDDocIds];
-    _ ->
-        AccDDocIds
-    end,
-    new_index_entries(RestInfos,
-        [FullDocInfo#full_doc_info{deleted=Deleted}|AccById],
-        [DocInfo|AccBySeq],
-        AccDDocIds2).
-
-
-stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
-    [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
-            #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
-
-update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
-    #db{
-        fulldocinfo_by_id_btree = DocInfoByIdBTree,
-        docinfo_by_seq_btree = DocInfoBySeqBTree,
-        update_seq = LastSeq,
-        revs_limit = RevsLimit
-        } = Db,
-    Ids = [Id || [{_Client, {#doc{id=Id}, _Ref}}|_] <- DocsList],
-    % look up the old documents, if they exist.
-    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
-    OldDocInfos = lists:zipwith(
-        fun(_Id, {ok, FullDocInfo}) ->
-            FullDocInfo;
-        (Id, not_found) ->
-            #full_doc_info{id=Id}
-        end,
-        Ids, OldDocLookups),
-    % Merge the new docs into the revision trees.
-    {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
-            MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
-
-    % All documents are now ready to write.
-
-    {ok, Db2}  = update_local_docs(Db, NonRepDocs),
-
-    % Write out the document summaries (the bodies are stored in the nodes of
-    % the trees, the attachments are already written to disk)
-    {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
-
-    {IndexFullDocInfos, IndexDocInfos, UpdatedDDocIds} =
-            new_index_entries(FlushedFullDocInfos, [], [], []),
-
-    % ... and update the by_id and by_seq indexes
-    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
-    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexDocInfos, RemoveSeqs),
-
-    Db3 = Db2#db{
-        fulldocinfo_by_id_btree = DocInfoByIdBTree2,
-        docinfo_by_seq_btree = DocInfoBySeqBTree2,
-        update_seq = NewSeq},
-
-    % Check if we just updated any design documents, and update the validation
-    % funs if we did.
-    Db4 = case UpdatedDDocIds of
-    [] ->
-        Db3;
-    _ ->
-        refresh_validate_doc_funs(Db3)
-    end,
-
-    {ok, commit_data(Db4, not FullCommit), UpdatedDDocIds}.
-
-update_local_docs(Db, []) ->
-    {ok, Db};
-update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
-    Ids = [Id || {_Client, {#doc{id=Id}, _Ref}} <- Docs],
-    OldDocLookups = couch_btree:lookup(Btree, Ids),
-    BtreeEntries = lists:zipwith(
-        fun({Client, {#doc{id=Id,deleted=Delete,revs={0,PrevRevs},body=Body}, Ref}}, OldDocLookup) ->
-            case PrevRevs of
-            [RevStr|_] ->
-                PrevRev = list_to_integer(?b2l(RevStr));
-            [] ->
-                PrevRev = 0
-            end,
-            OldRev =
-            case OldDocLookup of
-                {ok, {_, {OldRev0, _}}} -> OldRev0;
-                not_found -> 0
-            end,
-            case OldRev == PrevRev of
-            true ->
-                case Delete of
-                    false ->
-                        send_result(Client, Ref, {ok,
-                                {0, ?l2b(integer_to_list(PrevRev + 1))}}),
-                        {update, {Id, {PrevRev + 1, Body}}};
-                    true  ->
-                        send_result(Client, Ref,
-                                {ok, {0, <<"0">>}}),
-                        {remove, Id}
-                end;
-            false ->
-                send_result(Client, Ref, conflict),
-                ignore
-            end
-        end, Docs, OldDocLookups),
-
-    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
-    BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],
-
-    {ok, Btree2} =
-        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
-
-    {ok, Db#db{local_docs_btree = Btree2}}.
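-
-% _local docs bypass the rev-tree machinery entirely: their revisions are
-% plain integers at position 0. With the clauses above, for a local doc
-% whose stored rev is 1 (replies illustrative):
-%
-%   write:      send_result(..., {ok, {0, <<"2">>}}), {update, {Id, {2, Body}}}
-%   delete:     send_result(..., {ok, {0, <<"0">>}}), {remove, Id}
-%   stale _rev: send_result(..., conflict), entry ignored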
-
-
-commit_data(Db) ->
-    commit_data(Db, false).
-
-db_to_header(Db, Header) ->
-    Header#db_header{
-        update_seq = Db#db.update_seq,
-        docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
-        fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
-        local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
-        security_ptr = Db#db.security_ptr,
-        revs_limit = Db#db.revs_limit}.
-
-commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
-    Db#db{waiting_delayed_commit=erlang:send_after(1000,self(),delayed_commit)};
-commit_data(Db, true) ->
-    Db;
-commit_data(Db, _) ->
-    #db{
-        updater_fd = Fd,
-        filepath = Filepath,
-        header = OldHeader,
-        fsync_options = FsyncOptions,
-        waiting_delayed_commit = Timer
-    } = Db,
-    if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
-    case db_to_header(Db, OldHeader) of
-    OldHeader ->
-        Db#db{waiting_delayed_commit=nil};
-    Header ->
-        case lists:member(before_header, FsyncOptions) of
-        true -> ok = couch_file:sync(Filepath);
-        _    -> ok
-        end,
-
-        ok = couch_file:write_header(Fd, Header),
-
-        case lists:member(after_header, FsyncOptions) of
-        true -> ok = couch_file:sync(Filepath);
-        _    -> ok
-        end,
-
-        Db#db{waiting_delayed_commit=nil,
-            header=Header,
-            committed_update_seq=Db#db.update_seq}
-    end.
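-
-% The delayed-commit protocol above: commit_data(Db, true) arms at most one
-% one-second timer; when it fires, handle_info(delayed_commit, ...) ends up
-% in commit_data(Db, false), which cancels any pending timer and rewrites
-% the header only if it actually changed. The timer idiom in isolation:
-%
-%   Timer = erlang:send_after(1000, self(), delayed_commit),
-%   ...
-%   if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end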
-
-
-copy_doc_attachments(#db{updater_fd = SrcFd} = SrcDb, SrcSp, DestFd) ->
-    {ok, {BodyData, BinInfos0}} = couch_db:read_doc(SrcDb, SrcSp),
-    BinInfos = case BinInfos0 of
-    _ when is_binary(BinInfos0) ->
-        couch_compress:decompress(BinInfos0);
-    _ when is_list(BinInfos0) ->
-        % pre 1.2 file format
-        BinInfos0
-    end,
-    % copy the bin values
-    NewBinInfos = lists:map(
-        fun({Name, Type, BinSp, AttLen, RevPos, Md5}) ->
-            % 010 UPGRADE CODE
-            {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
-                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
-            {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity};
-        ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) ->
-            {NewBinSp, AttLen, _, Md5, _IdentityMd5} =
-                couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
-            Enc = case Enc1 of
-            true ->
-                % 0110 UPGRADE CODE
-                gzip;
-            false ->
-                % 0110 UPGRADE CODE
-                identity;
-            _ ->
-                Enc1
-            end,
-            {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc}
-        end, BinInfos),
-    {BodyData, NewBinInfos}.
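-
-% The first clause above upgrades the 0.10-era 6-tuple attachment info to
-% the current 8-tuple form; those files never stored encodings, so the disk
-% length equals the attachment length and the encoding is identity:
-%
-%   {Name, Type, Sp, AttLen, RevPos, Md5}
-%     -> {Name, Type, NewSp, AttLen, AttLen, RevPos, Md5, identity}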
-
-copy_docs(Db, #db{updater_fd = DestFd} = NewDb, InfoBySeq0, Retry) ->
-    % COUCHDB-968, make sure we prune duplicates during compaction
-    InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end,
-        InfoBySeq0),
-    Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
-    LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
-
-    NewFullDocInfos1 = lists:map(
-        fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
-            Info#full_doc_info{rev_tree=couch_key_tree:map(
-                fun(_, _, branch) ->
-                    ?REV_MISSING;
-                (_Rev, LeafVal, leaf) ->
-                    IsDel = element(1, LeafVal),
-                    Sp = element(2, LeafVal),
-                    Seq = element(3, LeafVal),
-                    {_Body, AttsInfo} = Summary = copy_doc_attachments(
-                        Db, Sp, DestFd),
-                    SummaryChunk = make_doc_summary(NewDb, Summary),
-                    {ok, Pos, SummarySize} = couch_file:append_raw_chunk(
-                        DestFd, SummaryChunk),
-                    TotalLeafSize = lists:foldl(
-                        fun({_, _, _, AttLen, _, _, _, _}, S) -> S + AttLen end,
-                        SummarySize, AttsInfo),
-                    {IsDel, Pos, Seq, TotalLeafSize}
-                end, RevTree)}
-        end, LookupResults),
-
-    NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1),
-    NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
-    RemoveSeqs =
-    case Retry of
-    false ->
-        [];
-    true ->
-        % We are retrying a compaction, meaning the documents we are copying may
-        % already exist in our file and must be removed from the by_seq index.
-        Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids),
-        [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
-    end,
-
-    {ok, DocInfoBTree} = couch_btree:add_remove(
-            NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs),
-    {ok, FullDocInfoBTree} = couch_btree:add_remove(
-            NewDb#db.fulldocinfo_by_id_btree, NewFullDocInfos, []),
-    update_compact_task(length(NewFullDocInfos)),
-    NewDb#db{ fulldocinfo_by_id_btree=FullDocInfoBTree,
-              docinfo_by_seq_btree=DocInfoBTree}.
-
-
-
-copy_compact(Db, NewDb0, Retry) ->
-    FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
-    Compression = couch_compress:get_compression_method(),
-    NewDb = NewDb0#db{fsync_options=FsyncOptions, compression=Compression},
-    TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
-    BufferSize = list_to_integer(
-        couch_config:get("database_compaction", "doc_buffer_size", "524288")),
-    CheckpointAfter = couch_util:to_integer(
-        couch_config:get("database_compaction", "checkpoint_after",
-            BufferSize * 10)),
-
-    EnumBySeqFun =
-    fun(#doc_info{high_seq=Seq}=DocInfo, _Offset,
-        {AccNewDb, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
-        AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
-        if AccUncopiedSize2 >= BufferSize ->
-            NewDb2 = copy_docs(
-                Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
-            AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
-            if AccCopiedSize2 >= CheckpointAfter ->
-                {ok, {commit_data(NewDb2#db{update_seq = Seq}), [], 0, 0}};
-            true ->
-                {ok, {NewDb2#db{update_seq = Seq}, [], 0, AccCopiedSize2}}
-            end;
-        true ->
-            {ok, {AccNewDb, [DocInfo | AccUncopied], AccUncopiedSize2,
-                AccCopiedSize}}
-        end
-    end,
-
-    TaskProps0 = [
-        {type, database_compaction},
-        {database, Db#db.name},
-        {progress, 0},
-        {changes_done, 0},
-        {total_changes, TotalChanges}
-    ],
-    case Retry and couch_task_status:is_task_added() of
-    true ->
-        couch_task_status:update([
-            {retry, true},
-            {progress, 0},
-            {changes_done, 0},
-            {total_changes, TotalChanges}
-        ]);
-    false ->
-        couch_task_status:add_task(TaskProps0),
-        couch_task_status:set_update_frequency(500)
-    end,
-
-    {ok, _, {NewDb2, Uncopied, _, _}} =
-        couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun,
-            {NewDb, [], 0, 0},
-            [{start_key, NewDb#db.update_seq + 1}]),
-
-    NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
-
-    % copy misc header values
-    if NewDb3#db.security /= Db#db.security ->
-        {ok, Ptr, _} = couch_file:append_term(
-            NewDb3#db.updater_fd, Db#db.security,
-            [{compression, NewDb3#db.compression}]),
-        NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
-    true ->
-        NewDb4 = NewDb3
-    end,
-
-    commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
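-
-% Both buffering knobs above come from the ini file; with the defaults a
-% checkpoint header is committed after roughly ten buffers (~5 MiB) of
-% copied doc_infos:
-%
-%   [database_compaction]
-%   doc_buffer_size = 524288
-%   checkpoint_after = 5242880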
-
-start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) ->
-    CompactFile = Filepath ++ ".compact",
-    ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
-    case couch_file:open(CompactFile, [nologifmissing]) of
-    {ok, Fd} ->
-        Retry = true,
-        case couch_file:read_header(Fd) of
-        {ok, Header} ->
-            ok;
-        no_valid_header ->
-            ok = couch_file:write_header(Fd, Header=#db_header{})
-        end;
-    {error, enoent} ->
-        {ok, Fd} = couch_file:open(CompactFile, [create]),
-        Retry = false,
-        ok = couch_file:write_header(Fd, Header=#db_header{})
-    end,
-    ReaderFd = open_reader_fd(CompactFile, Db#db.options),
-    NewDb = init_db(Name, CompactFile, Fd, ReaderFd, Header, Db#db.options),
-    NewDb2 = if PurgeSeq > 0 ->
-        {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
-        {ok, Pointer, _} = couch_file:append_term(
-            Fd, PurgedIdsRevs, [{compression, NewDb#db.compression}]),
-        NewDb#db{header=Header#db_header{purge_seq=PurgeSeq, purged_docs=Pointer}};
-    true ->
-        NewDb
-    end,
-    unlink(Fd),
-
-    NewDb3 = copy_compact(Db, NewDb2, Retry),
-    close_db(NewDb3),
-    case gen_server:call(
-        Db#db.update_pid, {compact_done, CompactFile}, infinity) of
-    ok ->
-        ok;
-    {retry, CurrentDb} ->
-        start_copy_compact(CurrentDb)
-    end.
-
-update_compact_task(NumChanges) ->
-    [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
-    Changes2 = Changes + NumChanges,
-    Progress = case Total of
-    0 ->
-        0;
-    _ ->
-        (Changes2 * 100) div Total
-    end,
-    couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
-make_doc_summary(#db{compression = Comp}, {Body0, Atts0}) ->
-    Body = case couch_compress:is_compressed(Body0, Comp) of
-    true ->
-        Body0;
-    false ->
-        % pre 1.2 database file format
-        couch_compress:compress(Body0, Comp)
-    end,
-    Atts = case couch_compress:is_compressed(Atts0, Comp) of
-    true ->
-        Atts0;
-    false ->
-        couch_compress:compress(Atts0, Comp)
-    end,
-    SummaryBin = ?term_to_bin({Body, Atts}),
-    couch_file:assemble_file_chunk(SummaryBin, couch_util:md5(SummaryBin)).

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_doc.erl
----------------------------------------------------------------------
diff --git a/couch_doc.erl b/couch_doc.erl
deleted file mode 100644
index 4047370..0000000
--- a/couch_doc.erl
+++ /dev/null
@@ -1,650 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_doc).
-
--export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
--export([att_foldl/3,range_att_foldl/5,att_foldl_decode/3,get_validate_doc_fun/1]).
--export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
--export([validate_docid/1]).
--export([doc_from_multi_part_stream/2]).
--export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
--export([abort_multi_part_stream/1]).
--export([to_path/1]).
--export([mp_parse_doc/2]).
--export([with_ejson_body/1]).
-
--include("couch_db.hrl").
-
--spec to_path(#doc{}) -> path().
-to_path(#doc{revs={Start, RevIds}}=Doc) ->
-    [Branch] = to_branch(Doc, lists:reverse(RevIds)),
-    {Start - length(RevIds) + 1, Branch}.
-
--spec to_branch(#doc{}, [RevId::binary()]) -> [branch()].
-to_branch(Doc, [RevId]) ->
-    [{RevId, Doc, []}];
-to_branch(Doc, [RevId | Rest]) ->
-    [{RevId, ?REV_MISSING, to_branch(Doc, Rest)}].
-
-% helpers used by to_json_obj
-to_json_rev(0, []) ->
-    [];
-to_json_rev(Start, [FirstRevId|_]) ->
-    [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
-
-to_json_body(true, {Body}) ->
-    Body ++ [{<<"_deleted">>, true}];
-to_json_body(false, {Body}) ->
-    Body.
-
-to_json_revisions(Options, Start, RevIds) ->
-    case lists:member(revs, Options) of
-    false -> [];
-    true ->
-        [{<<"_revisions">>, {[{<<"start">>, Start},
-                {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
-    end.
-
-revid_to_str(RevId) when size(RevId) =:= 16 ->
-    ?l2b(couch_util:to_hex(RevId));
-revid_to_str(RevId) ->
-    RevId.
-
-rev_to_str({Pos, RevId}) ->
-    ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
-
-
-revs_to_strs([]) ->
-    [];
-revs_to_strs([{Pos, RevId}| Rest]) ->
-    [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
-
-to_json_meta(Meta) ->
-    lists:map(
-        fun({revs_info, Start, RevsInfo}) ->
-            {JsonRevsInfo, _Pos}  = lists:mapfoldl(
-                fun({RevId, Status}, PosAcc) ->
-                    JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
-                        {<<"status">>, ?l2b(atom_to_list(Status))}]},
-                    {JsonObj, PosAcc - 1}
-                end, Start, RevsInfo),
-            {<<"_revs_info">>, JsonRevsInfo};
-        ({local_seq, Seq}) ->
-            {<<"_local_seq">>, Seq};
-        ({conflicts, Conflicts}) ->
-            {<<"_conflicts">>, revs_to_strs(Conflicts)};
-        ({deleted_conflicts, DConflicts}) ->
-            {<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}
-        end, Meta).
-
-to_json_attachments(Attachments, Options) ->
-    to_json_attachments(
-        Attachments,
-        lists:member(attachments, Options),
-        lists:member(follows, Options),
-        lists:member(att_encoding_info, Options)
-    ).
-
-to_json_attachments([], _OutputData, _DataToFollow, _ShowEncInfo) ->
-    [];
-to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) ->
-    AttProps = lists:map(
-        fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) ->
-            {Att#att.name, {[
-                {<<"content_type">>, Att#att.type},
-                {<<"revpos">>, Att#att.revpos}] ++
-                case Att#att.md5 of
-                    <<>> ->
-                        [];
-                    Md5 ->
-                        EncodedMd5 = base64:encode(Md5),
-                        [{<<"digest">>, <<"md5-",EncodedMd5/binary>>}]
-                end ++
-                if not OutputData orelse Att#att.data == stub ->
-                    [{<<"length">>, DiskLen}, {<<"stub">>, true}];
-                true ->
-                    if DataToFollow ->
-                        [{<<"length">>, DiskLen}, {<<"follows">>, true}];
-                    true ->
-                        AttData = case Enc of
-                        gzip ->
-                            zlib:gunzip(att_to_bin(Att));
-                        identity ->
-                            att_to_bin(Att)
-                        end,
-                        [{<<"data">>, base64:encode(AttData)}]
-                    end
-                end ++
-                    case {ShowEncInfo, Enc} of
-                    {false, _} ->
-                        [];
-                    {true, identity} ->
-                        [];
-                    {true, _} ->
-                        [
-                            {<<"encoding">>, couch_util:to_binary(Enc)},
-                            {<<"encoded_length">>, AttLen}
-                        ]
-                    end
-            }}
-        end, Atts),
-    [{<<"_attachments">>, {AttProps}}].
-
-to_json_obj(Doc, Options) ->
-    doc_to_json_obj(with_ejson_body(Doc), Options).
-
-doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
-            meta=Meta}=Doc,Options)->
-    {[{<<"_id">>, Id}]
-        ++ to_json_rev(Start, RevIds)
-        ++ to_json_body(Del, Body)
-        ++ to_json_revisions(Options, Start, RevIds)
-        ++ to_json_meta(Meta)
-        ++ to_json_attachments(Doc#doc.atts, Options)
-    }.
-
-from_json_obj({Props}) ->
-    transfer_fields(Props, #doc{body=[]});
-
-from_json_obj(_Other) ->
-    throw({bad_request, "Document must be a JSON object"}).
-
-parse_revid(RevId) when size(RevId) =:= 32 ->
-    RevInt = erlang:list_to_integer(?b2l(RevId), 16),
-     <<RevInt:128>>;
-parse_revid(RevId) when length(RevId) =:= 32 ->
-    RevInt = erlang:list_to_integer(RevId, 16),
-     <<RevInt:128>>;
-parse_revid(RevId) when is_binary(RevId) ->
-    RevId;
-parse_revid(RevId) when is_list(RevId) ->
-    ?l2b(RevId).
-
-
-parse_rev(Rev) when is_binary(Rev) ->
-    parse_rev(?b2l(Rev));
-parse_rev(Rev) when is_list(Rev) ->
-    SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
-    case SplitRev of
-        {Pos, [$- | RevId]} -> {list_to_integer(Pos), parse_revid(RevId)};
-        _Else -> throw({bad_request, <<"Invalid rev format">>})
-    end;
-parse_rev(_BadRev) ->
-    throw({bad_request, <<"Invalid rev format">>}).
-
-parse_revs([]) ->
-    [];
-parse_revs([Rev | Rest]) ->
-    [parse_rev(Rev) | parse_revs(Rest)].
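-
-% Shell examples of the rev parsing above (the rev id is made up, but
-% shaped like a real 32-hex-digit rev):
-%
-%   1> couch_doc:parse_rev(<<"1-00000000000000000000000000000001">>).
-%   {1,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1>>}
-%   2> couch_doc:parse_rev(<<"badrev">>).
-%   ** exception throw: {bad_request,<<"Invalid rev format">>}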
-
-
-validate_docid(<<"">>) ->
-    throw({bad_request, <<"Document id must not be empty">>});
-validate_docid(Id) when is_binary(Id) ->
-    case couch_util:validate_utf8(Id) of
-        false -> throw({bad_request, <<"Document id must be valid UTF-8">>});
-        true -> ok
-    end,
-    case Id of
-    <<"_design/", _/binary>> -> ok;
-    <<"_local/", _/binary>> -> ok;
-    <<"_", _/binary>> ->
-        throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
-    _Else -> ok
-    end;
-validate_docid(Id) ->
-    ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
-    throw({bad_request, <<"Document id must be a string">>}).
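-
-% Examples of the id rules enforced above:
-%
-%   validate_docid(<<"_design/app">>)  %=> ok
-%   validate_docid(<<"_local/cfg">>)   %=> ok
-%   validate_docid(<<"_hidden">>)      % throws: reserved underscore prefix
-%   validate_docid(<<>>)               % throws: id must not be empty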
-
-transfer_fields([], #doc{body=Fields}=Doc) ->
-    % convert fields back to json object
-    Doc#doc{body={lists:reverse(Fields)}};
-
-transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
-    validate_docid(Id),
-    transfer_fields(Rest, Doc#doc{id=Id});
-
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
-    {Pos, RevId} = parse_rev(Rev),
-    transfer_fields(Rest,
-            Doc#doc{revs={Pos, [RevId]}});
-
-transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
-    % we already got the rev from the _revisions
-    transfer_fields(Rest,Doc);
-
-transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
-    Atts = lists:map(fun({Name, {BinProps}}) ->
-        Md5 = case couch_util:get_value(<<"digest">>, BinProps) of
-            <<"md5-",EncodedMd5/binary>> ->
-                base64:decode(EncodedMd5);
-            _ ->
-               <<>>
-        end,
-        case couch_util:get_value(<<"stub">>, BinProps) of
-        true ->
-            Type = couch_util:get_value(<<"content_type">>, BinProps),
-            RevPos = couch_util:get_value(<<"revpos">>, BinProps, nil),
-            DiskLen = couch_util:get_value(<<"length">>, BinProps),
-            {Enc, EncLen} = att_encoding_info(BinProps),
-            #att{name=Name, data=stub, type=Type, att_len=EncLen,
-                disk_len=DiskLen, encoding=Enc, revpos=RevPos, md5=Md5};
-        _ ->
-            Type = couch_util:get_value(<<"content_type">>, BinProps,
-                    ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
-            RevPos = couch_util:get_value(<<"revpos">>, BinProps, 0),
-            case couch_util:get_value(<<"follows">>, BinProps) of
-            true ->
-                DiskLen = couch_util:get_value(<<"length">>, BinProps),
-                {Enc, EncLen} = att_encoding_info(BinProps),
-                #att{name=Name, data=follows, type=Type, encoding=Enc,
-                    att_len=EncLen, disk_len=DiskLen, revpos=RevPos, md5=Md5};
-            _ ->
-                Value = couch_util:get_value(<<"data">>, BinProps),
-                Bin = base64:decode(Value),
-                LenBin = size(Bin),
-                #att{name=Name, data=Bin, type=Type, att_len=LenBin,
-                        disk_len=LenBin, revpos=RevPos}
-            end
-        end
-    end, JsonBins),
-    transfer_fields(Rest, Doc#doc{atts=Atts});
-
-transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
-    RevIds = couch_util:get_value(<<"ids">>, Props),
-    Start = couch_util:get_value(<<"start">>, Props),
-    if not is_integer(Start) ->
-        throw({doc_validation, "_revisions.start isn't an integer."});
-    not is_list(RevIds) ->
-        throw({doc_validation, "_revisions.ids isn't an array."});
-    true ->
-        ok
-    end,
-    [throw({doc_validation, "RevId isn't a string"}) ||
-            RevId <- RevIds, not is_binary(RevId)],
-    RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
-    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
-
-transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
-    transfer_fields(Rest, Doc#doc{deleted=B});
-
-% ignored fields
-transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
-    transfer_fields(Rest, Doc);
-
-% special fields for replication documents
-transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
-    #doc{body=Fields} = Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
-
-% unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
-    throw({doc_validation,
-            ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
-    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
-
-att_encoding_info(BinProps) ->
-    DiskLen = couch_util:get_value(<<"length">>, BinProps),
-    case couch_util:get_value(<<"encoding">>, BinProps) of
-    undefined ->
-        {identity, DiskLen};
-    Enc ->
-        EncodedLen = couch_util:get_value(<<"encoded_length">>, BinProps, DiskLen),
-        {list_to_existing_atom(?b2l(Enc)), EncodedLen}
-    end.
-
-to_doc_info(FullDocInfo) ->
-    {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
-    DocInfo.
-
-max_seq(Tree, UpdateSeq) ->
-    FoldFun = fun({_Pos, _Key}, Value, _Type, MaxOldSeq) ->
-        case Value of
-            {_Deleted, _DiskPos, OldTreeSeq} ->
-                % Older versions didn't track data sizes.
-                erlang:max(MaxOldSeq, OldTreeSeq);
-            {_Deleted, _DiskPos, OldTreeSeq, _Size} ->
-                erlang:max(MaxOldSeq, OldTreeSeq);
-            _ ->
-                MaxOldSeq
-        end
-    end,
-    couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
-
-to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=Seq}) ->
-    RevInfosAndPath = [
-        {#rev_info{
-            deleted = element(1, LeafVal),
-            body_sp = element(2, LeafVal),
-            seq = element(3, LeafVal),
-            rev = {Pos, RevId}
-        }, Path} || {LeafVal, {Pos, [RevId | _]} = Path} <-
-            couch_key_tree:get_all_leafs(Tree)
-    ],
-    SortedRevInfosAndPath = lists:sort(
-            fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
-                {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
-            % sort descending by {not deleted, rev}
-            {not DeletedA, RevA} > {not DeletedB, RevB}
-        end, RevInfosAndPath),
-    [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
-    RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
-    {#doc_info{id=Id, high_seq=max_seq(Tree, Seq), revs=RevInfos}, WinPath}.
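-
-% The winning revision is the head after sorting descending by
-% {not Deleted, Rev}: any live revision beats any deleted one, and among
-% live revisions the highest {Pos, RevId} wins. For example (rev ids
-% hypothetical):
-%
-%   leafs: {2, <<"b">>} deleted,  {2, <<"a">>} live,  {1, <<"x">>} live
-%   order: {2, <<"a">>}, {1, <<"x">>}, {2, <<"b">>}   % head is the winner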
-
-
-
-
-att_foldl(#att{data=Bin}, Fun, Acc) when is_binary(Bin) ->
-    Fun(Bin, Acc);
-att_foldl(#att{data={Fd,Sp},md5=Md5}, Fun, Acc) ->
-    couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
-att_foldl(#att{data=DataFun,att_len=Len}, Fun, Acc) when is_function(DataFun) ->
-   fold_streamed_data(DataFun, Len, Fun, Acc).
-
-range_att_foldl(#att{data={Fd,Sp}}, From, To, Fun, Acc) ->
-   couch_stream:range_foldl(Fd, Sp, From, To, Fun, Acc).
-
-att_foldl_decode(#att{data={Fd,Sp},md5=Md5,encoding=Enc}, Fun, Acc) ->
-    couch_stream:foldl_decode(Fd, Sp, Md5, Enc, Fun, Acc);
-att_foldl_decode(#att{data=Fun2,att_len=Len, encoding=identity}, Fun, Acc) ->
-       fold_streamed_data(Fun2, Len, Fun, Acc).
-
-att_to_bin(#att{data=Bin}) when is_binary(Bin) ->
-    Bin;
-att_to_bin(#att{data=Iolist}) when is_list(Iolist) ->
-    iolist_to_binary(Iolist);
-att_to_bin(#att{data={_Fd,_Sp}}=Att) ->
-    iolist_to_binary(
-        lists:reverse(att_foldl(
-                Att,
-                fun(Bin,Acc) -> [Bin|Acc] end,
-                []
-        ))
-    );
-att_to_bin(#att{data=DataFun, att_len=Len}) when is_function(DataFun)->
-    iolist_to_binary(
-        lists:reverse(fold_streamed_data(
-            DataFun,
-            Len,
-            fun(Data, Acc) -> [Data | Acc] end,
-            []
-        ))
-    ).
-
-get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
-    case couch_util:get_value(<<"validate_doc_update">>, Props) of
-    undefined ->
-        nil;
-    _Else ->
-        fun(EditDoc, DiskDoc, Ctx, SecObj) ->
-            couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
-        end
-    end.
-
-
-has_stubs(#doc{atts=Atts}) ->
-    has_stubs(Atts);
-has_stubs([]) ->
-    false;
-has_stubs([#att{data=stub}|_]) ->
-    true;
-has_stubs([_Att|Rest]) ->
-    has_stubs(Rest).
-
-merge_stubs(#doc{id = Id}, nil) ->
-    throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
-merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
-    BinDict = dict:from_list([{Name, Att} || #att{name=Name}=Att <- DiskBins]),
-    MergedBins = lists:map(
-        fun(#att{name=Name, data=stub, revpos=StubRevPos}) ->
-            case dict:find(Name, BinDict) of
-            {ok, #att{revpos=DiskRevPos}=DiskAtt}
-                    when DiskRevPos == StubRevPos orelse StubRevPos == nil ->
-                DiskAtt;
-            _ ->
-                throw({missing_stub,
-                        <<"id:", Id/binary, ", name:", Name/binary>>})
-            end;
-        (Att) ->
-            Att
-        end, MemBins),
-    StubsDoc#doc{atts= MergedBins}.
-
-fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
-    Acc;
-fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
-    Bin = RcvFun(),
-    ResultAcc = Fun(Bin, Acc),
-    fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
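-
-% fold_streamed_data/4 pulls chunks from RcvFun until the declared length
-% is consumed, e.g. buffering a streamed attachment into one binary, as
-% att_to_bin/1 above does:
-%
-%   Collect = fun(Bin, Acc) -> [Bin | Acc] end,
-%   Body = iolist_to_binary(
-%       lists:reverse(fold_streamed_data(DataFun, Len, Collect, [])))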
-
-len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
-    AttsSize = lists:foldl(fun(Att, AccAttsSize) ->
-            #att{
-                data=Data,
-                name=Name,
-                att_len=AttLen,
-                disk_len=DiskLen,
-                type=Type,
-                encoding=Encoding
-            } = Att,
-            case Data of
-            stub ->
-                AccAttsSize;
-            _ ->
-                AccAttsSize +
-                4 + % "\r\n\r\n"
-                case SendEncodedAtts of
-                true ->
-                    % header
-                    length(integer_to_list(AttLen)) +
-                    AttLen;
-                _ ->
-                    % header
-                    length(integer_to_list(DiskLen)) +
-                    DiskLen
-                end +
-                4 + % "\r\n--"
-                size(Boundary) +
-
-                % attachment headers
-                % (the digits of the Content-Length value were counted above)
-                size(Name) +
-                size(Type) +
-                length("\r\nContent-Disposition: attachment; filename=\"\"") +
-                length("\r\nContent-Type: ") +
-                length("\r\nContent-Length: ") +
-                case Encoding of
-                identity ->
-                    0;
-                 _ ->
-                    length(atom_to_list(Encoding)) +
-                    length("\r\nContent-Encoding: ")
-                end
-            end
-        end, 0, Atts),
-    if AttsSize == 0 ->
-        {<<"application/json">>, iolist_size(JsonBytes)};
-    true ->
-        {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
-            2 + % "--"
-            size(Boundary) +
-            36 + % "\r\ncontent-type: application/json\r\n\r\n"
-            iolist_size(JsonBytes) +
-            4 + % "\r\n--"
-            size(Boundary) +
-            + AttsSize +
-            2 % "--"
-            }
-    end.
-
-doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
-    SendEncodedAtts) ->
-    case lists:any(fun(#att{data=Data})-> Data /= stub end, Atts) of
-    true ->
-        WriteFun([<<"--", Boundary/binary,
-                "\r\nContent-Type: application/json\r\n\r\n">>,
-                JsonBytes, <<"\r\n--", Boundary/binary>>]),
-        atts_to_mp(Atts, Boundary, WriteFun, SendEncodedAtts);
-    false ->
-        WriteFun(JsonBytes)
-    end.
-
-atts_to_mp([], _Boundary, WriteFun, _SendEncAtts) ->
-    WriteFun(<<"--">>);
-atts_to_mp([#att{data=stub} | RestAtts], Boundary, WriteFun,
-        SendEncodedAtts) ->
-    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts);
-atts_to_mp([Att | RestAtts], Boundary, WriteFun,
-        SendEncodedAtts)  ->
-    #att{
-        name=Name,
-        att_len=AttLen,
-        disk_len=DiskLen,
-        type=Type,
-        encoding=Encoding
-    } = Att,
-
-    % write headers
-    LengthBin = case SendEncodedAtts of
-    true -> list_to_binary(integer_to_list(AttLen));
-    false -> list_to_binary(integer_to_list(DiskLen))
-    end,
-    WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
-    WriteFun(<<"\r\nContent-Type: ", Type/binary>>),
-    WriteFun(<<"\r\nContent-Length: ", LengthBin/binary>>),
-    case Encoding of
-    identity ->
-        ok;
-    _ ->
-        EncodingBin = atom_to_binary(Encoding, latin1),
-        WriteFun(<<"\r\nContent-Encoding: ", EncodingBin/binary>>)
-    end,
-
-    % write data
-    WriteFun(<<"\r\n\r\n">>),
-    AttFun = case SendEncodedAtts of
-    false ->
-        fun att_foldl_decode/3;
-    true ->
-        fun att_foldl/3
-    end,
-    AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
-    WriteFun(<<"\r\n--", Boundary/binary>>),
-    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts).
-
-
-doc_from_multi_part_stream(ContentType, DataFun) ->
-    Parent = self(),
-    Parser = spawn_link(fun() ->
-        {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request(
-            ContentType, DataFun,
-            fun(Next) -> mp_parse_doc(Next, []) end),
-        unlink(Parent),
-        Parent ! {self(), finished}
-        end),
-    Ref = make_ref(),
-    Parser ! {get_doc_bytes, Ref, self()},
-    receive
-    {doc_bytes, Ref, DocBytes} ->
-        Doc = from_json_obj(?JSON_DECODE(DocBytes)),
-        % go through the attachments looking for 'follows' in the data,
-        % and replace it with a function that reads the data from the
-        % MIME stream.
-        ReadAttachmentDataFun = fun() ->
-            Parser ! {get_bytes, Ref, self()},
-            receive {bytes, Ref, Bytes} -> Bytes end
-        end,
-        Atts2 = lists:map(
-            fun(#att{data=follows}=A) ->
-                A#att{data=ReadAttachmentDataFun};
-            (A) ->
-                A
-            end, Doc#doc.atts),
-        WaitFun = fun() ->
-            receive {Parser, finished} -> ok end,
-            erlang:put(mochiweb_request_recv, true)
-        end,
-        {ok, Doc#doc{atts=Atts2}, WaitFun, Parser}
-    end.
-
-mp_parse_doc({headers, H}, []) ->
-    case couch_util:get_value("content-type", H) of
-    {"application/json", _} ->
-        fun (Next) ->
-            mp_parse_doc(Next, [])
-        end
-    end;
-mp_parse_doc({body, Bytes}, AccBytes) ->
-    fun (Next) ->
-        mp_parse_doc(Next, [Bytes | AccBytes])
-    end;
-mp_parse_doc(body_end, AccBytes) ->
-    receive {get_doc_bytes, Ref, From} ->
-        From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
-    end,
-    fun mp_parse_atts/1.
-
-mp_parse_atts(eof) ->
-    ok;
-mp_parse_atts({headers, _H}) ->
-    fun mp_parse_atts/1;
-mp_parse_atts({body, Bytes}) ->
-    receive {get_bytes, Ref, From} ->
-        From ! {bytes, Ref, Bytes}
-    end,
-    fun mp_parse_atts/1;
-mp_parse_atts(body_end) ->
-    fun mp_parse_atts/1.
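-
-% The mp_parse_* callbacks are continuation-style: each invocation handles
-% one multipart event and returns the fun for the next one, while body
-% bytes are handed over on demand to whichever process asked, as the reader
-% fun built in doc_from_multi_part_stream/2 does:
-%
-%   Parser ! {get_bytes, Ref, self()},
-%   receive {bytes, Ref, Bytes} -> Bytes end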
-
-
-abort_multi_part_stream(Parser) ->
-    abort_multi_part_stream(Parser, erlang:monitor(process, Parser)).
-
-abort_multi_part_stream(Parser, MonRef) ->
-    case is_process_alive(Parser) of
-    true ->
-        Parser ! {get_bytes, nil, self()},
-        receive
-        {bytes, nil, _Bytes} ->
-             abort_multi_part_stream(Parser, MonRef);
-        {'DOWN', MonRef, _, _, _} ->
-             ok
-        end;
-    false ->
-        erlang:demonitor(MonRef, [flush])
-    end.
-
-
-with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
-    Doc#doc{body = couch_compress:decompress(Body)};
-with_ejson_body(#doc{body = {_}} = Doc) ->
-    Doc.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_drv.erl
----------------------------------------------------------------------
diff --git a/couch_drv.erl b/couch_drv.erl
deleted file mode 100644
index 18d1699..0000000
--- a/couch_drv.erl
+++ /dev/null
@@ -1,62 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_drv).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-    code_change/3]).
-
--export([start_link/0]).
-
--include("couch_db.hrl").
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
-    LibDir = util_driver_dir(),
-    case erl_ddll:load(LibDir, "couch_icu_driver") of
-    ok ->
-        {ok, nil};
-    {error, already_loaded} ->
-        ?LOG_INFO("~p reloading couch_icu_driver", [?MODULE]),
-        ok = erl_ddll:reload(LibDir, "couch_icu_driver"),
-        {ok, nil};
-    {error, Error} ->
-        {stop, erl_ddll:format_error(Error)}
-    end.
-
-handle_call(_Request, _From, State) ->
-    {reply, ok, State}.
-
-handle_cast(_Request, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-
-    {ok, State}.
-
-
-% private API
-util_driver_dir() ->
-    case couch_config:get("couchdb", "util_driver_dir", null) of
-    null ->
-        filename:join(couch_util:priv_dir(), "lib");
-    LibDir0 ->
-        LibDir0
-    end.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_ejson_compare.erl
----------------------------------------------------------------------
diff --git a/couch_ejson_compare.erl b/couch_ejson_compare.erl
deleted file mode 100644
index 083ff42..0000000
--- a/couch_ejson_compare.erl
+++ /dev/null
@@ -1,113 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_ejson_compare).
-
--export([less/2, less_json_ids/2, less_json/2]).
-
--on_load(init/0).
-
-
-init() ->
-    LibDir = case couch_config:get("couchdb", "util_driver_dir") of
-    undefined ->
-        filename:join(couch_util:priv_dir(), "lib");
-    LibDir0 ->
-        LibDir0
-    end,
-    NumScheds = erlang:system_info(schedulers),
-    (catch erlang:load_nif(filename:join([LibDir, ?MODULE]), NumScheds)),
-    case erlang:system_info(otp_release) of
-    "R13B03" -> true;
-    _ -> ok
-    end.
-
-
-less(A, B) ->
-    try
-        less_nif(A, B)
-    catch
-    error:badarg ->
-    % Maybe the EJSON structure is too deep; fall back to Erlang land.
-        less_erl(A, B)
-    end.
-
-less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
-    case less(JsonA, JsonB) of
-    0 ->
-        IdA < IdB;
-    Result ->
-        Result < 0
-    end.
-
-less_json(A,B) ->
-    less(A, B) < 0.
-
-
-less_nif(A, B) ->
-    less_erl(A, B).
-
-
-less_erl(A,A)                                 -> 0;
-
-less_erl(A,B) when is_atom(A), is_atom(B)     -> atom_sort(A) - atom_sort(B);
-less_erl(A,_) when is_atom(A)                 -> -1;
-less_erl(_,B) when is_atom(B)                 -> 1;
-
-less_erl(A,B) when is_number(A), is_number(B) -> A - B;
-less_erl(A,_) when is_number(A)               -> -1;
-less_erl(_,B) when is_number(B)               -> 1;
-
-less_erl(A,B) when is_binary(A), is_binary(B) -> couch_util:collate(A,B);
-less_erl(A,_) when is_binary(A)               -> -1;
-less_erl(_,B) when is_binary(B)               -> 1;
-
-less_erl(A,B) when is_list(A), is_list(B)     -> less_list(A,B);
-less_erl(A,_) when is_list(A)                 -> -1;
-less_erl(_,B) when is_list(B)                 -> 1;
-
-less_erl({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
-less_erl({A},_) when is_list(A)               -> -1;
-less_erl(_,{B}) when is_list(B)               -> 1.
-
-atom_sort(null) -> 1;
-atom_sort(false) -> 2;
-atom_sort(true) -> 3.
-
-less_props([], [_|_]) ->
-    -1;
-less_props(_, []) ->
-    1;
-less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
-    case couch_util:collate(AKey, BKey) of
-    0 ->
-        case less_erl(AValue, BValue) of
-        0 ->
-            less_props(RestA, RestB);
-        Result ->
-            Result
-        end;
-    Result ->
-        Result
-    end.
-
-less_list([], [_|_]) ->
-    -1;
-less_list(_, []) ->
-    1;
-less_list([A|RestA], [B|RestB]) ->
-    case less_erl(A,B) of
-    0 ->
-        less_list(RestA, RestB);
-    Result ->
-        Result
-    end.
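
The less_erl/2 clauses above encode CouchDB's view collation order:
null < false < true, then numbers, then strings (compared via ICU
collation), then arrays, then objects. A minimal shell sketch of what
that ordering implies, assuming the module is loaded (values are
illustrative):

    1> couch_ejson_compare:less_json(null, false).   % null sorts first
    true
    2> couch_ejson_compare:less_json(true, 0).       % atoms before numbers
    true
    3> couch_ejson_compare:less_json(1, <<"a">>).    % numbers before strings
    true
    4> couch_ejson_compare:less_json(<<"a">>, [1]).  % strings before arrays
    true
    5> couch_ejson_compare:less_json([1], {[{<<"k">>, 1}]}). % arrays before objects
    true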

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_event_sup.erl
----------------------------------------------------------------------
diff --git a/couch_event_sup.erl b/couch_event_sup.erl
deleted file mode 100644
index 07c4879..0000000
--- a/couch_event_sup.erl
+++ /dev/null
@@ -1,73 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-%% The purpose of this module is to allow event handlers to participate in
-%% Erlang supervisor trees. It provides a monitorable process that crashes if
-%% the event handler fails. The process, when shut down, deregisters the event handler.
-
--module(couch_event_sup).
--behaviour(gen_server).
-
--include("couch_db.hrl").
-
--export([start_link/3,start_link/4, stop/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
-
-%
-% Instead of calling:
-% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
-%
-% do this:
-% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
-%
-% The benefit is that the event handler is now part of the process tree, and
-% can be started, restarted and shut down consistently like the rest of the
-% server components.
-%
-% And now if the "event" crashes, the supervisor is notified and can restart
-% the event handler.
-%
-% Use this form for a named process:
-% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
-%
-
-start_link(EventMgr, EventHandler, Args) ->
-    gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-start_link(ServerName, EventMgr, EventHandler, Args) ->
-    gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
-
-stop(Pid) ->
-    gen_server:cast(Pid, stop).
-
-init({EventMgr, EventHandler, Args}) ->
-    case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
-    ok ->
-        {ok, {EventMgr, EventHandler}};
-    {stop, Error} ->
-        {stop, Error}
-    end.
-
-terminate(_Reason, _State) ->
-    ok.
-
-handle_call(_Whatever, _From, State) ->
-    {ok, State}.
-
-handle_cast(stop, State) ->
-    {stop, normal, State}.
-
-handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
-    {stop, Reason, State}.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.


[26/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
support static build

This change backports the static build from rcouch.

A static build of SpiderMonkey is the default. To build against the
libraries installed on the system, pass `libs=shared` to make.

By default couch_collate uses the ICU installed on the system, unless
you pass `icu=static` to make.

At any time you can override the compilation environment with the
following variables (see the sketch after this list):

JS_CFLAGS
JS_LIBS
ICU_CFLAGS
ICU_LDFLAGS


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/9e429fd2
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/9e429fd2
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/9e429fd2

Branch: refs/heads/import-rcouch
Commit: 9e429fd27408bc6721fbd1970054649469ddfdb8
Parents: e9a8fe8
Author: benoitc <be...@apache.org>
Authored: Wed Jan 8 23:15:49 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 build_spidermonkey.sh         |  251 +++
 patches/js/patch-configure    | 3508 ++++++++++++++++++++++++++++++++++++
 patches/js/patch-configure_in |   56 +
 patches/js/patch-jscntxt_h    |   10 +
 patches/js/patch-jsprf_cpp    |   11 +
 rebar.config.script           |   95 +-
 6 files changed, 3906 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/build_spidermonkey.sh
----------------------------------------------------------------------
diff --git a/build_spidermonkey.sh b/build_spidermonkey.sh
new file mode 100755
index 0000000..95a15dd
--- /dev/null
+++ b/build_spidermonkey.sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+
+CORE_TOP=`pwd`
+export CORE_TOP
+
+CURLBIN=`which curl`
+if ! test -n "$CURLBIN"; then
+    echo "Error: curl is required. Add it to 'PATH'" 1>&2
+    exit 1
+fi
+
+GUNZIP=`which gunzip`
+UNZIP=`which unzip`
+TAR=`which tar`
+GNUMAKE=`which gmake 2>/dev/null || which make`
+PATCHES=$CORE_TOP/patches
+STATICLIBS=$CORE_TOP/.libs
+DISTDIR=$CORE_TOP/.dists
+
+# nspr sources
+NSPR_VER=4.8.8
+NSPR_DISTNAME=nspr-$NSPR_VER.tar.gz
+NSPR_SITE=http://dl.refuge.io
+
+# spidermonkey js sources
+JS_VER=185-1.0.0
+JS_REALVER=1.8.5
+JS_DISTNAME=js$JS_VER.tar.gz
+JS_SITE=http://dl.refuge.io
+JSDIR=$STATICLIBS/js-$JS_REALVER
+JS_LIBDIR=$STATICLIBS/js/lib
+JS_INCDIR=$STATICLIBS/js/include
+
+
+[ "$MACHINE" ] || MACHINE=`(uname -m) 2>/dev/null` || MACHINE="unknown"
+[ "$RELEASE" ] || RELEASE=`(uname -r) 2>/dev/null` || RELEASE="unknown"
+[ "$SYSTEM" ] || SYSTEM=`(uname -s) 2>/dev/null`  || SYSTEM="unknown"
+[ "$BUILD" ] || VERSION=`(uname -v) 2>/dev/null` || VERSION="unknown"
+
+
+CFLAGS="-g -O2 -Wall"
+LDFLAGS="-lstdc++"
+ARCH=
+ISA64=
+GNUMAKE=make
+CC=gcc
+CXX=g++
+PATCH=patch
+case "$SYSTEM" in
+    Linux)
+        ARCH=`arch 2>/dev/null`
+        ;;
+    FreeBSD|OpenBSD|NetBSD)
+        ARCH=`(uname -p) 2>/dev/null`
+        GNUMAKE=gmake
+        ;;
+    Darwin)
+        ARCH=`(uname -p) 2>/dev/null`
+        ISA64=`(sysctl -n hw.optional.x86_64) 2>/dev/null`
+        ;;
+    SunOS) # Solaris; uname -s reports SunOS
+        ARCH=`(uname -p) 2>/dev/null`
+        GNUMAKE=gmake
+        PATCH=gpatch
+        ;;
+    *)
+        ARCH="unknown"
+        ;;
+esac
+
+
+# TODO: add mirror & signature validation support
+fetch()
+{
+    TARGET=$DISTDIR/$1
+    if ! test -f $TARGET; then
+        echo "==> Fetch $1 to $TARGET"
+        $CURLBIN --progress-bar -L $2/$1 -o $TARGET
+    fi
+}
+
+build_nspr()
+{
+    NSPR_CONFIGURE_ENV=""
+    case "$SYSTEM" in
+        Linux)
+            ARCH=`arch 2>/dev/null`
+            if [ "$ARCH" = "x86_64" ]; then
+                NSPR_CONFIGURE_ENV="--enable-64bit"
+            fi
+            CFLAGS="$CFLAGS -lpthread"
+            ;;
+        FreeBSD|OpenBSD|NetBSD)
+            ARCH=`(uname -p) 2>/dev/null`
+            if [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
+                NSPR_CONFIGURE_ENV="$NSPR_CONFIGURE_ENV --enable-64bit"
+            fi
+            ;;
+        Darwin)
+            if [ "$ISA64" = "1" ]; then
+                NSPR_CONFIGURE_ENV="$NSPR_CONFIGURE_ENV --enable-64bit"
+            fi
+            ;;
+    esac
+
+    fetch $NSPR_DISTNAME $NSPR_SITE
+
+    # clean the build
+    rm -rf $STATICLIBS/nspr*
+
+    echo "==> build nspr"
+    cd $STATICLIBS
+    $GUNZIP -c $DISTDIR/$NSPR_DISTNAME | $TAR xf -
+
+    cd $STATICLIBS/nspr-$NSPR_VER/mozilla/nsprpub
+    ./configure --disable-debug --enable-optimize \
+        --prefix=$STATICLIBS/nsprpub $NSPR_CONFIGURE_ENV
+
+    $GNUMAKE all
+    $GNUMAKE install
+}
+
+build_js()
+{
+
+    fetch $JS_DISTNAME $JS_SITE
+
+    # clean the build
+    rm -rf $STATICLIBS/js*
+
+    mkdir -p $JS_LIBDIR
+    mkdir -p $JS_INCDIR
+
+    cd $STATICLIBS
+    $GUNZIP -c $DISTDIR/$JS_DISTNAME | $TAR -xf -
+
+    echo "==> build js"
+    cd $JSDIR/js/src
+    $PATCH -p0 -i $PATCHES/js/patch-jsprf_cpp || echo "skipping patch"
+    $PATCH -p0 -i $PATCHES/js/patch-configure || echo "skipping patch"
+
+    env CFLAGS="$CFLAGS" LDFLAGS="$LDFLAGS" \
+        CPPFLAGS="-DXP_UNIX -DJS_C_STRINGS_ARE_UTF8" \
+        ./configure --prefix=$STATICLIBS/js \
+                    --disable-debug \
+                    --enable-optimize \
+                    --enable-static \
+                    --disable-shared-js \
+                    --disable-tests \
+                    --with-system-nspr \
+                    --with-nspr-prefix=$STATICLIBS/nsprpub && \
+        $GNUMAKE all || exit 1
+
+    mkdir -p $JS_INCDIR/js
+    cp $JSDIR/js/src/*.h $JS_INCDIR
+    cp $JSDIR/js/src/*.tbl $JS_INCDIR
+    cp $JSDIR/js/src/libjs_static.a $JS_LIBDIR
+}
+
+
+do_setup()
+{
+    echo "==> spidermonkey (compile)"
+    mkdir -p $DISTDIR
+    mkdir -p $STATICLIBS
+}
+
+do_builddeps()
+{
+    if [ ! -f $STATICLIBS/nsprpub/lib/libnspr4.a ]; then
+        build_nspr
+    fi
+
+    if [ ! -f $STATICLIBS/js/lib/libjs_static.a ]; then
+        build_js
+    fi
+}
+
+
+clean()
+{
+    rm -rf $STATICLIBS
+    rm -rf $DISTDIR
+}
+
+
+
+usage()
+{
+    cat << EOF
+Usage: $(basename $0) [command] [OPTIONS]
+
+The $(basename $0) command compiles Mozilla SpiderMonkey and ICU statically
+for couch_core.
+
+Commands:
+
+    all:        build couch_core static libs
+    clean:      clean static libs
+    js:         build only the SpiderMonkey static lib
+    nspr:       build only the NSPR static lib
+    -?:         display usage
+
+Report bugs at <https://github.com/refuge/couch_core>.
+EOF
+}
+
+
+if [ ! "x$COUCHDB_STATIC" = "x1" ]; then
+    exit 0
+fi
+
+if [ "x$1" = "x" ]; then
+    do_setup
+    do_builddeps
+	exit 0
+fi
+
+case "$1" in
+    all)
+        shift 1
+        do_setup
+        do_builddeps
+        ;;
+    clean)
+        shift 1
+        clean
+        ;;
+    js)
+        shift 1
+        do_setup
+        build_js
+        ;;
+    nspr)
+        shift 1
+        do_setup
+        build_nspr
+        ;;
+    help|--help|-h|-?)
+        usage
+        exit 0
+        ;;
+    *)
+        echo "$(basename $0): ERROR: unknown command $1" 1>&2
+        echo 1>&2
+        usage 1>&2
+        echo "### $(basename $0): Exiting." 1>&2
+        exit 1
+        ;;
+esac
+
+
+exit 0
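
For reference, the script is a no-op unless COUCHDB_STATIC=1 is set in
the environment, and it accepts the all, clean, js and nspr commands
dispatched above. Typical invocations:

    # build the static NSPR and SpiderMonkey dependencies
    COUCHDB_STATIC=1 sh build_spidermonkey.sh all

    # remove downloaded archives and built libraries
    COUCHDB_STATIC=1 sh build_spidermonkey.sh clean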


[34/41] make couch_httpd a full couch application

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_db.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_db.erl b/src/couch_httpd_db.erl
deleted file mode 100644
index 0a7c17c..0000000
--- a/src/couch_httpd_db.erl
+++ /dev/null
@@ -1,1226 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_db).
--include("couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
-    db_req/2, couch_doc_open/4,handle_changes_req/2,
-    update_doc_result_to_json/1, update_doc_result_to_json/2,
-    handle_design_info_req/3]).
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, absolute_uri/2, send/2,
-    start_response_length/4, send_error/4]).
-
--record(doc_query_args, {
-    options = [],
-    rev = nil,
-    open_revs = [],
-    update_type = interactive_edit,
-    atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
-        db_url_handlers=DbUrlHandlers}=Req)->
-    case {Method, RestParts} of
-    {'PUT', []} ->
-        create_db_req(Req, DbName);
-    {'DELETE', []} ->
-        % if we get ?rev=... the user is using a faulty script where the
-        % document id is empty by accident. Let them recover safely.
-        case couch_httpd:qs_value(Req, "rev", false) of
-            false -> delete_db_req(Req, DbName);
-            _Rev -> throw({bad_request,
-                "You tried to DELETE a database with a ?=rev parameter. "
-                ++ "Did you mean to DELETE a document instead?"})
-        end;
-    {_, []} ->
-        do_db_req(Req, fun db_req/2);
-    {_, [SecondPart|_]} ->
-        Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
-        do_db_req(Req, Handler)
-    end.
-
-handle_changes_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
-    handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "GET,HEAD,POST").
-
-handle_changes_req1(Req, #db{name=DbName}=Db) ->
-    AuthDbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
-    case AuthDbName of
-    DbName ->
-        % in the authentication database, _changes is admin-only.
-        ok = couch_db:check_is_admin(Db);
-    _Else ->
-        % on other databases, _changes is free for all.
-        ok
-    end,
-    handle_changes_req2(Req, Db).
-
-handle_changes_req2(Req, Db) ->
-    MakeCallback = fun(Resp) ->
-        fun({change, {ChangeProp}=Change, _}, "eventsource") ->
-            Seq = proplists:get_value(<<"seq">>, ChangeProp),
-            send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
-                              "\n", "id: ", ?JSON_ENCODE(Seq),
-                              "\n\n"]);
-        ({change, Change, _}, "continuous") ->
-            send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
-        ({change, Change, Prepend}, _) ->
-            send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
-        (start, "eventsource") ->
-            ok;
-        (start, "continuous") ->
-            ok;
-        (start, _) ->
-            send_chunk(Resp, "{\"results\":[\n");
-        ({stop, _EndSeq}, "eventsource") ->
-            end_json_response(Resp);
-        ({stop, EndSeq}, "continuous") ->
-            send_chunk(
-                Resp,
-                [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
-            ),
-            end_json_response(Resp);
-        ({stop, EndSeq}, _) ->
-            send_chunk(
-                Resp,
-                io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
-            ),
-            end_json_response(Resp);
-        (timeout, _) ->
-            send_chunk(Resp, "\n")
-        end
-    end,
-    ChangesArgs = parse_changes_query(Req, Db),
-    ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
-    WrapperFun = case ChangesArgs#changes_args.feed of
-    "normal" ->
-        {ok, Info} = couch_db:get_db_info(Db),
-        CurrentEtag = couch_httpd:make_etag(Info),
-        fun(FeedChangesFun) ->
-            couch_httpd:etag_respond(
-                Req,
-                CurrentEtag,
-                fun() ->
-                    {ok, Resp} = couch_httpd:start_json_response(
-                         Req, 200, [{"ETag", CurrentEtag}]
-                    ),
-                    FeedChangesFun(MakeCallback(Resp))
-                end
-            )
-        end;
-    "eventsource" ->
-        Headers = [
-            {"Content-Type", "text/event-stream"},
-            {"Cache-Control", "no-cache"}
-        ],
-        {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end;
-    _ ->
-        % "longpoll" or "continuous"
-        {ok, Resp} = couch_httpd:start_json_response(Req, 200),
-        fun(FeedChangesFun) ->
-            FeedChangesFun(MakeCallback(Resp))
-        end
-    end,
-    couch_stats_collector:increment(
-        {httpd, clients_requesting_changes}
-    ),
-    try
-        WrapperFun(ChangesFun)
-    after
-    couch_stats_collector:decrement(
-        {httpd, clients_requesting_changes}
-    )
-    end.
-
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
-    case Req#httpd.path_parts of
-        [_DbName, <<"_compact">>] ->
-            ok = couch_db:check_is_admin(Db),
-            couch_httpd:validate_ctype(Req, "application/json"),
-            {ok, _} = couch_db:start_compact(Db),
-            send_json(Req, 202, {[{ok, true}]});
-        [_DbName, <<"_compact">>, DesignName | _] ->
-            DesignId = <<"_design/", DesignName/binary>>,
-            DDoc = couch_httpd_db:couch_doc_open(
-                Db, DesignId, nil, [ejson_body]
-            ),
-            couch_mrview_http:handle_compact_req(Req, Db, DDoc)
-    end;
-
-handle_compact_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_design_req(#httpd{
-        path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
-        design_url_handlers = DesignUrlHandlers
-    }=Req, Db) ->
-    case couch_db:is_system_db(Db) of
-    true ->
-        case (catch couch_db:check_is_admin(Db)) of
-        ok -> ok;
-        _ ->
-            throw({forbidden, <<"Only admins can access design document",
-                " actions for system databases.">>})
-        end;
-    false -> ok
-    end,
-
-    % load ddoc
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
-        throw({not_found, <<"missing handler: ", Action/binary>>})
-    end),
-    Handler(Req, Db, DDoc);
-
-handle_design_req(Req, Db) ->
-    db_req(Req, Db).
-
-handle_design_info_req(#httpd{
-            method='GET',
-            path_parts=[_DbName, _Design, DesignName, _]
-        }=Req, Db, _DDoc) ->
-    DesignId = <<"_design/", DesignName/binary>>,
-    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-    couch_mrview_http:handle_info_req(Req, Db, DDoc).
-
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        couch_db:close(Db),
-        DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-        send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_server:delete(DbName, [{user_ctx, UserCtx}]) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    Error ->
-        throw(Error)
-    end.
-
-do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
-    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
-    {ok, Db} ->
-        try
-            Fun(Req, Db)
-        after
-            catch couch_db:close(Db)
-        end;
-    Error ->
-        throw(Error)
-    end.
-
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
-    {ok, DbInfo} = couch_db:get_db_info(Db),
-    send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
-    validate_attachment_names(Doc),
-    Doc2 = case Doc#doc.id of
-        <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
-        _ ->
-            Doc
-    end,
-    DocId = Doc2#doc.id,
-    update_doc(Req, Db, DocId, Doc2);
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    UpdateSeq = couch_db:get_update_seq(Db),
-    CommittedSeq = couch_db:get_committed_update_seq(Db),
-    {ok, StartTime} =
-    case couch_httpd:qs_value(Req, "seq") of
-    undefined ->
-        couch_db:ensure_full_commit(Db);
-    RequiredStr ->
-        RequiredSeq = list_to_integer(RequiredStr),
-        if RequiredSeq > UpdateSeq ->
-            throw({bad_request,
-                "can't do a full commit ahead of current update_seq"});
-        RequiredSeq > CommittedSeq ->
-            couch_db:ensure_full_commit(Db);
-        true ->
-            {ok, Db#db.instance_start_time}
-        end
-    end,
-    send_json(Req, 201, {[
-        {ok, true},
-        {instance_start_time, StartTime}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
-    couch_stats_collector:increment({httpd, bulk_requests}),
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {JsonProps} = couch_httpd:json_body_obj(Req),
-    case couch_util:get_value(<<"docs">>, JsonProps) of
-    undefined ->
-        send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
-    DocsArray ->
-        case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-        "true" ->
-            Options = [full_commit];
-        "false" ->
-            Options = [delay_commit];
-        _ ->
-            Options = []
-        end,
-        case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
-        true ->
-            Docs = lists:map(
-                fun({ObjProps} = JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Id = case Doc#doc.id of
-                        <<>> -> couch_uuids:new();
-                        Id0 -> Id0
-                    end,
-                    case couch_util:get_value(<<"_rev">>, ObjProps) of
-                    undefined ->
-                       Revs = {0, []};
-                    Rev  ->
-                        {Pos, RevId} = couch_doc:parse_rev(Rev),
-                        Revs = {Pos, [RevId]}
-                    end,
-                    Doc#doc{id=Id,revs=Revs}
-                end,
-                DocsArray),
-            Options2 =
-            case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
-            true  -> [all_or_nothing|Options];
-            _ -> Options
-            end,
-            case couch_db:update_docs(Db, Docs, Options2) of
-            {ok, Results} ->
-                % output the results
-                DocResults = lists:zipwith(fun update_doc_result_to_json/2,
-                    Docs, Results),
-                send_json(Req, 201, DocResults);
-            {aborted, Errors} ->
-                ErrorsJson =
-                    lists:map(fun update_doc_result_to_json/1, Errors),
-                send_json(Req, 417, ErrorsJson)
-            end;
-        false ->
-            Docs = lists:map(fun(JsonObj) ->
-                    Doc = couch_doc:from_json_obj(JsonObj),
-                    validate_attachment_names(Doc),
-                    Doc
-                end, DocsArray),
-            {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
-            ErrorsJson =
-                lists:map(fun update_doc_result_to_json/1, Errors),
-            send_json(Req, 201, ErrorsJson)
-        end
-    end;
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {IdsRevs} = couch_httpd:json_body_obj(Req),
-    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-
-    case couch_db:purge_docs(Db, IdsRevs2) of
-    {ok, PurgeSeq, PurgedIdsRevs} ->
-        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
-        send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
-    Error ->
-        throw(Error)
-    end;
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
-    send_json(Req, {[
-        {missing_revs, {Results2}}
-    ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
-    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
-    JsonDocIdRevs2 =
-        [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
-    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
-    Results2 =
-    lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
-        {Id,
-            {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
-                if PossibleAncestors == [] ->
-                    [];
-                true ->
-                    [{possible_ancestors,
-                        couch_doc:revs_to_strs(PossibleAncestors)}]
-                end}}
-    end, Results),
-    send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    SecObj = couch_httpd:json_body(Req),
-    ok = couch_db:set_security(Db, SecObj),
-    send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
-        Db) ->
-    Limit = couch_httpd:json_body(Req),
-    case is_integer(Limit) of
-    true ->
-        ok = couch_db:set_revs_limit(Db, Limit),
-        send_json(Req, {[{<<"ok">>, true}]});
-    false ->
-        throw({bad_request, <<"Rev limit has to be an integer">>})
-    end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
-    send_json(Req, couch_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
-    send_method_not_allowed(Req, "PUT,GET");
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
-    PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
-    [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
-        [{return, list}]),
-    couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
-        mochiweb_util:join(PathTail, "_design%2F"));
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
-    throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
-    db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
-    throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
-    db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
-    db_attachment_req(Req, Db, DocId, FileNameParts).
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
-    % check for the existence of the doc to handle the 404 case.
-    couch_doc_open(Db, DocId, nil, []),
-    case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
-    Rev ->
-        update_doc(Req, Db, DocId,
-                couch_doc_from_req(Req, DocId,
-                    {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
-    end;
-
-db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
-    #doc_query_args{
-        rev = Rev,
-        open_revs = Revs,
-        options = Options1,
-        atts_since = AttsSince
-    } = parse_doc_query(Req),
-    Options = case AttsSince of
-    nil ->
-        Options1;
-    RevList when is_list(RevList) ->
-        [{atts_since, RevList}, attachments | Options1]
-    end,
-    case Revs of
-    [] ->
-        Doc = couch_doc_open(Db, DocId, Rev, Options),
-        send_doc(Req, Doc, Options);
-    _ ->
-        {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
-        case MochiReq:accepts_content_type("multipart/mixed") of
-        false ->
-            {ok, Resp} = start_json_response(Req, 200),
-            send_chunk(Resp, "["),
-            % We loop through the docs. The first time through the separator
-            % is whitespace, then a comma on subsequent iterations.
-            lists:foldl(
-                fun(Result, AccSeparator) ->
-                    case Result of
-                    {ok, Doc} ->
-                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
-                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
-                        send_chunk(Resp, AccSeparator ++ Json);
-                    {{not_found, missing}, RevId} ->
-                        RevStr = couch_doc:rev_to_str(RevId),
-                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-                        send_chunk(Resp, AccSeparator ++ Json)
-                    end,
-                    "," % AccSeparator now has a comma
-                end,
-                "", Results),
-            send_chunk(Resp, "]"),
-            end_json_response(Resp);
-        true ->
-            send_docs_multipart(Req, Results, Options)
-        end
-    end;
-
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
-    couch_httpd:validate_referer(Req),
-    couch_doc:validate_docid(DocId),
-    couch_httpd:validate_ctype(Req, "multipart/form-data"),
-    Form = couch_httpd:parse_form(Req),
-    case couch_util:get_value("_doc", Form) of
-    undefined ->
-        Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
-        {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
-    Json ->
-        Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
-    end,
-    UpdatedAtts = [
-        #att{name=validate_attachment_name(Name),
-            type=list_to_binary(ContentType),
-            data=Content} ||
-        {Name, {ContentType, _}, Content} <-
-        proplists:get_all_values("_attachments", Form)
-    ],
-    #doc{atts=OldAtts} = Doc,
-    OldAtts2 = lists:flatmap(
-        fun(#att{name=OldName}=Att) ->
-            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
-            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
-            _ -> [] % the attachment was in the UpdatedAtts, drop it
-            end
-        end, OldAtts),
-    NewDoc = Doc#doc{
-        atts = UpdatedAtts ++ OldAtts2
-    },
-    update_doc(Req, Db, DocId, NewDoc);
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
-    couch_doc:validate_docid(DocId),
-
-    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
-    ("multipart/related;" ++ _) = ContentType ->
-        {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
-            ContentType, fun() -> receive_request_data(Req) end),
-        Doc = couch_doc_from_req(Req, DocId, Doc0),
-        try
-            Result = update_doc(Req, Db, DocId, Doc),
-            WaitFun(),
-            Result
-        catch throw:Err ->
-            % Document rejected by a validate_doc_update function.
-            couch_doc:abort_multi_part_stream(Parser),
-            throw(Err)
-        end;
-    _Else ->
-        Body = couch_httpd:json_body(Req),
-        Doc = couch_doc_from_req(Req, DocId, Body),
-        update_doc(Req, Db, DocId, Doc)
-    end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
-    SourceRev =
-    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> nil;
-        Rev -> Rev
-    end,
-    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
-    % open old doc
-    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
-    % save new doc
-    update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
-
-db_doc_req(Req, _Db, _DocId) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-
-send_doc(Req, Doc, Options) ->
-    case Doc#doc.meta of
-    [] ->
-        DiskEtag = couch_httpd:doc_etag(Doc),
-        % output etag only when we have no meta
-        couch_httpd:etag_respond(Req, DiskEtag, fun() ->
-            send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
-        end);
-    _ ->
-        send_doc_efficiently(Req, Doc, [], Options)
-    end.
-
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
-    #doc{atts = Atts} = Doc, Headers, Options) ->
-    case lists:member(attachments, Options) of
-    true ->
-        case MochiReq:accepts_content_type("multipart/related") of
-        false ->
-            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-        true ->
-            Boundary = couch_uuids:random(),
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
-                    [attachments, follows, att_encoding_info | Options])),
-            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
-                    Boundary,JsonBytes, Atts, true),
-            CType = {"Content-Type", ?b2l(ContentType)},
-            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
-            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
-                    fun(Data) -> couch_httpd:send(Resp, Data) end, true)
-        end;
-    false ->
-        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
-    end.
-
-send_docs_multipart(Req, Results, Options1) ->
-    OuterBoundary = couch_uuids:random(),
-    InnerBoundary = couch_uuids:random(),
-    Options = [attachments, follows, att_encoding_info | Options1],
-    CType = {"Content-Type",
-        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
-    lists:foreach(
-        fun({ok, #doc{atts=Atts}=Doc}) ->
-            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
-            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
-                    InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
-                    ContentType/binary, "\r\n\r\n">>),
-            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
-                    end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
-        ({{not_found, missing}, RevId}) ->
-             RevStr = couch_doc:rev_to_str(RevId),
-             Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
-             couch_httpd:send_chunk(Resp,
-                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
-                Json,
-                <<"\r\n--", OuterBoundary/binary>>])
-         end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
-
-send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
-    Boundary = couch_uuids:random(),
-    CType = {"Content-Type",
-        "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
-    {ok, Resp} = start_chunked_response(Req, 206, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
-    lists:foreach(fun({From, To}) ->
-        ContentRange = ?l2b(make_content_range(From, To, Len)),
-        couch_httpd:send_chunk(Resp,
-            <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
-            "Content-Range: ", ContentRange/binary, "\r\n",
-           "\r\n">>),
-        couch_doc:range_att_foldl(Att, From, To + 1,
-            fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-        couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
-    end, Ranges),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp),
-    {ok, Resp}.
-
-receive_request_data(Req) ->
-    receive_request_data(Req, couch_httpd:body_length(Req)).
-
-receive_request_data(Req, LenLeft) when LenLeft > 0 ->
-    Len = erlang:min(4096, LenLeft),
-    Data = couch_httpd:recv(Req, Len),
-    {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
-receive_request_data(_Req, _) ->
-    throw(<<"expected more data">>).
-
-make_content_range(From, To, Len) ->
-    io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
-        {_Code, Err, Msg} = couch_httpd:error_info(Error),
-        {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
-            {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
-    update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
-    {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
-    {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
-    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
-    Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
-    update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
-update_doc(Req, Db, DocId, Doc) ->
-    update_doc(Req, Db, DocId, Doc, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
-    #doc_query_args{
-        update_type = UpdateType
-    } = parse_doc_query(Req),
-    update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
-    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
-    "true" ->
-        Options = [full_commit];
-    "false" ->
-        Options = [delay_commit];
-    _ ->
-        Options = []
-    end,
-    case couch_httpd:qs_value(Req, "batch") of
-    "ok" ->
-        % async batching
-        spawn(fun() ->
-                case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
-                {ok, _} -> ok;
-                Error ->
-                    ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
-                end
-            end),
-        send_json(Req, 202, Headers, {[
-            {ok, true},
-            {id, DocId}
-        ]});
-    _Normal ->
-        % normal
-        {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
-        NewRevStr = couch_doc:rev_to_str(NewRev),
-        ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
-        send_json(Req,
-            if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
-            true -> 201 end,
-            ResponseHeaders, {[
-                {ok, true},
-                {id, DocId},
-                {rev, NewRevStr}]})
-    end.
-
-couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
-    validate_attachment_names(Doc),
-    Rev = case couch_httpd:qs_value(Req, "rev") of
-    undefined ->
-        undefined;
-    QSRev ->
-        couch_doc:parse_rev(QSRev)
-    end,
-    Revs2 =
-    case Revs of
-    {Start, [RevId|_]} ->
-        if Rev /= undefined andalso Rev /= {Start, RevId} ->
-            throw({bad_request, "Document rev from request body and query "
-                   "string have different values"});
-        true ->
-            case extract_header_rev(Req, {Start, RevId}) of
-            missing_rev -> {0, []};
-            _ -> Revs
-            end
-        end;
-    _ ->
-        case extract_header_rev(Req, Rev) of
-        missing_rev -> {0, []};
-        {Pos, RevId2} -> {Pos, [RevId2]}
-        end
-    end,
-    Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
-    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-%   couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
-    case Rev of
-    nil -> % open most recent rev
-        case couch_db:open_doc(Db, DocId, Options) of
-        {ok, Doc} ->
-            Doc;
-        Error ->
-            throw(Error)
-        end;
-    _ -> % open a specific rev (deletions come back as stubs)
-        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
-        {ok, [{ok, Doc}]} ->
-            Doc;
-        {ok, [{{not_found, missing}, Rev}]} ->
-            throw(not_found);
-        {ok, [Else]} ->
-            throw(Else)
-        end
-    end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
-    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
-    #doc_query_args{
-        rev=Rev,
-        options=Options
-    } = parse_doc_query(Req),
-    #doc{
-        atts=Atts
-    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
-    case [A || A <- Atts, A#att.name == FileName] of
-    [] ->
-        throw({not_found, "Document is missing attachment"});
-    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
-        Etag = case Att#att.md5 of
-            <<>> -> couch_httpd:doc_etag(Doc);
-            Md5 -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
-        end,
-        ReqAcceptsAttEnc = lists:member(
-           atom_to_list(Enc),
-           couch_httpd:accepted_encodings(Req)
-        ),
-        Len = case {Enc, ReqAcceptsAttEnc} of
-        {identity, _} ->
-            % stored and served in identity form
-            DiskLen;
-        {_, false} when DiskLen =/= AttLen ->
-            % Stored encoded, but client doesn't accept the encoding we used,
-            % so we need to decode on the fly.  DiskLen is the identity length
-            % of the attachment.
-            DiskLen;
-        {_, true} ->
-            % Stored and served encoded.  AttLen is the encoded length.
-            AttLen;
-        _ ->
-            % We received an encoded attachment and stored it as such, so we
-            % don't know the identity length.  The client doesn't accept the
-            % encoding, and since we cannot serve a correct Content-Length
-            % header we'll fall back to a chunked response.
-            undefined
-        end,
-        Headers = [
-            {"ETag", Etag},
-            {"Cache-Control", "must-revalidate"},
-            {"Content-Type", binary_to_list(Type)}
-        ] ++ case ReqAcceptsAttEnc of
-        true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
-            % the Content-Encoding header
-            [{"Content-Encoding", atom_to_list(Enc)}];
-        _ ->
-            []
-        end ++ case Enc of
-            identity ->
-                [{"Accept-Ranges", "bytes"}];
-            _ ->
-                [{"Accept-Ranges", "none"}]
-        end,
-        AttFun = case ReqAcceptsAttEnc of
-        false ->
-            fun couch_doc:att_foldl_decode/3;
-        true ->
-            fun couch_doc:att_foldl/3
-        end,
-        couch_httpd:etag_respond(
-            Req,
-            Etag,
-            fun() ->
-                case Len of
-                undefined ->
-                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
-                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
-                    last_chunk(Resp);
-                _ ->
-                    Ranges = parse_ranges(MochiReq:get(range), Len),
-                    case {Enc, Ranges} of
-                        {identity, [{From, To}]} ->
-                            Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
-                                ++ Headers,
-                            {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
-                            couch_doc:range_att_foldl(Att, From, To + 1,
-                                fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
-                        {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
-                            send_ranges_multipart(Req, Type, Len, Att, Ranges);
-                        _ ->
-                            Headers1 = Headers ++
-                                if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
-                                    [{"Content-MD5", base64:encode(Att#att.md5)}];
-                                true ->
-                                    []
-                            end,
-                            {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
-                            AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
-                    end
-                end
-            end
-        )
-    end;
-
-
-db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
-        when (Method == 'PUT') or (Method == 'DELETE') ->
-    FileName = validate_attachment_name(
-                    mochiweb_util:join(
-                        lists:map(fun binary_to_list/1,
-                            FileNameParts),"/")),
-
-    NewAtt = case Method of
-        'DELETE' ->
-            [];
-        _ ->
-            [#att{
-                name = FileName,
-                type = case couch_httpd:header_value(Req,"Content-Type") of
-                    undefined ->
-                        % We could throw an error here or guess by the FileName.
-                        % Currently, just giving it a default.
-                        <<"application/octet-stream">>;
-                    CType ->
-                        list_to_binary(CType)
-                    end,
-                data = case couch_httpd:body_length(Req) of
-                    undefined ->
-                        <<"">>;
-                    {unknown_transfer_encoding, Unknown} ->
-                        exit({unknown_transfer_encoding, Unknown});
-                    chunked ->
-                        fun(MaxChunkSize, ChunkFun, InitState) ->
-                            couch_httpd:recv_chunked(Req, MaxChunkSize,
-                                ChunkFun, InitState)
-                        end;
-                    0 ->
-                        <<"">>;
-                    Length when is_integer(Length) ->
-                        Expect = case couch_httpd:header_value(Req, "expect") of
-                                     undefined ->
-                                         undefined;
-                                     Value when is_list(Value) ->
-                                         string:to_lower(Value)
-                                 end,
-                        case Expect of
-                            "100-continue" ->
-                                MochiReq:start_raw_response({100, gb_trees:empty()});
-                            _Else ->
-                                ok
-                        end,
-
-
-                        fun(Size) -> couch_httpd:recv(Req, Size) end
-                    end,
-                att_len = case couch_httpd:header_value(Req,"Content-Length") of
-                    undefined ->
-                        undefined;
-                    Length ->
-                        list_to_integer(Length)
-                    end,
-                md5 = get_md5_header(Req),
-                encoding = case string:to_lower(string:strip(
-                    couch_httpd:header_value(Req,"Content-Encoding","identity")
-                )) of
-                "identity" ->
-                   identity;
-                "gzip" ->
-                   gzip;
-                _ ->
-                   throw({
-                       bad_ctype,
-                       "Only gzip and identity content-encodings are supported"
-                   })
-                end
-            }]
-    end,
-
-    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
-        missing_rev -> % make the new doc
-            couch_doc:validate_docid(DocId),
-            #doc{id=DocId};
-        Rev ->
-            case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
-                {ok, [{ok, Doc0}]} -> Doc0;
-                {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
-                {ok, [Error]} -> throw(Error)
-            end
-    end,
-
-    #doc{atts=Atts} = Doc,
-    DocEdited = Doc#doc{
-        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
-    },
-
-    Headers = case Method of
-    'DELETE' ->
-        [];
-    _ ->
-        [{"Location", absolute_uri(Req, "/" ++
-            ?b2l(Db#db.name) ++ "/" ++
-            ?b2l(DocId) ++ "/" ++
-            ?b2l(FileName)
-        )}]
-    end,
-    update_doc(Req, Db, DocId, DocEdited, Headers);
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
-    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-parse_ranges(undefined, _Len) ->
-    undefined;
-parse_ranges(fail, _Len) ->
-    undefined;
-parse_ranges(Ranges, Len) ->
-    parse_ranges(Ranges, Len, []).
-
-parse_ranges([], _Len, Acc) ->
-    lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
-    undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
-    throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
-    parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
-    parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
-    parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
-    parse_ranges(Rest, Len, [{From, To}] ++ Acc).
-
-get_md5_header(Req) ->
-    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
-    Length = couch_httpd:body_length(Req),
-    Trailer = couch_httpd:header_value(Req, "Trailer"),
-    case {ContentMD5, Length, Trailer} of
-        _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
-            base64:decode(ContentMD5);
-        {_, chunked, undefined} ->
-            <<>>;
-        {_, chunked, _} ->
-            case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
-                {match, _} ->
-                    md5_in_footer;
-                _ ->
-                    <<>>
-            end;
-        _ ->
-            <<>>
-    end.
-
-parse_doc_query(Req) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-        {"attachments", "true"} ->
-            Options = [attachments | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"meta", "true"} ->
-            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs", "true"} ->
-            Options = [revs | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"local_seq", "true"} ->
-            Options = [local_seq | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"revs_info", "true"} ->
-            Options = [revs_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"conflicts", "true"} ->
-            Options = [conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"deleted_conflicts", "true"} ->
-            Options = [deleted_conflicts | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"rev", Rev} ->
-            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
-        {"open_revs", "all"} ->
-            Args#doc_query_args{open_revs=all};
-        {"open_revs", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
-        {"latest", "true"} ->
-            Options = [latest | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        {"atts_since", RevsJsonStr} ->
-            JsonArray = ?JSON_DECODE(RevsJsonStr),
-            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
-        {"new_edits", "false"} ->
-            Args#doc_query_args{update_type=replicated_changes};
-        {"new_edits", "true"} ->
-            Args#doc_query_args{update_type=interactive_edit};
-        {"att_encoding_info", "true"} ->
-            Options = [att_encoding_info | Args#doc_query_args.options],
-            Args#doc_query_args{options=Options};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #doc_query_args{}, couch_httpd:qs(Req)).
-
-parse_changes_query(Req, Db) ->
-    ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
-        case {string:to_lower(Key), Value} of
-        {"feed", _} ->
-            Args#changes_args{feed=Value};
-        {"descending", "true"} ->
-            Args#changes_args{dir=rev};
-        {"since", "now"} ->
-            UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
-                couch_db:get_update_seq(WDb)
-            end),
-            Args#changes_args{since=UpdateSeq};
-        {"since", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"last-event-id", _} ->
-            Args#changes_args{since=list_to_integer(Value)};
-        {"limit", _} ->
-            Args#changes_args{limit=list_to_integer(Value)};
-        {"style", _} ->
-            Args#changes_args{style=list_to_existing_atom(Value)};
-        {"heartbeat", "true"} ->
-            Args#changes_args{heartbeat=true};
-        {"heartbeat", _} ->
-            Args#changes_args{heartbeat=list_to_integer(Value)};
-        {"timeout", _} ->
-            Args#changes_args{timeout=list_to_integer(Value)};
-        {"include_docs", "true"} ->
-            Args#changes_args{include_docs=true};
-        {"attachments", "true"} ->
-            Opts = Args#changes_args.doc_options,
-            Args#changes_args{doc_options=[attachments|Opts]};
-        {"att_encoding_info", "true"} ->
-            Opts = Args#changes_args.doc_options,
-            Args#changes_args{doc_options=[att_encoding_info|Opts]};
-        {"conflicts", "true"} ->
-            Args#changes_args{conflicts=true};
-        {"filter", _} ->
-            Args#changes_args{filter=Value};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #changes_args{}, couch_httpd:qs(Req)),
-    %% If it's an EventSource request with a Last-Event-ID header,
-    %% that header should override the `since` query string, since
-    %% it's probably the browser reconnecting.
-    case ChangesArgs#changes_args.feed of
-        "eventsource" ->
-            case couch_httpd:header_value(Req, "last-event-id") of
-                undefined ->
-                    ChangesArgs;
-                Value ->
-                    ChangesArgs#changes_args{since=list_to_integer(Value)}
-            end;
-        _ ->
-            ChangesArgs
-    end.
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
-    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
-    Etag = case couch_httpd:header_value(Req, "If-Match") of
-        undefined -> undefined;
-        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
-    end,
-    case {ExplicitRev, Etag} of
-    {undefined, undefined} -> missing_rev;
-    {_, undefined} -> ExplicitRev;
-    {undefined, _} -> Etag;
-    _ when ExplicitRev == Etag -> Etag;
-    _ ->
-        throw({bad_request, "Document rev and etag have different values"})
-    end.
-
-
-parse_copy_destination_header(Req) ->
-    case couch_httpd:header_value(Req, "Destination") of
-    undefined ->
-        throw({bad_request, "Destination header is mandatory for COPY."});
-    Destination ->
-        case re:run(Destination, "^https?://", [{capture, none}]) of
-        match ->
-            throw({bad_request, "Destination URL must be relative."});
-        nomatch ->
-            % see if ?rev=revid got appended to the Destination header
-            case re:run(Destination, "\\?", [{capture, none}]) of
-            nomatch ->
-                {list_to_binary(Destination), {0, []}};
-            match ->
-                [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
-                [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
-                {Pos, RevId} = couch_doc:parse_rev(Rev),
-                {list_to_binary(DocId), {Pos, [RevId]}}
-            end
-        end
-    end.
-
-validate_attachment_names(Doc) ->
-    lists:foreach(fun(#att{name=Name}) ->
-        validate_attachment_name(Name)
-    end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
-    validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
-    throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
-    case couch_util:validate_utf8(Name) of
-        true -> Name;
-        false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
-    end.
-

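The parse_ranges/2 clauses above normalize a parsed Range header against
the attachment length Len: a suffix range {none, To} becomes the last To
bytes, an open-ended {From, none} runs to the end, an end point at or past
Len is clamped to Len - 1, and a reversed range throws
requested_range_not_satisfiable. A sketch of the expected results for a
1000-byte attachment (hypothetical shell session; the function is internal
to couch_httpd_db, so this assumes it were exported for illustration):

    1> couch_httpd_db:parse_ranges([{0, 499}], 1000).
    [{0,499}]
    2> couch_httpd_db:parse_ranges([{none, 300}], 1000).  % suffix: last 300 bytes
    [{700,999}]
    3> couch_httpd_db:parse_ranges([{500, none}], 1000).  % open-ended tail
    [{500,999}]
    4> couch_httpd_db:parse_ranges([{0, 1500}], 1000).    % To >= Len is clamped
    [{0,999}]
    5> couch_httpd_db:parse_ranges([{0, none}], 1000).    % whole file: ranges ignored
    undefined
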
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_external.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_external.erl b/src/couch_httpd_external.erl
deleted file mode 100644
index 2036d25..0000000
--- a/src/couch_httpd_external.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_external).
-
--export([handle_external_req/2, handle_external_req/3]).
--export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
--export([default_or_content_type/2, parse_external_response/1]).
-
--import(couch_httpd,[send_error/4]).
-
--include("couch_db.hrl").
-
-% handle_external_req/2
-% for the old type of config usage:
-% _external = {couch_httpd_external, handle_external_req}
-% with urls like
-% /db/_external/action/design/name
-handle_external_req(#httpd{
-                        path_parts=[_DbName, _External, UrlName | _Path]
-                    }=HttpReq, Db) ->
-    process_external_req(HttpReq, Db, UrlName);
-handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
-handle_external_req(Req, _) ->
-    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-
-% handle_external_req/3
-% for this type of config usage:
-% _action = {couch_httpd_external, handle_external_req, <<"action">>}
-% with urls like
-% /db/_action/design/name
-handle_external_req(HttpReq, Db, Name) ->
-    process_external_req(HttpReq, Db, Name).
-
-process_external_req(HttpReq, Db, Name) ->
-    Response = couch_external_manager:execute(binary_to_list(Name),
-        json_req_obj(HttpReq, Db)),
-
-    case Response of
-    {unknown_external_server, Msg} ->
-        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
-    _ ->
-        send_external_response(HttpReq, Response)
-    end.
-
-json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
-               method=Method,
-               requested_path_parts=RequestedPath,
-               path_parts=Path,
-               req_body=ReqBody
-            }, Db, DocId) ->
-    Body = case ReqBody of
-        undefined ->
-            MaxSize = list_to_integer(
-                couch_config:get("couchdb", "max_document_size", "4294967296")),
-            Req:recv_body(MaxSize);
-        Else -> Else
-    end,
-    ParsedForm = case Req:get_primary_header_value("content-type") of
-        "application/x-www-form-urlencoded" ++ _ ->
-            case Body of
-            undefined -> [];
-            _ -> mochiweb_util:parse_qs(Body)
-            end;
-        _ ->
-            []
-    end,
-    Headers = Req:get(headers),
-    Hlist = mochiweb_headers:to_list(Headers),
-    {ok, Info} = couch_db:get_db_info(Db),
-
-    % add headers...
-    {[{<<"info">>, {Info}},
-        {<<"id">>, DocId},
-        {<<"uuid">>, couch_uuids:new()},
-        {<<"method">>, Method},
-        {<<"requested_path">>, RequestedPath},
-        {<<"path">>, Path},
-        {<<"raw_path">>, ?l2b(Req:get(raw_path))},
-        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
-        {<<"headers">>, to_json_terms(Hlist)},
-        {<<"body">>, Body},
-        {<<"peer">>, ?l2b(Req:get(peer))},
-        {<<"form">>, to_json_terms(ParsedForm)},
-        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)},
-        {<<"secObj">>, couch_db:get_security(Db)}]}.
-
-to_json_terms(Data) ->
-    to_json_terms(Data, []).
-
-to_json_terms([], Acc) ->
-    {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
-    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
-    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
-
-json_query_keys({Json}) ->
-    json_query_keys(Json, []).
-json_query_keys([], Acc) ->
-    {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
-    json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([Term | Rest], Acc) ->
-    json_query_keys(Rest, [Term|Acc]).
-
-send_external_response(Req, Response) ->
-    #extern_resp_args{
-        code = Code,
-        data = Data,
-        ctype = CType,
-        headers = Headers,
-        json = Json
-    } = parse_external_response(Response),
-    Headers1 = default_or_content_type(CType, Headers),
-    case Json of
-    nil ->
-        couch_httpd:send_response(Req, Code, Headers1, Data);
-    Json ->
-        couch_httpd:send_json(Req, Code, Headers1, Json)
-    end.
-
-parse_external_response({Response}) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-            {"", _} ->
-                Args;
-            {<<"code">>, Value} ->
-                Args#extern_resp_args{code=Value};
-            {<<"stop">>, true} ->
-                Args#extern_resp_args{stop=true};
-            {<<"json">>, Value} ->
-                Args#extern_resp_args{
-                    json=Value,
-                    ctype="application/json"};
-            {<<"body">>, Value} ->
-                Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
-            {<<"base64">>, Value} ->
-                Args#extern_resp_args{
-                    data=base64:decode(Value),
-                    ctype="application/binary"
-                };
-            {<<"headers">>, {Headers}} ->
-                NewHeaders = lists:map(fun({Header, HVal}) ->
-                    {binary_to_list(Header), binary_to_list(HVal)}
-                end, Headers),
-                Args#extern_resp_args{headers=NewHeaders};
-            _ -> % unknown key
-                Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
-                throw({external_response_error, Msg})
-            end
-        end, #extern_resp_args{}, Response).
-
-default_or_content_type(DefaultContentType, Headers) ->
-    IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
-    case lists:any(IsContentType, Headers) of
-    false ->
-        [{"Content-Type", DefaultContentType} | Headers];
-    true ->
-        Headers
-    end.

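parse_external_response/1 above folds the EJSON object returned by an
external process into an #extern_resp_args{} record (defined in
couch_db.hrl), recognizing the keys code, stop, json, body, base64 and
headers; send_external_response/2 then chooses send_json or send_response
depending on whether the json field was set. A sketch of that mapping,
with an illustrative input rather than a captured response (the function
is exported, so it can be exercised directly):

    Response = {[
        {<<"code">>, 200},
        {<<"headers">>, {[{<<"X-Answered-By">>, <<"my-external">>}]}},
        {<<"json">>, {[{<<"ok">>, true}]}}
    ]},
    #extern_resp_args{code = 200,
                      ctype = "application/json",
                      json = {[{<<"ok">>, true}]},
                      headers = [{"X-Answered-By", "my-external"}]} =
        couch_httpd_external:parse_external_response(Response).
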
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_misc_handlers.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_misc_handlers.erl b/src/couch_httpd_misc_handlers.erl
deleted file mode 100644
index 96a05c6..0000000
--- a/src/couch_httpd_misc_handlers.erl
+++ /dev/null
@@ -1,318 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_misc_handlers).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
-    handle_all_dbs_req/1,handle_restart_req/1,
-    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
-    handle_task_status_req/1, handle_file_req/2]).
-
--export([increment_update_seq_req/2]).
-
-
--include("couch_db.hrl").
-
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
-    start_chunked_response/3, send_error/4]).
-
-% httpd global handlers
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
-    send_json(Req, {[
-        {couchdb, WelcomeMessage},
-        {uuid, couch_server:get_uuid()},
-        {version, list_to_binary(couch_server:get_version())}
-        ] ++ case couch_config:get("vendor") of
-        [] ->
-            [];
-        Properties ->
-            [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
-        end
-    });
-handle_welcome_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    {{Year,Month,Day},Time} = erlang:universaltime(),
-    OneYearFromNow = {{Year+1,Month,Day},Time},
-    CachingHeaders = [
-        % favicon should expire a year from now
-        {"Cache-Control", "public, max-age=31536000"},
-        {"Expires", couch_util:rfc1123_date(OneYearFromNow)}
-    ],
-    couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-
-handle_favicon_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_file_req(#httpd{method='GET'}=Req, Document) ->
-    couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-
-handle_file_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
-    "/" ++ UrlPath = couch_httpd:path(Req),
-    case couch_httpd:partition(UrlPath) of
-    {_ActionKey, "/", RelativePath} ->
-        % GET /_utils/path or GET /_utils/
-        CachingHeaders =
-                [{"Cache-Control", "private, must-revalidate"}],
-        couch_httpd:serve_file(Req, RelativePath, DocumentRoot, CachingHeaders);
-    {_ActionKey, "", _RelativePath} ->
-        % GET /_utils
-        RedirectPath = couch_httpd:path(Req) ++ "/",
-        couch_httpd:send_redirect(Req, RedirectPath)
-    end;
-handle_utils_dir_req(Req, _) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    {ok, DbNames} = couch_server:all_databases(),
-    send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    % convert the list of prop lists to a list of json objects
-    send_json(Req, [{Props} || Props <- couch_task_status:all()]);
-handle_task_status_req(Req) ->
-    send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_restart_req(#httpd{method='POST'}=Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Result = send_json(Req, 202, {[{ok, true}]}),
-    couch_server_sup:restart_core_server(),
-    Result;
-handle_restart_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-
-handle_uuids_req(#httpd{method='GET'}=Req) ->
-    Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
-    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
-    Etag = couch_httpd:make_etag(UUIDs),
-    couch_httpd:etag_respond(Req, Etag, fun() ->
-        CacheBustingHeaders = [
-            {"Date", couch_util:rfc1123_date()},
-            {"Cache-Control", "no-cache"},
-            % Past date, ON PURPOSE!
-            {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
-            {"Pragma", "no-cache"},
-            {"ETag", Etag}
-        ],
-        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
-    end);
-handle_uuids_req(Req) ->
-    send_method_not_allowed(Req, "GET").
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
-        case dict:is_key(Section, Acc) of
-        true ->
-            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
-        false ->
-            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
-        end
-    end, dict:new(), couch_config:all()),
-    KVs = dict:fold(fun(Section, Values, Acc) ->
-        [{list_to_binary(Section), {Values}} | Acc]
-    end, [], Grouped),
-    send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    KVs = [{list_to_binary(Key), list_to_binary(Value)}
-            || {Key, Value} <- couch_config:get(Section)],
-    send_json(Req, 200, {KVs});
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    case couch_config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    Value ->
-        send_json(Req, 200, list_to_binary(Value))
-    end;
-% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
-      when (Method == 'PUT') or (Method == 'DELETE') ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
-    case couch_config:get(<<"httpd">>, <<"config_whitelist">>, null) of
-        null ->
-            % No whitelist; allow all changes.
-            handle_approved_config_req(Req, Persist);
-        WhitelistValue ->
-            % Provide a failsafe to protect against inadvertently locking
-            % oneself out of the config by supplying a syntactically-incorrect
-            % Erlang term. To intentionally lock down the whitelist, supply a
-            % well-formed list which does not include the whitelist config
-            % variable itself.
-            FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
-
-            Whitelist = case couch_util:parse_term(WhitelistValue) of
-                {ok, Value} when is_list(Value) ->
-                    Value;
-                {ok, _NonListValue} ->
-                    FallbackWhitelist;
-                {error, _} ->
-                    [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
-                    ?LOG_ERROR("Only whitelisting ~s/~s due to error parsing: ~p",
-                               [WhitelistSection, WhitelistKey, WhitelistValue]),
-                    FallbackWhitelist
-            end,
-
-            IsRequestedKeyVal = fun(Element) ->
-                case Element of
-                    {A, B} ->
-                        % For readability, tuples may be used instead of binaries
-                        % in the whitelist.
-                        case {couch_util:to_binary(A), couch_util:to_binary(B)} of
-                            {Section, Key} ->
-                                true;
-                            {Section, <<"*">>} ->
-                                true;
-                            _Else ->
-                                false
-                        end;
-                    _Else ->
-                        false
-                end
-            end,
-
-            case lists:any(IsRequestedKeyVal, Whitelist) of
-                true ->
-                    % Allow modifying this whitelisted variable.
-                    handle_approved_config_req(Req, Persist);
-                _NotWhitelisted ->
-                    % Disallow modifying this non-whitelisted variable.
-                    send_error(Req, 400, <<"modification_not_allowed">>,
-                               ?l2b("This config variable is read-only"))
-            end
-    end;
-handle_config_req(Req) ->
-    send_method_not_allowed(Req, "GET,PUT,DELETE").
-
-% PUT /_config/Section/Key
-% "value"
-handle_approved_config_req(Req, Persist) ->
-    Query = couch_httpd:qs(Req),
-    UseRawValue = case lists:keyfind("raw", 1, Query) of
-    false            -> false; % Not specified
-    {"raw", ""}      -> false; % Specified with no value, i.e. "?raw" and "?raw="
-    {"raw", "false"} -> false;
-    {"raw", "true"}  -> true;
-    {"raw", InvalidValue} -> InvalidValue
-    end,
-    handle_approved_config_req(Req, Persist, UseRawValue).
-
-handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req,
-                           Persist, UseRawValue)
-        when UseRawValue =:= false orelse UseRawValue =:= true ->
-    RawValue = couch_httpd:json_body(Req),
-    Value = case UseRawValue of
-    true ->
-        % Client requests no change to the provided value.
-        RawValue;
-    false ->
-        % Pre-process the value as necessary.
-        case Section of
-        <<"admins">> ->
-            couch_passwords:hash_admin_password(RawValue);
-        _ ->
-            RawValue
-        end
-    end,
-
-    OldValue = couch_config:get(Section, Key, ""),
-    case couch_config:set(Section, Key, ?b2l(Value), Persist) of
-    ok ->
-        send_json(Req, 200, list_to_binary(OldValue));
-    Error ->
-        throw(Error)
-    end;
-
-handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) ->
-    Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
-    send_json(Req, 400, {[{error, ?l2b(Err)}]});
-
-% DELETE /_config/Section/Key
-handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req,
-                           Persist, _UseRawValue) ->
-    case couch_config:get(Section, Key, null) of
-    null ->
-        throw({not_found, unknown_config_value});
-    OldValue ->
-        couch_config:delete(Section, Key, Persist),
-        send_json(Req, 200, list_to_binary(OldValue))
-    end.
-
-
-% httpd db handlers
-
-increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    {ok, NewSeq} = couch_db:increment_update_seq(Db),
-    send_json(Req, {[{ok, true},
-        {update_seq, NewSeq}
-    ]});
-increment_update_seq_req(Req, _Db) ->
-    send_method_not_allowed(Req, "POST").
-
-% httpd log handlers
-
-handle_log_req(#httpd{method='GET'}=Req) ->
-    ok = couch_httpd:verify_is_server_admin(Req),
-    Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
-    Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
-    Chunk = couch_log:read(Bytes, Offset),
-    {ok, Resp} = start_chunked_response(Req, 200, [
-        % send a plaintext response
-        {"Content-Type", "text/plain; charset=utf-8"},
-        {"Content-Length", integer_to_list(length(Chunk))}
-    ]),
-    send_chunk(Resp, Chunk),
-    last_chunk(Resp);
-handle_log_req(#httpd{method='POST'}=Req) ->
-    {PostBody} = couch_httpd:json_body_obj(Req),
-    Level = couch_util:get_value(<<"level">>, PostBody),
-    Message = ?b2l(couch_util:get_value(<<"message">>, PostBody)),
-    case Level of
-    <<"debug">> ->
-        ?LOG_DEBUG(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"info">> ->
-        ?LOG_INFO(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    <<"error">> ->
-        ?LOG_ERROR(Message, []),
-        send_json(Req, 200, {[{ok, true}]});
-    _ ->
-        send_json(Req, 400, {[{error, ?l2b(io_lib:format("Unrecognized log level '~s'", [Level]))}]})
-    end;
-handle_log_req(Req) ->
-    send_method_not_allowed(Req, "GET,POST").

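The PUT/DELETE branch of handle_config_req/1 above reads
httpd/config_whitelist, parses it with couch_util:parse_term/1, and only
allows writes to {Section, Key} pairs found in the resulting list, with
<<"*">> matching any key in a section. A sketch of a whitelist value
consistent with that code (the entries other than the whitelist itself
are illustrative); note that it includes httpd/config_whitelist, so the
whitelist stays editable, per the failsafe comment above:

    [{<<"httpd">>, <<"config_whitelist">>},
     {<<"log">>, <<"level">>},
     {<<"uuids">>, <<"*">>}].
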
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/a278e0db/src/couch_httpd_oauth.erl
----------------------------------------------------------------------
diff --git a/src/couch_httpd_oauth.erl b/src/couch_httpd_oauth.erl
deleted file mode 100644
index 2094c08..0000000
--- a/src/couch_httpd_oauth.erl
+++ /dev/null
@@ -1,387 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.  You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_oauth).
-
--include("couch_db.hrl").
--include("couch_js_functions.hrl").
-
--export([oauth_authentication_handler/1, handle_oauth_req/1]).
-
--define(OAUTH_DDOC_ID, <<"_design/oauth">>).
--define(OAUTH_VIEW_NAME, <<"oauth_credentials">>).
-
--record(callback_params, {
-    consumer,
-    token,
-    token_secret,
-    url,
-    signature,
-    params,
-    username
-}).
-
-% OAuth auth handler using per-node user db
-oauth_authentication_handler(Req) ->
-    serve_oauth(Req, fun oauth_auth_callback/2, true).
-
-
-oauth_auth_callback(Req, #callback_params{token_secret = undefined}) ->
-    couch_httpd:send_error(
-         Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>);
-
-oauth_auth_callback(#httpd{mochi_req = MochiReq} = Req, CbParams) ->
-    Method = atom_to_list(MochiReq:get(method)),
-    #callback_params{
-        consumer = Consumer,
-        token_secret = TokenSecret,
-        url = Url,
-        signature = Sig,
-        params = Params,
-        username = User
-    } = CbParams,
-    case oauth:verify(Sig, Method, Url, Params, Consumer, TokenSecret) of
-    true ->
-        set_user_ctx(Req, User);
-    false ->
-        ?LOG_DEBUG("OAuth handler: signature verification failed for user `~p`~n"
-            "Received signature is `~p`~n"
-            "HTTP method is `~p`~n"
-            "URL is `~p`~n"
-            "Parameters are `~p`~n"
-            "Consumer is `~p`, token secret is `~p`~n"
-            "Expected signature was `~p`~n",
-            [User, Sig, Method, Url, Params, Consumer, TokenSecret,
-                oauth:signature(Method, Url, Params, Consumer, TokenSecret)]),
-        Req
-    end.
-
-
-% Look up the consumer key and get the roles to give the consumer
-set_user_ctx(_Req, undefined) ->
-    throw({bad_request, unknown_oauth_token});
-set_user_ctx(Req, Name) ->
-    case couch_auth_cache:get_user_creds(Name) of
-        nil ->
-            ?LOG_DEBUG("OAuth handler: user `~p` credentials not found", [Name]),
-            Req;
-        User ->
-            Roles = couch_util:get_value(<<"roles">>, User, []),
-            Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
-    end.
-
-% OAuth request_token
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token_secret = TokenSecret,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case oauth:verify(
-            Sig, atom_to_list(Method), Url, Params, Consumer, TokenSecret) of
-        true ->
-            ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
-        false ->
-            invalid_signature(Req)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
-    {ok, serve_oauth_authorize(Req)};
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req1) ->
-    serve_oauth(Req1, fun(Req, CbParams) ->
-        #callback_params{
-            consumer = Consumer,
-            token = Token,
-            url = Url,
-            signature = Sig,
-            params = Params
-        } = CbParams,
-        case Token of
-        "requestkey" ->
-            case oauth:verify(
-                Sig, "GET", Url, Params, Consumer, "requestsecret") of
-            true ->
-                ok(Req,
-                    <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
-            false ->
-                invalid_signature(Req)
-            end;
-        _ ->
-            couch_httpd:send_error(
-                Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
-        end
-    end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
-    couch_httpd:send_method_not_allowed(Req, "GET").
-
-invalid_signature(Req) ->
-    couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
-
-% This needs to be protected, i.e. force the user to log in using HTTP Basic Auth or form-based login.
-serve_oauth_authorize(#httpd{method=Method}=Req1) ->
-    case Method of
-        'GET' ->
-            % Confirm with the User that they want to authenticate the Consumer
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "GET", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        'POST' ->
-            % If the User has confirmed, we direct the User back to the Consumer with a verification code
-            serve_oauth(Req1, fun(Req, CbParams) ->
-                #callback_params{
-                    consumer = Consumer,
-                    token_secret = TokenSecret,
-                    url = Url,
-                    signature = Sig,
-                    params = Params
-                } = CbParams,
-                case oauth:verify(
-                    Sig, "POST", Url, Params, Consumer, TokenSecret) of
-                true ->
-                    %redirect(oauth_callback, oauth_token, oauth_verifier),
-                    ok(Req, <<"oauth_token=requestkey&",
-                        "oauth_token_secret=requestsecret">>);
-                false ->
-                    invalid_signature(Req)
-                end
-            end, false);
-        _ ->
-            couch_httpd:send_method_not_allowed(Req1, "GET,POST")
-    end.
-
-serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
-    % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
-    % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
-    % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
-    AuthHeader = case MochiReq:get_header_value("authorization") of
-        undefined ->
-            "";
-        Else ->
-            [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
-            case [string:to_lower(Head) | Tail] of
-                ["oauth", Rest] -> Rest;
-                _ -> ""
-            end
-    end,
-    HeaderParams = oauth:header_params_decode(AuthHeader),
-    %Realm = couch_util:get_value("realm", HeaderParams),
-
-    % get requested path
-    RequestedPath = case MochiReq:get_header_value("x-couchdb-requested-path") of
-        undefined ->
-            case MochiReq:get_header_value("x-couchdb-vhost-path") of
-                undefined ->
-                    MochiReq:get(raw_path);
-                VHostPath ->
-                    VHostPath
-            end;
-        RequestedPath0 ->
-           RequestedPath0
-    end,
-    {_, QueryString, _} = mochiweb_util:urlsplit_path(RequestedPath),
-
-    Params = proplists:delete("realm", HeaderParams) ++ mochiweb_util:parse_qs(QueryString),
-
-    ?LOG_DEBUG("OAuth Params: ~p", [Params]),
-    case couch_util:get_value("oauth_version", Params, "1.0") of
-        "1.0" ->
-            case couch_util:get_value("oauth_consumer_key", Params, undefined) of
-                undefined ->
-                    case FailSilently of
-                        true -> Req;
-                        false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
-                    end;
-                ConsumerKey ->
-                    Url = couch_httpd:absolute_uri(Req, RequestedPath),
-                    case get_callback_params(ConsumerKey, Params, Url) of
-                        {ok, CallbackParams} ->
-                            Fun(Req, CallbackParams);
-                        invalid_consumer_token_pair ->
-                            couch_httpd:send_error(
-                                Req, 400,
-                                <<"invalid_consumer_token_pair">>,
-                                <<"Invalid consumer and token pair.">>);
-                        {error, {Error, Reason}} ->
-                            couch_httpd:send_error(Req, 400, Error, Reason)
-                    end
-            end;
-        _ ->
-            couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
-    end.
-
-
-get_callback_params(ConsumerKey, Params, Url) ->
-    Token = couch_util:get_value("oauth_token", Params),
-    SigMethod = sig_method(Params),
-    CbParams0 = #callback_params{
-        token = Token,
-        signature = couch_util:get_value("oauth_signature", Params),
-        params = proplists:delete("oauth_signature", Params),
-        url = Url
-    },
-    case oauth_credentials_info(Token, ConsumerKey) of
-    nil ->
-        invalid_consumer_token_pair;
-    {error, _} = Err ->
-        Err;
-    {OauthCreds} ->
-        User = couch_util:get_value(<<"username">>, OauthCreds, []),
-        ConsumerSecret = ?b2l(couch_util:get_value(
-            <<"consumer_secret">>, OauthCreds, <<>>)),
-        TokenSecret = ?b2l(couch_util:get_value(
-            <<"token_secret">>, OauthCreds, <<>>)),
-        case (User =:= []) orelse (ConsumerSecret =:= []) orelse
-            (TokenSecret =:= []) of
-        true ->
-            invalid_consumer_token_pair;
-        false ->
-            CbParams = CbParams0#callback_params{
-                consumer = {ConsumerKey, ConsumerSecret, SigMethod},
-                token_secret = TokenSecret,
-                username = User
-            },
-            ?LOG_DEBUG("Got OAuth credentials for ConsumerKey `~p` and "
-                "Token `~p`, from the views, User: `~p`, "
-                "ConsumerSecret: `~p`, TokenSecret: `~p`",
-                [ConsumerKey, Token, User, ConsumerSecret, TokenSecret]),
-            {ok, CbParams}
-        end
-    end.
-
-
-sig_method(Params) ->
-    sig_method_1(couch_util:get_value("oauth_signature_method", Params)).
-sig_method_1("PLAINTEXT") ->
-    plaintext;
-% sig_method_1("RSA-SHA1") ->
-%    rsa_sha1;
-sig_method_1("HMAC-SHA1") ->
-    hmac_sha1;
-sig_method_1(_) ->
-    undefined.
-
-
-ok(#httpd{mochi_req=MochiReq}, Body) ->
-    {ok, MochiReq:respond({200, [], Body})}.
-
-
-oauth_credentials_info(Token, ConsumerKey) ->
-    case use_auth_db() of
-    {ok, Db} ->
-        Result = case query_oauth_view(Db, [?l2b(ConsumerKey), ?l2b(Token)]) of
-        [] ->
-            nil;
-        [Creds] ->
-            Creds;
-        [_ | _] ->
-            Reason = iolist_to_binary(
-                io_lib:format("Found multiple OAuth credentials for the pair "
-                    "(consumer_key: `~p`, token: `~p`)", [ConsumerKey, Token])),
-            {error, {<<"oauth_token_consumer_key_pair">>, Reason}}
-        end,
-        couch_db:close(Db),
-        Result;
-    nil ->
-        {
-            case couch_config:get("oauth_consumer_secrets", ConsumerKey) of
-            undefined -> [];
-            ConsumerSecret -> [{<<"consumer_secret">>, ?l2b(ConsumerSecret)}]
-            end
-            ++
-            case couch_config:get("oauth_token_secrets", Token) of
-            undefined -> [];
-            TokenSecret -> [{<<"token_secret">>, ?l2b(TokenSecret)}]
-            end
-            ++
-            case couch_config:get("oauth_token_users", Token) of
-            undefined -> [];
-            User -> [{<<"username">>, ?l2b(User)}]
-            end
-        }
-    end.
-
-
-use_auth_db() ->
-    case couch_config:get("couch_httpd_oauth", "use_users_db", "false") of
-    "false" ->
-        nil;
-    "true" ->
-        AuthDb = open_auth_db(),
-        {ok, _AuthDb2} = ensure_oauth_views_exist(AuthDb)
-    end.
-
-
-open_auth_db() ->
-    DbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
-    DbOptions = [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}],
-    {ok, AuthDb} = couch_db:open_int(DbName, DbOptions),
-    AuthDb.
-
-
-ensure_oauth_views_exist(AuthDb) ->
-    case couch_db:open_doc(AuthDb, ?OAUTH_DDOC_ID, []) of
-    {ok, _DDoc} ->
-        {ok, AuthDb};
-    _ ->
-        {ok, DDoc} = get_oauth_ddoc(),
-        {ok, _Rev} = couch_db:update_doc(AuthDb, DDoc, []),
-        {ok, _AuthDb2} = couch_db:reopen(AuthDb)
-    end.
-
-
-get_oauth_ddoc() ->
-    Json = {[
-        {<<"_id">>, ?OAUTH_DDOC_ID},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>,
-            {[
-                {?OAUTH_VIEW_NAME,
-                    {[
-                        {<<"map">>, ?OAUTH_MAP_FUN}
-                    ]}
-                }
-            ]}
-        }
-    ]},
-    {ok, couch_doc:from_json_obj(Json)}.
-
-
-query_oauth_view(Db, Key) ->
-    ViewOptions = [
-        {start_key, Key},
-        {end_key, Key}
-    ],
-    Callback = fun({row, Row}, Acc) ->
-            {ok, [couch_util:get_value(value, Row) | Acc]};
-        (_, Acc) ->
-            {ok, Acc}
-    end,
-    {ok, Result} = couch_mrview:query_view(
-        Db, ?OAUTH_DDOC_ID, ?OAUTH_VIEW_NAME, ViewOptions, Callback, []),
-    Result.

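When use_users_db is false, oauth_credentials_info/2 above assembles the
credential object from three config sections instead of the _design/oauth
view: oauth_consumer_secrets, oauth_token_secrets and oauth_token_users.
A sketch of the EJSON it would produce, assuming hypothetical entries
consumer1 = s3kr1t, token1 = t0k3ns3kr1t and token1 = joe in those
respective sections:

    {[{<<"consumer_secret">>, <<"s3kr1t">>},
      {<<"token_secret">>, <<"t0k3ns3kr1t">>},
      {<<"username">>, <<"joe">>}]}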

[17/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
initial move to rebar compilation

- move src/apps
- download dependencies using rebar (a rebar.config sketch follows below)
- replace ejson with jiffy
- replace couch_drv & couch_ejson_compare with couch_collate

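A rebar.config sketch consistent with the changes listed above. The
dependency list is inferred from the notes; the repository URLs and
revisions are placeholders, not values taken from this commit:

    %% rebar.config (sketch; URLs/revisions are placeholders)
    {deps, [
        {jiffy, ".*",
            {git, "https://github.com/davisp/jiffy.git", {branch, "master"}}},
        {couch_collate, ".*",
            {git, "https://github.com/benoitc/couch_collate.git", {branch, "master"}}}
    ]}.
    {erl_opts, [{i, "include"}]}.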

Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/75f30dbe
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/75f30dbe
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/75f30dbe

Branch: refs/heads/import-rcouch
Commit: 75f30dbeab09fa93e3c1ecc08078df8d3aa58016
Parents: ae8612b
Author: benoitc <be...@apache.org>
Authored: Mon Jan 6 21:12:45 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:14 2014 -0600

----------------------------------------------------------------------
 Makefile.am                          |  198 -----
 couch.app.tpl.in                     |   27 -
 couch.erl                            |   39 -
 couch_app.erl                        |   56 --
 couch_auth_cache.erl                 |  425 ---------
 couch_btree.erl                      |  714 ---------------
 couch_changes.erl                    |  577 ------------
 couch_compaction_daemon.erl          |  504 -----------
 couch_compress.erl                   |   84 --
 couch_config.erl                     |  251 ------
 couch_config_writer.erl              |   88 --
 couch_db.erl                         | 1358 -----------------------------
 couch_db.hrl                         |  286 ------
 couch_db_update_notifier.erl         |   82 --
 couch_db_update_notifier_sup.erl     |   61 --
 couch_db_updater.erl                 | 1035 ----------------------
 couch_doc.erl                        |  650 --------------
 couch_drv.erl                        |   62 --
 couch_ejson_compare.erl              |  113 ---
 couch_event_sup.erl                  |   73 --
 couch_external_manager.erl           |  101 ---
 couch_external_server.erl            |   70 --
 couch_file.erl                       |  532 -----------
 couch_httpd.erl                      | 1114 -----------------------
 couch_httpd_auth.erl                 |  380 --------
 couch_httpd_cors.erl                 |  351 --------
 couch_httpd_db.erl                   | 1226 --------------------------
 couch_httpd_external.erl             |  177 ----
 couch_httpd_misc_handlers.erl        |  318 -------
 couch_httpd_oauth.erl                |  387 --------
 couch_httpd_proxy.erl                |  426 ---------
 couch_httpd_rewrite.erl              |  484 ----------
 couch_httpd_stats_handlers.erl       |   56 --
 couch_httpd_vhost.erl                |  383 --------
 couch_js_functions.hrl               |  170 ----
 couch_key_tree.erl                   |  422 ---------
 couch_log.erl                        |  254 ------
 couch_native_process.erl             |  409 ---------
 couch_os_daemons.erl                 |  374 --------
 couch_os_process.erl                 |  216 -----
 couch_passwords.erl                  |  119 ---
 couch_primary_sup.erl                |   66 --
 couch_query_servers.erl              |  616 -------------
 couch_ref_counter.erl                |  111 ---
 couch_secondary_sup.erl              |   49 --
 couch_server.erl                     |  499 -----------
 couch_server_sup.erl                 |  164 ----
 couch_stats_aggregator.erl           |  297 -------
 couch_stats_collector.erl            |  136 ---
 couch_stream.erl                     |  299 -------
 couch_task_status.erl                |  151 ----
 couch_users_db.erl                   |  121 ---
 couch_util.erl                       |  489 -----------
 couch_uuids.erl                      |  103 ---
 couch_work_queue.erl                 |  187 ----
 include/couch_db.hrl                 |  286 ++++++
 json_stream_parse.erl                |  432 ---------
 src/Makefile.am                      |  198 +++++
 src/couch.app.src                    |   23 +
 src/couch.erl                        |   58 ++
 src/couch_app.erl                    |   36 +
 src/couch_auth_cache.erl             |  425 +++++++++
 src/couch_btree.erl                  |  714 +++++++++++++++
 src/couch_changes.erl                |  577 ++++++++++++
 src/couch_compaction_daemon.erl      |  504 +++++++++++
 src/couch_compress.erl               |   84 ++
 src/couch_config.erl                 |  251 ++++++
 src/couch_config_writer.erl          |   88 ++
 src/couch_db.erl                     | 1358 +++++++++++++++++++++++++++++
 src/couch_db_update_notifier.erl     |   82 ++
 src/couch_db_update_notifier_sup.erl |   61 ++
 src/couch_db_updater.erl             | 1035 ++++++++++++++++++++++
 src/couch_doc.erl                    |  650 ++++++++++++++
 src/couch_ejson_compare.erl          |   81 ++
 src/couch_event_sup.erl              |   73 ++
 src/couch_external_manager.erl       |  101 +++
 src/couch_external_server.erl        |   70 ++
 src/couch_file.erl                   |  532 +++++++++++
 src/couch_httpd.erl                  | 1114 +++++++++++++++++++++++
 src/couch_httpd_auth.erl             |  380 ++++++++
 src/couch_httpd_cors.erl             |  351 ++++++++
 src/couch_httpd_db.erl               | 1226 ++++++++++++++++++++++++++
 src/couch_httpd_external.erl         |  177 ++++
 src/couch_httpd_misc_handlers.erl    |  318 +++++++
 src/couch_httpd_oauth.erl            |  387 ++++++++
 src/couch_httpd_proxy.erl            |  426 +++++++++
 src/couch_httpd_rewrite.erl          |  484 ++++++++++
 src/couch_httpd_stats_handlers.erl   |   56 ++
 src/couch_httpd_vhost.erl            |  383 ++++++++
 src/couch_js_functions.hrl           |  170 ++++
 src/couch_key_tree.erl               |  422 +++++++++
 src/couch_log.erl                    |  254 ++++++
 src/couch_native_process.erl         |  409 +++++++++
 src/couch_os_daemons.erl             |  374 ++++++++
 src/couch_os_process.erl             |  216 +++++
 src/couch_passwords.erl              |  119 +++
 src/couch_primary_sup.erl            |   66 ++
 src/couch_query_servers.erl          |  616 +++++++++++++
 src/couch_ref_counter.erl            |  111 +++
 src/couch_secondary_sup.erl          |   49 ++
 src/couch_server.erl                 |  499 +++++++++++
 src/couch_server_sup.erl             |  164 ++++
 src/couch_stats_aggregator.erl       |  297 +++++++
 src/couch_stats_collector.erl        |  136 +++
 src/couch_stream.erl                 |  299 +++++++
 src/couch_task_status.erl            |  151 ++++
 src/couch_users_db.erl               |  121 +++
 src/couch_util.erl                   |  487 +++++++++++
 src/couch_uuids.erl                  |  103 +++
 src/couch_work_queue.erl             |  187 ++++
 src/json_stream_parse.erl            |  432 +++++++++
 111 files changed, 18271 insertions(+), 18372 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
deleted file mode 100644
index 9fe19bc..0000000
--- a/Makefile.am
+++ /dev/null
@@ -1,198 +0,0 @@
-## Licensed under the Apache License, Version 2.0 (the "License"); you may not
-## use this file except in compliance with the License. You may obtain a copy of
-## the License at
-##
-##   http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-## License for the specific language governing permissions and limitations under
-## the License.
-
-SUBDIRS = priv
-
-# devdocdir = $(localdocdir)/developer/couchdb
-couchlibdir = $(localerlanglibdir)/couch-$(version)
-couchincludedir = $(couchlibdir)/include
-couchebindir = $(couchlibdir)/ebin
-
-couchinclude_DATA = couch_db.hrl couch_js_functions.hrl
-couchebin_DATA = $(compiled_files)
-
-# dist_devdoc_DATA = $(doc_base) $(doc_modules)
-
-CLEANFILES = $(compiled_files) $(doc_base)
-
-# CLEANFILES = $(doc_modules) edoc-info
-
-source_files = \
-    couch.erl \
-    couch_app.erl \
-    couch_auth_cache.erl \
-    couch_btree.erl \
-    couch_changes.erl \
-    couch_compaction_daemon.erl \
-    couch_compress.erl \
-    couch_config.erl \
-    couch_config_writer.erl \
-    couch_db.erl \
-    couch_db_update_notifier.erl \
-    couch_db_update_notifier_sup.erl \
-    couch_doc.erl \
-    couch_drv.erl \
-    couch_ejson_compare.erl \
-    couch_event_sup.erl \
-    couch_external_manager.erl \
-    couch_external_server.erl \
-    couch_file.erl \
-    couch_httpd.erl \
-    couch_httpd_db.erl \
-    couch_httpd_auth.erl \
-    couch_httpd_cors.erl \
-    couch_httpd_oauth.erl \
-    couch_httpd_external.erl \
-    couch_httpd_misc_handlers.erl \
-    couch_httpd_proxy.erl \
-    couch_httpd_rewrite.erl \
-    couch_httpd_stats_handlers.erl \
-    couch_httpd_vhost.erl \
-    couch_key_tree.erl \
-    couch_log.erl \
-    couch_native_process.erl \
-    couch_os_daemons.erl \
-    couch_os_process.erl \
-    couch_passwords.erl \
-    couch_primary_sup.erl \
-    couch_query_servers.erl \
-    couch_ref_counter.erl \
-    couch_secondary_sup.erl \
-    couch_server.erl \
-    couch_server_sup.erl \
-    couch_stats_aggregator.erl \
-    couch_stats_collector.erl \
-    couch_stream.erl \
-    couch_task_status.erl \
-    couch_users_db.erl \
-    couch_util.erl \
-    couch_uuids.erl \
-    couch_db_updater.erl \
-    couch_work_queue.erl \
-    json_stream_parse.erl
-
-EXTRA_DIST = $(source_files) couch_db.hrl couch_js_functions.hrl
-
-compiled_files = \
-    couch.app \
-    couch.beam \
-    couch_app.beam \
-    couch_auth_cache.beam \
-    couch_btree.beam \
-    couch_changes.beam \
-    couch_compaction_daemon.beam \
-    couch_compress.beam \
-    couch_config.beam \
-    couch_config_writer.beam \
-    couch_db.beam \
-    couch_db_update_notifier.beam \
-    couch_db_update_notifier_sup.beam \
-    couch_doc.beam \
-    couch_drv.beam \
-    couch_ejson_compare.beam \
-    couch_event_sup.beam \
-    couch_external_manager.beam \
-    couch_external_server.beam \
-    couch_file.beam \
-    couch_httpd.beam \
-    couch_httpd_db.beam \
-    couch_httpd_auth.beam \
-    couch_httpd_oauth.beam \
-    couch_httpd_cors.beam \
-    couch_httpd_proxy.beam \
-    couch_httpd_external.beam \
-    couch_httpd_misc_handlers.beam \
-    couch_httpd_rewrite.beam \
-    couch_httpd_stats_handlers.beam \
-    couch_httpd_vhost.beam \
-    couch_key_tree.beam \
-    couch_log.beam \
-    couch_native_process.beam \
-    couch_os_daemons.beam \
-    couch_os_process.beam \
-    couch_passwords.beam \
-    couch_primary_sup.beam \
-    couch_query_servers.beam \
-    couch_ref_counter.beam \
-    couch_secondary_sup.beam \
-    couch_server.beam \
-    couch_server_sup.beam \
-    couch_stats_aggregator.beam \
-    couch_stats_collector.beam \
-    couch_stream.beam \
-    couch_task_status.beam \
-    couch_users_db.beam \
-    couch_util.beam \
-    couch_uuids.beam \
-    couch_db_updater.beam \
-    couch_work_queue.beam \
-    json_stream_parse.beam
-
-# doc_base = \
-#     erlang.png \
-#     index.html \
-#     modules-frame.html \
-#     overview-summary.html \
-#     packages-frame.html \
-#     stylesheet.css
-
-# doc_modules = \
-#     couch_btree.html \
-#     couch_config.html \
-#     couch_config_writer.html \
-#     couch_db.html \
-#     couch_db_update_notifier.html \
-#     couch_db_update_notifier_sup.html \
-#     couch_doc.html \
-#     couch_event_sup.html \
-#     couch_file.html \
-#     couch_httpd.html \
-#     couch_key_tree.html \
-#     couch_log.html \
-#     couch_query_servers.html \
-#     couch_rep.html \
-#     couch_rep_sup.html \
-#     couch_server.html \
-#     couch_server_sup.html \
-#     couch_stream.html \
-#     couch_util.html
-
-if WINDOWS
-couch.app: couch.app.tpl
-	modules=`find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ',' | sed "s/,$$//"`; \
-	sed -e "s|%package_name%|@package_name@|g" \
-			-e "s|%version%|@version@|g" \
-			-e "s|@modules@|$$modules|g" \
-			-e "s|%localconfdir%|../etc/couchdb|g" \
-			-e "s|@defaultini@|default.ini|g" \
-			-e "s|@localini@|local.ini|g" > \
-	$@ < $<
-else
-couch.app: couch.app.tpl
-	modules=`{ find . -name "*.erl" \! -name ".*" -exec basename {} .erl \; | tr '\n' ','; echo ''; } | sed "s/,$$//"`; \
-	sed -e "s|%package_name%|@package_name@|g" \
-			-e "s|%version%|@version@|g" \
-			-e "s|@modules@|$$modules|g" \
-			-e "s|%localconfdir%|@localconfdir@|g" \
-			-e "s|@defaultini@|default.ini|g" \
-			-e "s|@localini@|local.ini|g" > \
-	$@ < $<
-	chmod +x $@
-endif
-
-# $(dist_devdoc_DATA): edoc-info
-
-# $(ERL) -noshell -run edoc_run files [\"$<\"]
-
-%.beam: %.erl couch_db.hrl couch_js_functions.hrl
-	$(ERLC) $(ERLC_FLAGS) ${TEST} $<;
-

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch.app.tpl.in
----------------------------------------------------------------------
diff --git a/couch.app.tpl.in b/couch.app.tpl.in
deleted file mode 100644
index 2d75b91..0000000
--- a/couch.app.tpl.in
+++ /dev/null
@@ -1,27 +0,0 @@
-{application, couch, [
-    {description, "@package_name@"},
-    {vsn, "@version@"},
-    {modules, [@modules@]},
-    {registered, [
-        couch_config,
-        couch_db_update,
-        couch_db_update_notifier_sup,
-        couch_external_manager,
-        couch_httpd,
-        couch_log,
-        couch_primary_services,
-        couch_query_servers,
-        couch_secondary_services,
-        couch_server,
-        couch_server_sup,
-        couch_stats_aggregator,
-        couch_stats_collector,
-        couch_task_status
-    ]},
-    {mod, {couch_app, [
-        "%localconfdir%/@defaultini@",
-        "%localconfdir%/@localini@"
-    ]}},
-    {applications, [kernel, stdlib]},
-    {included_applications, [crypto, sasl, inets, oauth, ibrowse, mochiweb, os_mon]}
-]}.

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch.erl
----------------------------------------------------------------------
diff --git a/couch.erl b/couch.erl
deleted file mode 100644
index c18df0b..0000000
--- a/couch.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch).
-
--compile(export_all).
-
-start() ->
-    ok = application:start(couch).
-
-stop() ->
-    application:stop(couch).
-
-restart() ->
-    case stop() of
-    ok ->
-        start();
-    {error, {not_started,couch}} ->
-        start();
-    {error, Reason} ->
-        {error, Reason}
-    end.
-
-reload() ->
-    case supervisor:terminate_child(couch_server_sup, couch_config) of
-    ok ->
-        supervisor:restart_child(couch_server_sup, couch_config);
-    {error, Reason} ->
-        {error, Reason}
-    end.

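couch.erl above is the thin application wrapper: start/0 boots the couch
OTP application, restart/0 tolerates the not-started case, and reload/0
bounces the couch_config child under couch_server_sup. A usage sketch
from an Erlang shell, assuming the release's code paths and ini files are
already in place:

    1> couch:start().
    ok
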
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_app.erl
----------------------------------------------------------------------
diff --git a/couch_app.erl b/couch_app.erl
deleted file mode 100644
index 9644877..0000000
--- a/couch_app.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_app).
-
--behaviour(application).
-
--include("couch_db.hrl").
-
--export([start/2, stop/1]).
-
-start(_Type, DefaultIniFiles) ->
-    IniFiles = get_ini_files(DefaultIniFiles),
-    case start_apps([crypto, asn1, public_key, sasl, inets, oauth, ssl, ibrowse, syntax_tools, compiler, xmerl, mochiweb, os_mon]) of
-    ok ->
-        couch_server_sup:start_link(IniFiles);
-    {error, Reason} ->
-        {error, Reason}
-    end.
-
-stop(_) ->
-    ok.
-
-get_ini_files(Default) ->
-    case init:get_argument(couch_ini) of
-    error ->
-        Default;
-    {ok, [[]]} ->
-        Default;
-    {ok, [Values]} ->
-        Values
-    end.
-
-start_apps([]) ->
-    ok;
-start_apps([App|Rest]) ->
-    case application:start(App) of
-    ok ->
-       start_apps(Rest);
-    {error, {already_started, App}} ->
-       start_apps(Rest);
-    {error, _Reason} when App =:= public_key ->
-       % ignore on R12B5
-       start_apps(Rest);
-    {error, _Reason} ->
-       {error, {app_would_not_start, App}}
-    end.

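Note that get_ini_files/1 above lets the -couch_ini emulator flag override the
ini list baked into the .app file. A sketch, assuming the node was started
with `erl -couch_ini /etc/couchdb/local.ini` (path illustrative):

    1> init:get_argument(couch_ini).
    {ok,[["/etc/couchdb/local.ini"]]}
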
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_auth_cache.erl
----------------------------------------------------------------------
diff --git a/couch_auth_cache.erl b/couch_auth_cache.erl
deleted file mode 100644
index 42ccd44..0000000
--- a/couch_auth_cache.erl
+++ /dev/null
@@ -1,425 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_auth_cache).
--behaviour(gen_server).
-
-% public API
--export([get_user_creds/1]).
-
-% gen_server API
--export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
--include("couch_db.hrl").
--include("couch_js_functions.hrl").
-
--define(STATE, auth_state_ets).
--define(BY_USER, auth_by_user_ets).
--define(BY_ATIME, auth_by_atime_ets).
-
--record(state, {
-    max_cache_size = 0,
-    cache_size = 0,
-    db_notifier = nil,
-    db_mon_ref = nil
-}).
-
-
--spec get_user_creds(UserName::string() | binary()) ->
-    Credentials::list() | nil.
-
-get_user_creds(UserName) when is_list(UserName) ->
-    get_user_creds(?l2b(UserName));
-
-get_user_creds(UserName) ->
-    UserCreds = case couch_config:get("admins", ?b2l(UserName)) of
-    "-hashed-" ++ HashedPwdAndSalt ->
-        % the name is an admin, now check to see if there is a user doc
-        % which has a matching name, salt, and password_sha
-        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
-        case get_from_cache(UserName) of
-        nil ->
-            make_admin_doc(HashedPwd, Salt, []);
-        UserProps when is_list(UserProps) ->
-            make_admin_doc(HashedPwd, Salt, couch_util:get_value(<<"roles">>, UserProps))
-        end;
-    "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
-        [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
-        case get_from_cache(UserName) of
-        nil ->
-            make_admin_doc(HashedPwd, Salt, Iterations, []);
-        UserProps when is_list(UserProps) ->
-            make_admin_doc(HashedPwd, Salt, Iterations, couch_util:get_value(<<"roles">>, UserProps))
-        end;
-    _Else ->
-        get_from_cache(UserName)
-    end,
-    validate_user_creds(UserCreds).
-
-make_admin_doc(HashedPwd, Salt, ExtraRoles) ->
-    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
-     {<<"salt">>, ?l2b(Salt)},
-     {<<"password_scheme">>, <<"simple">>},
-     {<<"password_sha">>, ?l2b(HashedPwd)}].
-
-make_admin_doc(DerivedKey, Salt, Iterations, ExtraRoles) ->
-    [{<<"roles">>, [<<"_admin">>|ExtraRoles]},
-     {<<"salt">>, ?l2b(Salt)},
-     {<<"iterations">>, list_to_integer(Iterations)},
-     {<<"password_scheme">>, <<"pbkdf2">>},
-     {<<"derived_key">>, ?l2b(DerivedKey)}].
-
-get_from_cache(UserName) ->
-    exec_if_auth_db(
-        fun(_AuthDb) ->
-            maybe_refresh_cache(),
-            case ets:lookup(?BY_USER, UserName) of
-            [] ->
-                gen_server:call(?MODULE, {fetch, UserName}, infinity);
-            [{UserName, {Credentials, _ATime}}] ->
-                couch_stats_collector:increment({couchdb, auth_cache_hits}),
-                gen_server:cast(?MODULE, {cache_hit, UserName}),
-                Credentials
-            end
-        end,
-        nil
-    ).
-
-
-validate_user_creds(nil) ->
-    nil;
-validate_user_creds(UserCreds) ->
-    case couch_util:get_value(<<"_conflicts">>, UserCreds) of
-    undefined ->
-        ok;
-    _ConflictList ->
-        throw({unauthorized,
-            <<"User document conflicts must be resolved before the document",
-              " is used for authentication purposes.">>
-        })
-    end,
-    UserCreds.
-
-
-start_link() ->
-    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-
-init(_) ->
-    ?STATE = ets:new(?STATE, [set, protected, named_table]),
-    ?BY_USER = ets:new(?BY_USER, [set, protected, named_table]),
-    ?BY_ATIME = ets:new(?BY_ATIME, [ordered_set, private, named_table]),
-    process_flag(trap_exit, true),
-    ok = couch_config:register(
-        fun("couch_httpd_auth", "auth_cache_size", SizeList) ->
-            Size = list_to_integer(SizeList),
-            ok = gen_server:call(?MODULE, {new_max_cache_size, Size}, infinity);
-        ("couch_httpd_auth", "authentication_db", _DbName) ->
-            ok = gen_server:call(?MODULE, reinit_cache, infinity)
-        end
-    ),
-    {ok, Notifier} = couch_db_update_notifier:start_link(fun handle_db_event/1),
-    State = #state{
-        db_notifier = Notifier,
-        max_cache_size = list_to_integer(
-            couch_config:get("couch_httpd_auth", "auth_cache_size", "50")
-        )
-    },
-    {ok, reinit_cache(State)}.
-
-
-handle_db_event({Event, DbName}) ->
-    [{auth_db_name, AuthDbName}] = ets:lookup(?STATE, auth_db_name),
-    case DbName =:= AuthDbName of
-    true ->
-        case Event of
-        created -> gen_server:call(?MODULE, reinit_cache, infinity);
-        compacted -> gen_server:call(?MODULE, auth_db_compacted, infinity);
-        _Else   -> ok
-        end;
-    false ->
-        ok
-    end.
-
-
-handle_call(reinit_cache, _From, State) ->
-    catch erlang:demonitor(State#state.db_mon_ref, [flush]),
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    {reply, ok, reinit_cache(State)};
-
-handle_call(auth_db_compacted, _From, State) ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            true = ets:insert(?STATE, {auth_db, reopen_auth_db(AuthDb)})
-        end
-    ),
-    {reply, ok, State};
-
-handle_call({new_max_cache_size, NewSize},
-        _From, #state{cache_size = Size} = State) when NewSize >= Size ->
-    {reply, ok, State#state{max_cache_size = NewSize}};
-
-handle_call({new_max_cache_size, NewSize}, _From, State) ->
-    free_mru_cache_entries(State#state.cache_size - NewSize),
-    {reply, ok, State#state{max_cache_size = NewSize, cache_size = NewSize}};
-
-handle_call({fetch, UserName}, _From, State) ->
-    {Credentials, NewState} = case ets:lookup(?BY_USER, UserName) of
-    [{UserName, {Creds, ATime}}] ->
-        couch_stats_collector:increment({couchdb, auth_cache_hits}),
-        cache_hit(UserName, Creds, ATime),
-        {Creds, State};
-    [] ->
-        couch_stats_collector:increment({couchdb, auth_cache_misses}),
-        Creds = get_user_props_from_db(UserName),
-        State1 = add_cache_entry(UserName, Creds, erlang:now(), State),
-        {Creds, State1}
-    end,
-    {reply, Credentials, NewState};
-
-handle_call(refresh, _From, State) ->
-    exec_if_auth_db(fun refresh_entries/1),
-    {reply, ok, State}.
-
-
-handle_cast({cache_hit, UserName}, State) ->
-    case ets:lookup(?BY_USER, UserName) of
-    [{UserName, {Credentials, ATime}}] ->
-        cache_hit(UserName, Credentials, ATime);
-    _ ->
-        ok
-    end,
-    {noreply, State}.
-
-
-handle_info({'DOWN', Ref, _, _, _Reason}, #state{db_mon_ref = Ref} = State) ->
-    {noreply, reinit_cache(State)}.
-
-
-terminate(_Reason, #state{db_notifier = Notifier}) ->
-    couch_db_update_notifier:stop(Notifier),
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    true = ets:delete(?BY_USER),
-    true = ets:delete(?BY_ATIME),
-    true = ets:delete(?STATE).
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-clear_cache(State) ->
-    exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
-    true = ets:delete_all_objects(?BY_USER),
-    true = ets:delete_all_objects(?BY_ATIME),
-    State#state{cache_size = 0}.
-
-
-reinit_cache(State) ->
-    NewState = clear_cache(State),
-    AuthDbName = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db")),
-    true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
-    AuthDb = open_auth_db(),
-    true = ets:insert(?STATE, {auth_db, AuthDb}),
-    NewState#state{db_mon_ref = couch_db:monitor(AuthDb)}.
-
-
-add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) ->
-    State;
-add_cache_entry(UserName, Credentials, ATime, State) ->
-    case State#state.cache_size >= State#state.max_cache_size of
-    true ->
-        free_mru_cache_entry();
-    false ->
-        ok
-    end,
-    true = ets:insert(?BY_ATIME, {ATime, UserName}),
-    true = ets:insert(?BY_USER, {UserName, {Credentials, ATime}}),
-    State#state{cache_size = couch_util:get_value(size, ets:info(?BY_USER))}.
-
-free_mru_cache_entries(0) ->
-    ok;
-free_mru_cache_entries(N) when N > 0 ->
-    free_mru_cache_entry(),
-    free_mru_cache_entries(N - 1).
-
-free_mru_cache_entry() ->
-    MruTime = ets:last(?BY_ATIME),
-    [{MruTime, UserName}] = ets:lookup(?BY_ATIME, MruTime),
-    true = ets:delete(?BY_ATIME, MruTime),
-    true = ets:delete(?BY_USER, UserName).
-
-
-cache_hit(UserName, Credentials, ATime) ->
-    NewATime = erlang:now(),
-    true = ets:delete(?BY_ATIME, ATime),
-    true = ets:insert(?BY_ATIME, {NewATime, UserName}),
-    true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}).
-
-
-refresh_entries(AuthDb) ->
-    case reopen_auth_db(AuthDb) of
-    nil ->
-        ok;
-    AuthDb2 ->
-        case AuthDb2#db.update_seq > AuthDb#db.update_seq of
-        true ->
-            {ok, _, _} = couch_db:enum_docs_since(
-                AuthDb2,
-                AuthDb#db.update_seq,
-                fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
-                AuthDb#db.update_seq,
-                []
-            ),
-            true = ets:insert(?STATE, {auth_db, AuthDb2});
-        false ->
-            ok
-        end
-    end.
-
-
-refresh_entry(Db, #doc_info{high_seq = DocSeq} = DocInfo) ->
-    case is_user_doc(DocInfo) of
-    {true, UserName} ->
-        case ets:lookup(?BY_USER, UserName) of
-        [] ->
-            ok;
-        [{UserName, {_OldCreds, ATime}}] ->
-            {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted]),
-            NewCreds = user_creds(Doc),
-            true = ets:insert(?BY_USER, {UserName, {NewCreds, ATime}})
-        end;
-    false ->
-        ok
-    end,
-    {ok, DocSeq}.
-
-
-user_creds(#doc{deleted = true}) ->
-    nil;
-user_creds(#doc{} = Doc) ->
-    {Creds} = couch_doc:to_json_obj(Doc, []),
-    Creds.
-
-
-is_user_doc(#doc_info{id = <<"org.couchdb.user:", UserName/binary>>}) ->
-    {true, UserName};
-is_user_doc(_) ->
-    false.
-
-
-maybe_refresh_cache() ->
-    case cache_needs_refresh() of
-    true ->
-        ok = gen_server:call(?MODULE, refresh, infinity);
-    false ->
-        ok
-    end.
-
-
-cache_needs_refresh() ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            case reopen_auth_db(AuthDb) of
-            nil ->
-                false;
-            AuthDb2 ->
-                AuthDb2#db.update_seq > AuthDb#db.update_seq
-            end
-        end,
-        false
-    ).
-
-
-reopen_auth_db(AuthDb) ->
-    case (catch couch_db:reopen(AuthDb)) of
-    {ok, AuthDb2} ->
-        AuthDb2;
-    _ ->
-        nil
-    end.
-
-
-exec_if_auth_db(Fun) ->
-    exec_if_auth_db(Fun, ok).
-
-exec_if_auth_db(Fun, DefRes) ->
-    case ets:lookup(?STATE, auth_db) of
-    [{auth_db, #db{} = AuthDb}] ->
-        Fun(AuthDb);
-    _ ->
-        DefRes
-    end.
-
-
-open_auth_db() ->
-    [{auth_db_name, DbName}] = ets:lookup(?STATE, auth_db_name),
-    {ok, AuthDb} = ensure_users_db_exists(DbName, [sys_db]),
-    AuthDb.
-
-
-get_user_props_from_db(UserName) ->
-    exec_if_auth_db(
-        fun(AuthDb) ->
-            Db = reopen_auth_db(AuthDb),
-            DocId = <<"org.couchdb.user:", UserName/binary>>,
-            try
-                {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
-                {DocProps} = couch_doc:to_json_obj(Doc, []),
-                DocProps
-            catch
-            _:_Error ->
-                nil
-            end
-        end,
-        nil
-    ).
-
-ensure_users_db_exists(DbName, Options) ->
-    Options1 = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}, nologifmissing | Options],
-    case couch_db:open(DbName, Options1) of
-    {ok, Db} ->
-        ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        {ok, Db};
-    _Error ->
-        {ok, Db} = couch_db:create(DbName, Options1),
-        ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
-        {ok, Db}
-    end.
-
-ensure_auth_ddoc_exists(Db, DDocId) ->
-    case couch_db:open_doc(Db, DDocId) of
-    {not_found, _Reason} ->
-        {ok, AuthDesign} = auth_design_doc(DDocId),
-        {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
-    {ok, Doc} ->
-        {Props} = couch_doc:to_json_obj(Doc, []),
-        case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
-            ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
-                ok;
-            _ ->
-                Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
-                    {<<"validate_doc_update">>,
-                    ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
-                couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
-        end
-    end,
-    ok.
-
-auth_design_doc(DocId) ->
-    DocProps = [
-        {<<"_id">>, DocId},
-        {<<"language">>,<<"javascript">>},
-        {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
-    ],
-    {ok, couch_doc:from_json_obj({DocProps})}.

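The public surface of this cache was get_user_creds/1 alone; a minimal caller
sketch, assuming a running node and a hypothetical user name:

    %% returns the user doc body as a proplist, or nil when unknown
    case couch_auth_cache:get_user_creds(<<"jan">>) of
        nil ->
            unknown_user;
        Creds when is_list(Creds) ->
            couch_util:get_value(<<"roles">>, Creds, [])
    end
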
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_btree.erl
----------------------------------------------------------------------
diff --git a/couch_btree.erl b/couch_btree.erl
deleted file mode 100644
index 789819e..0000000
--- a/couch_btree.erl
+++ /dev/null
@@ -1,714 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_btree).
-
--export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
--export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
--export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
--export([less/3]).
-
--include("couch_db.hrl").
--define(CHUNK_THRESHOLD, 16#4ff).
-
-extract(#btree{extract_kv=Extract}, Value) ->
-    Extract(Value).
-
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
-    Assemble(Key, Value).
-
-less(#btree{less=Less}, A, B) ->
-    Less(A, B).
-
-% pass in 'nil' for State if this is a new Btree.
-open(State, Fd) ->
-    {ok, #btree{root=State, fd=Fd}}.
-
-set_options(Bt, []) ->
-    Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
-    set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
-    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
-    set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
-    set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
-    set_options(Bt#btree{compression=Comp}, Rest).
-
-open(State, Fd, Options) ->
-    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
-
-get_state(#btree{root=Root}) ->
-    Root.
-
-final_reduce(#btree{reduce=Reduce}, Val) ->
-    final_reduce(Reduce, Val);
-final_reduce(Reduce, {[], []}) ->
-    Reduce(reduce, []);
-final_reduce(_Bt, {[], [Red]}) ->
-    Red;
-final_reduce(Reduce, {[], Reductions}) ->
-    Reduce(rereduce, Reductions);
-final_reduce(Reduce, {KVs, Reductions}) ->
-    Red = Reduce(reduce, KVs),
-    final_reduce(Reduce, {[], [Red | Reductions]}).
-
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    StartKey = couch_util:get_value(start_key, Options),
-    InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
-    KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end),
-    try
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
-            KeyGroupFun, Fun, Acc),
-        if GroupedKey2 == undefined ->
-            {ok, Acc2};
-        true ->
-            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
-            {ok, Acc3} -> {ok, Acc3};
-            {stop, Acc3} -> {ok, Acc3}
-            end
-        end
-    catch
-        throw:{stop, AccDone} -> {ok, AccDone}
-    end.
-
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
-    {ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
-    {ok, element(2, Root)}.
-
-size(#btree{root = nil}) ->
-    0;
-size(#btree{root = {_P, _Red}}) ->
-    % pre 1.2 format
-    nil;
-size(#btree{root = {_P, _Red, Size}}) ->
-    Size.
-
-% wraps a 2 or 3 arity function with the proper 4 arity function
-convert_fun_arity(Fun) when is_function(Fun, 2) ->
-    fun
-        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 3) ->
-    fun
-        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
-        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
-    end;
-convert_fun_arity(Fun) when is_function(Fun, 4) ->
-    Fun.    % Already arity 4
-
-make_key_in_end_range_function(#btree{less=Less}, fwd, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not Less(LastKey, Key) end
-        end;
-    EndKey ->
-        fun(Key) -> Less(Key, EndKey) end
-    end;
-make_key_in_end_range_function(#btree{less=Less}, rev, Options) ->
-    case couch_util:get_value(end_key_gt, Options) of
-    undefined ->
-        case couch_util:get_value(end_key, Options) of
-        undefined ->
-            fun(_Key) -> true end;
-        LastKey ->
-            fun(Key) -> not Less(Key, LastKey) end
-        end;
-    EndKey ->
-        fun(Key) -> Less(EndKey, Key) end
-    end.
-
-
-foldl(Bt, Fun, Acc) ->
-    fold(Bt, Fun, Acc, []).
-
-foldl(Bt, Fun, Acc, Options) ->
-    fold(Bt, Fun, Acc, Options).
-
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
-    {ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
-    Dir = couch_util:get_value(dir, Options, fwd),
-    InRange = make_key_in_end_range_function(Bt, Dir, Options),
-    Result =
-    case couch_util:get_value(start_key, Options) of
-    undefined ->
-        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
-                convert_fun_arity(Fun), Acc);
-    StartKey ->
-        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
-                convert_fun_arity(Fun), Acc)
-    end,
-    case Result of
-    {ok, Acc2}->
-        FullReduction = element(2, Root),
-        {ok, {[], [FullReduction]}, Acc2};
-    {stop, LastReduction, Acc2} ->
-        {ok, LastReduction, Acc2}
-    end.
-
-add(Bt, InsertKeyValues) ->
-    add_remove(Bt, InsertKeyValues, []).
-
-add_remove(Bt, InsertKeyValues, RemoveKeys) ->
-    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
-    {ok, Bt2}.
-
-query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
-    #btree{root=Root} = Bt,
-    InsertActions = lists:map(
-        fun(KeyValue) ->
-            {Key, Value} = extract(Bt, KeyValue),
-            {insert, Key, Value}
-        end, InsertValues),
-    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
-    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
-    SortFun =
-        fun({OpA, A, _}, {OpB, B, _}) ->
-            case A == B of
-            % A and B are equal, sort by op.
-            true -> op_order(OpA) < op_order(OpB);
-            false ->
-                less(Bt, A, B)
-            end
-        end,
-    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
-    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
-    {ok, NewRoot} = complete_root(Bt, KeyPointers),
-    {ok, QueryResults, Bt#btree{root=NewRoot}}.
-
-% for ordering different operations with the same key.
-% fetch < remove < insert
-op_order(fetch) -> 1;
-op_order(remove) -> 2;
-op_order(insert) -> 3.
-
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
-    SortedKeys = lists:sort(Less, Keys),
-    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
-    % We want to return the results in the same order as the keys were input
-    % but we may have changed the order when we sorted. So we need to put the
-    % order back into the results.
-    couch_util:reorder_results(Keys, SortedResults).
-
-lookup(_Bt, nil, Keys) ->
-    {ok, [{Key, not_found} || Key <- Keys]};
-lookup(Bt, Node, Keys) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
-    kv_node ->
-        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
-    end.
-
-lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
-    {Key, PointerInfo} = element(N, NodeTuple),
-    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
-    case lists:splitwith(SplitFun, LookupKeys) of
-    {[], GreaterQueries} ->
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
-    {LessEqQueries, GreaterQueries} ->
-        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
-        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
-    end.
-
-
-lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
-    {ok, lists:reverse(Output)};
-lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
-    % keys not found
-    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
-lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
-    {Key, Value} = element(N, NodeTuple),
-    case less(Bt, LookupKey, Key) of
-    true ->
-        % LookupKey is less than Key
-        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
-    false ->
-        case less(Bt, Key, LookupKey) of
-        true ->
-            % LookupKey is greater than Key
-            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
-        false ->
-            % LookupKey is equal to Key
-            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
-        end
-    end.
-
-
-complete_root(_Bt, []) ->
-    {ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
-    {ok, PointerInfo};
-complete_root(Bt, KPs) ->
-    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
-    complete_root(Bt, ResultKeyPointers).
-
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
-% It is inaccurate as it does not account for compression when blocks are
-% written. Plus the repeated ?term_size/1 (term_to_binary) calls are
-% probably really inefficient.
-
-chunkify(InList) ->
-    case ?term_size(InList) of
-    Size when Size > ?CHUNK_THRESHOLD ->
-        NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
-        ChunkThreshold = Size div NumberOfChunksLikely,
-        chunkify(InList, ChunkThreshold, [], 0, []);
-    _Else ->
-        [InList]
-    end.
-
-chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
-    lists:reverse(OutputChunks);
-chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
-    lists:reverse([lists:reverse(OutList) | OutputChunks]);
-chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
-    case ?term_size(InElement) of
-    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
-        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
-    Size ->
-        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
-    end.
-
-modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
-    case RootPointerInfo of
-    nil ->
-        NodeType = kv_node,
-        NodeList = [];
-    _Tuple ->
-        Pointer = element(1, RootPointerInfo),
-        {NodeType, NodeList} = get_node(Bt, Pointer)
-    end,
-    NodeTuple = list_to_tuple(NodeList),
-
-    {ok, NewNodeList, QueryOutput2} =
-    case NodeType of
-    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
-    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
-    end,
-    case NewNodeList of
-    [] ->  % no nodes remain
-        {ok, [], QueryOutput2};
-    NodeList ->  % nothing changed
-        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
-        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
-    _Else2 ->
-        {ok, ResultList} = write_node(Bt, NodeType, NewNodeList),
-        {ok, ResultList, QueryOutput2}
-    end.
-
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
-    [];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
-    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
-    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
-
-reduce_tree_size(kv_node, NodeSize, _KvList) ->
-    NodeSize;
-reduce_tree_size(kp_node, NodeSize, []) ->
-    NodeSize;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
-    % pre 1.2 format
-    nil;
-reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
-    nil;
-reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
-    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).
-
-get_node(#btree{fd = Fd}, NodePos) ->
-    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
-    {NodeType, NodeList}.
-
-write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
-    % split up nodes into smaller sizes
-    NodeListList = chunkify(NodeList),
-    % now write out each chunk and return the KeyPointer pairs for those nodes
-    ResultList = [
-        begin
-            {ok, Pointer, Size} = couch_file:append_term(
-                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
-            {LastKey, _} = lists:last(ANodeList),
-            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
-            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
-        end
-    ||
-        ANodeList <- NodeListList
-    ],
-    {ok, ResultList}.
-
-modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
-    modify_node(Bt, nil, Actions, QueryOutput);
-modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
-        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
-    Sz = tuple_size(NodeTuple),
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
-    case N =:= Sz of
-    true  ->
-        % perform remaining actions on last node
-        {_, PointerInfo} = element(Sz, NodeTuple),
-        {ok, ChildKPs, QueryOutput2} =
-            modify_node(Bt, PointerInfo, Actions, QueryOutput),
-        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
-            Sz - 1, ChildKPs)),
-        {ok, NodeList, QueryOutput2};
-    false ->
-        {NodeKey, PointerInfo} = element(N, NodeTuple),
-        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
-                not less(Bt, NodeKey, ActionKey)
-            end,
-        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
-        {ok, ChildKPs, QueryOutput2} =
-                modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
-        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
-                LowerBound, N - 1, ResultNode)),
-        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
-    end.
-
-bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
-    Tail;
-bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
-bounded_tuple_to_list(Tuple, Start, End, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
-bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
-    lists:reverse(Acc, Tail);
-bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
-    bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
-
-find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
-    End;
-find_first_gteq(Bt, Tuple, Start, End, Key) ->
-    Mid = Start + ((End - Start) div 2),
-    {TupleKey, _} = element(Mid, Tuple),
-    case less(Bt, TupleKey, Key) of
-    true ->
-        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
-    false ->
-        find_first_gteq(Bt, Tuple, Start, Mid, Key)
-    end.
-
-modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
-    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
-    case ActionType of
-    insert ->
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-    remove ->
-        % just drop the action
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
-    fetch ->
-        % the key/value must not exist in the tree
-        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-    end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
-    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
-    {Key, Value} = element(N, NodeTuple),
-    ResultNode =  bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
-    case less(Bt, ActionKey, Key) of
-    true ->
-        case ActionType of
-        insert ->
-            % ActionKey is less than the Key, so insert
-            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-        remove ->
-            % ActionKey is less than the Key, just drop the action
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
-        fetch ->
-            % ActionKey is less than the Key, the key/value must not exist in the tree
-            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
-        end;
-    false ->
-        % ActionKey and Key may be equal.
-        case less(Bt, Key, ActionKey) of
-        false ->
-            case ActionType of
-            insert ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
-            remove ->
-                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
-            fetch ->
-                % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
-                % since an identical action key can follow it.
-                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
-            end;
-        true ->
-            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
-        end
-    end.
-
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    P = element(1, Node),
-    case get_node(Bt, P) of
-    {kp_node, NodeList} ->
-        NodeList2 = adjust_dir(Dir, NodeList),
-        reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
-    {kv_node, KVs} ->
-        KVs2 = adjust_dir(Dir, KVs),
-        reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
-    end.
-
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-
-    GTEKeyStartKVs =
-    case KeyStart of
-    undefined ->
-        KVs;
-    _ ->
-        DropFun = case Dir of
-        fwd ->
-            fun({Key, _}) -> less(Bt, Key, KeyStart) end;
-        rev ->
-            fun({Key, _}) -> less(Bt, KeyStart, Key) end
-        end,
-        lists:dropwhile(DropFun, KVs)
-    end,
-    KVs2 = lists:takewhile(
-        fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
-    reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-        _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
-        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    case GroupedKey of
-    undefined ->
-        reduce_stream_kv_node2(Bt, RestKVs, Key,
-                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
-    _ ->
-
-        case KeyGroupFun(GroupedKey, Key) of
-        true ->
-            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
-                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
-                Fun, Acc);
-        false ->
-            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
-            {ok, Acc2} ->
-                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
-                    [], KeyGroupFun, Fun, Acc2);
-            {stop, Acc2} ->
-                throw({stop, Acc2})
-            end
-        end
-    end.
-
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
-                        KeyGroupFun, Fun, Acc) ->
-    Nodes =
-    case KeyStart of
-    undefined ->
-        NodeList;
-    _ ->
-        case Dir of
-        fwd ->
-            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
-        rev ->
-            RevKPs = lists:reverse(NodeList),
-            case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
-            {_Before, []} ->
-                NodeList;
-            {Before, [FirstAfter | _]} ->
-                [FirstAfter | lists:reverse(Before)]
-            end
-        end
-    end,
-    {InRange, MaybeInRange} = lists:splitwith(
-        fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
-    NodesInRange = case MaybeInRange of
-    [FirstMaybeInRange | _] when Dir =:= fwd ->
-        InRange ++ [FirstMaybeInRange];
-    _ ->
-        InRange
-    end,
-    reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
-                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
-    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
-                [], [], KeyGroupFun, Fun, Acc),
-    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
-            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
-        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
-    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
-        KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} =
-    case Grouped0 of
-    [] ->
-        {Grouped0, Ungrouped0};
-    _ ->
-        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
-        {RestGrouped, [FirstGrouped | Ungrouped0]}
-    end,
-    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
-    case UngroupedNodes of
-    [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
-            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
-                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
-        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
-                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-    [] ->
-        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
-    end.
-
-adjust_dir(fwd, List) ->
-    List;
-adjust_dir(rev, List) ->
-    lists:reverse(List).
-
-stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
-    end.
-
-stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
-    Pointer = element(1, Node),
-    {NodeType, NodeList} = get_node(Bt, Pointer),
-    case NodeType of
-    kp_node ->
-        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
-    kv_node ->
-        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
-    end.
-
-stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
-    Red = element(2, Node),
-    case Fun(traverse, Key, Red, Acc) of
-    {ok, Acc2} ->
-        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
-        {ok, Acc3} ->
-            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
-        {stop, LastReds, Acc3} ->
-            {stop, LastReds, Acc3}
-        end;
-    {skip, Acc2} ->
-        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2)
-    end.
-
-drop_nodes(_Bt, Reds, _StartKey, []) ->
-    {Reds, []};
-drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
-    case less(Bt, NodeKey, StartKey) of
-    true ->
-        drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
-    false ->
-        {Reds, [{NodeKey, Node} | RestKPs]}
-    end.
-
-stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
-    {NewReds, NodesToStream} =
-    case Dir of
-    fwd ->
-        % drop all nodes sorting before the key
-        drop_nodes(Bt, Reds, StartKey, KPs);
-    rev ->
-        % keep all nodes sorting before the key, AND the first node to sort after
-        RevKPs = lists:reverse(KPs),
-        case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
-        {_RevsBefore, []} ->
-            % everything sorts before it
-            {Reds, KPs};
-        {RevBefore, [FirstAfter | Drop]} ->
-            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
-                 [FirstAfter | lists:reverse(RevBefore)]}
-        end
-    end,
-    case NodesToStream of
-    [] ->
-        {ok, Acc};
-    [{_Key, Node} | Rest] ->
-        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
-        {ok, Acc2} ->
-            Red = element(2, Node),
-            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
-        {stop, LastReds, Acc2} ->
-            {stop, LastReds, Acc2}
-        end
-    end.
-
-stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
-    DropFun =
-    case Dir of
-    fwd ->
-        fun({Key, _}) -> less(Bt, Key, StartKey) end;
-    rev ->
-        fun({Key, _}) -> less(Bt, StartKey, Key) end
-    end,
-    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
-    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
-    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
-
-stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
-    {ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
-    case InRange(K) of
-    false ->
-        {stop, {PrevKVs, Reds}, Acc};
-    true ->
-        AssembledKV = assemble(Bt, K, V),
-        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
-        {ok, Acc2} ->
-            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
-        {stop, Acc2} ->
-            {stop, {PrevKVs, Reds}, Acc2}
-        end
-    end.

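As a usage sketch for the module above, assuming the #btree record defaults
from couch_db.hrl (term-order less, identity extract/assemble) and an
illustrative file name; return shapes follow lookup/2 and fold/4 as defined
in the diff:

    %% sketch: build a small tree, look keys up, then fold over it
    {ok, Fd} = couch_file:open("btree_demo.couch", [create, overwrite]),
    {ok, Bt0} = couch_btree:open(nil, Fd),    %% nil root = empty tree
    {ok, Bt1} = couch_btree:add_remove(Bt0, [{a, 1}, {b, 2}], []),
    [{ok, {a, 1}}, not_found] = couch_btree:lookup(Bt1, [a, c]),
    {ok, _Reds, KVs} = couch_btree:foldl(Bt1,
        fun(KV, Acc) -> {ok, [KV | Acc]} end, []).
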
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/75f30dbe/couch_changes.erl
----------------------------------------------------------------------
diff --git a/couch_changes.erl b/couch_changes.erl
deleted file mode 100644
index 6edde32..0000000
--- a/couch_changes.erl
+++ /dev/null
@@ -1,577 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_changes).
--include("couch_db.hrl").
-
--export([handle_changes/3]).
-
-% For the builtin filter _doc_ids, this is the maximum number
-% of documents for which we trigger the optimized code path.
--define(MAX_DOC_IDS, 100).
-
--record(changes_acc, {
-    db,
-    seq,
-    prepend,
-    filter,
-    callback,
-    user_acc,
-    resp_type,
-    limit,
-    include_docs,
-    doc_options,
-    conflicts,
-    timeout,
-    timeout_fun
-}).
-
-%% @type Req -> #httpd{} | {json_req, JsonObj()}
-handle_changes(Args1, Req, Db0) ->
-    #changes_args{
-        style = Style,
-        filter = FilterName,
-        feed = Feed,
-        dir = Dir,
-        since = Since
-    } = Args1,
-    {FilterFun, FilterArgs} = make_filter_fun(FilterName, Style, Req, Db0),
-    Args = Args1#changes_args{filter_fun = FilterFun, filter_args = FilterArgs},
-    Start = fun() ->
-        {ok, Db} = couch_db:reopen(Db0),
-        StartSeq = case Dir of
-        rev ->
-            couch_db:get_update_seq(Db);
-        fwd ->
-            Since
-        end,
-        {Db, StartSeq}
-    end,
-    % begin timer to deal with heartbeat when filter function fails
-    case Args#changes_args.heartbeat of
-    undefined ->
-        erlang:erase(last_changes_heartbeat);
-    Val when is_integer(Val); Val =:= true ->
-        put(last_changes_heartbeat, now())
-    end,
-
-    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
-    true ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            Self = self(),
-            {ok, Notify} = couch_db_update_notifier:start_link(
-                fun({_, DbName}) when Db0#db.name == DbName ->
-                    Self ! db_updated;
-                (_) ->
-                    ok
-                end
-            ),
-            {Db, StartSeq} = Start(),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
-                             <<"">>, Timeout, TimeoutFun),
-            try
-                keep_sending_changes(
-                    Args#changes_args{dir=fwd},
-                    Acc0,
-                    true)
-            after
-                couch_db_update_notifier:stop(Notify),
-                get_rest_db_updated(ok) % clean out any remaining update messages
-            end
-        end;
-    false ->
-        fun(CallbackAcc) ->
-            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
-            UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
-            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
-            {Db, StartSeq} = Start(),
-            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
-                             UserAcc2, Db, StartSeq, <<>>, Timeout, TimeoutFun),
-            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
-                send_changes(
-                    Args#changes_args{feed="normal"},
-                    Acc0,
-                    true),
-            end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
-        end
-    end.
-
-get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
-    Pair;
-get_callback_acc(Callback) when is_function(Callback, 2) ->
-    {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
-%% @type Req -> #httpd{} | {json_req, JsonObj()}
-make_filter_fun([$_ | _] = FilterName, Style, Req, Db) ->
-    builtin_filter_fun(FilterName, Style, Req, Db);
-make_filter_fun(FilterName, Style, Req, Db) ->
-    {os_filter_fun(FilterName, Style, Req, Db), []}.
-
-os_filter_fun(FilterName, Style, Req, Db) ->
-    case [list_to_binary(couch_httpd:unquote(Part))
-            || Part <- string:tokens(FilterName, "/")] of
-    [] ->
-        fun(_Db2, #doc_info{revs=Revs}) ->
-                builtin_results(Style, Revs)
-        end;
-    [DName, FName] ->
-        DesignId = <<"_design/", DName/binary>>,
-        DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-        % validate that the ddoc has the filter fun
-        #doc{body={Props}} = DDoc,
-        couch_util:get_nested_json_value({Props}, [<<"filters">>, FName]),
-        fun(Db2, DocInfo) ->
-            DocInfos =
-            case Style of
-            main_only ->
-                [DocInfo];
-            all_docs ->
-                [DocInfo#doc_info{revs=[Rev]}|| Rev <- DocInfo#doc_info.revs]
-            end,
-            Docs = [Doc || {ok, Doc} <- [
-                    couch_db:open_doc(Db2, DocInfo2, [deleted, conflicts])
-                        || DocInfo2 <- DocInfos]],
-            {ok, Passes} = couch_query_servers:filter_docs(
-                Req, Db2, DDoc, FName, Docs
-            ),
-            [{[{<<"rev">>, couch_doc:rev_to_str({RevPos,RevId})}]}
-                || {Pass, #doc{revs={RevPos,[RevId|_]}}}
-                <- lists:zip(Passes, Docs), Pass == true]
-        end;
-    _Else ->
-        throw({bad_request,
-            "filter parameter must be of the form `designname/filtername`"})
-    end.
-
-builtin_filter_fun("_doc_ids", Style, {json_req, {Props}}, _Db) ->
-    DocIds = couch_util:get_value(<<"doc_ids">>, Props),
-    {filter_docids(DocIds, Style), DocIds};
-builtin_filter_fun("_doc_ids", Style, #httpd{method='POST'}=Req, _Db) ->
-    {Props} = couch_httpd:json_body_obj(Req),
-    DocIds = couch_util:get_value(<<"doc_ids">>, Props, nil),
-    {filter_docids(DocIds, Style), DocIds};
-builtin_filter_fun("_doc_ids", Style, #httpd{method='GET'}=Req, _Db) ->
-    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
-    {filter_docids(DocIds, Style), DocIds};
-builtin_filter_fun("_design", Style, _Req, _Db) ->
-    {filter_designdoc(Style), []};
-builtin_filter_fun("_view", Style, Req, Db) ->
-    ViewName = couch_httpd:qs_value(Req, "view", ""),
-    {filter_view(ViewName, Style, Db), []};
-builtin_filter_fun(_FilterName, _Style, _Req, _Db) ->
-    throw({bad_request, "unknown builtin filter name"}).
-
-filter_docids(DocIds, Style) when is_list(DocIds)->
-    fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
-            case lists:member(DocId, DocIds) of
-                true ->
-                    builtin_results(Style, Revs);
-                _ -> []
-            end
-    end;
-filter_docids(_, _) ->
-    throw({bad_request, "`doc_ids` filter parameter is not a list."}).
-
-filter_designdoc(Style) ->
-    fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
-            case DocId of
-            <<"_design", _/binary>> ->
-                    builtin_results(Style, Revs);
-                _ -> []
-            end
-    end.
-
-filter_view("", _Style, _Db) ->
-    throw({bad_request, "`view` filter parameter is not provided."});
-filter_view(ViewName, Style, Db) ->
-    case [list_to_binary(couch_httpd:unquote(Part))
-            || Part <- string:tokens(ViewName, "/")] of
-        [] ->
-            throw({bad_request, "Invalid `view` parameter."});
-        [DName, VName] ->
-            DesignId = <<"_design/", DName/binary>>,
-            DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
-            % validate that the ddoc has the view
-            #doc{body={Props}} = DDoc,
-            couch_util:get_nested_json_value({Props}, [<<"views">>, VName]),
-            fun(Db2, DocInfo) ->
-                DocInfos =
-                case Style of
-                main_only ->
-                    [DocInfo];
-                all_docs ->
-                    [DocInfo#doc_info{revs=[Rev]}|| Rev <- DocInfo#doc_info.revs]
-                end,
-                Docs = [Doc || {ok, Doc} <- [
-                        couch_db:open_doc(Db2, DocInfo2, [deleted, conflicts])
-                            || DocInfo2 <- DocInfos]],
-                {ok, Passes} = couch_query_servers:filter_view(
-                    DDoc, VName, Docs
-                ),
-                [{[{<<"rev">>, couch_doc:rev_to_str({RevPos,RevId})}]}
-                    || {Pass, #doc{revs={RevPos,[RevId|_]}}}
-                    <- lists:zip(Passes, Docs), Pass == true]
-            end
-        end.
-
-builtin_results(Style, [#rev_info{rev=Rev}|_]=Revs) ->
-    case Style of
-        main_only ->
-            [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-        all_docs ->
-            [{[{<<"rev">>, couch_doc:rev_to_str(R)}]}
-                || #rev_info{rev=R} <- Revs]
-    end.
-
-get_changes_timeout(Args, Callback) ->
-    #changes_args{
-        heartbeat = Heartbeat,
-        timeout = Timeout,
-        feed = ResponseType
-    } = Args,
-    DefaultTimeout = list_to_integer(
-        couch_config:get("httpd", "changes_timeout", "60000")
-    ),
-    case Heartbeat of
-    undefined ->
-        case Timeout of
-        undefined ->
-            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
-        infinity ->
-            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
-        _ ->
-            {lists:min([DefaultTimeout, Timeout]),
-                fun(UserAcc) -> {stop, UserAcc} end}
-        end;
-    true ->
-        {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
-    _ ->
-        {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
-    end.
-
-start_sending_changes(_Callback, UserAcc, ResponseType)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    UserAcc;
-start_sending_changes(Callback, UserAcc, ResponseType) ->
-    Callback(start, ResponseType, UserAcc).
-
-build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) ->
-    #changes_args{
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        limit = Limit,
-        feed = ResponseType,
-        filter_fun = FilterFun
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq,
-        prepend = Prepend,
-        filter = FilterFun,
-        callback = Callback,
-        user_acc = UserAcc,
-        resp_type = ResponseType,
-        limit = Limit,
-        include_docs = IncludeDocs,
-        doc_options = DocOpts,
-        conflicts = Conflicts,
-        timeout = Timeout,
-        timeout_fun = TimeoutFun
-    }.
-
-send_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        dir = Dir,
-        filter = FilterName,
-        filter_args = FilterArgs
-    } = Args,
-    #changes_acc{
-        db = Db,
-        seq = StartSeq
-    } = Acc0,
-    case FirstRound of
-    true ->
-        case FilterName of
-        "_doc_ids" when length(FilterArgs) =< ?MAX_DOC_IDS ->
-            send_changes_doc_ids(
-                FilterArgs, Db, StartSeq, Dir, fun changes_enumerator/2, Acc0);
-        "_design" ->
-            send_changes_design_docs(
-                Db, StartSeq, Dir, fun changes_enumerator/2, Acc0);
-        _ ->
-            couch_db:changes_since(
-                Db, StartSeq, fun changes_enumerator/2, [{dir, Dir}], Acc0)
-        end;
-    false ->
-        couch_db:changes_since(
-            Db, StartSeq, fun changes_enumerator/2, [{dir, Dir}], Acc0)
-    end.
-
-
-send_changes_doc_ids(DocIds, Db, StartSeq, Dir, Fun, Acc0) ->
-    Lookups = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, DocIds),
-    FullDocInfos = lists:foldl(
-        fun({ok, FDI}, Acc) ->
-            [FDI | Acc];
-        (not_found, Acc) ->
-            Acc
-        end,
-        [], Lookups),
-    send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0) ->
-    FoldFun = fun(FullDocInfo, _, Acc) ->
-        {ok, [FullDocInfo | Acc]}
-    end,
-    KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
-    {ok, _, FullDocInfos} = couch_btree:fold(
-        Db#db.fulldocinfo_by_id_btree, FoldFun, [], KeyOpts),
-    send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0).
-
-
-send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
-    FoldFun = case Dir of
-    fwd ->
-        fun lists:foldl/3;
-    rev ->
-        fun lists:foldr/3
-    end,
-    GreaterFun = case Dir of
-    fwd ->
-        fun(A, B) -> A > B end;
-    rev ->
-        fun(A, B) -> A =< B end
-    end,
-    DocInfos = lists:foldl(
-        fun(FDI, Acc) ->
-            DI = couch_doc:to_doc_info(FDI),
-            case GreaterFun(DI#doc_info.high_seq, StartSeq) of
-            true ->
-                [DI | Acc];
-            false ->
-                Acc
-            end
-        end,
-        [], FullDocInfos),
-    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
-    FinalAcc = try
-        FoldFun(
-            fun(DocInfo, Acc) ->
-                case Fun(DocInfo, Acc) of
-                {ok, NewAcc} ->
-                    NewAcc;
-                {stop, NewAcc} ->
-                    throw({stop, NewAcc})
-                end
-            end,
-            Acc0, SortedDocInfos)
-    catch
-    throw:{stop, Acc} ->
-        Acc
-    end,
-    case Dir of
-    fwd ->
-        {ok, FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)}};
-    rev ->
-        {ok, FinalAcc}
-    end.
-
-
-keep_sending_changes(Args, Acc0, FirstRound) ->
-    #changes_args{
-        feed = ResponseType,
-        limit = Limit,
-        db_open_options = DbOptions
-    } = Args,
-
-    {ok, ChangesAcc} = send_changes(
-        Args#changes_args{dir=fwd},
-        Acc0,
-        FirstRound),
-    #changes_acc{
-        db = Db, callback = Callback, timeout = Timeout, timeout_fun = TimeoutFun,
-        seq = EndSeq, prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
-    } = ChangesAcc,
-
-    couch_db:close(Db),
-    if Limit > NewLimit, ResponseType == "longpoll" ->
-        end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
-    true ->
-        case wait_db_updated(Timeout, TimeoutFun, UserAcc2) of
-        {updated, UserAcc4} ->
-            DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
-            case couch_db:open(Db#db.name, DbOptions1) of
-            {ok, Db2} ->
-                keep_sending_changes(
-                  Args#changes_args{limit=NewLimit},
-                  ChangesAcc#changes_acc{
-                    db = Db2,
-                    user_acc = UserAcc4,
-                    seq = EndSeq,
-                    prepend = Prepend2,
-                    timeout = Timeout,
-                    timeout_fun = TimeoutFun},
-                  false);
-            _Else ->
-                end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
-            end;
-        {stop, UserAcc4} ->
-            end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
-        end
-    end.
-
-end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
-    Callback({stop, EndSeq}, ResponseType, UserAcc).
-
-changes_enumerator(DocInfo, #changes_acc{resp_type = ResponseType} = Acc)
-        when ResponseType =:= "continuous"
-        orelse ResponseType =:= "eventsource" ->
-    #changes_acc{
-        filter = FilterFun, callback = Callback,
-        user_acc = UserAcc, limit = Limit, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = Acc,
-    #doc_info{high_seq = Seq} = DocInfo,
-    Results0 = FilterFun(Db, DocInfo),
-    Results = [Result || Result <- Results0, Result /= null],
-    %% TODO: I'm thinking this should be < 1 and not =< 1
-    Go = if Limit =< 1 -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        ChangesRow = changes_row(Results, DocInfo, Acc),
-        UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
-        reset_heartbeat(),
-        {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}}
-    end;
-changes_enumerator(DocInfo, Acc) ->
-    #changes_acc{
-        filter = FilterFun, callback = Callback, prepend = Prepend,
-        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
-        timeout = Timeout, timeout_fun = TimeoutFun
-    } = Acc,
-    #doc_info{high_seq = Seq} = DocInfo,
-    Results0 = FilterFun(Db, DocInfo),
-    Results = [Result || Result <- Results0, Result /= null],
-    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
-    case Results of
-    [] ->
-        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
-        case Done of
-        stop ->
-            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
-        ok ->
-            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
-        end;
-    _ ->
-        ChangesRow = changes_row(Results, DocInfo, Acc),
-        UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
-        reset_heartbeat(),
-        {Go, Acc#changes_acc{
-            seq = Seq, prepend = <<",\n">>,
-            user_acc = UserAcc2, limit = Limit - 1}}
-    end.
-
-
-changes_row(Results, DocInfo, Acc) ->
-    #doc_info{
-        id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
-    } = DocInfo,
-    #changes_acc{
-        db = Db,
-        include_docs = IncDoc,
-        doc_options = DocOpts,
-        conflicts = Conflicts
-    } = Acc,
-    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
-        deleted_item(Del) ++ case IncDoc of
-            true ->
-                Opts = case Conflicts of
-                    true -> [deleted, conflicts];
-                    false -> [deleted]
-                end,
-                Doc = couch_index_util:load_doc(Db, DocInfo, Opts),
-                case Doc of
-                    null ->
-                        [{doc, null}];
-                    _ ->
-                        [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
-                end;
-            false ->
-                []
-        end}.
-
-deleted_item(true) -> [{<<"deleted">>, true}];
-deleted_item(_) -> [].
-
-% waits for a db_updated msg, if there are multiple msgs, collects them.
-wait_db_updated(Timeout, TimeoutFun, UserAcc) ->
-    receive
-    db_updated ->
-        get_rest_db_updated(UserAcc)
-    after Timeout ->
-        {Go, UserAcc2} = TimeoutFun(UserAcc),
-        case Go of
-        ok ->
-            wait_db_updated(Timeout, TimeoutFun, UserAcc2);
-        stop ->
-            {stop, UserAcc2}
-        end
-    end.
-
-get_rest_db_updated(UserAcc) ->
-    receive
-    db_updated ->
-        get_rest_db_updated(UserAcc)
-    after 0 ->
-        {updated, UserAcc}
-    end.
-
-reset_heartbeat() ->
-    case get(last_changes_heartbeat) of
-    undefined ->
-        ok;
-    _ ->
-        put(last_changes_heartbeat, now())
-    end.
-
-maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
-    Before = get(last_changes_heartbeat),
-    case Before of
-    undefined ->
-        {ok, Acc};
-    _ ->
-        Now = now(),
-        case timer:now_diff(Now, Before) div 1000 >= Timeout of
-        true ->
-            Acc2 = TimeoutFun(Acc),
-            put(last_changes_heartbeat, Now),
-            Acc2;
-        false ->
-            {ok, Acc}
-        end
-    end.
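
For reference, wait_db_updated/3 and get_rest_db_updated/1 above implement a common Erlang idiom: block for the first db_updated message, then drain any duplicates with a zero-timeout receive, so a burst of update notifications wakes the changes loop only once. A minimal, self-contained sketch of the idiom (module, function, and return values here are illustrative, not part of the patch):

-module(db_updated_sketch).
-export([wait_then_drain/2]).

%% Block until Msg arrives (or Timeout ms elapse), then swallow any
%% queued duplicates so a burst of notifications causes one wakeup.
wait_then_drain(Msg, Timeout) ->
    receive
        Msg -> drain(Msg)
    after Timeout ->
        timeout
    end.

drain(Msg) ->
    receive
        Msg -> drain(Msg)
    after 0 ->
        updated
    end.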


[31/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
couch_drv has been replaced by couch_collate


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/64779a30
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/64779a30
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/64779a30

Branch: refs/heads/import-rcouch
Commit: 64779a30c95c514c17a5006251c478206844ddc4
Parents: 0d3662a
Author: benoitc <be...@apache.org>
Authored: Tue Jan 7 00:16:16 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:20 2014 -0600

----------------------------------------------------------------------
 src/couch_primary_sup.erl | 6 ------
 1 file changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/64779a30/src/couch_primary_sup.erl
----------------------------------------------------------------------
diff --git a/src/couch_primary_sup.erl b/src/couch_primary_sup.erl
index 3bb5875..7c4fde2 100644
--- a/src/couch_primary_sup.erl
+++ b/src/couch_primary_sup.erl
@@ -19,12 +19,6 @@ start_link() ->
 
 init([]) ->
     Children = [
-        {collation_driver,
-            {couch_drv, start_link, []},
-            permanent,
-            infinity,
-            supervisor,
-            [couch_drv]},
         {couch_task_status,
             {couch_task_status, start_link, []},
             permanent,
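
For readers less familiar with OTP, the removed entry is a standard supervisor child specification; annotated field by field (annotations added here, not in the patch):

{collation_driver,              % child id, unique within this supervisor
 {couch_drv, start_link, []},   % {Module, Function, Args} start call
 permanent,                     % restart strategy: always restart on exit
 infinity,                      % shutdown time: wait indefinitely
 supervisor,                    % child type
 [couch_drv]}                   % callback modules, used in release handling

Since no replacement child is added, couch_collate presumably needs no supervised loader process, unlike the linked-in driver that couch_drv managed.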


[24/41] support static build

Posted by da...@apache.org.
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/patches/js/patch-configure_in
----------------------------------------------------------------------
diff --git a/patches/js/patch-configure_in b/patches/js/patch-configure_in
new file mode 100644
index 0000000..94194d4
--- /dev/null
+++ b/patches/js/patch-configure_in
@@ -0,0 +1,56 @@
+--- configure.in.orig	2012-06-13 21:24:15.000000000 +0200
++++ configure.in	2012-06-13 21:29:10.000000000 +0200
+@@ -1038,17 +1038,6 @@
+     dnl /usr/bin/g(cc|++)-$GCC_VERSION.
+     MOZ_PATH_PROGS(PBBUILD, pbbuild xcodebuild pbxbuild)
+ 
+-    case "$PBBUILD" in
+-      *xcodebuild*)
+-        changequote(,)
+-        XCODEBUILD_VERSION=`$PBBUILD -version 2>/dev/null | xargs | sed -e 's/.*DevToolsCore-\([0-9]*\).*/\1/'`
+-        changequote([,])
+-        if test -n "$XCODEBUILD_VERSION" && test "$XCODEBUILD_VERSION" -ge 620 ; then
+-          HAS_XCODE_2_1=1;
+-        fi
+-      ;;
+-    esac
+-
+     dnl sdp was formerly in /Developer/Tools.  As of Mac OS X 10.4 (Darwin 8),
+     dnl it has moved into /usr/bin.
+     MOZ_PATH_PROG(SDP, sdp, :, [$PATH:/usr/bin:/Developer/Tools])
+@@ -1056,8 +1045,6 @@
+ esac
+ 
+ AC_SUBST(GCC_VERSION)
+-AC_SUBST(XCODEBUILD_VERSION)
+-AC_SUBST(HAS_XCODE_2_1)
+ 
+ dnl The universal machinery sets UNIVERSAL_BINARY to inform packager.mk
+ dnl that a universal binary is being produced.
+@@ -4656,6 +4643,12 @@
+ 
+ dnl Setup default CPU arch for arm target
+ case "$target_cpu" in
++  armv5*)
++    MOZ_ARM_ARCH=armv5
++  ;;
++  armv6*)
++    MOZ_ARM_ARCH=armv6
++  ;;
+   arm*)
+     MOZ_ARM_ARCH=armv7
+   ;;
+@@ -4716,6 +4709,13 @@
+   esac
+ else
+   case "$target_cpu" in
++    armv6*)
++      if test "$GNU_CC"; then
++        CFLAGS="$CFLAGS -march=armv6j -mthumb-interwork -msoft-float"
++        CXXFLAGS="$CXXFLAGS -march=armv6j -mthumb-interwork -msoft-float"
++        ASFLAGS="$ASFLAGS -march=armv6j -mthumb-interwork -msoft-float"
++      fi
++      ;;
+     arm*)
+       if test "$GNU_CC"; then
+         CFLAGS="$CFLAGS -march=armv5te -mthumb-interwork -msoft-float"

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/patches/js/patch-jscntxt_h
----------------------------------------------------------------------
diff --git a/patches/js/patch-jscntxt_h b/patches/js/patch-jscntxt_h
new file mode 100644
index 0000000..2df18be
--- /dev/null
+++ b/patches/js/patch-jscntxt_h
@@ -0,0 +1,10 @@
+--- jscntxt.h.orig	2011-10-08 12:18:14.000000000 +0200
++++ jscntxt.h	2011-10-08 12:18:18.000000000 +0200
+@@ -44,6 +44,7 @@
+  * JS execution context.
+  */
+ #include <string.h>
++#include <sys/types.h>
+ 
+ /* Gross special case for Gecko, which defines malloc/calloc/free. */
+ #ifdef mozilla_mozalloc_macro_wrappers_h

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/patches/js/patch-jsprf_cpp
----------------------------------------------------------------------
diff --git a/patches/js/patch-jsprf_cpp b/patches/js/patch-jsprf_cpp
new file mode 100644
index 0000000..29a51e7
--- /dev/null
+++ b/patches/js/patch-jsprf_cpp
@@ -0,0 +1,11 @@
+--- jsprf.cpp.orig	2011-03-31 12:11:03.000000000 +0200
++++ jsprf.cpp	2011-03-31 12:11:09.000000000 +0200
+@@ -58,6 +58,8 @@
+ */
+ #ifdef HAVE_VA_COPY
+ #define VARARGS_ASSIGN(foo, bar)        VA_COPY(foo,bar)
++#elif defined(va_copy)
++#define VARARGS_ASSIGN(foo, bar)        va_copy(foo,bar)
+ #elif defined(HAVE_VA_LIST_AS_ARRAY)
+ #define VARARGS_ASSIGN(foo, bar)        foo[0] = bar[0]
+ #else

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/9e429fd2/rebar.config.script
----------------------------------------------------------------------
diff --git a/rebar.config.script b/rebar.config.script
index 2b187b6..5a75a3d 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -68,41 +68,86 @@ GetFlag = fun(Name, Default) ->
     end,
 
 
-JSLIBS = GetFlag("JS_LIBS", "-lmozjs185"),
-JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
-
-{CFLAGS, LDFLAGS}  = case os:type() of
-    {unix, darwin} ->
-        {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS};
-    {unix, _} ->
-        {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS ++ " -lm"};
+
+PortEnv = case os:getenv("COUCHDB_STATIC") of
+    "1" ->
+        {ok, Cwd} = file:get_cwd(),
+        IncJs = filename:join([Cwd, ".libs", "js", "include"]),
+        IncNspr= filename:join([Cwd, ".libs", "nsprpub", "include"]),
+        StaticJs = filename:join([Cwd, ".libs", "js", "lib",
+                                 "libjs_static.a"]),
+        StaticNspr = filename:join([Cwd, ".libs", "nsprpub", "lib",
+                                    "libnspr4.a"]),
+
+
+        CFLAGS = GetFlag("JS_CFLAGS", "-I" ++ IncJs ++
+                                      " -I" ++ IncNspr ++ " -DXP_UNIX -Wall"),
+        LDFLAGS = GetFlag("JS_LIBS", StaticJs ++ " " ++ StaticNspr),
+
+        {CFLAGS1, LDFLAGS1} = case os:getenv("WITHOUT_CURL") of
+            "1" -> {"-DWITHOUT_CURL " ++ CFLAGS, LDFLAGS};
+            _ -> {CFLAGS, LDFLAGS ++ " -lcurl"}
+        end,
+
+        [
+            {"CFLAGS", "$CFLAGS -Wall -c -g -O2 " ++ CFLAGS1},
+            {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1 ++ " -lstdc++"},
+            {"linux.*", "LDFLAGS", "$LDFLAGS " ++ LDFLAGS1 ++
+                                   " -lstdc++ -lpthread -lm"},
+
+            %% Prevent the make->gmake transition from infecting
+            %% MAKEFLAGS with bad flags that confuse gmake
+            {"freebsd.*", "MAKEFLAGS", ""},
+            {"freebsd.*","LDFLAGS", "$LDFLAGS " ++ LDFLAGS1 ++
+                                     " -lstdc++ -lpthread"}
+        ];
     _ ->
-        {"-DXP_WIN " ++ JSCFLAGS, JSLIBS}
+        JSLIBS = GetFlag("JS_LIBS", "-lmozjs185"),
+
+        {CFLAGS, LDFLAGS}  = case os:type() of
+            {unix, darwin} ->
+                JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/local/include/js"),
+                {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS};
+            {unix, linux} ->
+                %% on Linux the js headers generally live in /usr/include
+                JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
+                {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS ++ " -lm"};
+            {unix, _} ->
+                %% most likely a BSD
+                JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/local/include/js"),
+                {"-DXP_UNIX " ++ JSCFLAGS, JSLIBS ++ " -lm"};
+            _ ->
+                JSCFLAGS = GetFlag("JS_CFLAGS", "-I/usr/include/js"),
+                {"-DXP_WIN " ++ JSCFLAGS, JSLIBS}
+        end,
+
+        {CFLAGS1, LDFLAGS1} = case os:getenv("WITHOUT_CURL") of
+            "1" -> {"-DWITHOUT_CURL " ++ CFLAGS, LDFLAGS};
+            _ -> {CFLAGS, LDFLAGS ++ " -lcurl"}
+        end,
+
+        [
+            {"CFLAGS",  "$CFLAGS -Wall -c -g -O2 " ++ CFLAGS1},
+            {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1}
+        ]
 end,
 
-{CFLAGS1, LDFLAGS1} = case os:getenv("WITHOUT_CURL") of
-    "1" -> {"-DWITHOUT_CURL " ++ CFLAGS, LDFLAGS};
-    _ -> {CFLAGS, LDFLAGS ++ " -lcurl"}
-end,
+PortSpecs0 = [{filename:join(["priv", CouchJSName]), ["c_src/couch_js/*.c"]}],
 
-PortSpecs0 = case os:type() of
+PortSpecs = case os:type() of
     {win32, _} ->
-        [{filename:join(["priv", "couchspawnkillable"]),
+        PortSpecs0 ++ [{filename:join(["priv", "couchspawnkillable"]),
             ["c_src/spawnkillable/*.c"]}];
     _ ->
         {ok, _} = file:copy("priv/couchspawnkillable.sh",
                             "priv/couchspawnkillable"),
         os:cmd("chmod +x priv/couchspawnkillable"),
-        []
+        PortSpecs0
     end,
 
-PortEnv = [{port_env, [
-            {"CFLAGS",  "$CFLAGS -Wall -c -g -O2 " ++ CFLAGS1},
-            {"LDFLAGS", "$LDFLAGS " ++ LDFLAGS1}]},
-
-           {port_specs, [
-            {filename:join(["priv", CouchJSName]),
-            ["c_src/couch_js/*.c"]}] ++ PortSpecs0}
-],
+PortInfo = [{port_env, PortEnv},
+            {port_specs, PortSpecs},
+            {pre_hooks, [{compile, "./build_spidermonkey.sh"}]},
+            {post_hooks, [{clean, "./build_spidermonkey.sh clean"}]}],
 
-lists:keymerge(1,lists:keysort(1, PortEnv), lists:keysort(1, CONFIG)).
+lists:keymerge(1,lists:keysort(1, PortInfo), lists:keysort(1, CONFIG)).
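
The net effect of the script change: with COUCHDB_STATIC=1 the port compiler links couchjs against the in-tree static libjs_static.a and libnspr4.a produced by the build_spidermonkey.sh pre-hook, while the default path keeps the dynamic -lmozjs185 link; WITHOUT_CURL=1 drops -lcurl in either mode. A condensed sketch of the selection logic (module name hypothetical; the real flags live in rebar.config.script above):

-module(port_env_sketch).
-export([ldflags/0]).

ldflags() ->
    case os:getenv("COUCHDB_STATIC") of
        "1" ->
            %% link against the archives built under .libs/
            {ok, Cwd} = file:get_cwd(),
            StaticJs = filename:join([Cwd, ".libs", "js", "lib",
                                      "libjs_static.a"]),
            StaticNspr = filename:join([Cwd, ".libs", "nsprpub", "lib",
                                        "libnspr4.a"]),
            "$LDFLAGS " ++ StaticJs ++ " " ++ StaticNspr ++ " -lstdc++";
        _ ->
            %% dynamic link against a system-wide SpiderMonkey
            "$LDFLAGS -lmozjs185"
    end.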


[36/41] couch commit: updated refs/heads/import-rcouch to f07bbfc

Posted by da...@apache.org.
fix tests


Project: http://git-wip-us.apache.org/repos/asf/couchdb-couch/repo
Commit: http://git-wip-us.apache.org/repos/asf/couchdb-couch/commit/cad57e5a
Tree: http://git-wip-us.apache.org/repos/asf/couchdb-couch/tree/cad57e5a
Diff: http://git-wip-us.apache.org/repos/asf/couchdb-couch/diff/cad57e5a

Branch: refs/heads/import-rcouch
Commit: cad57e5a580ce76a23b23e496b1d3b0c059ed6cc
Parents: c7c431a
Author: benoitc <be...@apache.org>
Authored: Thu Jan 9 20:56:15 2014 +0100
Committer: Paul J. Davis <pa...@gmail.com>
Committed: Tue Feb 11 02:05:21 2014 -0600

----------------------------------------------------------------------
 src/couch.app.src.script | 2 +-
 src/couch.erl            | 1 -
 src/couch_util.erl       | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/cad57e5a/src/couch.app.src.script
----------------------------------------------------------------------
diff --git a/src/couch.app.src.script b/src/couch.app.src.script
index 599efee..c947ead 100644
--- a/src/couch.app.src.script
+++ b/src/couch.app.src.script
@@ -61,7 +61,7 @@ end,
         ]},
         {mod, {couch_app, []}},
         {env, [{couch_rel, RelVsn}]},
-        {applications, [kernel, stdlib, crypto, sasl, public_key, ssl,
+        {applications, [kernel, stdlib, crypto, sasl, asn1, public_key, ssl,
                         inets, oauth, ibrowse, mochiweb, os_mon]}
     ]}
 ].
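
Adding asn1 matters on the OTP releases of that era (R16 and later), where public_key and ssl require the asn1 application at runtime; listing it lets start_app_deps/1 (see the couch_util.erl hunk below) start it with the other dependencies. An illustrative erl shell session showing the start order the new list implies:

1> application:start(crypto).
ok
2> application:start(asn1).
ok
3> application:start(public_key).
ok
4> application:start(ssl).
ok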

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/cad57e5a/src/couch.erl
----------------------------------------------------------------------
diff --git a/src/couch.erl b/src/couch.erl
index 8526e68..4b2032a 100644
--- a/src/couch.erl
+++ b/src/couch.erl
@@ -43,7 +43,6 @@ release_version() ->
     end.
 
 start() ->
-    application:load(couch),
     couch_util:start_app_deps(couch),
     application:start(couch).
 

http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/cad57e5a/src/couch_util.erl
----------------------------------------------------------------------
diff --git a/src/couch_util.erl b/src/couch_util.erl
index 76a9293..f4d66ef 100644
--- a/src/couch_util.erl
+++ b/src/couch_util.erl
@@ -40,6 +40,7 @@
 %% @spec start_app_deps(App :: atom()) -> ok
 %% @doc Start dependent applications of App.
 start_app_deps(App) ->
+    application:load(App),
     {ok, DepApps} = application:get_key(App, applications),
     [ensure_started(A) || A <- DepApps],
     ok.
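
The couch_util.erl hunk is the substance of the fix: application:get_key/2 only reads the .app specification of a loaded application and returns undefined otherwise, so start_app_deps/1 must load the spec itself now that couch:start/0 no longer does. A minimal sketch of the pattern (module and function names illustrative):

-module(app_deps_sketch).
-export([dep_apps/1]).

%% A second load returns {error, {already_loaded, App}}; the result is
%% deliberately ignored, as in the patch, since either way the spec is
%% available to get_key/2 afterwards.
dep_apps(App) ->
    _ = application:load(App),
    case application:get_key(App, applications) of
        {ok, Deps} -> Deps;
        undefined  -> []
    end.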