Posted to commits@couchdb.apache.org by cm...@apache.org on 2008/03/29 00:32:30 UTC

svn commit: r642432 [16/16] - in /incubator/couchdb/trunk: ./ bin/ build-contrib/ etc/ etc/conf/ etc/default/ etc/init/ etc/launchd/ etc/logrotate.d/ share/ share/server/ share/www/ share/www/browse/ share/www/image/ share/www/script/ share/www/style/ ...

Added: incubator/couchdb/trunk/src/couchdb/couch_util.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_util.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_util.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_util.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,316 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util).
+-behaviour(gen_server).
+
+-export([start_link/0,start_link/1]).
+-export([parse_ini/1]).
+-export([new_uuid/0, rand32/0, implode/2, collate/2, collate/3]).
+-export([abs_pathname/1,abs_pathname/2, trim/1, ascii_lower/1, test/0]).
+-export([encodeBase64/1, decodeBase64/1]).
+
+-export([init/1, terminate/2, handle_call/3]).
+-export([handle_cast/2,code_change/3,handle_info/2]).
+
+
+start_link() ->
+    start_link("").
+
+start_link("") ->
+    start_link(filename:join(code:priv_dir(couch), "lib"));
+start_link(LibDir) ->
+    case erl_ddll:load_driver(LibDir, "couch_erl_driver") of
+    ok -> ok;
+    {error, already_loaded} -> ok;
+    {error, ErrorDesc} -> exit({error, ErrorDesc})
+    end,
+    gen_server:start_link({local, couch_util}, couch_util, [], []).
+
+
+new_uuid() ->
+    gen_server:call(couch_util, new_uuid).
+
+% returns a random 32-bit integer
+rand32() ->
+    gen_server:call(couch_util, rand32).
+
+% given a pathname like "../foo/bar/", returns the fully qualified
+% absolute pathname.
+abs_pathname(" " ++ Filename) ->
+    % strip leading whitespace
+    abs_pathname(Filename);
+abs_pathname([$/ |_]=Filename) ->
+    Filename;
+abs_pathname(Filename) ->
+    {ok, Cwd} = file:get_cwd(),
+    {Filename2, Args} = separate_cmd_args(Filename, ""),
+    abs_pathname(Filename2, Cwd) ++ Args.
+
+abs_pathname(Filename, Dir) ->
+    Name = filename:absname(Filename, Dir ++ "/"),
+    OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
+    % If the filename is a dir (last char is a slash), put back the trailing slash
+    case string:right(Filename,1) of
+    "/" ->
+        OutFilename ++ "/";
+    "\\" ->
+        OutFilename ++ "/";
+    _Else->
+        OutFilename
+    end.
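+% For illustration, with a current working directory of "/home/couch"
+% (the result depends on the cwd):
+%   1> couch_util:abs_pathname("../foo/bar/").
+%   "/home/foo/bar/"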
+
+% if this is an executable with arguments, separate out the arguments
+% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
+separate_cmd_args("", CmdAcc) ->
+    {lists:reverse(CmdAcc), ""};
+separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle escaped space
+    separate_cmd_args(Rest, " \\" ++ CmdAcc);
+separate_cmd_args(" " ++ Rest, CmdAcc) ->
+    {lists:reverse(CmdAcc), " " ++ Rest};
+separate_cmd_args([Char|Rest], CmdAcc) ->
+    separate_cmd_args(Rest, [Char | CmdAcc]).
+
+% lowercases string bytes that are the ascii characters A-Z.
+% All other characters/bytes are passed through unchanged.
+ascii_lower(String) ->
+    ascii_lower(String, []).
+
+ascii_lower([], Acc) ->
+    lists:reverse(Acc);
+ascii_lower([Char | RestString], Acc) when Char >= $A, Char =< $Z ->
+    ascii_lower(RestString, [Char + ($a-$A) | Acc]);
+ascii_lower([Char | RestString], Acc)->
+    ascii_lower(RestString, [Char | Acc]).
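+% For example:
+%   1> couch_util:ascii_lower("Hello, World!").
+%   "hello, world!"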
+
+% Is a character whitespace?
+is_whitespace($\s)-> true;
+is_whitespace($\t)-> true;
+is_whitespace($\n)-> true;
+is_whitespace($\r)-> true;
+is_whitespace(_Else) -> false.
+
+
+% removes leading and trailing whitespace from a string
+trim(String) ->
+    String2 = lists:dropwhile(fun is_whitespace/1, String),
+    lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
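+% For example:
+%   1> couch_util:trim("  hello \t").
+%   "hello"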
+
+% takes a hierarchical list of dirs and removes the dots ".", double dots
+% ".." and the corresponding parent dirs.
+fix_path_list([], Acc) ->
+    lists:reverse(Acc);
+fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+    fix_path_list(Rest, RestAcc);
+fix_path_list(["."|Rest], Acc) ->
+    fix_path_list(Rest, Acc);
+fix_path_list([Dir | Rest], Acc) ->
+    fix_path_list(Rest, [Dir | Acc]).
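+% For illustration (internal helper):
+%   fix_path_list(["home", "couch", "..", "foo", "."], []) -> ["home", "foo"]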
+
+
+implode(List, Sep) ->
+    implode(List, Sep, []).
+
+implode([], _Sep, Acc) ->
+    lists:flatten(lists:reverse(Acc));
+implode([H], Sep, Acc) ->
+    implode([], Sep, [H|Acc]);
+implode([H|T], Sep, Acc) ->
+    implode(T, Sep, [Sep,H|Acc]).
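+% For example:
+%   1> couch_util:implode(["a", "b", "c"], ", ").
+%   "a, b, c"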
+
+
+% This is a simple ini parser. It converts the string
+% contents of a file like this:
+%
+%; comments are ignored
+%;commentedoutvariable=foo
+%this is a line that gets ignored because it has no equals sign
+%[this line gets ignored because it starts with a bracket but doesn't end with one
+%bloodtype=Ragu
+%[Some Section]
+%timeout=30
+%Default=zuh ; another comment (leading space or tab before a semi is necessary to be a comment if not at beginning of line)
+%[Another Section]
+%key with spaces=a value with stuff; and no comment
+%oops="it doesn't get quoted strings with semis quite right ; it thinks it's part of the comment"
+%
+%And converts it into this:
+%[{{"","bloodtype"},"Ragu"},
+% {{"Some Section","timeout"},"30"},
+% {{"Some Section","Default"}, "zuh"},
+% {{"Another Section", "key with spaces"}, "a value with stuff; and no comment"},
+% {{"Another Section", "oops"}, "\"it doesn't get quoted strings with semis quite right"}]
+%
+
+parse_ini(FileContents) ->
+    {ok, Lines} = regexp:split(FileContents, "\r\n|\n|\r|\032"),
+    {_, ParsedIniValues} =
+    lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
+            case string:strip(Line) of
+            "[" ++ Rest ->
+                case regexp:split(Rest, "\\]") of
+                {ok, [NewSectionName, ""]} ->
+                    {NewSectionName, AccValues};
+                _Else -> % end bracket not at end, ignore this line
+                    {AccSectionName, AccValues}
+                end;
+            ";" ++ _Comment ->
+                {AccSectionName, AccValues};
+            Line2 ->
+                case regexp:split(Line2, "=") of
+                {ok, [_SingleElement]} -> % no "=" found, ignore this line
+                    {AccSectionName, AccValues};
+                {ok, [""|_LineValues]} -> % line begins with "=", ignore
+                    {AccSectionName, AccValues};
+                {ok, [ValueName|LineValues]} -> % yeehaw, got a line!
+                    RemainingLine = implode(LineValues, "="),
+                    {ok, [LineValue | _Rest]} = regexp:split(RemainingLine, " ;|\t;"), % removes comments
+                    {AccSectionName, [{{AccSectionName, ValueName}, LineValue} | AccValues]}
+                end
+            end
+        end, {"", []}, Lines),
+    {ok, lists:reverse(ParsedIniValues)}.
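+% For example:
+%   1> couch_util:parse_ini("[Some Section]\ntimeout=30").
+%   {ok,[{{"Some Section","timeout"},"30"}]}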
+
+init([]) ->
+    {A,B,C} = erlang:now(),
+    random:seed(A,B,C),
+    {ok, dummy_server}.
+
+terminate(_Reason, _Server) ->
+    ok.
+
+handle_call(new_uuid, _From, Server) ->
+    {reply, new_uuid_int(), Server};
+handle_call(rand32, _From, Server) ->
+    {reply, rand32_int(), Server}.
+
+handle_cast(_Msg, State) ->
+    {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+
+new_uuid_int() ->
+    % eventually make this a C callout for a real guid (collisions are far less likely
+    % when using a proper generation function). For now we just fake it.
+    Num1 = random:uniform(16#FFFFFFFF + 1) - 1,
+    Num2 = random:uniform(16#FFFFFFFF + 1) - 1,
+    Num3 = random:uniform(16#FFFFFFFF + 1) - 1,
+    Num4 = random:uniform(16#FFFFFFFF + 1) - 1,
+    lists:flatten(io_lib:format("~8.16.0B~8.16.0B~8.16.0B~8.16.0B", [Num1, Num2, Num3, Num4])).
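+% For illustration, once the couch_util server is running (output is random):
+%   1> couch_util:new_uuid().
+%   "7C29B1A1D1780CAEE59B6ABBFD2F1160"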
+
+
+
+rand32_int() ->
+    random:uniform(16#FFFFFFFF + 1) - 1.
+
+drv_port() ->
+    case get(couch_drv_port) of
+    undefined ->
+        Port = open_port({spawn, "couch_erl_driver"}, []),
+        put(couch_drv_port, Port),
+        Port;
+    Port ->
+        Port
+    end.
+
+collate(A, B) ->
+    collate(A, B, []).
+
+collate(A, B, Options) when is_list(A), is_list(B) ->
+    Operation =
+    case lists:member(nocase, Options) of
+        true -> 1; % Case insensitive
+        false -> 0 % Case sensitive
+    end,
+    Port = drv_port(),
+    LenA = length(A),
+    LenB = length(B),
+    Bin = list_to_binary([<<LenA:32/native>>, A, <<LenB:32/native>>, B]),
+    case erlang:port_control(Port, Operation, Bin) of
+        [0] -> -1;
+        [1] -> 1;
+        [2] -> 0
+    end.
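+% For illustration, assuming the couch_erl_driver was loaded via start_link/1:
+%   1> couch_util:collate("apple", "banana").
+%   -1
+%   2> couch_util:collate("APPLE", "apple", [nocase]).
+%   0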
+
+
+
+
+%%% Purpose : Base 64 encoding and decoding.
+%%% Copied from ssl_base_64 to avoid using the
+%%% erlang ssl library
+
+-define(st(X,A), ((X-A+256) div 256)).
+-define(CHARS, 64).
+
+%% A PEM encoding consists of characters A-Z, a-z, 0-9, +, / and
+%% =. Each character encodes a 6 bits value from 0 to 63 (A = 0, / =
+%% 63); = is a padding character.
+%%
+
+%%
+%% encode64(Bytes|Binary) -> Chars
+%%
+%% Take 3 bytes a time (3 x 8 = 24 bits), and make 4 characters out of
+%% them (4 x 6 = 24 bits).
+%%
+encodeBase64(Bs) when is_list(Bs) ->
+    encodeBase64(list_to_binary(Bs));
+encodeBase64(<<B:3/binary, Bs/binary>>) ->
+    <<C1:6, C2:6, C3:6, C4:6>> = B,
+    [enc(C1), enc(C2), enc(C3), enc(C4)| encodeBase64(Bs)];
+encodeBase64(<<B:2/binary>>) ->
+    <<C1:6, C2:6, C3:6, _:6>> = <<B/binary, 0>>,
+    [enc(C1), enc(C2), enc(C3), $=];
+encodeBase64(<<B:1/binary>>) ->
+    <<C1:6, C2:6, _:12>> = <<B/binary, 0, 0>>,
+    [enc(C1), enc(C2), $=, $=];
+encodeBase64(<<>>) ->
+    [].
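+% For example:
+%   1> couch_util:encodeBase64("foo").
+%   "Zm9v"
+%   2> couch_util:encodeBase64("fo").
+%   "Zm8="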
+
+%%
+%% decodeBase64(Chars) -> Binary
+%%
+decodeBase64(Cs) ->
+    list_to_binary(decode1(Cs)).
+
+decode1([C1, C2, $=, $=]) ->
+    <<B1, _:16>> = <<(dec(C1)):6, (dec(C2)):6, 0:12>>,
+    [B1];
+decode1([C1, C2, C3, $=]) ->
+    <<B1, B2, _:8>> = <<(dec(C1)):6, (dec(C2)):6, (dec(C3)):6, (dec(0)):6>>,
+    [B1, B2];
+decode1([C1, C2, C3, C4| Cs]) ->
+    Bin = <<(dec(C1)):6, (dec(C2)):6, (dec(C3)):6, (dec(C4)):6>>,
+    [Bin| decode1(Cs)];
+decode1([]) ->
+    [].
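+% For example:
+%   1> couch_util:decodeBase64("Zm9v").
+%   <<"foo">>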
+
+%% enc/1 and dec/1
+%%
+%% Mapping: 0-25 -> A-Z, 26-51 -> a-z, 52-61 -> 0-9, 62 -> +, 63 -> /
+%%
+enc(C) ->
+    65 + C + 6*?st(C,26) - 75*?st(C,52) -15*?st(C,62) + 3*?st(C,63).
+
+dec(C) ->
+    62*?st(C,43) + ?st(C,47) + (C-59)*?st(C,48) - 69*?st(C,65) - 6*?st(C,97).
+
+
+
+test() ->
+    start_link("debug"),
+    collate("a","b",[]).

Added: incubator/couchdb/trunk/src/couchdb/couch_view.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_view.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_view.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_view.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,616 @@
+%   Copyright 2007, 2008 Damien Katz <da...@yahoo.com>
+%
+%   Licensed under the Apache License, Version 2.0 (the "License");
+%   you may not use this file except in compliance with the License.
+%   You may obtain a copy of the License at
+%
+%       http://www.apache.org/licenses/LICENSE-2.0
+%
+%   Unless required by applicable law or agreed to in writing, software
+%   distributed under the License is distributed on an "AS IS" BASIS,
+%   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%   See the License for the specific language governing permissions and
+%   limitations under the License.
+
+-module(couch_view).
+-behaviour(gen_server).
+
+-export([start_link/1,fold/4,fold/5,less_json/2, start_update_loop/3, start_temp_update_loop/4]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,handle_info/2,code_change/3]).
+
+-include("couch_db.hrl").
+
+% arbitrarily chosen amount of memory to use before flushing to disk
+-define(FLUSH_MAX_MEM, 10000000).
+
+-record(group,
+    {db,
+    fd,
+    name,
+    def_lang,
+    views,
+    id_btree,
+    current_seq,
+    query_server=nil
+    }).
+
+-record(view,
+    {id_num,
+    name,
+    btree,
+    def
+    }).
+
+-record(server,
+    {root_dir
+    }).
+
+start_link(RootDir) ->
+    gen_server:start_link({local, couch_view}, couch_view, RootDir, []).
+
+
+
+get_temp_updater(DbName, Type, Src) ->
+    {ok, Pid} = gen_server:call(couch_view, {start_temp_updater, DbName, Type, Src}),
+    Pid.
+
+get_updater(DbName, GroupId) ->
+    {ok, Pid} = gen_server:call(couch_view, {start_updater, DbName, GroupId}),
+    Pid.
+    
+get_updated_group(Pid) ->
+    Mref = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', Mref, _, _, Reason} ->
+        throw(Reason)
+    after 0 ->
+        Pid ! {self(), get_updated},
+        receive
+        {Pid, Response} ->
+            erlang:demonitor(Mref),
+            receive
+            {'DOWN', Mref, _, _, _} ->
+                Response
+            after 0 ->
+                Response
+            end;
+        {'DOWN', Mref, _, _, Reason} ->
+            throw(Reason)
+        end
+    end.
+
+fold(ViewInfo, Dir, Fun, Acc) ->
+    fold(ViewInfo, nil, Dir, Fun, Acc).
+
+fold({temp, DbName, Type, Src}, StartKey, Dir, Fun, Acc) ->
+    {ok, #group{views=[View]}} = get_updated_group(get_temp_updater(DbName, Type, Src)),
+    fold_view(View#view.btree, StartKey, Dir, Fun, Acc);
+fold({DbName, GroupId, ViewName}, StartKey, Dir, Fun, Acc) ->
+    {ok, #group{views=Views}} = get_updated_group(get_updater(DbName, GroupId)),
+    Btree = get_view_btree(Views, ViewName),
+    fold_view(Btree, StartKey, Dir, Fun, Acc).
+    
+fold_view(Btree, StartKey, Dir, Fun, Acc) ->
+    TotalRowCount = couch_btree:row_count(Btree),
+    WrapperFun = fun({{Key, DocId}, Value}, Offset, WrapperAcc) ->
+            Fun(DocId, Key, Value, Offset, TotalRowCount, WrapperAcc)
+        end,
+    {ok, AccResult} = couch_btree:fold(Btree, StartKey, Dir, WrapperFun, Acc),
+    {ok, TotalRowCount, AccResult}.
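+% For illustration, a caller can collect every row of a named view like so
+% (DbName, GroupId and ViewName are placeholders):
+%   Collect = fun(DocId, Key, Value, _Offset, _Total, Acc) ->
+%       {ok, [{DocId, Key, Value} | Acc]}
+%   end,
+%   {ok, _Total, Rows} = couch_view:fold({DbName, GroupId, ViewName}, fwd, Collect, []).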
+
+
+get_view_btree([], _ViewName) ->
+    throw({not_found, missing_named_view});
+get_view_btree([View | _RestViews], ViewName) when View#view.name == ViewName ->
+    View#view.btree;
+get_view_btree([_View | RestViews], ViewName) ->
+    get_view_btree(RestViews, ViewName).
+
+
+init(RootDir) ->
+    UpdateNotifierFun =
+        fun({deleted, DbName}) ->
+            gen_server:cast(couch_view, {reset_indexes, DbName});
+        ({created, DbName}) ->
+            gen_server:cast(couch_view, {reset_indexes, DbName});
+        (_Else) ->
+            ok
+        end,
+    couch_db_update_notifier:start_link(UpdateNotifierFun),
+    ets:new(couch_views_by_db, [bag, private, named_table]),
+    ets:new(couch_views_by_name, [set, protected, named_table]),
+    ets:new(couch_views_by_updater, [set, private, named_table]),
+    ets:new(couch_views_temp_fd_by_db, [set, protected, named_table]),
+    process_flag(trap_exit, true),
+    {ok, #server{root_dir=RootDir}}.
+
+terminate(_Reason, _) ->
+    catch ets:delete(couch_views_by_name),
+    catch ets:delete(couch_views_by_updater),
+    catch ets:delete(couch_views_by_db),
+    catch ets:delete(couch_views_temp_fd_by_db).
+
+
+handle_call({start_temp_updater, DbName, Lang, Query}, _From, #server{root_dir=Root}=Server) ->
+    <<SigInt:128/integer>> = erlang:md5(Lang ++ Query),
+    Name = lists:flatten(io_lib:format("_temp_~.36B",[SigInt])),
+    Pid = 
+    case ets:lookup(couch_views_by_name, {DbName, Name}) of
+    [] ->
+        case ets:lookup(couch_views_temp_fd_by_db, DbName) of
+        [] ->
+            FileName = Root ++ "/." ++ DbName ++ "_temp",
+            {ok, Fd} = couch_file:open(FileName, [create, overwrite]),
+            Count = 0;
+        [{_, Fd, Count}] ->
+            ok
+        end,
+        couch_log:debug("Spawning new temp update process for db ~s.", [DbName]),
+        NewPid = spawn_link(couch_view, start_temp_update_loop, [DbName, Fd, Lang, Query]),
+        true = ets:insert(couch_views_temp_fd_by_db, {DbName, Fd, Count + 1}),
+        add_to_ets(NewPid, DbName, Name),
+        NewPid;
+    [{_, ExistingPid0}] ->
+        ExistingPid0
+    end,
+    {reply, {ok, Pid}, Server};
+handle_call({start_updater, DbName, GroupId}, _From, #server{root_dir=Root}=Server) ->
+    Pid = 
+    case ets:lookup(couch_views_by_name, {DbName, GroupId}) of
+    [] ->
+        couch_log:debug("Spawning new update process for view group ~s in database ~s.", [GroupId, DbName]),
+        NewPid = spawn_link(couch_view, start_update_loop, [Root, DbName, GroupId]),
+        add_to_ets(NewPid, DbName, GroupId),
+        NewPid;
+    [{_, ExistingPid0}] ->
+        ExistingPid0
+    end,
+    {reply, {ok, Pid}, Server}.
+
+handle_cast({reset_indexes, DbName}, #server{root_dir=Root}=Server) ->
+    % shutdown all the updaters
+    Names = ets:lookup(couch_views_by_db, DbName),
+    lists:foreach(
+        fun({_DbName, GroupId}) ->
+            couch_log:debug("Killing update process for view group ~s in database ~s.", [GroupId, DbName]),
+            [{_, Pid}] = ets:lookup(couch_views_by_name, {DbName, GroupId}),
+            exit(Pid, kill),
+            receive {'EXIT', Pid, _} ->
+                delete_from_ets(Pid, DbName, GroupId)
+            end
+        end, Names),
+    delete_index_dir(Root, DbName),
+    file:delete(Root ++ "/." ++ DbName ++ "_temp"),
+    {noreply, Server}.
+
+handle_info({'EXIT', FromPid, Reason}, #server{root_dir=RootDir}=Server) ->
+    case ets:lookup(couch_views_by_updater, FromPid) of
+    [] -> % non-updater linked process must have died, we propagate the error
+        exit(Reason);
+    [{_, {DbName, "_temp_" ++ _ = GroupId}}] ->
+        delete_from_ets(FromPid, DbName, GroupId),
+        [{_, Fd, Count}] = ets:lookup(couch_views_temp_fd_by_db, DbName),
+        case Count of
+        1 -> % Last ref
+            couch_file:close(Fd),
+            file:delete(RootDir ++ "/." ++ DbName ++ "_temp"),
+            true = ets:delete(couch_views_temp_fd_by_db, DbName);
+        _ ->
+            true = ets:insert(couch_views_temp_fd_by_db, {DbName, Fd, Count - 1})
+        end;
+    [{_, {DbName, GroupId}}] ->
+        delete_from_ets(FromPid, DbName, GroupId)
+    end,
+    {noreply, Server}.
+    
+add_to_ets(Pid, DbName, GroupId) ->
+    true = ets:insert(couch_views_by_updater, {Pid, {DbName, GroupId}}),
+    true = ets:insert(couch_views_by_name, {{DbName, GroupId}, Pid}),
+    true = ets:insert(couch_views_by_db, {DbName, GroupId}).
+    
+delete_from_ets(Pid, DbName, GroupId) ->
+    true = ets:delete(couch_views_by_updater, Pid),
+    true = ets:delete(couch_views_by_name, {DbName, GroupId}),
+    true = ets:delete_object(couch_views_by_db, {DbName, GroupId}).
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+start_update_loop(RootDir, DbName, GroupId) ->
+    % wait for a notify request before doing anything. This way, we can just
+    % exit and any exits will be noticed by the callers.
+    start_update_loop(RootDir, DbName, GroupId, get_notify_pids(1000)).
+
+
+start_temp_update_loop(DbName, Fd, Lang, Query) ->
+    NotifyPids = get_notify_pids(1000),
+    case couch_server:open(DbName) of
+    {ok, Db} ->
+        View = #view{name="_temp", id_num=0, btree=nil, def=Query},
+        Group = #group{name="_temp",
+            db=Db,
+            views=[View],
+            current_seq=0,
+            def_lang=Lang,
+            id_btree=nil},
+        Group2 = disk_group_to_mem(Fd, Group),
+        temp_update_loop(Group2, NotifyPids);
+    Else ->
+        exit(Else)
+    end.
+
+temp_update_loop(Group, NotifyPids) ->
+    {ok, Group2} = update_group(Group),
+    [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
+    garbage_collect(),
+    temp_update_loop(Group2, get_notify_pids(100000)).
+    
+start_update_loop(RootDir, DbName, GroupId, NotifyPids) ->
+    {Db, DefLang, Defs} =
+    case couch_server:open(DbName) of
+    {ok, Db0} ->
+        case couch_db:open_doc(Db0, GroupId) of
+        {ok, Doc} ->
+            case couch_doc:get_view_functions(Doc) of
+            none ->
+                delete_index_file(RootDir, DbName, GroupId),
+                exit({not_found, no_views_found});
+            {DefLang0, Defs0} ->
+                {Db0, DefLang0, Defs0}
+            end;
+        Else ->
+            delete_index_file(RootDir, DbName, GroupId),
+            exit(Else)
+        end;
+    Else ->
+        delete_index_file(RootDir, DbName, GroupId),
+        exit(Else)
+    end,
+    Group = open_index_file(RootDir, DbName, GroupId, DefLang, Defs),
+    
+    try update_loop(Group#group{db=Db}, NotifyPids) of
+    _ -> ok
+    catch
+    restart ->
+        couch_file:close(Group#group.fd),
+        start_update_loop(RootDir, DbName, GroupId, NotifyPids ++ get_notify_pids())
+    end.
+
+update_loop(#group{fd=Fd}=Group, NotifyPids) ->
+    {ok, Group2} = update_group(Group),
+    ok = couch_file:write_header(Fd, <<$r, $c, $k, 0>>, mem_group_to_disk(Group2)),
+    [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
+    garbage_collect(),
+    update_loop(Group2).
+    
+update_loop(Group) ->
+    update_loop(Group, get_notify_pids()).
+
+% wait for the first request to come in.
+get_notify_pids(Wait) ->
+    receive
+    {Pid, get_updated} ->
+        [Pid | get_notify_pids()]
+    after Wait ->
+        exit(wait_timeout)
+    end.
+% then keep getting all available and return.
+get_notify_pids() ->
+    receive
+    {Pid, get_updated} ->
+        [Pid | get_notify_pids()]
+    after 0 ->
+        []
+    end.
+    
+update_group(#group{db=Db,current_seq=CurrentSeq, views=Views}=Group) ->
+    ViewEmptyKVs = [{View, []} || View <- Views],
+    % compute on all docs modified since we last computed.
+    {ok, {UncomputedDocs, Group2, ViewKVsToAdd, DocIdViewIdKeys, NewSeq}}
+        = couch_db:enum_docs_since(
+            Db,
+            CurrentSeq,
+            fun(DocInfo, _, Acc) -> process_doc(Db, DocInfo, Acc) end,
+            {[], Group, ViewEmptyKVs, [], CurrentSeq}
+            ),
+
+    {Group3, Results} = view_compute(Group2, UncomputedDocs),
+    {ViewKVsToAdd2, DocIdViewIdKeys2} = view_insert_query_results(UncomputedDocs, Results, ViewKVsToAdd, DocIdViewIdKeys),
+    couch_query_servers:stop_doc_map(Group3#group.query_server),
+    if CurrentSeq /= NewSeq ->
+        {ok, Group4} = write_changes(Group3, ViewKVsToAdd2, DocIdViewIdKeys2, NewSeq),
+        {ok, Group4#group{query_server=nil}};
+    true ->
+        {ok, Group3#group{query_server=nil}}
+    end.
+    
+delete_index_dir(RootDir, DbName) ->
+    nuke_dir(RootDir ++ "/." ++ DbName ++ "_design").
+
+nuke_dir(Dir) ->
+    case file:list_dir(Dir) of
+    {error, enoent} -> ok; % doesn't exist
+    {ok, Files} ->
+        lists:foreach(
+            fun(File)->
+                Full = Dir ++ "/" ++ File,
+                case file:delete(Full) of
+                ok -> ok;
+                {error, eperm} ->
+                    ok = nuke_dir(Full)
+                end
+            end,
+            Files),    
+        ok = file:del_dir(Dir)
+    end.
+
+delete_index_file(RootDir, DbName, GroupId) ->
+    file:delete(RootDir ++ "/." ++ DbName ++ GroupId ++ ".view").
+    
+open_index_file(RootDir, DbName, GroupId, ViewLang, ViewDefs) ->
+    FileName = RootDir ++ "/." ++ DbName ++ GroupId ++".view",
+    case couch_file:open(FileName) of
+    {ok, Fd} ->
+        case couch_file:read_header(Fd, <<$r, $c, $k, 0>>) of
+        {ok, #group{views=Views}=Group} ->
+            % validate all the view definitions in the index are correct.
+            case same_view_def(Views, ViewDefs) of
+            true -> disk_group_to_mem(Fd, Group);
+            false -> reset_header(GroupId, Fd, ViewLang, ViewDefs)
+            end;
+        _ ->
+            reset_header(GroupId, Fd, ViewLang, ViewDefs)
+        end;
+    _ ->
+        case couch_file:open(FileName, [create]) of
+        {ok, Fd} ->    
+            reset_header(GroupId, Fd, ViewLang, ViewDefs);
+        Error ->
+            throw(Error)
+        end
+    end.
+
+same_view_def([], []) ->
+    true;
+same_view_def(DiskViews, ViewDefs) when DiskViews == [] orelse ViewDefs == []->
+    false;
+same_view_def([#view{name=DiskName,def=DiskDef}|RestViews], [{Name, Def}|RestDefs]) ->
+    if DiskName == Name andalso DiskDef == Def ->
+        same_view_def(RestViews, RestDefs);
+    true ->
+        false
+    end.
+
+% Given a disk ready group structure, return an initialized, in-memory version.
+disk_group_to_mem(Fd, #group{id_btree=IdState,views=Views}=Group) ->
+    {ok, IdBtree} = couch_btree:open(IdState, Fd),
+    Views2 = lists:map(
+        fun(#view{btree=BtreeState}=View) ->
+            {ok, Btree} = couch_btree:open(BtreeState, Fd, [{less, fun less_json/2}]),
+            View#view{btree=Btree}
+        end,
+        Views),
+    Group#group{fd=Fd, id_btree=IdBtree, views=Views2}.
+    
+% Given an initialized, in-memory group structure, return a disk ready version.
+mem_group_to_disk(#group{id_btree=IdBtree,views=Views}=Group) ->
+    Views2 = lists:map(
+        fun(#view{btree=Btree}=View) ->
+            State = couch_btree:get_state(Btree),
+            View#view{btree=State}
+        end,
+        Views),
+    Group#group{fd=nil, id_btree=couch_btree:get_state(IdBtree), views=Views2}.
+            
+reset_header(GroupId, Fd, DefLanguage, NamedViews) ->
+    couch_file:truncate(Fd, 0),
+    {Views, _N} = lists:mapfoldl(
+        fun({Name, Definition}, N) ->
+            {#view{name=Name, id_num=N, btree=nil, def=Definition}, N+1}
+        end,
+        0, NamedViews),
+    Group = #group{name=GroupId,
+        fd=Fd,
+        views=Views,
+        current_seq=0,
+        def_lang=DefLanguage,
+        id_btree=nil},
+    ok = couch_file:write_header(Fd, <<$r, $c, $k, 0>>, Group),
+    disk_group_to_mem(Fd, Group).
+
+
+
+less_json(A, B) ->
+    TypeA = type_sort(A),
+    TypeB = type_sort(B),
+    if
+    TypeA == TypeB ->
+        less_same_type(A,B);
+    true ->
+        TypeA < TypeB
+    end.
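+% For illustration, given the type ordering below (atoms < numbers < strings
+% < arrays < objects < binaries):
+%   less_json(null, 1)           -> true
+%   less_json(1, "one")          -> true
+%   less_json({1, 2}, {obj, []}) -> true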
+
+type_sort(V) when is_atom(V) -> 0;
+type_sort(V) when is_integer(V) -> 1;
+type_sort(V) when is_float(V) -> 1;
+type_sort(V) when is_list(V) -> 2;
+type_sort({obj, _}) -> 4; % must come before tuple test below
+type_sort(V) when is_tuple(V) -> 3;
+type_sort(V) when is_binary(V) -> 5.
+
+atom_sort(nil) -> 0;
+atom_sort(null) -> 1;
+atom_sort(false) -> 2;
+atom_sort(true) -> 3.
+
+less_same_type(A,B) when is_atom(A) ->
+    atom_sort(A) < atom_sort(B);
+less_same_type(A,B) when is_list(A) ->
+    couch_util:collate(A, B) < 0;
+less_same_type({obj, AProps}, {obj, BProps}) ->
+    less_props(AProps, BProps);
+less_same_type(A, B) when is_tuple(A) ->
+    less_list(tuple_to_list(A),tuple_to_list(B));
+less_same_type(A, B) ->
+    A < B.
+
+ensure_list(V) when is_list(V) -> V;
+ensure_list(V) when is_atom(V) -> atom_to_list(V).
+
+less_props([], [_|_]) ->
+    true;
+less_props(_, []) ->
+    false;
+less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+    case couch_util:collate(ensure_list(AKey), ensure_list(BKey)) of
+    -1 -> true;
+    1 -> false;
+    0 ->
+        case less_json(AValue, BValue) of
+        true -> true;
+        false ->
+            case less_json(BValue, AValue) of
+            true -> false;
+            false ->
+                less_props(RestA, RestB)
+            end
+        end
+    end.
+
+less_list([], [_|_]) ->
+    true;
+less_list(_, []) ->
+    false;
+less_list([A|RestA], [B|RestB]) ->
+    case less_json(A,B) of
+    true -> true;
+    false ->
+        case less_json(B,A) of
+        true -> false;
+        false ->
+            less_list(RestA, RestB)
+        end
+    end.
+
+process_doc(Db, DocInfo, {Docs, #group{name=GroupId}=Group, ViewKVs, DocIdViewIdKeys, _LastSeq}) ->
+    % This fun is called once for each document
+    #doc_info{id=DocId, update_seq=Seq, deleted=Deleted} = DocInfo,
+    case DocId of
+    GroupId ->
+        % uh oh. this is the design doc with our definitions. See if
+        % anything in the definition changed.
+        case couch_db:open_doc(Db, DocInfo) of
+        {ok, Doc} ->
+            case couch_doc:get_view_functions(Doc) of
+            none ->
+                throw(restart);
+            {DefLang, NewDefs} ->
+                case Group#group.def_lang == DefLang andalso same_view_def(Group#group.views, NewDefs) of
+                true ->
+                    % nothing changed, keep on computing
+                    {ok, {Docs, Group, ViewKVs, DocIdViewIdKeys, Seq}};
+                false ->
+                    throw(restart)
+                end
+            end;
+        {not_found, deleted} ->
+            throw(restart)
+        end;
+    ?DESIGN_DOC_PREFIX ++ _ -> % we skip design docs
+        {ok, {Docs, Group, ViewKVs, DocIdViewIdKeys, Seq}};
+    _ ->
+        {Docs2, DocIdViewIdKeys2} =
+        if Deleted ->
+            {Docs, [{DocId, []} | DocIdViewIdKeys]};
+        true ->
+            {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted_conflicts]),
+            {[Doc | Docs], DocIdViewIdKeys}
+        end,
+        case process_info(self(), memory) of
+        {memory, Mem} when Mem > ?FLUSH_MAX_MEM ->
+            {Group1, Results} = view_compute(Group, Docs2),
+            {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2, Results, ViewKVs, DocIdViewIdKeys2),
+            {ok, Group2} = write_changes(Group1, ViewKVs3, DocIdViewIdKeys3, Seq),
+            garbage_collect(),
+            ViewEmptyKeyValues = [{View, []} || View <- Group2#group.views],
+            {ok, {[], Group2, ViewEmptyKeyValues, [], Seq}};
+        _Else ->
+            {ok, {Docs2, Group, ViewKVs, DocIdViewIdKeys2, Seq}}
+        end
+    end.
+
+view_insert_query_results([], [], ViewKVs, DocIdViewIdKeysAcc) ->
+    {ViewKVs, DocIdViewIdKeysAcc};
+view_insert_query_results([Doc|RestDocs], [QueryResults | RestResults], ViewKVs, DocIdViewIdKeysAcc) ->
+    {NewViewKVs, NewViewIdKeys} = view_insert_doc_query_results(Doc, QueryResults, ViewKVs, [], []),
+    NewDocIdViewIdKeys = [{Doc#doc.id, NewViewIdKeys} | DocIdViewIdKeysAcc],
+    view_insert_query_results(RestDocs, RestResults, NewViewKVs, NewDocIdViewIdKeys).
+
+
+view_insert_doc_query_results(_Doc, [], [], ViewKVsAcc, ViewIdKeysAcc) ->
+    {lists:reverse(ViewKVsAcc), lists:reverse(ViewIdKeysAcc)};
+view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{View, KVs}|RestViewKVs], ViewKVsAcc, ViewIdKeysAcc) ->
+    NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs],
+    NewViewIdKeys = [{View#view.id_num, Key} || {Key, _Value} <- ResultKVs],
+    NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
+    NewViewIdKeysAcc = NewViewIdKeys ++ ViewIdKeysAcc,
+    view_insert_doc_query_results(Doc, RestResults, RestViewKVs, NewViewKVsAcc, NewViewIdKeysAcc).
+
+view_compute(Group, []) ->
+    {Group, []};
+view_compute(#group{def_lang=DefLang, query_server=QueryServerIn}=Group, Docs) ->
+    {ok, QueryServer} =
+    case QueryServerIn of
+    nil -> % doc map not started
+        Definitions = [View#view.def || View <- Group#group.views],
+        couch_query_servers:start_doc_map(DefLang, Definitions);
+    _ ->
+        {ok, QueryServerIn}
+    end,
+    {ok, Results} = couch_query_servers:map_docs(QueryServer, Docs),
+    {Group#group{query_server=QueryServer}, Results}.
+
+
+dict_find(Key, DefaultValue, Dict) ->
+    case dict:find(Key, Dict) of
+    {ok, Value} ->
+        Value;
+    error ->
+        DefaultValue
+    end.
+
+write_changes(Group, ViewKeyValuesToAdd, DocIdViewIdKeys, NewSeq) ->
+    #group{id_btree=IdBtree} = Group,
+
+    AddDocIdViewIdKeys = [{DocId, ViewIdKeys} || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys /= []],
+    RemoveDocIds = [DocId || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys == []],
+    LookupDocIds = [DocId || {DocId, _ViewIdKeys} <- DocIdViewIdKeys],
+
+    {ok, LookupResults, IdBtree2}
+        = couch_btree:query_modify(IdBtree, LookupDocIds, AddDocIdViewIdKeys, RemoveDocIds),
+    KeysToRemoveByView = lists:foldl(
+        fun(LookupResult, KeysToRemoveByViewAcc) ->
+            case LookupResult of
+            {ok, {DocId, ViewIdKeys}} ->
+                lists:foldl(
+                    fun({ViewId, Key}, KeysToRemoveByViewAcc2) ->
+                        dict:append(ViewId, {Key, DocId}, KeysToRemoveByViewAcc2)
+                    end,
+                    KeysToRemoveByViewAcc, ViewIdKeys);
+            {not_found, _} ->
+                KeysToRemoveByViewAcc
+            end
+        end,
+        dict:new(), LookupResults),
+
+    Views2 = [
+        begin
+            KeysToRemove = dict_find(View#view.id_num, [], KeysToRemoveByView),
+            {ok, ViewBtree2} = couch_btree:add_remove(View#view.btree, AddKeyValues, KeysToRemove),
+            View#view{btree = ViewBtree2}
+        end
+    ||
+        {View, AddKeyValues} <- ViewKeyValuesToAdd
+    ],
+    Group2 = Group#group{views=Views2, current_seq=NewSeq, id_btree=IdBtree2},
+    {ok, Group2}.

Added: incubator/couchdb/trunk/src/couchdb/mod_couch.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/mod_couch.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/mod_couch.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/mod_couch.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,891 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mod_couch).
+
+-include("couch_db.hrl").
+
+-export([do/1, load/2, url_decode/1]).
+
+-include_lib("../couch_inets/httpd.hrl").
+
+-record(uri_parts,
+    {db = "",
+    doc = "",
+    attachment = "",
+    view = "",
+    querystr = ""}).
+
+-record(doc_query_args,
+    {
+    options = [],
+    rev = "",
+    open_revs = ""
+    }).
+
+%% do. This is the main entry point into Apache CouchDB from the HTTP server.
+
+do(Mod) ->
+    #mod{request_uri=Uri,request_line=Request, parsed_header=Header,entity_body=Body} = Mod,
+    PrevTrapExit = process_flag(trap_exit, true),
+    Resp =
+    case Uri of
+    "/_utils/" ++ RestURI ->
+        % if the URI is the utils directory, then this
+        % tells mod_get (a std HTTP module) where to serve the file from
+        DocumentRoot = httpd_util:lookup(Mod#mod.config_db, document_root, ""),
+        {Path, AfterPath} = httpd_util:split_path(DocumentRoot ++ "/" ++ RestURI),
+
+        case RestURI of
+        "" ->
+            Paths = httpd_util:split_path(DocumentRoot ++ "/index.html"),
+            {proceed, [{real_name, Paths} | Mod#mod.data]};
+        _ ->
+            case filelib:is_file(Path) of
+            true ->
+                {proceed, [{real_name, {Path, AfterPath}} | Mod#mod.data]};
+            false ->
+                case filelib:is_dir(Path) of
+                true ->
+                    % this ends up causing an "Internal Server Error", need to fix.
+                    {proceed, [{response,{403,"Forbidden"}}]};
+                false ->
+                    {proceed, [{response,{404,"Not found"}}]}
+                end
+            end
+        end;
+    "/favicon.ico" ->
+        DocumentRoot = httpd_util:lookup(Mod#mod.config_db, document_root, ""),
+        RealName = DocumentRoot ++ "/" ++ Uri,
+        {Path, AfterPath} = httpd_util:split_path(RealName),
+        {proceed, [{real_name, {Path, AfterPath}} | Mod#mod.data]};
+    _ ->
+        couch_log:info("HTTP Request: ~s", [Request]),
+        couch_log:debug("Headers: ~p", [Header]),
+        couch_log:debug("Body: ~P", [Body, 100]),
+        case (catch parse_uri(Uri)) of
+        {ok, Parts} ->
+            {ok, ResponseCode} =
+            case (catch do(Mod, Parts)) of
+            {ok, ResponseCode0} ->
+                {ok, ResponseCode0};
+            Error ->
+                send_error(Mod, Error)
+            end;
+        Error ->
+            {ok, ResponseCode} = send_error(Mod, Error)
+        end,
+        couch_log:info("HTTP Response Code:~p~n", [ResponseCode]),
+        {proceed, [{response, {already_sent, ResponseCode, 0}} | Mod#mod.data]}
+    end,
+    process_flag(trap_exit, PrevTrapExit),
+    Resp.
+
+
+parse_uri(RequestUri) ->
+    % separate out the path and query portions and
+    % strip the leading slash and question mark.
+    case regexp:split(RequestUri, "\\?") of
+    {ok, [[$/|UriPath], QueryStr]} -> ok;
+    {ok, [[$/|UriPath]]} -> QueryStr = ""
+    end,
+    % let's try to parse out the UriPath.
+    {ok, UrlParts} = regexp:split(UriPath, "/"),
+
+    {DbName, Id, Attachment, View} =
+    case UrlParts of
+    [Db] ->
+        {Db, "", "", ""};
+    [Db, "_design", Doc] ->
+        {Db, "_design/" ++ Doc, "", ""};
+    [Db, "_design", Doc, Attachment0] ->
+        {Db, "_design/" ++ Doc, Attachment0, ""};
+    [Db, "_view", Doc, ViewName] ->
+        {Db, "_design/" ++ Doc, "", ViewName};
+    [Db, "_view%2f" ++ Doc, ViewName] ->
+        {Db, "_design/" ++ Doc, "", ViewName};
+    [Db, Doc] ->
+        {Db, Doc, "", ""};
+    [Db, Doc, Attachment0] ->
+        {Db, Doc, Attachment0, ""};
+    _ ->
+        throw({invalid_uri, lists:flatten(io_lib:format("Uri has too many parts: ~p", [UrlParts]))})
+    end,
+    {ok, #uri_parts{db=url_decode(DbName),
+        doc=url_decode(Id),
+        attachment=url_decode(Attachment),
+        view=url_decode(View),
+        querystr=url_decode(QueryStr)}}.
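+% For illustration:
+%   parse_uri("/mydb/mydoc?rev=123") ->
+%       {ok, #uri_parts{db="mydb", doc="mydoc", querystr="rev=123"}}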
+
+resp_json_header(Mod) ->
+    resp_json_header(Mod, []).
+
+% return the JSON response header values list
+resp_json_header(Mod, Options) ->
+    Types = string:tokens(proplists:get_value("accept", Mod#mod.parsed_header, ""), ", "),
+    case lists:member("application/json", Types) of
+    true ->
+        resp_header(Mod, Options) ++ [{"content-type","application/json"}];
+    false ->
+        resp_header(Mod, Options) ++ [{"content-type","text/plain;charset=utf-8"}]
+    end.
+
+% return the base response header values list
+resp_header(#mod{http_version=Version}, Options) ->
+    [{"cache-control", "no-cache"},
+    {"pragma", "no-cache"},
+    {"expires", httpd_util:rfc1123_date()}] ++
+    case lists:member(no_body, Options) of
+    true -> [];
+    false ->
+        case Version == "HTTP/1.1" of
+        true ->
+            [{"transfer-encoding", "chunked"}];
+        false ->
+            [{"connection", "close"}]
+        end
+    end.
+
+
+url_decode([$%, Hi, Lo | Tail]) ->
+    Hex = erlang:list_to_integer([Hi, Lo], 16),
+    xmerl_ucs:to_utf8([Hex]) ++ url_decode(Tail);
+url_decode([H|T]) ->
+    [H |url_decode(T)];
+url_decode([]) ->
+    [].
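+% For example:
+%   1> mod_couch:url_decode("hello%20world%21").
+%   "hello world!"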
+
+
+send_header(Mod, RespCode, Headers) ->
+    couch_log:debug("HTTP Response Headers (code ~w): ~p", [RespCode, Headers]),
+    httpd_response:send_header(Mod, RespCode, Headers).
+
+send_chunk(Mod, Data) ->
+    httpd_response:send_chunk(Mod, Data, false).
+
+send_final_chunk(Mod) ->
+    httpd_response:send_final_chunk(Mod, false).
+
+show_couch_welcome(Mod) ->
+    send_header(Mod, 200, resp_json_header(Mod)),
+    send_chunk(Mod, "{\"couchdb\": \"Welcome\", "),
+    send_chunk(Mod, "\"version\": \"" ++ couch_server:get_version()),
+    send_chunk(Mod, "\"}\n"),
+    send_final_chunk(Mod),
+    {ok, 200}.
+
+
+do(#mod{method="GET"}=Mod, #uri_parts{db=""}) ->
+    show_couch_welcome(Mod);
+do(#mod{method="GET"}=Mod, #uri_parts{db="_all_dbs", doc=""}=Parts) ->
+    send_all_dbs(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{db="_replicate", doc=""}) ->
+    handle_replication_request(Mod);
+do(#mod{method="POST"}=Mod, #uri_parts{db="_restart", doc=""}) ->
+    couch_server:remote_restart(),
+    send_ok(Mod, 201);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_missing_revs"}=Parts) ->
+    handle_missing_revs_request(Mod, Parts);
+do(#mod{method="PUT"}=Mod, #uri_parts{doc=""}=Parts) ->
+    handle_db_create(Mod, Parts);
+do(#mod{method="DELETE"}=Mod, #uri_parts{doc=""}=Parts) ->
+    handle_db_delete(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_bulk_docs"}=Parts) ->
+    handle_bulk_doc_update(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc=""}=Parts) ->
+    handle_doc_post(Mod, Parts);
+do(#mod{method="PUT"}=Mod, Parts) ->
+    handle_doc_put(Mod, Parts);
+do(#mod{method="DELETE"}=Mod, Parts) ->
+    handle_doc_delete(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_temp_view"}=Parts) ->
+    send_temp_view(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc="_all_docs"}=Parts) ->
+    send_all_docs(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc="_all_docs_by_seq"}=Parts) ->
+    send_all_docs_by_seq(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc=""}=Parts) ->
+    send_database_info(Mod, Parts);
+do(#mod{method=Method}=Mod, #uri_parts{attachment="",view=""}=Parts)
+        when Method == "GET" orelse Method == "HEAD" ->
+    #doc_query_args{open_revs=Revs} = doc_parse_query(Parts#uri_parts.querystr),
+    case Revs of
+    [] ->
+        send_doc(Mod, Parts);
+    _ ->
+        send_doc_revs(Mod, Parts)
+    end;
+do(#mod{method=Method}=Mod, #uri_parts{attachment=Att}=Parts)
+        when Att /= "", Method == "GET" orelse Method == "HEAD" ->
+    send_attachment(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{view=View}=Parts) when View /= "" ->
+    send_view(Mod, Parts).
+
+handle_db_create(Mod, #uri_parts{db=DbName}) ->
+    case couch_server:create(DbName, []) of
+    {ok, _Db} ->
+        send_ok(Mod, 201);
+    {error, database_already_exists} ->
+        Msg = io_lib:format("Database ~p already exists.", [DbName]),
+        throw({database_already_exists, Msg});
+    Error ->
+        Msg = io_lib:format("Error creating database ~p: ~p", [DbName, Error]),
+        throw({unknown_error, Msg})
+    end.
+
+handle_db_delete(Mod, #uri_parts{db=DbName}) ->
+    % delete with no doc specified, therefore database delete
+    case couch_server:delete(DbName) of
+    ok ->
+        send_ok(Mod, 202);
+    Error ->
+        throw(Error)
+    end.
+
+handle_bulk_doc_update(#mod{entity_body=RawBody}=Mod, Parts) ->
+    Options = [], % put options here.
+    Db = open_db(Parts),
+    {obj, JsonProps} = cjson:decode(RawBody),
+    DocsArray = proplists:get_value("docs", JsonProps),
+    % convert all the doc elements to native docs
+    case proplists:get_value("new_edits", JsonProps, true) of
+    true ->
+        Docs = lists:map(
+            fun({obj, ObjProps} = JsonObj) ->
+                Doc = couch_doc:from_json_obj(JsonObj),
+
+                Id =
+                case Doc#doc.id of
+                    "" -> couch_util:new_uuid();
+                    Id0 -> Id0
+                end,
+                Revs =
+                case proplists:get_value("_rev", ObjProps) of
+                    undefined -> [];
+                    Rev  -> [Rev]
+                end,
+                Doc#doc{id=Id,revs=Revs}
+            end,
+            tuple_to_list(DocsArray)),
+            
+        {ok, ResultRevs} = couch_db:update_docs(Db, Docs, Options),
+        
+        % output the results
+        DocResults = lists:zipwith(
+            fun(Doc, NewRev) ->
+                {obj, [{"id", Doc#doc.id}, {"rev", NewRev}]}
+            end,
+            Docs, ResultRevs),
+        send_ok(Mod, 201, [{new_revs, list_to_tuple(DocResults)}]);
+
+    false ->
+        Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <-  tuple_to_list(DocsArray)],
+        ok = couch_db:save_docs(Db, Docs, Options),
+        send_ok(Mod, 201)
+    end.
+
+
+
+
+doc_parse_query(QueryStr) ->
+    QueryList = httpd:parse_query(QueryStr),
+    lists:foldl(fun({Key,Value}, Args) ->
+            case {Key, Value} of
+            {"attachments", "true"} ->
+                Options = [attachments | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"meta", "true"} ->
+                Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"revs", "true"} ->
+                Options = [revs | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"revs_info", "true"} ->
+                Options = [revs_info | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"conflicts", "true"} ->
+                Options = [conflicts | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"deleted_conflicts", "true"} ->
+                Options = [deleted_conflicts | Args#doc_query_args.options],
+                Args#doc_query_args{options=Options};
+            {"rev", Rev} ->
+                Args#doc_query_args{rev=Rev};
+            {"open_revs", "all"} ->
+                Args#doc_query_args{open_revs=all};
+            {"open_revs", RevsJsonStr} ->
+                JsonArray = cjson:decode(RevsJsonStr),
+                Args#doc_query_args{open_revs=tuple_to_list(JsonArray)};
+            _Else -> % unknown key value pair, ignore.
+                Args
+            end
+        end,
+        #doc_query_args{}, QueryList).
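+% For illustration:
+%   doc_parse_query("rev=946B7D1C&revs=true") ->
+%       #doc_query_args{options=[revs], rev="946B7D1C", open_revs=""}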
+
+
+handle_doc_post(#mod{entity_body=RawBody}=Mod, Parts) ->
+    Db = open_db(Parts),
+    Json = cjson:decode(RawBody),
+    Doc = couch_doc:from_json_obj(Json),
+    Id = couch_util:new_uuid(),
+    {ok, NewRevId} = couch_db:update_doc(Db, Doc#doc{id=Id, revs=[]}, []),
+    send_ok(Mod, 201, [{"id", Id}, {"rev", NewRevId}], [{"etag", NewRevId}]).
+
+handle_doc_put(#mod{parsed_header=Headers}=Mod,
+        #uri_parts{doc=Id, querystr=QueryStr}=Parts) ->
+    #doc_query_args{options=SaveOptions} = doc_parse_query(QueryStr),
+    Db = open_db(Parts),
+    {obj, ObjProps} = Json = cjson:decode(Mod#mod.entity_body),
+    Doc = couch_doc:from_json_obj(Json),
+    Etag = proplists:get_value("if-match", Headers, ""),
+    DocRev = proplists:get_value("_rev", ObjProps, ""),
+
+    if DocRev /= "" andalso Etag /= "" andalso DocRev /= Etag ->
+        throw({invalid_request, "Document rev and etag have different values"});
+    true -> ok
+    end,
+    Revs =
+    if DocRev /= "" -> [DocRev];
+    Etag /= "" -> [Etag];
+    true -> []
+    end,
+
+    {ok, NewRevId} = couch_db:update_doc(Db, Doc#doc{id=Id, revs=Revs}, SaveOptions),
+    send_ok(Mod, 201, [{"id", Id}, {"rev", NewRevId}],[{"etag", NewRevId}]).
+
+handle_doc_delete(#mod{parsed_header=Headers}=Mod,
+        #uri_parts{doc=Id, querystr=QueryStr}=Parts) ->
+    Db = open_db(Parts),
+    #doc_query_args{rev=QueryRev} = doc_parse_query(QueryStr),
+    Etag = proplists:get_value("if-match", Headers, ""),
+    RevToDelete =
+    case {QueryRev, Etag} of
+    {"", ""} ->
+        throw({missing_rev, "Document rev/etag must be specified to delete"});
+    {_, ""} ->
+        QueryRev;
+    {"", _} ->
+        Etag;
+    _ when QueryRev == Etag ->
+        Etag;
+    _ ->
+        throw({invalid_request, "Document rev and etag have different values"})
+    end,
+    {ok, NewRev} = couch_db:delete_doc(Db, Id, [RevToDelete]),
+    send_ok(Mod, 202, [{"id", Id}, {"rev", NewRev}]).
+
+
+-record(query_args,
+    {start_key = nil,
+    end_key = <<>>,
+    count = 10000000000,    % a huge default number, picked so we don't
+                            % need separate logic when there is no count limit
+    update = true,
+    direction = fwd,
+    start_docid = nil,
+    end_docid = <<>>,
+    skip = 0
+    }).
+
+reverse_key_default(nil) -> <<>>;
+reverse_key_default(<<>>) -> nil;
+reverse_key_default(Key) -> Key.
+
+view_parse_query(QueryStr) ->
+    QueryList = httpd:parse_query(QueryStr),
+    lists:foldl(fun({Key,Value}, Args) ->
+            case {Key, Value} of
+            {"", _} ->
+                Args;
+            {"key", Value} ->
+                JsonKey = cjson:decode(Value),
+                Args#query_args{start_key=JsonKey,end_key=JsonKey};
+            {"startkey_docid", DocId} ->
+                Args#query_args{start_docid=DocId};
+            {"startkey", Value} ->
+                Args#query_args{start_key=cjson:decode(Value)};
+            {"endkey", Value} ->
+                Args#query_args{end_key=cjson:decode(Value)};
+            {"count", Value} ->
+                case (catch list_to_integer(Value)) of
+                Count when is_integer(Count) ->
+                    if Count < 0 ->
+                        Args#query_args {
+                            direction =
+                            if Args#query_args.direction == rev -> fwd;
+                            true -> rev
+                            end,
+                            count=Count,
+                            start_key = reverse_key_default(Args#query_args.start_key),
+                            start_docid = reverse_key_default(Args#query_args.start_docid),
+                            end_key = reverse_key_default(Args#query_args.end_key),
+                            end_docid =  reverse_key_default(Args#query_args.end_docid)};
+                    true ->
+                        Args#query_args{count=Count}
+                    end;
+                _Error ->
+                    Msg = io_lib:format("Bad URL query value, number expected: count=~s", [Value]),
+                    throw({query_parse_error, Msg})
+                end;
+            {"update", "false"} ->
+                Args#query_args{update=false};
+            {"descending", "true"} ->
+                case Args#query_args.direction of
+                fwd ->
+                    Args#query_args {
+                        direction = rev,
+                        start_key = reverse_key_default(Args#query_args.start_key),
+                        start_docid = reverse_key_default(Args#query_args.start_docid),
+                        end_key = reverse_key_default(Args#query_args.end_key),
+                        end_docid =  reverse_key_default(Args#query_args.end_docid)};
+                _ ->
+                    Args %already reversed
+                end;
+            {"skip", Value} ->
+                case (catch list_to_integer(Value)) of
+                Count when is_integer(Count) ->
+                    Args#query_args{skip=Count};
+                _Error ->
+                    Msg = lists:flatten(io_lib:format(
+                    "Bad URL query value, number expected: skip=~s", [Value])),
+                    throw({query_parse_error, Msg})
+                end;
+            _ -> % unknown key
+                Msg = lists:flatten(io_lib:format(
+                    "Bad URL query key:~s", [Key])),
+                throw({query_parse_error, Msg})
+            end
+        end,
+        #query_args{}, QueryList).
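+% For illustration, "startkey=%22a%22&count=2" yields a #query_args{} with
+% start_key="a", count=2, and the remaining fields at their defaults:
+%   view_parse_query("startkey=%22a%22&count=2")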
+
+
+% returns db, otherwise throws exception. Note: no {ok,_}.
+open_db(#uri_parts{db=DbName}) ->
+    open_db(DbName);
+open_db(DbName) when is_list(DbName)->
+    case couch_server:open(DbName) of
+    {ok, Db} ->
+        Db;
+    Error ->
+        throw(Error)
+    end.
+
+handle_missing_revs_request(#mod{entity_body=RawJson}=Mod, Parts) ->
+    Db = open_db(Parts),
+    {obj, JsonDocIdRevs} = cjson:decode(RawJson),
+    DocIdRevs = [{Id, tuple_to_list(Revs)} || {Id, Revs} <- JsonDocIdRevs],
+    {ok, Results} = couch_db:get_missing_revs(Db, DocIdRevs),
+    JsonResults = [{Id, list_to_tuple(Revs)} || {Id, Revs} <- Results],
+    send_json(Mod, 200, {obj, [{missing_revs, {obj, JsonResults}}]}).
+
+handle_replication_request(#mod{entity_body=RawJson}=Mod) ->
+    {obj, Props} = cjson:decode(RawJson),
+    Src = proplists:get_value("source", Props),
+    Tgt = proplists:get_value("target", Props),
+    {obj, Options} = proplists:get_value("options", Props, {obj, []}),
+    {ok, {obj, JsonResults}} = couch_rep:replicate(Src, Tgt, Options),
+    send_ok(Mod, 200, JsonResults).
+
+
+
+send_database_info(Mod, #uri_parts{db=DbName}=Parts) ->
+    Db = open_db(Parts),
+    {ok, InfoList} = couch_db:get_db_info(Db),
+    ok = send_header(Mod, 200, resp_json_header(Mod)),
+    DocCount = proplists:get_value(doc_count, InfoList),
+    LastUpdateSequence = proplists:get_value(last_update_seq, InfoList),
+    ok = send_chunk(Mod, "{\"db_name\": \"" ++ DbName ++
+        "\", \"doc_count\":" ++ integer_to_list(DocCount) ++
+        ", \"update_seq\":" ++ integer_to_list(LastUpdateSequence)++"}"),
+    ok = send_final_chunk(Mod),
+    {ok, 200}.
+
+send_doc(#mod{parsed_header=Headers}=Mod,
+        #uri_parts{doc=DocId,querystr=QueryStr}=Parts) ->
+    Db = open_db(Parts),
+    #doc_query_args{rev=Rev, options=Options} = doc_parse_query(QueryStr),
+    case Rev of
+    "" ->
+        % open most recent rev
+        case couch_db:open_doc(Db, DocId, Options) of
+        {ok, #doc{revs=[DocRev|_]}=Doc} ->
+            Etag = proplists:get_value("if-none-match", Headers),
+            if Options == [] andalso Etag == DocRev ->
+                ok = send_header(Mod, 304,
+                        resp_header(Mod, [no_body]) ++ [{"etag", DocRev}]),
+                {ok, 304};
+            true ->
+                send_json(Mod, 200, couch_doc:to_json_obj(Doc, Options),
+                        if Options == [] -> [{"etag", DocRev}]; true -> [] end)
+            end;
+        Error ->
+            throw(Error)
+        end;
+    _ ->
+        % open a specific rev (deletions come back as stubs)
+        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+        {ok, [{ok, Doc}]} ->
+            send_json(Mod, 200, couch_doc:to_json_obj(Doc, Options), [{"etag", Rev}]);
+        {ok, [Else]} ->
+            throw(Else)
+        end
+    end.
+
+send_doc_revs(Mod, #uri_parts{doc=DocId,querystr=QueryStr}=Parts) ->
+    Db = open_db(Parts),
+    #doc_query_args{options=Options, open_revs=Revs} = doc_parse_query(QueryStr),
+    {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
+    ok = send_header(Mod, 200, resp_json_header(Mod)),
+    ok = send_chunk(Mod, "["),
+    % We loop through the docs. The first time through the separator
+    % is whitespace, then a comma on subsequent iterations.
+    lists:foldl(
+        fun(Result, AccSeparator) ->
+            case Result of
+            {ok, Doc} ->
+                JsonDoc= couch_doc:to_json_obj(Doc, Options),
+                ok = send_chunk(Mod, AccSeparator ++ lists:flatten(cjson:encode({obj, [{ok, JsonDoc}]})));
+            {{not_found, missing}, RevId} ->
+                Json = {obj, [{"missing", RevId}]},
+                ok = send_chunk(Mod, AccSeparator ++ lists:flatten(cjson:encode(Json)))
+            end,
+            "," % AccSeparator now has a comma
+        end,
+        "", Results),
+    ok = send_chunk(Mod, "]"),
+    ok = send_final_chunk(Mod),
+    {ok, 200}.
+
+send_attachment(#mod{method=Method} = Mod,
+        #uri_parts{doc=DocId,attachment=Attachment}=Parts) ->
+    Db = open_db(Parts),
+    case couch_db:open_doc(Db, DocId, []) of
+    {ok, #doc{attachments=Attachments}} ->
+        case proplists:get_value(Attachment, Attachments) of
+        undefined ->
+            throw({not_found, missing});
+        {Type, Bin} ->
+            ok = send_header(Mod, 200, resp_header(Mod, Type) ++
+                [{"content-type", Type},
+                {"content-length", integer_to_list(couch_doc:bin_size(Bin))}]),
+            case Method of
+            "GET" ->
+                couch_doc:bin_foldl(Bin,
+                        fun(BinSegment, []) ->
+                            ok = send_chunk(Mod, BinSegment),
+                            {ok, []}
+                        end,
+                        []);
+            "HEAD" ->
+                ok
+            end,
+            ok = send_final_chunk(Mod),
+            {ok, 200}
+        end;
+    Error ->
+        throw(Error)
+    end.
+
+
+send_json(Mod, Code, JsonData) ->
+    send_json(Mod, Code, JsonData, []).
+
+send_json(#mod{method=Method}=Mod, Code, JsonData, Headers) ->
+    case Method of
+    "HEAD" ->
+        ok = send_header(Mod, Code, resp_json_header(Mod, [no_body]) ++ Headers);
+    _ ->
+        ok = send_header(Mod, Code, resp_json_header(Mod) ++ Headers),
+        ok = send_chunk(Mod, lists:flatten([cjson:encode(JsonData) | "\n"])),
+        ok = send_final_chunk(Mod)
+    end,
+    {ok, Code}.
+
+
+send_ok(Mod, Code) ->
+    send_ok(Mod, Code, []).
+
+send_ok(Mod, Code, AdditionalProps) ->
+    send_ok(Mod, Code, AdditionalProps, []).
+
+send_ok(Mod, Code, AdditionalProps, AdditionalHeaders) ->
+    send_json(Mod, Code, {obj, [{ok, true}|AdditionalProps]}, AdditionalHeaders).
+
+
+make_view_fold_fun(Mod, QueryArgs) ->
+    #query_args{
+        end_key=EndKey,
+        end_docid=EndDocId,
+        direction=Dir,
+        count=Count
+        } = QueryArgs,
+
+    PassedEndFun =
+    case Dir of
+    fwd ->
+        fun(ViewKey, ViewId) ->
+            couch_view:less_json({EndKey,EndDocId}, {ViewKey,ViewId})
+        end;
+    rev ->
+        fun(ViewKey, ViewId) ->
+            couch_view:less_json({ViewKey, ViewId}, {EndKey,EndDocId})
+        end
+    end,
+
+    NegCountFun =
+    fun(Id, Key, Value, Offset, TotalViewCount, {AccCount, AccSkip, HeaderSent, AccRevRows}) ->
+        PassedEnd = PassedEndFun(Key, Id),
+        case {PassedEnd, AccCount, AccSkip, HeaderSent} of
+        {true,_,_,_} ->
+            % The stop key has been passed, stop looping.
+            {stop, {AccCount, AccSkip, HeaderSent, AccRevRows}};
+        {_,0,_,_} ->
+            {stop, {0, 0, HeaderSent, AccRevRows}}; % we've done "count" rows, stop folding
+        {_,_,AccSkip,_} when AccSkip > 0 ->
+            {ok, {AccCount, AccSkip - 1, HeaderSent, AccRevRows}};
+        {_,AccCount,_,header_sent} ->
+            JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+            {ok, {AccCount + 1, 0, header_sent, [cjson:encode(JsonObj), "," | AccRevRows]}};
+        {_,_,_,header_not_sent} ->
+            ok = send_header(Mod, 200, resp_json_header(Mod)),
+            Offset2= TotalViewCount - Offset -
+                lists:min([TotalViewCount - Offset, - AccCount]),
+            JsonBegin = io_lib:format("{\"total_rows\":~w,\"offset\":~w,\"rows\":[",
+                    [TotalViewCount, Offset2]),
+            JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+            ok = send_chunk(Mod, lists:flatten(JsonBegin)),
+            {ok, {AccCount + 1, 0, header_sent, [cjson:encode(JsonObj) | AccRevRows]}}
+        end
+    end,
+
+    PosCountFun =
+    fun(Id, Key, Value, Offset, TotalViewCount, {AccCount, AccSkip, HeaderSent, AccRevRows}) ->
+        PassedEnd = PassedEndFun(Key, Id),
+        case {PassedEnd, AccCount, AccSkip, HeaderSent} of
+        {true,_,_,_} ->
+            % The stop key has been passed, stop looping.
+            {stop, {AccCount, AccSkip, HeaderSent, AccRevRows}};
+        {_,0,_,_} ->
+            {stop, {0, 0, HeaderSent, AccRevRows}}; % we've done "count" rows, stop folding
+        {_,_,AccSkip,_} when AccSkip > 0 ->
+            {ok, {AccCount, AccSkip - 1, HeaderSent, AccRevRows}};
+        {_,AccCount,_,header_sent} when (AccCount > 0) ->
+            JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+            ok = send_chunk(Mod, "," ++  lists:flatten(cjson:encode(JsonObj))),
+            {ok, {AccCount - 1, 0, header_sent, AccRevRows}};
+        {_,_,_,header_not_sent} ->
+            ok = send_header(Mod, 200, resp_json_header(Mod)),
+            JsonBegin = io_lib:format("{\"total_rows\":~w,\"offset\":~w,\"rows\":[",
+                    [TotalViewCount, Offset]),
+            JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+            ok = send_chunk(Mod, lists:flatten(JsonBegin ++ cjson:encode(JsonObj))),
+            {ok, {AccCount - 1, 0, header_sent, AccRevRows}}
+        end
+    end,
+    case Count > 0 of
+    true ->     PosCountFun;
+    false ->    NegCountFun
+    end.
+
+finish_view_fold(Mod, FoldResult) ->
+    case FoldResult of
+    {ok, TotalRows, {_, _, header_not_sent, _}} ->
+        % nothing found in the view, nothing has been returned
+        % send empty view
+        ok = send_header(Mod, 200, resp_json_header(Mod)),
+        JsonEmptyView = lists:flatten(
+            io_lib:format("{\"total_rows\":~w,\"rows\":[]}\n",
+                [TotalRows])),
+        ok = send_chunk(Mod, JsonEmptyView),
+        ok = send_final_chunk(Mod),
+        {ok, 200};
+    {ok, _TotalRows, {_, _, header_sent, AccRevRows}} ->
+        % end the view
+        ok = send_chunk(Mod, lists:flatten(AccRevRows) ++ "]}\n"),
+        ok = send_final_chunk(Mod),
+        {ok, 200};
+    Error ->
+        throw(Error)
+    end.
+    
+
+send_temp_view(#mod{entity_body=Body,parsed_header=Headers}=Mod,
+        #uri_parts{db=DbName, querystr=QueryStr}) ->
+    #query_args{
+        start_key=StartKey,
+        count=Count,
+        skip=SkipCount,
+        direction=Dir,
+        start_docid=StartDocId} = QueryArgs = view_parse_query(QueryStr),
+    Type0 = proplists:get_value("content-type", Headers, "text/javascript"),
+    % remove the charset ("...;charset=foo") if it's there
+    {ok, [Type|_]} = regexp:split(Type0, ";"),
+    View = {temp, DbName, Type, Body},
+    Start = {StartKey, StartDocId},
+    FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+    FoldAccInit = {Count, SkipCount, header_not_sent, []},
+    FoldResult = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
+    finish_view_fold(Mod, FoldResult).
+
+
+send_view(Mod, #uri_parts{db=DbName, doc=DesignDocId, view=ViewId, querystr=QueryStr}) ->
+    #query_args{
+        start_key=StartKey,
+        count=Count,
+        skip=SkipCount,
+        direction=Dir,
+        start_docid=StartDocId} = QueryArgs = view_parse_query(QueryStr),
+    View = {DbName, DesignDocId, ViewId},
+    Start = {StartKey, StartDocId},
+    FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+    FoldAccInit = {Count, SkipCount, header_not_sent, []},
+    Result = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
+    finish_view_fold(Mod, Result).
+
+
+send_all_docs(Mod, #uri_parts{querystr=QueryStr}=Parts) ->
+    Db = open_db(Parts),
+    #query_args{
+        start_key=StartKey,
+        start_docid=StartDocId,
+        count=Count,
+        skip=SkipCount,
+        direction=Dir} = QueryArgs = view_parse_query(QueryStr),
+    {ok, Info} = couch_db:get_db_info(Db),
+    TotalRowCount = proplists:get_value(doc_count, Info),
+
+    StartId =
+    if is_list(StartKey) -> StartKey;
+    true -> StartDocId
+    end,
+
+    FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+    AdapterFun =
+        fun(#full_doc_info{id=Id}=FullDocInfo, Offset, Acc) ->
+            case couch_doc:to_doc_info(FullDocInfo) of
+            #doc_info{deleted=false, rev=Rev} ->
+                FoldlFun(Id, Id, {obj, [{rev, Rev}]}, Offset, TotalRowCount, Acc);
+            #doc_info{deleted=true} ->
+                {ok, Acc}
+            end
+        end,
+    {ok, FoldResult} = couch_db:enum_docs(Db, StartId, Dir, AdapterFun,
+            {Count, SkipCount, header_not_sent, []}),
+    finish_view_fold(Mod, {ok, TotalRowCount, FoldResult}).
+
+send_all_docs_by_seq(Mod, #uri_parts{querystr=QueryStr}=Parts) ->
+    Db = open_db(Parts),
+    QueryArgs = view_parse_query(QueryStr),
+    #query_args{
+        start_key=StartKey,
+        count=Count,
+        skip=SkipCount,
+        direction=Dir} = QueryArgs,
+
+    {ok, Info} = couch_db:get_db_info(Db),
+    TotalRowCount = proplists:get_value(doc_count, Info),
+
+    FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+
+    StartKey2 =
+    case StartKey of
+    nil -> 0;
+    <<>> -> 100000000000;
+    StartKey when is_integer(StartKey) -> StartKey
+    end,
+    {ok, FoldResult} =
+    couch_db:enum_docs_since(Db, StartKey2, Dir,
+        fun(DocInfo, Offset, Acc) ->
+            #doc_info{
+                id=Id,
+                rev=Rev,
+                update_seq=UpdateSeq,
+                deleted=Deleted,
+                conflict_revs=ConflictRevs,
+                deleted_conflict_revs=DelConflictRevs} = DocInfo,
+            Json =
+            {obj,
+                [{"rev", Rev}] ++
+                case ConflictRevs of
+                []  ->  [];
+                _   ->  [{"conflicts", list_to_tuple(ConflictRevs)}]
+                end ++
+                case DelConflictRevs of
+                []  ->  [];
+                _   ->  [{"deleted_conflicts", list_to_tuple(DelConflictRevs)}]
+                end ++
+                case Deleted of
+                true -> [{"deleted", true}];
+                false -> []
+                end
+                },
+            FoldlFun(Id, UpdateSeq, Json, Offset, TotalRowCount, Acc)
+        end, {Count, SkipCount, header_not_sent, []}),
+    finish_view_fold(Mod, {ok, TotalRowCount, FoldResult}).
+
+
+
+send_all_dbs(Mod, _Parts)->
+    {ok, DbNames} = couch_server:all_databases(),
+    ok = send_header(Mod, 200, resp_json_header(Mod)),
+    ok = send_chunk(Mod, lists:flatten(cjson:encode(list_to_tuple(DbNames)))),
+    ok = send_final_chunk(Mod),
+    {ok, 200}.
+
+send_error(Mod, Error) ->
+    {Json, Code} = error_to_json(Error),
+    couch_log:info("HTTP Error (code ~w): ~p", [Code,  Json]),
+    send_json(Mod, Code, Json).
+
+
+
+% convert an error response into a json object and http error code.
+error_to_json(Error) ->
+    {HttpCode, Atom, Reason} = error_to_json0(Error),
+    Reason1 =
+        case (catch io_lib:format("~s", [Reason])) of
+        Reason0 when is_list(Reason0) ->
+            lists:flatten(Reason0);
+        _ ->
+            lists:flatten(io_lib:format("~p", [Reason])) % else term to text
+        end,
+    Json =
+        {obj,
+            [{error, atom_to_list(Atom)},
+            {reason, Reason1}]},
+    {Json, HttpCode}.
+
+error_to_json0(not_found) ->
+    {404, not_found, "missing"};
+error_to_json0({missing_rev, Msg}) ->
+    {412, missing_rev, Msg};
+error_to_json0({not_found, Reason}) ->
+    {404, not_found, Reason};
+error_to_json0({database_already_exists, Reason}) ->
+    {409, database_already_exists, Reason};
+error_to_json0(conflict) ->
+    {412, conflict, "Update conflict"};
+error_to_json0({doc_validation, Msg}) ->
+    {406, doc_validation, Msg};
+error_to_json0({Id, Reason}) when is_atom(Id) ->
+    {500, Id, Reason};
+error_to_json0(Error) ->
+    {500, error, Error}.
+
+%%
+%% Configuration
+%%
+
+%% load
+
+load("Foo Bar", []) ->
+    {ok, [], {script_alias, {"foo", "bar"}}}.

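The send_doc clause above answers conditional GETs: when the request's
If-None-Match header matches the document's current revision (and no query
options are involved), the handler replies 304 with just the etag instead of
re-sending the body. A minimal client-side sketch of that exchange, using
plain java.net against a local server; the database mydb and document mydoc
are placeholders, and args[0] is a rev string taken from a prior response's
etag header:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class EtagCheck
    {
        public static void main(String[] args) throws Exception
        {
            URL url = new URL("http://localhost:5984/mydb/mydoc");
            HttpURLConnection conn = (HttpURLConnection)url.openConnection();
            // the handler compares this raw header value to the revision string
            conn.setRequestProperty("If-None-Match", args[0]);
            // 304 means the copy identified by that revision is still current;
            // 200 means the document changed and the body carries the new rev
            System.out.println(conn.getResponseCode());
            conn.disconnect();
        }
    }
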
Added: incubator/couchdb/trunk/src/fulltext/lucene/CouchConfig.java
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/fulltext/lucene/CouchConfig.java?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/fulltext/lucene/CouchConfig.java (added)
+++ incubator/couchdb/trunk/src/fulltext/lucene/CouchConfig.java Fri Mar 28 16:32:19 2008
@@ -0,0 +1,62 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.  You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+import java.util.*;
+
+
+class CouchConfig
+{
+/*  private CouchDocument[] documents;
+*/
+    private Hashtable documents;
+    private long updateSequence;
+
+    public CouchConfig()
+    {
+        documents = new Hashtable();
+        updateSequence = 0;
+    }
+
+    public void setUpdateSequence(long newUpdateSequence)
+    {
+        updateSequence = newUpdateSequence;
+    }
+
+    public long getUpdateSequence()
+    {
+        return updateSequence;
+    }
+
+    public void addDocument(com.fourspaces.couchdb.Document document)
+    {
+        String field;
+//      System.out.println(document);
+        field = document.getString("__couchdb_database");
+//      System.out.println(field);
+        if(field != null) {
+            documents.put(field, document);
+        }
+    }
+
+    public Hashtable getDocuments()
+    {
+        return documents;
+    }
+
+    public boolean hasDb(String db)
+    {
+        return documents.containsKey(db);
+    }
+}

Added: incubator/couchdb/trunk/src/fulltext/lucene/CouchDbDirFilter.java
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/fulltext/lucene/CouchDbDirFilter.java?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/fulltext/lucene/CouchDbDirFilter.java (added)
+++ incubator/couchdb/trunk/src/fulltext/lucene/CouchDbDirFilter.java Fri Mar 28 16:32:19 2008
@@ -0,0 +1,30 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.  You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+CouchDbDirFilter is a FilenameFilter that accepts only regular files. The
+LuceneIndexer uses it to pick out database files when listing the server's
+data directory.
+
+*/
+import java.io.*;
+
+class CouchDbDirFilter implements FilenameFilter
+{
+    public boolean accept(File dir, String name)
+    {
+        return new File(dir, name).isFile();
+    }
+}

Added: incubator/couchdb/trunk/src/fulltext/lucene/LuceneIndexer.java
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/fulltext/lucene/LuceneIndexer.java?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/fulltext/lucene/LuceneIndexer.java (added)
+++ incubator/couchdb/trunk/src/fulltext/lucene/LuceneIndexer.java Fri Mar 28 16:32:19 2008
@@ -0,0 +1,355 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.  You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+LuceneIndexer creates a lucene index by incrementally fetching changes from an
+Apache CouchDB server. It is managed by the Apache CouchDB daemon.
+
+I know this is Java and there should be a lot of OO going on, but it
+isn't. Sorry about that.
+
+*/
+
+//basics
+import java.io.*;
+import java.net.*;
+import java.util.*;
+import java.nio.channels.FileChannel;
+import java.nio.ByteBuffer;
+import java.lang.reflect.*;
+
+
+//couchdb4j
+//import com.fourspaces.couchdb.*;
+
+//xml
+import org.xml.sax.*;
+import org.xml.sax.helpers.*;
+import javax.xml.parsers.*;
+
+//lucene
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.TermQuery;
+
+public class LuceneIndexer
+{
+    private static CouchConfig configuration;
+    private static com.fourspaces.couchdb.Session s;
+
+    public static void main(String[] args) throws Exception
+    {
+/*      BufferedWriter out = new BufferedWriter(new FileWriter("LuceneIndexer.log"));
+        out.write("indexer started");out.flush();
+*/
+        String db;
+/*      out.write("indexer about to read config");out.flush();*/
+        connect();
+        readConfig();
+
+/*      out.write("indexer read config: " + configuration.getDocuments());out.flush();*/
+
+        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+        try {
+            while((db = in.readLine()) != null) {
+/*              out.write("indexer got a poke");out.flush();*/
+
+                if(db.equals("couchdbfulltext")) {
+/*                  System.out.println("refresh config");
+
+*/                  readConfig();
+/*                  out.write("indexer refreshed config");out.flush();*/
+
+                }
+
+/*              out.write("indexer has table: " + db + "?");*/
+
+                if(!configuration.hasDb(db)) {
+/*                  out.write("... no wait for input");out.flush();*/
+
+                    continue;
+                }
+
+/*              out.write("yeppa");out.flush();*/
+
+
+                createIndexDir(db);
+                indexChanges(db);
+/*              System.out.println(db + " to revision: " + revision);*/
+            }
+        } catch (IOException e) {
+/*          out.write("indexer caught IO exception: " + e.getMessage());out.flush();*/
+
+        }
+/*      System.out.println("Lucene Indexer stopped");*/
+/*      out.write("indexer stopped");out.flush();*/
+
+/*      out.close();*/
+
+    }
+
+    public static void connect() throws Exception
+    {
+        // connect to the local CouchDB server; the session is kept in the
+        // static field so readConfig() and indexChanges() can use it
+        s = new com.fourspaces.couchdb.Session("localhost", 5984);
+    }
+
+    public static void readConfig() throws Exception
+    {
+        //get all docs in /$ftconfig
+        //return array of config docs
+        configuration = null;
+        configuration = new CouchConfig();
+        com.fourspaces.couchdb.Database db = s.getDatabase("couchdbfulltext");
+        com.fourspaces.couchdb.ViewResults changedDocuments = db.getAllDocuments(0);
+
+        for (com.fourspaces.couchdb.Document d: changedDocuments.getResults()) {
+            configuration.addDocument(d);
+        }
+
+/*      for(int i = 0; i < changedDocuments.length; i++) {
+            CouchDocument document = changedDocuments[i];
+            document = loadDocumentData(document, "couchdbfulltext");
+            configuration.addDocument(document);
+        }
+*/  }
+
+    public static void indexChanges(String db) throws Exception
+    {
+//      System.out.println("Updating index for '" + db + "' from revision: " + revision);
+        int sequence = -1;
+        try {
+            com.fourspaces.couchdb.Database _db = s.getDatabase(db);
+            sequence = _db.getUpdateSeq();
+            com.fourspaces.couchdb.ViewResults changedDocuments = _db.getAllDocuments(sequence);
+
+            if(changedDocuments.size() == 0) {
+//              System.out.println("Index is up-to date at sequence_id: " + revision);
+                return;
+            }
+
+            boolean delete = false;
+
+            for (com.fourspaces.couchdb.Document d: changedDocuments.getResults()) {
+                delete = d.getBoolean("delete");
+                documentAddToIndex(db, d, delete);
+            }
+/*          for(int idx = 0; idx < changedDocuments.length; idx++) {
+                com.fourspaces.couchdb.Document document = changedDocuments[idx];
+                sequence = document.getUpdateSequence();
+                delete = document.getDelete();
+//              System.out.println("Doing: " + document + " with squence: " + sequence + " delete: "+document.getDelete() + " hash code:" + document.hashCode());
+
+                document = loadDocumentData(document, db);
+    //          System.out.println(changedDocuments[idx]);
+                // remove from lucene if exists, add to lucene.
+
+                documentAddToIndex(db, document, delete);
+            }
+*/      //  CouchDocument document = getDocumentByRevision(db, revision);
+            setRevisionForDb(db, sequence);
+        } catch(Exception e) {
+//          System.out.println("Warning: " + db + " says: " + e.getMessage());
+        }
+    }
+
+    public static void documentAddToIndex(String db, com.fourspaces.couchdb.Document document, boolean delete) throws IOException
+    {
+        String index = "Lucene/Index/" + db;
+        boolean create = true;
+
+/*      System.out.println("DEBUG: delete: " + delete);*/
+/*      System.out.println("DEBUG: create index? " + create);*/
+
+        if(IndexReader.indexExists(index)) {
+            create = false;
+            Term term = new Term("__couchdb_document_id", document.getId());
+/*          System.out.println("DEBUG: Deleting: " + document + " with term:" + term);*/
+            IndexReader reader = IndexReader.open(index);
+            reader.deleteDocuments(term);
+/*          System.out.println("DEBUG: reader has deletions: " + reader.hasDeletions());*/
+
+            reader.close();
+        }
+
+        if(!delete) {
+            Analyzer analyzer = new SimpleAnalyzer();
+
+            IndexWriter writer = new IndexWriter(index, analyzer, create);
+            writer.setUseCompoundFile(true);
+
+/*          Collection fields = document.keys();*/
+            Document luceneDocument = new Document();
+
+/*          Set tmpKeys = fields.keySet();
+            Object keys[] = tmpKeys.toArray();
+*/          String keywords = "";
+
+            for (Iterator it = document.keys(); it.hasNext(); ) {
+                Object key = it.next();
+                String value = document.getString((String)key);
+
+                if(key.equals("__couchdb_document_id") || key.equals("__couchdb_document_revision")) {
+                        luceneDocument.add(new Field((String)key, value, Field.Store.YES, Field.Index.UN_TOKENIZED));
+                } else {
+                    luceneDocument.add(new Field((String)key, value, Field.Store.YES, Field.Index.TOKENIZED));
+                    keywords = keywords + " " + value;
+                }
+            }
+            if(keywords.length() > 0) {
+                luceneDocument.add(new Field("__couchdb_keywords", keywords, Field.Store.YES, Field.Index.TOKENIZED));
+            }
+
+
+/*          for(int idx = 0; idx < keys.length; idx++) {
+    //          System.out.println("DEBUG: Add Field: "+ keys[idx] + " with value: " + fields.get(keys[idx]));
+                Hashtable field = (Hashtable)fields.get(keys[idx]);
+                if(field == null) {return;}
+                for(int fieldIdx = 0; fieldIdx < field.size(); fieldIdx++) {
+                    String value = (String)field.get(fieldIdx);
+                    if(value == null) {
+                        value = "";
+                    }
+    //              System.out.println("DEBUG: fieldIdx:" + fieldIdx + " and value: "+ value);
+                    String key = (String)keys[idx];
+                    if(key.equals("__couchdb_document_id") || key.equals("__couchdb_document_revision")) {
+                        luceneDocument.add(new Field(key, value, Field.Store.YES, Field.Index.UN_TOKENIZED));
+                    } else {
+                        luceneDocument.add(new Field(key, value, Field.Store.YES, Field.Index.TOKENIZED));
+                        keywords = keywords + " " + value;
+                    }
+                }
+*///            }
+            writer.addDocument(luceneDocument);
+            writer.optimize();
+            writer.close();
+        }
+    }
+
+
+    private static void setRevisionForDb(String db, long revision) throws Exception
+    {
+        RandomAccessFile stateFile = new RandomAccessFile("Lucene/State/" + db, "rwd");
+        stateFile.writeBytes(String.valueOf(revision));
+        stateFile.close();
+    }
+
+    private static String[] getDBs()
+    {
+        File dbRoot = new File("db_root");
+        if(!dbRoot.isDirectory()) {
+            return new String[0];
+        }
+
+        String[] dbs = dbRoot.list(new CouchDbDirFilter());
+
+        return dbs;
+    }
+
+    private static long getRevisionForDb(String db) throws Exception
+    {
+
+        File dbFile = new File("Lucene/State/" + db);
+        if(!dbFile.exists()) {
+            return 0;
+        }
+
+
+        RandomAccessFile stateFile = new RandomAccessFile("Lucene/State/" + db, "r");
+        String revision = stateFile.readLine();
+        stateFile.close();
+//      System.out.println("rev: " + revision);
+        return Long.parseLong(revision);
+    }
+
+    private static void createIndexDir(String db)
+    {
+        File indexDir = new File("Lucene/Index/" + db);
+        if(!indexDir.exists()) {
+            indexDir.mkdirs();
+            System.out.println("Created Index Directory");
+        }
+
+        File stateDir = new File("Lucene/State");
+        if(!stateDir.exists()) {
+            stateDir.mkdirs();
+            System.out.println("Created State Directory");
+        }
+    }
+
+    private static XMLReader getParser(SAXCouchDocumentBuilder documentBuilder) throws Exception
+    {
+        SAXParserFactory factory = SAXParserFactory.newInstance();
+        SAXParser saxParser = factory.newSAXParser();
+        XMLReader parser = saxParser.getXMLReader();
+        parser.setContentHandler(documentBuilder);
+        return parser;
+    }
+
+    private static BufferedInputStream getUrlStream(String address) throws Exception
+    {
+        URL url = new URL(address);
+        InputStream inStream = url.openStream();
+        return new BufferedInputStream(inStream);
+    }
+
+    public static com.fourspaces.couchdb.ViewResults getChangedDocumentsSinceRevision(String db, int revision) throws Exception
+    {
+        //BufferedInputStream inBuffer = getUrlStream("http://localhost:5984/" + db + "/_all_docs_by_update_seq?startkey=" + revision);
+
+        com.fourspaces.couchdb.ViewResults newDocs = s.getDatabase(db).getAllDocuments(revision);
+
+        return newDocs;
+        //return CouchDocument[]
+
+/*      CouchDocument[] returnValue = {};
+*/      //setup xml parser
+/*      SAXCouchDocumentBuilder documentBuilder = new SAXCouchDocumentBuilder();
+        XMLReader parser = getParser(documentBuilder);
+        // Repeat until end of file
+        parser.parse(new InputSource(inBuffer));
+
+
+        return documentBuilder.getDocuments();
+*/  }
+
+
+    public static CouchDocument loadDocumentData(CouchDocument document, String db) throws Exception
+    {
+        BufferedInputStream inBuffer = getUrlStream("http://localhost:5984/" + db + "/" + document.getDocId() + "?rev=" + document.getRevision());
+
+        //setup xml parser
+        SAXCouchDocumentBuilder documentBuilder = new SAXCouchDocumentBuilder();
+        XMLReader parser = getParser(documentBuilder);
+
+        // Repeat until end of file
+        parser.parse(new InputSource(inBuffer));
+
+        return documentBuilder.getDocument();
+    }
+}

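The indexer's main loop reads database names from standard input, one per
line, and reindexes each named database; this is how the CouchDB daemon pokes
it when a database changes. For testing outside the daemon, it can be driven
by hand. A minimal sketch, assuming the indexer classes plus couchdb4j and
lucene-core are on the classpath and a hypothetical database mydb exists:

    import java.io.OutputStreamWriter;
    import java.io.Writer;

    public class PokeIndexer
    {
        public static void main(String[] args) throws Exception
        {
            // start the indexer as CouchDB would and hand it one database name
            Process p = new ProcessBuilder("java", "LuceneIndexer").start();
            Writer w = new OutputStreamWriter(p.getOutputStream());
            w.write("mydb\n"); // one database name per line
            w.flush();
            w.close();         // EOF ends the indexer's read loop
            p.waitFor();
        }
    }
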
Added: incubator/couchdb/trunk/src/fulltext/lucene/LuceneSearcher.java
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/fulltext/lucene/LuceneSearcher.java?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/fulltext/lucene/LuceneSearcher.java (added)
+++ incubator/couchdb/trunk/src/fulltext/lucene/LuceneSearcher.java Fri Mar 28 16:32:19 2008
@@ -0,0 +1,90 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.  You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+LuceneSearcher searches a lucene index.
+
+It is managed by the Apache CouchDB daemon.
+
+*/
+
+//basics
+import java.io.*;
+
+//lucene
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+
+import org.apache.lucene.document.Document;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Query;
+
+/*
+protocol:
+Queries will look like this:
+
+databasename\n
+the full text query\n
+
+Then the java reader will read the lines and respond
+by outputting each document result (a client sketch of
+this exchange follows the file):
+ok\n
+docid1\n
+score1\n
+docid2\n
+score2\n
+docid3\n
+score3\n
+\n
+
+or:
+
+error\n
+error_id\n
+error message\n
+
+*/
+public class LuceneSearcher
+{
+    public static void main(String[] args) throws Exception
+    {
+
+        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+
+        String db = "";
+        String queryString = "";
+
+        while(((db = in.readLine()) != null) && ((queryString = in.readLine()) != null)) {
+
+            IndexSearcher searcher = new IndexSearcher("Lucene/Index/" + db);
+
+            Query query = new TermQuery(new Term("__couchdb_keywords", queryString));
+
+            Hits hits = searcher.search(query);
+
+            System.out.println("ok");
+            for(int i = 0; i < hits.length(); i++) {
+                Document d = hits.doc(i);
+                System.out.println(d.get("__couchdb_document_id"));
+                System.out.println(hits.score(i));
+            }
+            System.out.println();
+            searcher.close();
+        }
+    }
+}

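The protocol comment above fully specifies the searcher's stdin/stdout
contract: write the database name and the query, each on its own line, then
read until the blank line. A minimal sketch of such a client; the database
name mydb and the query term are placeholders, and the searcher classes plus
lucene-core.jar are assumed to be on the classpath:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.io.PrintWriter;

    public class SearchClient
    {
        public static void main(String[] args) throws Exception
        {
            Process p = new ProcessBuilder("java", "LuceneSearcher").start();
            PrintWriter out = new PrintWriter(p.getOutputStream(), true);
            BufferedReader in =
                new BufferedReader(new InputStreamReader(p.getInputStream()));

            out.println("mydb");   // database name
            out.println("apache"); // the full text query

            // first line is "ok" or "error"; then doc ids and scores
            // alternate until a blank line ends the result set
            String line;
            while ((line = in.readLine()) != null && line.length() > 0) {
                System.out.println(line);
            }
            p.destroy();
        }
    }
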
Added: incubator/couchdb/trunk/src/fulltext/lucene/readme.txt
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/fulltext/lucene/readme.txt?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/fulltext/lucene/readme.txt (added)
+++ incubator/couchdb/trunk/src/fulltext/lucene/readme.txt Fri Mar 28 16:32:19 2008
@@ -0,0 +1,41 @@
+This is still work in progress and has not been integrated into the build
+process. Good luck though :)
+
+This document describes how to use the LuceneIndexer with Apache CouchDB.
+
+Requirements:
+Apache CouchDB 0.6.4 or newer.
+Java Development Kit (JDK) 1.5
+Lucene 2.0.0 or newer
+couchdb4j (http://code.google.com/p/couchdb4j/)
+
+
+If you don't already have it,
+download lucene-core-2.0.0.jar from a mirror.
+A list of mirrors can be found at
+http://www.apache.org/dyn/closer.cgi/lucene/java/
+
+Add the following line to your couch.ini:
+LuceneServer=/usr/bin/java -cp "./bin/:./lib/lucene-core.jar" LuceneIndexer=...
+
+Adjust the version number and the path to java, if needed.
+If you have lucene installed already, remove the
+'-cp "./bin/:./Lucene/lucene-core-2.0.0.jar"' part.
+
+Put lucene-core.jar and couchdb4j.jar into $CouchDbDir/lib.
+
+Launch Apache CouchDB.
+
+The indexer will populate $CouchDbDir/Lucene/Index with an index for
+all documents in all databases.
+(indexes per database will be added soon).
+
+To see that the data is actually stored in there,
+use luke from http://www.getopt.org/luke/
+
+To use the actual index, you could use the PHP 5 Lucene Demo in the Zend Framework
+(http://framework.zend.com) or any other Lucene implementation in your favourite
+language.
+
+If you have any questions, please visit:
+http://couchdb.com/CouchDB/CouchDBWeb.nsf/vDissByDate

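Besides luke, a quick programmatic check of the index works too. A minimal
sketch against the Lucene 2.0 API used by the indexer above; the database
name mydb and the query term are placeholders. Note that SimpleAnalyzer
lowercases tokens at index time, and TermQuery bypasses analysis, so the
term passed here must be lowercase:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Hits;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class IndexCheck
    {
        public static void main(String[] args) throws Exception
        {
            // the indexer keeps one index per database under Lucene/Index/
            IndexSearcher searcher = new IndexSearcher("Lucene/Index/mydb");
            // every field value is also folded into __couchdb_keywords
            Query query = new TermQuery(new Term("__couchdb_keywords", "apache"));
            Hits hits = searcher.search(query);
            for (int i = 0; i < hits.length(); i++) {
                Document d = hits.doc(i);
                System.out.println(d.get("__couchdb_document_id")
                        + " " + hits.score(i));
            }
            searcher.close();
        }
    }
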
Propchange: incubator/couchdb/trunk/src/fulltext/lucene/readme.txt
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: incubator/couchdb/trunk/var/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Fri Mar 28 16:32:19 2008
@@ -0,0 +1,2 @@
+Makefile
+Makefile.in

Added: incubator/couchdb/trunk/var/Makefile.am
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/var/Makefile.am?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/var/Makefile.am (added)
+++ incubator/couchdb/trunk/var/Makefile.am Fri Mar 28 16:32:19 2008
@@ -0,0 +1,25 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License.  You may obtain a copy
+## of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+install-data-hook:
+	if test ! "$(mkdir_p)" = ""; then \
+	    $(mkdir_p) "$(DESTDIR)$(pkgstatelibdir)"; \
+	    $(mkdir_p) "$(DESTDIR)$(pkgstatelogdir)"; \
+	    $(mkdir_p) "$(DESTDIR)$(localstatedir)/run"; \
+	else \
+	    echo "WARNING: You may have to create these directories by hand."; \
+	    mkdir -p "$(DESTDIR)$(pkgstatelibdir)"; \
+	    mkdir -p "$(DESTDIR)$(pkgstatelogdir)"; \
+	    mkdir -p "$(DESTDIR)$(localstatedir)/run"; \
+	fi