You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by ja...@apache.org on 2008/08/12 17:34:12 UTC

svn commit: r685183 - in /incubator/couchdb/branches/runtimeconfig: ./ share/server/ share/www/browse/ share/www/script/ share/www/style/ src/couchdb/

Author: jan
Date: Tue Aug 12 08:34:11 2008
New Revision: 685183

URL: http://svn.apache.org/viewvc?rev=685183&view=rev
Log:
Merged revisions 681360-685172 via svnmerge from 
https://svn.apache.org/repos/asf/incubator/couchdb/trunk

........
  r681843 | cmlenz | 2008-08-01 23:30:23 +0200 (Fri, 01 Aug 2008) | 1 line
  
  Fix auto-expanding of view editor textareas for temp views.
........
  r681852 | cmlenz | 2008-08-01 23:49:18 +0200 (Fri, 01 Aug 2008) | 1 line
  
  Fix collapsing of nested objects in Futon document view in Safari.
........
  r681878 | cmlenz | 2008-08-02 00:37:50 +0200 (Sat, 02 Aug 2008) | 1 line
  
  Experimental tabbed interface in Futon document view to switch between tabular fields display and a raw JSON display.
........
  r682560 | damien | 2008-08-05 03:43:40 +0200 (Tue, 05 Aug 2008) | 1 line
  
  Added concurrent open db limit and a LRU cache for closing old databases when limit reached (configurable via MaxDbsOpen var in couch.ini). Refactored db update code in couch_db.erl into couch_db_updater.erl.
........
  r682571 | damien | 2008-08-05 05:21:33 +0200 (Tue, 05 Aug 2008) | 1 line
  
  Added new erlang source file to build system.
........
  r682780 | nslater | 2008-08-05 18:34:28 +0200 (Tue, 05 Aug 2008) | 1 line
  
  changed example sudo invocation to use -i option
........
  r682782 | nslater | 2008-08-05 18:38:29 +0200 (Tue, 05 Aug 2008) | 1 line
  
  added note to CHANGES about documentation change
........
  r682790 | damien | 2008-08-05 18:54:45 +0200 (Tue, 05 Aug 2008) | 1 line
  
  Readded file name checks for open and delete, as an attacker could use relative file names to open/delete otherwise inaccessible db files. Also changed view server shutdown when the parent db shuts down into a non-normal termination, so that the linked supervisor correctly removes the process from its indexes.
........
  r682792 | damien | 2008-08-05 19:06:43 +0200 (Tue, 05 Aug 2008) | 1 line
  
  Fixed bug where we attempted to match a linked port to the current fd, but failed because the fd structure is more than just the linked port.
........
  r682796 | nslater | 2008-08-05 19:09:39 +0200 (Tue, 05 Aug 2008) | 1 line
  
  slight rewording
........
  r682809 | damien | 2008-08-05 19:15:16 +0200 (Tue, 05 Aug 2008) | 1 line
  
  Fixed restart testing by making it far less likely a HTTP client will get a network error on a restart request.
........
  r683704 | damien | 2008-08-07 22:28:42 +0200 (Thu, 07 Aug 2008) | 1 line
  
  Fix for bug COUCHDB-100.
........
  r684023 | damien | 2008-08-08 19:46:48 +0200 (Fri, 08 Aug 2008) | 1 line
  
  COUCHDB-103 Fixed incompatibility with new javascript (1.8) that no longer supports sealing documents, making them immutable. The problem is arrays can no longer be sealed, and an error is generated when it is attempted. The fix here is to simply remove the document sealing, and map functions placed in the same design document will have to be trustworthy that they don't modify the document (the same document is passed to multiple map functions). Non-trustworthy map functions can be placed in their own design documents, and they will be run in isolation.
........
  r684092 | damien | 2008-08-08 23:06:29 +0200 (Fri, 08 Aug 2008) | 1 line
  
  Idempotent document creation support, new HTTP api to generate UUIDs and support in the couch.js library for using them. Creating uuids client side ensures that document creation happens only once, despite automatic network retries.
........
  r684762 | cmlenz | 2008-08-11 16:44:09 +0200 (Mon, 11 Aug 2008) | 1 line
  
  Updated changelog for r684023, which disabled doc sealing in the JS view server.
........
  r685171 | jan | 2008-08-12 16:48:06 +0200 (Tue, 12 Aug 2008) | 1 line
  
  HTTP COPY & MOVE for documents with tests
........

Added:
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db_updater.erl
      - copied unchanged from r685171, incubator/couchdb/trunk/src/couchdb/couch_db_updater.erl
Modified:
    incubator/couchdb/branches/runtimeconfig/   (props changed)
    incubator/couchdb/branches/runtimeconfig/CHANGES
    incubator/couchdb/branches/runtimeconfig/README
    incubator/couchdb/branches/runtimeconfig/share/server/main.js
    incubator/couchdb/branches/runtimeconfig/share/www/browse/document.html
    incubator/couchdb/branches/runtimeconfig/share/www/script/browse.js
    incubator/couchdb/branches/runtimeconfig/share/www/script/couch.js
    incubator/couchdb/branches/runtimeconfig/share/www/script/couch_tests.js
    incubator/couchdb/branches/runtimeconfig/share/www/style/layout.css
    incubator/couchdb/branches/runtimeconfig/src/couchdb/Makefile.am
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch.app.tpl.in
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.erl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.hrl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_file.erl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_httpd.erl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_rep.erl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_server.erl
    incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_view.erl

Propchange: incubator/couchdb/branches/runtimeconfig/
------------------------------------------------------------------------------
--- svnmerge-integrated (original)
+++ svnmerge-integrated Tue Aug 12 08:34:11 2008
@@ -1 +1 @@
-/incubator/couchdb/trunk:1-681359
+/incubator/couchdb/trunk:1-685172

Modified: incubator/couchdb/branches/runtimeconfig/CHANGES
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/CHANGES?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/CHANGES (original)
+++ incubator/couchdb/branches/runtimeconfig/CHANGES Tue Aug 12 08:34:11 2008
@@ -27,15 +27,18 @@
 
 Javascript View Server:
 
- * Fix for sealing of nested data structure in documents in the Javascript view
-   server.
+ * Sealing of documents has been disabled due to an incompatibility with
+   SpiderMonkey 1.9.
  * Improve error handling for undefined values emitted by map functions.
    (COUCHDB-83)
+ 
 
 Packaging and System Integration:
 
  * The `couchdb` script no longer uses `awk` for configuration checks as this
    was causing portability problems.
+ * Updated `sudo` example in the README to use the `-i` option, this fixes
+   problems when invoking from a directory the `couchdb` user cannot access.
 
 Futon Utility Client:
 

Modified: incubator/couchdb/branches/runtimeconfig/README
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/README?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/README (original)
+++ incubator/couchdb/branches/runtimeconfig/README Tue Aug 12 08:34:11 2008
@@ -157,7 +157,7 @@
 
 To start the Apache CouchDB server you should run the following command:
 
-    sudo -u couchdb couchdb
+    sudo -i -u couchdb couchdb
 
 This uses the `sudo` command to run the `couchdb` command as the `couchdb` user.
 

Modified: incubator/couchdb/branches/runtimeconfig/share/server/main.js
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/server/main.js?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/server/main.js [utf-8] (original)
+++ incubator/couchdb/branches/runtimeconfig/share/server/main.js [utf-8] Tue Aug 12 08:34:11 2008
@@ -74,7 +74,19 @@
         // ]
         //
         var doc = cmd[1];
+        /*
+        Immutable document support temporarily removed.
+        
+        Removed because the seal function no longer works on JS 1.8 arrays,
+        instead returning an error. The sealing is meant to prevent map
+        functions from modifying the same document that is passed to other map
+        functions. However, only map functions in the same design document are
+        run together, so we have a reasonable expectation they can trust each
+        other. Any map fun that can't be trusted can be placed in its own
+        design document, and it cannot affect other map functions.
+        
         recursivelySeal(doc); // seal to prevent map functions from changing doc
+        */
         var buf = [];
         for (var i = 0; i < funs.length; i++) {
           map_results = [];

Modified: incubator/couchdb/branches/runtimeconfig/share/www/browse/document.html
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/www/browse/document.html?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/www/browse/document.html [utf-8] (original)
+++ incubator/couchdb/branches/runtimeconfig/share/www/browse/document.html [utf-8] Tue Aug 12 08:34:11 2008
@@ -41,6 +41,24 @@
           .attr("href", "database.html?" + encodeURIComponent(page.db.name));
         $("h1 strong").text(page.docId);
         page.updateFieldListing();
+
+        $("#tabs li.tabular a").click(function() {
+          $("#tabs li").removeClass("active");
+          $(this).parent().addClass("active");
+          $("#fields thead th:first").text("Field").attr("colspan", 1).next().show();
+          $("#fields tbody.content").show();
+          $("#fields tbody.source").hide();
+        });
+        $("#tabs li.source a").click(function() {
+          $("#tabs li").removeClass("active");
+          $(this).parent().addClass("active");
+          $("#fields thead th:first").text("Source").attr("colspan", 2).next().hide();
+          $("#fields tbody.content").hide();
+          $("#fields tbody.source").find("td").each(function() {
+            $(this).html($("<code></code>").text(prettyPrintJSON(page.doc)));
+          }).end().show();
+        });
+
         $("#toolbar button.save").click(page.saveDocument);
         $("#toolbar button.add").click(page.addField);
         $("#toolbar button.delete").click(page.deleteDocument);
@@ -61,6 +79,10 @@
         <li><button class="delete">Delete Document</button></li>
       </ul>
 
+      <ul id="tabs">
+        <li class="active tabular"><a href="#tabular">Fields</a></li>
+        <li class="source"><a href="#source">Source</a></li>
+      </ul>
       <table id="fields" class="listing" cellspacing="0">
         <col class="field"><col class="value">
         <caption>Fields</caption>
@@ -72,6 +94,9 @@
         </thead>
         <tbody class="content">
         </tbody>
+        <tbody class="source" style="display: none">
+          <tr><td colspan="2"></td></tr>
+        </tbody>
         <tbody class="footer">
           <tr>
             <td colspan="2">

Modified: incubator/couchdb/branches/runtimeconfig/share/www/script/browse.js
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/www/script/browse.js?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/www/script/browse.js [utf-8] (original)
+++ incubator/couchdb/branches/runtimeconfig/share/www/script/browse.js [utf-8] Tue Aug 12 08:34:11 2008
@@ -451,7 +451,7 @@
       db.allDocs(options);
     } else {
       if (viewName == "_temp_view") {
-        $("#viewcode").show().addClass("expanded");
+        $("#viewcode").show().removeClass("collapsed");
         var mapFun = $("#viewcode_map").val();
         $.cookies.set(db.name + ".map", mapFun);
         var reduceFun = $("#viewcode_reduce").val() || null;
@@ -507,6 +507,9 @@
   page = this;
 
   this.addField = function() {
+    if (!$("#fields tbody.content:visible").length) {
+      $("#tabs li.tabular a").click(); // switch to tabular view
+    }
     var fieldName = "unnamed";
     var fieldIdx = 1;
     while (page.doc.hasOwnProperty(fieldName)) {
@@ -626,7 +629,8 @@
   }
 
   function _addRowForField(doc, fieldName) {
-    var row = $("<tr><th></th><td></td></tr>").find("th").append($("<b></b>").text(fieldName)).end();
+    var row = $("<tr><th></th><td></td></tr>").find("th").append($("<b></b>")
+      .text(fieldName)).end().appendTo("#fields tbody.content");
     if (fieldName == "_attachments") {
       row
         .find("td").append(_renderAttachmentList(doc[fieldName]));
@@ -638,15 +642,13 @@
         }).end()
         .find("td").append(value).dblclick(function() {
           _editValue(doc, this, $(this).prev("th").text());
-        }).end()
-        
+        }).end();
       if (fieldName != "_id" && fieldName != "_rev") {
         row.find("th, td").attr("title", "Double click to edit");
         _initKey(doc, row, fieldName);
         _initValue(value);
       }
     }
-    row.appendTo("#fields tbody.content");
     $("#fields tbody tr").removeClass("odd").filter(":odd").addClass("odd");
     return row;
   }
@@ -766,8 +768,8 @@
   }
 
   function _initValue(value) {
-    value.find("dd").filter(":has(dl)").hide().prev("dt").addClass("collapsed");
-    value.find("dd").not(":has(dl)").addClass("inline").prev().addClass("inline");
+    value.find("dd:has(dl)").hide().prev("dt").addClass("collapsed");
+    value.find("dd:not(:has(dl))").addClass("inline").prev().addClass("inline");
     value.find("dt.collapsed").click(function() {
       $(this).toggleClass("collapsed").next().toggle();
     });

Modified: incubator/couchdb/branches/runtimeconfig/share/www/script/couch.js
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/www/script/couch.js?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/www/script/couch.js [utf-8] (original)
+++ incubator/couchdb/branches/runtimeconfig/share/www/script/couch.js [utf-8] Tue Aug 12 08:34:11 2008
@@ -42,18 +42,14 @@
   this.save = function(doc, options) {
     var req;
     if (doc._id == undefined)
-      req = request("POST", this.uri + encodeOptions(options), {
-        body: JSON.stringify(doc)
-      });
-    else
-      req = request("PUT", this.uri  + encodeURIComponent(doc._id) + encodeOptions(options), {
-        body: JSON.stringify(doc)
-      });
+      doc._id = CouchDB.newUuids(1)[0];
+
+    req = request("PUT", this.uri  + encodeURIComponent(doc._id) + encodeOptions(options), {
+      body: JSON.stringify(doc)
+    });
     var result = JSON.parse(req.responseText);
     if (req.status != 201)
       throw result;
-    // set the _id and _rev members on the input object, for caller convenience.
-    doc._id = result.id;
     doc._rev = result.rev;
     return result;
   }
@@ -91,6 +87,18 @@
   }
   
   this.bulkSave = function(docs, options) {
+    // first prepoulate the UUIDs for new documents
+    var newCount = 0
+    for (var i=0; i<docs.length; i++) {
+      if (docs[i]._id == undefined)
+        newCount++;
+    }
+    var newUuids = CouchDB.newUuids(docs.length);
+    var newCount = 0
+    for (var i=0; i<docs.length; i++) {
+      if (docs[i]._id == undefined)
+        docs[i]._id = newUuids.pop();
+    }
     var req = request("POST", this.uri + "_bulk_docs" + encodeOptions(options), {
       body: JSON.stringify({"docs": docs})
     });
@@ -98,7 +106,6 @@
     if (req.status != 201)
       throw result;
     for (var i = 0; i < docs.length; i++) {
-        docs[i]._id = result.new_revs[i].id;
         docs[i]._rev = result.new_revs[i].rev;
     }
     return result;
@@ -232,3 +239,26 @@
   req.send(options.body || "");
   return req;
 }
+
+CouchDB.uuids_cache = [];
+
+CouchDB.newUuids = function(n) {
+    if (CouchDB.uuids_cache.length >= n) {
+      var uuids = CouchDB.uuids_cache.slice(CouchDB.uuids_cache.length - n);
+      if(CouchDB.uuids_cache.length - n == 0) {
+        CouchDB.uuids_cache = [];
+      } else {
+        CouchDB.uuids_cache =
+            CouchDB.uuids_cache.slice(0, CouchDB.uuids_cache.length - n);
+      }
+      return uuids;
+    } else {
+      var req = CouchDB.request("POST", "/_uuids?count=" + (100 + n));
+      var result = JSON.parse(req.responseText);
+      if (req.status != 200)
+        throw result;
+      CouchDB.uuids_cache =
+          CouchDB.uuids_cache.concat(result.uuids.slice(0, 100));
+      return result.uuids.slice(100);
+    }
+  }

Modified: incubator/couchdb/branches/runtimeconfig/share/www/script/couch_tests.js
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/www/script/couch_tests.js?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/www/script/couch_tests.js [utf-8] (original)
+++ incubator/couchdb/branches/runtimeconfig/share/www/script/couch_tests.js [utf-8] Tue Aug 12 08:34:11 2008
@@ -16,6 +16,10 @@
   basics: function(debug) {
     var db = new CouchDB("test_suite_db");
     db.deleteDb();
+
+    // bug COUCHDB-100: DELETE on non-existent DB returns 500 instead of 404
+    db.deleteDb();
+    
     db.createDb();
     if (debug) debugger;
 
@@ -111,6 +115,53 @@
     // 1 less document should now be in the results.
     T(results.total_rows == 2);
     T(db.info().doc_count == 5);
+    
+    // copy a doc
+    T(db.save({_id:"doc_to_be_copied",v:1}).ok);
+    var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied", {
+      headers: {"Destination":"doc_that_was_copied"}
+    });
+
+    T(xhr.status == 201);
+    T(db.open("doc_that_was_copied").v == 1);
+
+    // move a doc
+
+    // test error condition
+    var xhr = CouchDB.request("MOVE", "/test_suite_db/doc_to_be_copied", {
+      headers: {"Destination":"doc_that_was_moved"}
+    });
+    T(xhr.status == 400); // bad request, MOVE requires source rev.
+
+    var rev = db.open("doc_to_be_copied")._rev;
+    var xhr = CouchDB.request("MOVE", "/test_suite_db/doc_to_be_copied?rev=" + rev, {
+      headers: {"Destination":"doc_that_was_moved"}
+    });
+
+    T(xhr.status == 201);
+    T(db.open("doc_that_was_moved").v == 1);
+    T(db.open("doc_to_be_copied") == null);
+
+    // COPY with existing target
+    T(db.save({_id:"doc_to_be_copied",v:1}).ok);
+    var doc = db.save({_id:"doc_to_be_overwritten",v:1});
+    T(doc.ok);
+
+    // error condition
+    var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied", {
+	    headers: {"Destination":"doc_to_be_overwritten"}
+	});
+    T(xhr.status == 412); // conflict
+
+    var rev = db.open("doc_to_be_overwritten")._rev;
+    var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied", {
+      headers: {"Destination":"doc_to_be_overwritten?rev=" + rev}
+    });
+    T(xhr.status == 201);
+
+    var newRev = db.open("doc_to_be_overwritten")._rev;
+    T(rev != newRev);
+
   },
 
   // Do some edit conflict detection tests
@@ -190,6 +241,42 @@
     }
   },
 
+  uuids: function(debug) {
+    var db = new CouchDB("test_suite_db");
+    db.deleteDb();
+    db.createDb();
+    if (debug) debugger;
+    
+    // a single UUID without an explicit count
+    var xhr = CouchDB.request("POST", "/_uuids");
+    T(xhr.status == 200);
+    var result = JSON.parse(xhr.responseText);
+    T(result.uuids.length == 1);
+    var first = result.uuids[0];
+
+    // a single UUID with an explicit count
+    xhr = CouchDB.request("POST", "/_uuids?count=1");
+    T(xhr.status == 200);
+    result = JSON.parse(xhr.responseText);
+    T(result.uuids.length == 1);
+    var second = result.uuids[0];
+    T(first != second);
+
+    // no collisions with 1,000 UUIDs
+    xhr = CouchDB.request("POST", "/_uuids?count=1000");
+    T(xhr.status == 200);
+    result = JSON.parse(xhr.responseText);
+    T( result.uuids.length == 1000 );
+    var seen = {};
+    for(var i in result.uuids) {
+      var id = result.uuids[i];
+      T(seen[id] === undefined);
+      seen[id] = 1;
+    }
+    
+    // check our library
+  },
+  
   bulk_docs: function(debug) {
     var db = new CouchDB("test_suite_db");
     db.deleteDb();
@@ -374,8 +461,8 @@
           // This is the reduce phase, we are reducing over emitted values from
           // the map functions.
           for(var i in values) {
-            total = total + values[i]
-            sqrTotal = sqrTotal + (values[i] * values[i])
+            total = total + values[i];
+            sqrTotal = sqrTotal + (values[i] * values[i]);
           }
           count = values.length;
         }
@@ -1069,7 +1156,7 @@
 
     var doc = {integer: 1, string: "1", array: [1, 2, 3]};
     T(db.save(doc).ok);
-
+/*
     // make sure that attempting to change the document throws an error
     var results = db.query(function(doc) {
       doc.integer = 2;
@@ -1082,7 +1169,7 @@
       emit(null, doc);
     });
     T(results.total_rows == 0);
-
+*/
     // make sure that a view cannot invoke interpreter internals such as the
     // garbage collector
     var results = db.query(function(doc) {

Modified: incubator/couchdb/branches/runtimeconfig/share/www/style/layout.css
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/share/www/style/layout.css?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/share/www/style/layout.css (original)
+++ incubator/couchdb/branches/runtimeconfig/share/www/style/layout.css Tue Aug 12 08:34:11 2008
@@ -315,9 +315,22 @@
 }
 #documents tbody.content td.value { font-size: 10px; }
 
+/* Document display tabs */
+
+#tabs { float: right; list-style: none; margin: -1.4em 0 0; }
+#tabs li { display: inline; font-size: 95%; padding: 0; }
+#tabs li.active { font-weight: bold; }
+#tabs :link, #tabs :visited { background: #dadada; color: #666;
+  border: 1px solid #a7a7a7; float: left; margin: 0 0 0 .5em;
+  padding: .5em 2em .3em; position: relative; top: 1px;
+}
+#tabs .active :link, #tabs .active :visited { background: #e9e9e9;
+  border-bottom-color: #e9e9e9; color: #333;
+}
+
 /* Document fields table */
 
-#fields { table-layout: fixed; }
+#fields { clear: right; table-layout: fixed; }
 #fields col.field { width: 33%; }
 #fields tbody.content th { padding-left: 25px; padding-right: 48px; }
 #fields tbody.content th button {
@@ -400,6 +413,9 @@
 #fields tbody.content td ul.attachments li button.delete {
   background-image: url(../image/delete-mini.gif);
 }
+#fields tbody.source td code { display: block; overflow: auto;
+  white-space: pre-wrap; width: 100%;
+}
 
 /* Test suite */
 

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/Makefile.am
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/Makefile.am?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/Makefile.am (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/Makefile.am Tue Aug 12 08:34:11 2008
@@ -56,7 +56,8 @@
     couch_server_sup.erl \
     couch_stream.erl \
     couch_util.erl \
-    couch_view.erl
+    couch_view.erl \
+    couch_db_updater.erl
 
 compiled_files = \
     cjson.beam \
@@ -79,7 +80,8 @@
     couch_server_sup.beam \
     couch_stream.beam \
     couch_util.beam \
-    couch_view.beam
+    couch_view.beam \
+    couch_db_updater.beam
 
 doc_base = \
     erlang.png \

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch.app.tpl.in
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch.app.tpl.in?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch.app.tpl.in (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch.app.tpl.in Tue Aug 12 08:34:11 2008
@@ -4,6 +4,7 @@
               {modules,[couch_btree,
                         cjson,
                         couch_db,
+                        couch_db_updater,
                         couch_doc,
                         couch_query_servers,
                         couch_file,

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.erl Tue Aug 12 08:34:11 2008
@@ -13,63 +13,24 @@
 -module(couch_db).
 -behaviour(gen_server).
 
--export([open/2,create/2,create/3,get_doc_info/2,start_compact/1]).
--export([save_docs/2, save_docs/3, get_db_info/1, update_doc/3, update_docs/2, update_docs/3]).
--export([delete_doc/3,open_doc/2,open_doc/3,enum_docs_since/4,enum_docs_since/5]).
--export([enum_docs/4,enum_docs/5, open_doc_revs/4, get_missing_revs/2]).
+-export([open/2,close/1,create/2,start_compact/1,get_db_info/1]).
+-export([open_ref_counted/2,num_refs/1,monitor/1]).
+-export([save_docs/3,update_doc/3,update_docs/2,update_docs/3,delete_doc/3]).
+-export([get_doc_info/2,open_doc/2,open_doc/3,open_doc_revs/4]).
+-export([get_missing_revs/2]).
+-export([enum_docs/4,enum_docs/5,enum_docs_since/4,enum_docs_since/5]).
 -export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
 -export([increment_update_seq/1]).
--export([start_update_loop/2]).
+-export([start_link/3]).
 -export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
--export([start_copy_compact_int/2]).
 
--export([btree_by_id_split/1,
-            btree_by_id_join/2,
-            btree_by_id_reduce/2,
-            btree_by_seq_split/1,
-            btree_by_seq_join/2,
-            btree_by_seq_reduce/2]).
 
 -include("couch_db.hrl").
 
--record(db_header,
-    {write_version = 0,
-     update_seq = 0,
-     summary_stream_state = nil,
-     fulldocinfo_by_id_btree_state = nil,
-     docinfo_by_seq_btree_state = nil,
-     local_docs_btree_state = nil,
-     doc_count=0,
-     doc_del_count=0
-    }).
-
--record(db,
-    {main_pid=nil,
-    update_pid=nil,
-    compactor_pid=nil,
-    fd,
-    header = #db_header{},
-    summary_stream,
-    fulldocinfo_by_id_btree,
-    docinfo_by_seq_btree,
-    local_docs_btree,
-    update_seq,
-    doc_count,
-    doc_del_count,
-    name,
-    filepath
-    }).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--define(HEADER_SIG, <<$g, $m, $k, 0>>).
-
 start_link(DbName, Filepath, Options) ->
     catch start_link0(DbName, Filepath, Options).
         
 start_link0(DbName, Filepath, Options) ->
-     % first delete the old file previous compaction
     Fd = 
     case couch_file:open(Filepath, Options) of
     {ok, Fd0} ->
@@ -83,7 +44,7 @@
             ok = file:rename(Filepath ++ ".compact", Filepath),
             Fd0;
         {error, enoent} ->
-            throw({error, not_found})
+            throw(not_found)
         end;
     Else ->
         throw(Else)
@@ -105,33 +66,38 @@
     end,
     StartResult.
 
-%%% Interface functions %%%
 
-create(Filepath, Options) ->
-    create(Filepath, Filepath, Options).
+create(DbName, Options) ->
+    couch_server:create(DbName, Options).
+
+open(DbName, Options) ->
+    couch_server:open(DbName, Options).
 
-create(DbName, Filepath, Options) when is_list(Options) ->
-    start_link(DbName, Filepath, [create | Options]).
+close(#db{fd=Fd}) ->
+    couch_file:drop_ref(Fd).
 
-open(DbName, Filepath) ->
-    start_link(DbName, Filepath, []).
+open_ref_counted(MainPid, OpeningPid) ->
+    gen_server:call(MainPid, {open_ref_counted_instance, OpeningPid}).
 
+num_refs(MainPid) ->
+    gen_server:call(MainPid, num_refs).
 
-% Compaction still needs work. Right now readers and writers can get an error 
-% file compaction changeover. This doesn't need to be the case.
-start_compact(MainPid) ->
-    gen_server:cast(MainPid, start_compact).
+monitor(#db{main_pid=MainPid}) ->
+    erlang:monitor(process, MainPid).
 
-delete_doc(MainPid, Id, Revisions) ->
+start_compact(#db{update_pid=Pid}) ->
+    gen_server:cast(Pid, start_compact).
+
+delete_doc(Db, Id, Revisions) ->
     DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
-    {ok, [Result]} = update_docs(MainPid, DeletedDocs, []),
+    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
     {ok, Result}.
 
-open_doc(MainPid, IdOrDocInfo) ->
-    open_doc(MainPid, IdOrDocInfo, []).
+open_doc(Db, IdOrDocInfo) ->
+    open_doc(Db, IdOrDocInfo, []).
 
-open_doc(MainPid, Id, Options) ->
-    case open_doc_int(get_db(MainPid), Id, Options) of
+open_doc(Db, Id, Options) ->
+    case open_doc_int(Db, Id, Options) of
     {ok, #doc{deleted=true}=Doc} ->
         case lists:member(deleted, Options) of
         true ->
@@ -143,13 +109,13 @@
         Else
     end.
 
-open_doc_revs(MainPid, Id, Revs, Options) ->
-    [Result] = open_doc_revs_int(get_db(MainPid), [{Id, Revs}], Options),
+open_doc_revs(Db, Id, Revs, Options) ->
+    [Result] = open_doc_revs_int(Db, [{Id, Revs}], Options),
     Result.
 
-get_missing_revs(MainPid, IdRevsList) ->
+get_missing_revs(Db, IdRevsList) ->
     Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
-    FullDocInfoResults = get_full_doc_infos(MainPid, Ids),
+    FullDocInfoResults = get_full_doc_infos(Db, Ids),
     Results = lists:zipwith(
         fun({Id, Revs}, FullDocInfoResult) ->
             case FullDocInfoResult of
@@ -177,18 +143,12 @@
     [Result] = get_full_doc_infos(Db, [Id]),
     Result.
 
-
-get_full_doc_infos(MainPid, Ids) when is_pid(MainPid) ->
-    get_full_doc_infos(get_db(MainPid), Ids);
-get_full_doc_infos(#db{}=Db, Ids) ->
+get_full_doc_infos(Db, Ids) ->
     couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids).
 
-increment_update_seq(MainPid) ->
-    gen_server:call(MainPid, increment_update_seq).
-        
+increment_update_seq(#db{update_pid=UpdatePid}) ->
+    gen_server:call(UpdatePid, increment_update_seq).
         
-get_db_info(MainPid) when is_pid(MainPid) ->
-    get_db_info(get_db(MainPid));
 get_db_info(Db) ->
     #db{fd=Fd,
         compactor_pid=Compactor,
@@ -205,12 +165,12 @@
         ],
     {ok, InfoList}.
 
-update_doc(MainPid, Doc, Options) ->
-    {ok, [NewRev]} = update_docs(MainPid, [Doc], Options),
+update_doc(Db, Doc, Options) ->
+    {ok, [NewRev]} = update_docs(Db, [Doc], Options),
     {ok, NewRev}.
 
-update_docs(MainPid, Docs) ->
-    update_docs(MainPid, Docs, []).
+update_docs(Db, Docs) ->
+    update_docs(Db, Docs, []).
     
 % group_alike_docs groups the sorted documents into sublist buckets, by id.
 % ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
@@ -263,7 +223,7 @@
         end
     end.
 
-update_docs(MainPid, Docs, Options) ->
+update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options) ->
     % go ahead and generate the new revision ids for the documents.
     Docs2 = lists:map(
         fun(#doc{id=Id,revs=Revs}=Doc) ->
@@ -278,7 +238,6 @@
     NewRevs = [NewRev || #doc{revs=[NewRev|_]} <- Docs2],
     DocBuckets = group_alike_docs(Docs2),
     Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
-    Db = get_db(MainPid),
     
     % lookup the doc by id and get the most recent
     
@@ -298,13 +257,14 @@
     % flush unwritten binaries to disk.
     DocBuckets3 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets2],
 
-    case gen_server:call(MainPid, {update_docs, DocBuckets3, [new_edits | Options]}, infinity) of
+    case gen_server:call(UpdatePid, {update_docs, DocBuckets3, [new_edits | Options]}, infinity) of
     ok -> {ok, NewRevs};
     retry ->
-        Db2 = get_db(MainPid),
+        Db2 = open_ref_counted(Db#db.main_pid, self()),
         DocBuckets4 = [[doc_flush_binaries(Doc, Db2#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets3],
         % We only retry once
-        case gen_server:call(MainPid, {update_docs, DocBuckets4, [new_edits | Options]}, infinity) of
+        ok = close(Db2),
+        case gen_server:call(UpdatePid, {update_docs, DocBuckets4, [new_edits | Options]}, infinity) of
         ok -> {ok, NewRevs};
         Else -> throw(Else)
         end;
@@ -312,15 +272,11 @@
         throw(Else)
     end.
 
-save_docs(MainPid, Docs) ->
-    save_docs(MainPid, Docs, []).
-
-save_docs(MainPid, Docs, Options) ->
+save_docs(#db{update_pid=UpdatePid, fd=Fd}, Docs, Options) ->
     % flush unwritten binaries to disk.
-    Db = get_db(MainPid),
     DocBuckets = group_alike_docs(Docs),
-    DocBuckets2 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
-    ok = gen_server:call(MainPid, {update_docs, DocBuckets2, Options}, infinity).
+    DocBuckets2 = [[doc_flush_binaries(Doc, Fd) || Doc <- Bucket] || Bucket <- DocBuckets],
+    ok = gen_server:call(UpdatePid, {update_docs, DocBuckets2, Options}, infinity).
 
 
 doc_flush_binaries(Doc, Fd) ->
@@ -379,125 +335,51 @@
     Doc#doc{attachments = NewBins}.
 
 enum_docs_since_reduce_to_count(Reds) ->
-    couch_btree:final_reduce(fun btree_by_seq_reduce/2, Reds).
+    couch_btree:final_reduce(fun couch_db_updater:btree_by_seq_reduce/2, Reds).
 
 enum_docs_reduce_to_count(Reds) ->
-    couch_btree:final_reduce(fun btree_by_id_reduce/2, Reds).
+    couch_btree:final_reduce(fun couch_db_updater:btree_by_id_reduce/2, Reds).
 
-enum_docs_since(MainPid, SinceSeq, Direction, InFun, Ctx) ->
-    Db = get_db(MainPid),
+enum_docs_since(Db, SinceSeq, Direction, InFun, Ctx) ->
     couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Ctx).
 
-enum_docs_since(MainPid, SinceSeq, InFun, Acc) ->
-    enum_docs_since(MainPid, SinceSeq, fwd, InFun, Acc).
+enum_docs_since(Db, SinceSeq, InFun, Acc) ->
+    enum_docs_since(Db, SinceSeq, fwd, InFun, Acc).
 
-enum_docs(MainPid, StartId, Direction, InFun, InAcc) ->
-    Db = get_db(MainPid),
+enum_docs(Db, StartId, Direction, InFun, InAcc) ->
     couch_btree:fold(Db#db.fulldocinfo_by_id_btree, StartId, Direction, InFun, InAcc).
 
-enum_docs(MainPid, StartId, InFun, Ctx) ->
-    enum_docs(MainPid, StartId, fwd, InFun, Ctx).
+enum_docs(Db, StartId, InFun, Ctx) ->
+    enum_docs(Db, StartId, fwd, InFun, Ctx).
 
 % server functions
 
-init(InitArgs) ->
-    spawn_link(couch_db, start_update_loop, [self(), InitArgs]),
-    receive
-    {initialized, Db} ->
-        {ok, Db}
-    end.
-
-btree_by_seq_split(DocInfo) ->
-    #doc_info{
-        id = Id,
-        rev = Rev,
-        update_seq = Seq,
-        summary_pointer = Sp,
-        conflict_revs = Conflicts,
-        deleted_conflict_revs = DelConflicts,
-        deleted = Deleted} = DocInfo,
-    {Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}}.
-    
-btree_by_seq_join(Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}) ->
-    #doc_info{
-        id = Id,
-        rev = Rev,
-        update_seq = Seq,
-        summary_pointer = Sp,
-        conflict_revs = Conflicts,
-        deleted_conflict_revs = DelConflicts,
-        deleted = Deleted}.
-
-btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
-        deleted=Deleted, rev_tree=Tree}) ->
-    {Id, {Seq, case Deleted of true -> 1; false-> 0 end, Tree}}.
+init({DbName, Filepath, Fd, Options}) ->
+    {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {self(), DbName, Filepath, Fd, Options}, []),
+    ok = couch_file:add_ref(Fd),
+    gen_server:call(UpdaterPid, get_db).
 
-btree_by_id_join(Id, {Seq, Deleted, Tree}) ->
-    #full_doc_info{id=Id, update_seq=Seq, deleted=Deleted==1, rev_tree=Tree}.
-    
-
-
-btree_by_id_reduce(reduce, FullDocInfos) ->
-    % count the number of deleted documents
-    length([1 || #full_doc_info{deleted=false} <- FullDocInfos]);
-btree_by_id_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-            
-btree_by_seq_reduce(reduce, DocInfos) ->
-    % count the number of deleted documents
-    length(DocInfos);
-btree_by_seq_reduce(rereduce, Reds) ->
-    lists:sum(Reds).
-
-init_db(DbName, Filepath, Fd, Header) ->
-    {ok, SummaryStream} = couch_stream:open(Header#db_header.summary_stream_state, Fd),
-    ok = couch_stream:set_min_buffer(SummaryStream, 10000),
-    {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
-        [{split, fun btree_by_id_split/1},
-        {join, fun btree_by_id_join/2},
-        {reduce, fun btree_by_id_reduce/2}]),
-    {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
-            [{split, fun btree_by_seq_split/1},
-            {join, fun btree_by_seq_join/2},
-            {reduce, fun btree_by_seq_reduce/2}]),
-    {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd),
-
-    #db{
-        update_pid=self(),
-        fd=Fd,
-        header=Header,
-        summary_stream = SummaryStream,
-        fulldocinfo_by_id_btree = IdBtree,
-        docinfo_by_seq_btree = SeqBtree,
-        local_docs_btree = LocalDocsBtree,
-        update_seq = Header#db_header.update_seq,
-        doc_count = Header#db_header.doc_count,
-        doc_del_count = Header#db_header.doc_del_count,
-        name = DbName,
-        filepath=Filepath }.
-
-close_db(#db{fd=Fd,summary_stream=Ss}) ->
-    couch_file:close(Fd),
-    couch_stream:close(Ss).
-    
 terminate(_Reason, Db) ->
     exit(Db#db.update_pid, kill).
     
-handle_call({update_docs, DocActions, Options}, From, #db{update_pid=Updater}=Db) ->
-    Updater ! {From, update_docs, DocActions, Options},
-    {noreply, Db};
-handle_call(increment_update_seq, From, #db{update_pid=Updater}=Db) ->
-    Updater ! {From, increment_update_seq},
-    {noreply, Db};
-handle_call(get_db, _From, Db) ->
+handle_call({open_ref_counted_instance, OpenerPid}, _From, #db{fd=Fd}=Db) ->
+    ok = couch_file:add_ref(Fd, OpenerPid),
     {reply, {ok, Db}, Db};
-handle_call({db_updated, NewDb}, _From, _OldDb) ->
+handle_call(num_refs, _From, #db{fd=Fd}=Db) ->
+    {reply, couch_file:num_refs(Fd) - 1, Db};
+handle_call({db_updated, #db{fd=NewFd}=NewDb}, _From, #db{fd=OldFd}) ->
+    case NewFd == OldFd of
+    true -> ok;
+    false ->
+        couch_file:add_ref(NewFd),
+        couch_file:drop_ref(OldFd)
+    end,
     {reply, ok, NewDb}.
 
 
-handle_cast(start_compact, #db{update_pid=Updater}=Db) ->
-    Updater ! compact,
-    {noreply, Db}.
+handle_cast(Msg, Db) ->
+    ?LOG_ERROR("Bad cast message received for db ~s: ~p", [Db#db.name, Msg]),
+    exit({error, Msg}).
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
@@ -508,114 +390,6 @@
 
 
 %%% Internal function %%%
-
-start_update_loop(MainPid, {DbName, Filepath, Fd, Options}) ->
-    link(Fd),
-    
-    case lists:member(create, Options) of
-    true ->
-        % create a new header and writes it to the file
-        Header =  #db_header{},
-        ok = couch_file:write_header(Fd, ?HEADER_SIG, Header),
-        % delete any old compaction files that might be hanging around
-        file:delete(Filepath ++ ".compact"),
-        file:delete(Filepath ++ ".old");
-    false ->
-        {ok, Header} = couch_file:read_header(Fd, ?HEADER_SIG)
-    end,
-    
-    Db = init_db(DbName, Filepath, Fd, Header),
-    Db2 = Db#db{main_pid=MainPid},
-    MainPid ! {initialized, Db2},
-    update_loop(Db2).
-    
-update_loop(#db{fd=Fd,name=Name,
-            filepath=Filepath,
-            main_pid=MainPid,
-            update_seq=UpdateSeq}=Db) ->
-    receive
-    {OrigFrom, update_docs, DocActions, Options} ->
-        case (catch update_docs_int(Db, DocActions, Options)) of
-        {ok, Db2} ->
-            ok = gen_server:call(MainPid, {db_updated, Db2}),
-            gen_server:reply(OrigFrom, ok),
-            couch_db_update_notifier:notify({updated, Name}),
-            update_loop(Db2);
-        retry ->
-            gen_server:reply(OrigFrom, retry),
-            update_loop(Db);
-        conflict ->
-            gen_server:reply(OrigFrom, conflict),
-            update_loop(Db);
-        Error ->
-            exit(Error) % we crashed
-        end;
-    compact ->
-        case Db#db.compactor_pid of
-        nil ->
-            ?LOG_INFO("Starting compaction for db \"~s\"", [Name]),
-            Pid = spawn_link(couch_db, start_copy_compact_int, [Db, true]),
-            Db2 = Db#db{compactor_pid=Pid},
-            ok = gen_server:call(MainPid, {db_updated, Db2}),
-            update_loop(Db2);
-        _ ->
-            update_loop(Db) % already started
-        end;
-    {compact_done, CompactFilepath} ->
-        {ok, NewFd} = couch_file:open(CompactFilepath),
-        {ok, NewHeader} = couch_file:read_header(NewFd, ?HEADER_SIG),
-        #db{update_seq=NewSeq}= NewDb =
-                init_db(Name, CompactFilepath, NewFd, NewHeader),
-        case Db#db.update_seq == NewSeq of
-        true ->
-            NewDb2 = commit_data(
-                NewDb#db{
-                    main_pid = Db#db.main_pid,
-                    doc_count = Db#db.doc_count,
-                    doc_del_count = Db#db.doc_del_count,
-                    filepath = Filepath}),
-                
-            ?LOG_DEBUG("CouchDB swapping files ~s and ~s.", [Filepath, CompactFilepath]),
-            ok = file:rename(Filepath, Filepath ++ ".old"),
-            ok = file:rename(CompactFilepath, Filepath),
-            
-            couch_stream:close(Db#db.summary_stream),
-            % close file handle async.
-            % wait 5 secs before closing, allowing readers to finish
-            unlink(Fd),
-            spawn_link(fun() ->
-                receive after 5000 -> ok end,
-                couch_file:close(Fd),
-                file:delete(Filepath ++ ".old")
-                end),
-                
-            ok = gen_server:call(MainPid, {db_updated, NewDb2}),
-            ?LOG_INFO("Compaction for db ~p completed.", [Name]),
-            update_loop(NewDb2#db{compactor_pid=nil});
-        false ->
-            ?LOG_INFO("Compaction file still behind main file "
-                "(update seq=~p. compact update seq=~p). Retrying.",
-                [Db#db.update_seq, NewSeq]),
-            Pid = spawn_link(couch_db, start_copy_compact_int, [Db, false]),
-            Db2 = Db#db{compactor_pid=Pid},
-            couch_file:close(NewFd),
-            update_loop(Db2)
-        end;
-    {OrigFrom, increment_update_seq} ->
-        Db2 = commit_data(Db#db{update_seq=UpdateSeq+1}),
-        ok = gen_server:call(MainPid, {db_updated, Db2}),
-        gen_server:reply(OrigFrom, {ok, UpdateSeq+1}),
-        couch_db_update_notifier:notify({updated, Name}),
-        update_loop(Db2);
-    Else ->
-        ?LOG_ERROR("Unknown message received in db ~s:~p", [Db#db.name, Else]),
-        exit({error, Else})
-    end.
-
-get_db(MainPid) ->
-    {ok, Db} = gen_server:call(MainPid, get_db),
-    Db.
-
 open_doc_revs_int(Db, IdRevs, Options) ->
     Ids = [Id || {Id, _Revs} <- IdRevs],
     LookupResults = get_full_doc_infos(Db, Ids),
@@ -711,16 +485,6 @@
         end
     end.
 
-% rev tree functions
-
-doc_to_tree(Doc) ->
-    doc_to_tree(Doc, lists:reverse(Doc#doc.revs)).
-
-doc_to_tree(Doc, [RevId]) ->
-    [{RevId, Doc, []}];
-doc_to_tree(Doc, [RevId | Rest]) ->
-    [{RevId, ?REV_MISSING, doc_to_tree(Doc, Rest)}].
-
 make_doc(Db, Id, Deleted, SummaryPointer, RevisionPath) ->
     {BodyData, BinValues} =
     case SummaryPointer of
@@ -737,303 +501,6 @@
         attachments = BinValues,
         deleted = Deleted
         }.
-
-flush_trees(_Db, [], AccFlushedTrees) ->
-    {ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{fd=Fd}=Db, [InfoUnflushed | RestUnflushed], AccFlushed) ->
-        #full_doc_info{rev_tree=Unflushed} = InfoUnflushed,
-        Flushed = couch_key_tree:map(
-        fun(_Rev, Value) ->
-            case Value of
-            #doc{attachments=Atts,deleted=IsDeleted}=Doc ->
-                % this node value is actually an unwritten document summary,
-                % write to disk.
-                % make sure the Fd in the written bins is the same Fd we are.
-                Bins =
-                case Atts of
-                [] -> [];
-                [{_BName, {_Type, {BinFd, _Sp, _Len}}} | _ ] when BinFd == Fd ->
-                    % convert bins, removing the FD.
-                    % All bins should have been flushed to disk already.
-                    [{BinName, {BinType, BinSp, BinLen}}
-                        || {BinName, {BinType, {_Fd, BinSp, BinLen}}}
-                        <- Atts];
-                _ ->
-                    % BinFd must not equal our Fd. This can happen when a database
-                    % is being updated during a compaction
-                    ?LOG_DEBUG("File where the attachments are written has changed. Possibly retrying.", []),
-                    throw(retry)
-                end,
-                {ok, NewSummaryPointer} = couch_stream:write_term(Db#db.summary_stream, {Doc#doc.body, Bins}),
-                {IsDeleted, NewSummaryPointer};
-            _ ->
-                Value
-            end
-        end, Unflushed),
-    flush_trees(Db, RestUnflushed, [InfoUnflushed#full_doc_info{rev_tree=Flushed} | AccFlushed]).
-
-merge_rev_trees(_NoConflicts, [], [], AccNewInfos, AccSeq) ->
-    {ok, lists:reverse(AccNewInfos), AccSeq};
-merge_rev_trees(NoConflicts, [NewDocs|RestDocsList],
-        [OldDocInfo|RestOldInfo], AccNewInfos, AccSeq) ->
-    #full_doc_info{id=Id,rev_tree=OldTree}=OldDocInfo,
-    UpdatesRevTree = lists:foldl(
-        fun(NewDoc, AccTree) ->
-            couch_key_tree:merge(AccTree, doc_to_tree(NewDoc))
-        end,
-        [], NewDocs),
-    NewRevTree = couch_key_tree:merge(OldTree, UpdatesRevTree),
-    if NewRevTree == OldTree ->
-        % nothing changed
-        merge_rev_trees(NoConflicts, RestDocsList, RestOldInfo, AccNewInfos, AccSeq);
-    true ->
-        if NoConflicts andalso OldTree /= [] ->
-            OldConflicts = couch_key_tree:count_leafs(OldTree),
-            NewConflicts = couch_key_tree:count_leafs(NewRevTree),
-            if NewConflicts > OldConflicts ->
-                throw(conflict);
-            true -> ok
-            end;
-        true -> ok
-        end,
-        NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
-        merge_rev_trees(NoConflicts, RestDocsList,RestOldInfo, 
-                [NewInfo|AccNewInfos],AccSeq+1)
-    end.
-
-new_index_entries([], DocCount, DelCount, AccById, AccBySeq) ->
-    {ok, DocCount, DelCount, AccById, AccBySeq};
-new_index_entries([FullDocInfo|RestInfos], DocCount, DelCount, AccById, AccBySeq) ->
-    #doc_info{deleted=Deleted} = DocInfo = couch_doc:to_doc_info(FullDocInfo),
-    {DocCount2, DelCount2} =
-    if Deleted -> {DocCount, DelCount + 1};
-    true -> {DocCount + 1, DelCount} 
-    end,
-    new_index_entries(RestInfos, DocCount2, DelCount2, 
-        [FullDocInfo#full_doc_info{deleted=Deleted}|AccById],
-        [DocInfo|AccBySeq]).
-
-update_docs_int(Db, DocsList, Options) ->
-    #db{
-        fulldocinfo_by_id_btree = DocInfoByIdBTree,
-        docinfo_by_seq_btree = DocInfoBySeqBTree,
-        update_seq = LastSeq,
-        doc_count = FullDocCount,
-        doc_del_count = FullDelCount
-        } = Db,
-
-    % separate out the NonRep documents from the rest of the documents
-    {DocsList2, NonRepDocs} = lists:foldl(
-        fun([#doc{id=Id}=Doc | Rest]=Docs, {DocsListAcc, NonRepDocsAcc}) ->
-            case Id of
-            ?LOCAL_DOC_PREFIX ++ _ when Rest==[] ->
-                % when saving NR (non rep) documents, you can only save a single rev
-                {DocsListAcc, [Doc | NonRepDocsAcc]};
-            Id->
-                {[Docs | DocsListAcc], NonRepDocsAcc}
-            end
-        end, {[], []}, DocsList),
-    
-    Ids = [Id || [#doc{id=Id}|_] <- DocsList2], 
-    
-    % lookup up the existing documents, if they exist.
-    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
-    OldDocInfos = lists:zipwith(
-        fun(_Id, {ok, FullDocInfo}) ->
-            FullDocInfo;
-        (Id, not_found) ->
-            #full_doc_info{id=Id}
-        end,
-        Ids, OldDocLookups),
-    
-    {OldCount, OldDelCount} = lists:foldl(
-        fun({ok, FullDocInfo}, {OldCountAcc, OldDelCountAcc}) ->
-            case couch_doc:to_doc_info(FullDocInfo) of
-            #doc_info{deleted=false} ->
-                {OldCountAcc + 1, OldDelCountAcc};
-            _ ->
-                {OldCountAcc, OldDelCountAcc + 1}
-            end;
-        (not_found, Acc) ->
-            Acc
-        end, {0, 0}, OldDocLookups),
-    
-    % Merge the new docs into the revision trees.
-    NoConflicts = lists:member(new_edits, Options),
-    {ok, NewDocInfos, NewSeq} = merge_rev_trees(NoConflicts, DocsList2, OldDocInfos, [], LastSeq),
-    
-    RemoveSeqs =
-        [ OldSeq || {ok, #full_doc_info{update_seq=OldSeq}} <- OldDocLookups],
-    
-    % All regular documents are now ready to write.
-    
-    % Try to write the local documents first, a conflict might be generated
-    {ok, Db2}  = update_local_docs(Db, NonRepDocs),
-    
-    % Write out the documents summaries (they are stored in the nodes of the rev trees)
-    {ok, FlushedDocInfos} = flush_trees(Db2, NewDocInfos, []),
-    
-    {ok, NewDocsCount, NewDelCount, InfoById, InfoBySeq} =
-        new_index_entries(FlushedDocInfos, 0, 0, [], []),
-
-    % and the indexes to the documents
-    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, InfoBySeq, RemoveSeqs),
-    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, InfoById, []),
-
-    Db3 = Db2#db{
-        fulldocinfo_by_id_btree = DocInfoByIdBTree2,
-        docinfo_by_seq_btree = DocInfoBySeqBTree2,
-        update_seq = NewSeq,
-        doc_count = FullDocCount + NewDocsCount - OldCount,
-        doc_del_count = FullDelCount + NewDelCount - OldDelCount},
-
-    case lists:member(delay_commit, Options) of
-    true ->
-        {ok, Db3};
-    false ->
-        {ok, commit_data(Db3)}
-    end.
-
-update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
-    Ids = [Id || #doc{id=Id} <- Docs],
-    OldDocLookups = couch_btree:lookup(Btree, Ids),
-    BtreeEntries = lists:zipwith(
-        fun(#doc{id=Id,deleted=Delete,revs=Revs,body=Body}, OldDocLookup) ->
-            NewRev =
-            case Revs of
-                [] -> 0;
-                [RevStr|_] -> list_to_integer(RevStr)
-            end,
-            OldRev =
-            case OldDocLookup of
-                {ok, {_, {OldRev0, _}}} -> OldRev0;
-                not_found -> 0
-            end,
-            case OldRev + 1 == NewRev of
-            true ->
-                case Delete of
-                    false -> {update, {Id, {NewRev, Body}}};
-                    true  -> {remove, Id}
-                end;
-            false ->
-                throw(conflict)
-            end
-            
-        end, Docs, OldDocLookups),
-
-    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
-    BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
-
-    {ok, Btree2} =
-        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
-
-    {ok, Db#db{local_docs_btree = Btree2}}.
-
-
-
-commit_data(#db{fd=Fd, header=Header} = Db) ->
-    Header2 = Header#db_header{
-        update_seq = Db#db.update_seq,
-        summary_stream_state = couch_stream:get_state(Db#db.summary_stream),
-        docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
-        fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
-        local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
-        doc_count = Db#db.doc_count,
-        doc_del_count = Db#db.doc_del_count
-        },
-    if Header == Header2 ->
-        Db; % unchanged. nothing to do
-    true ->
-        ok = couch_file:write_header(Fd, ?HEADER_SIG, Header2),
-        Db#db{header = Header2}
-    end.
-
-copy_raw_doc(SrcFd, SrcSp, DestFd, DestStream) ->
-    {ok, {BodyData, BinInfos}} = couch_stream:read_term(SrcFd, SrcSp),
-    % copy the bin values
-    NewBinInfos = lists:map(fun({Name, {Type, BinSp, Len}}) ->
-        {ok, NewBinSp} = couch_stream:copy_to_new_stream(SrcFd, BinSp, Len, DestFd),
-        {Name, {Type, NewBinSp, Len}}
-        end, BinInfos),
-    % now write the document summary
-    {ok, Sp} = couch_stream:write_term(DestStream, {BodyData, NewBinInfos}),
-    Sp.
-
-copy_rev_tree(_SrcFd, _DestFd, _DestStream, []) ->
-    [];
-copy_rev_tree(SrcFd, DestFd, DestStream, [{RevId, {IsDel, Sp}, []} | RestTree]) ->
-    % This is a leaf node, copy it over
-    NewSp = copy_raw_doc(SrcFd, Sp, DestFd, DestStream),
-    [{RevId, {IsDel, NewSp}, []} | copy_rev_tree(SrcFd, DestFd, DestStream, RestTree)];
-copy_rev_tree(SrcFd, DestFd, DestStream, [{RevId, _, SubTree} | RestTree]) ->
-    % inner node, only copy info/data from leaf nodes
-    [{RevId, ?REV_MISSING, copy_rev_tree(SrcFd, DestFd, DestStream, SubTree)} | copy_rev_tree(SrcFd, DestFd, DestStream, RestTree)].
-    
-copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd,summary_stream=DestStream}=NewDb, InfoBySeq) ->
-    Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
-    LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
-    NewFullDocInfos = lists:map(
-        fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
-            Info#full_doc_info{rev_tree=copy_rev_tree(SrcFd, DestFd, DestStream, RevTree)}
-        end, LookupResults),
-    NewDocInfos = [couch_doc:to_doc_info(FullDocInfo) || FullDocInfo <- NewFullDocInfos],
-    {ok, DocInfoBTree} =
-        couch_btree:add_remove(NewDb#db.docinfo_by_seq_btree, NewDocInfos, []),
-    {ok, FullDocInfoBTree} =
-        couch_btree:add_remove(NewDb#db.fulldocinfo_by_id_btree, NewFullDocInfos, []),
-    NewDb#db{fulldocinfo_by_id_btree=FullDocInfoBTree, docinfo_by_seq_btree=DocInfoBTree}.
-
-
-          
-copy_compact_docs(Db, NewDb) ->
-    EnumBySeqFun =
-    fun(#doc_info{update_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied}) ->
-        case couch_util:should_flush() of
-        true ->
-            NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied])),
-            {ok, {commit_data(NewDb2#db{update_seq=Seq}), []}};
-        false ->    
-            {ok, {AccNewDb, [DocInfo | AccUncopied]}}
-        end
-    end,
-    {ok, {NewDb2, Uncopied}} =
-        couch_btree:foldl(Db#db.docinfo_by_seq_btree, NewDb#db.update_seq + 1, EnumBySeqFun, {NewDb, []}),
-
-    case Uncopied of
-    [#doc_info{update_seq=LastSeq} | _] ->
-        commit_data( copy_docs(Db, NewDb2#db{update_seq=LastSeq},
-            lists:reverse(Uncopied)));
-    [] ->
-        NewDb2
-    end.
-
-start_copy_compact_int(#db{name=Name,filepath=Filepath}=Db, CopyLocal) ->
-    CompactFile = Filepath ++ ".compact",
-    ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
-    case couch_file:open(CompactFile) of
-    {ok, Fd} ->
-        ?LOG_DEBUG("Found existing compaction file for db \"~s\"", [Name]),
-        {ok, Header} = couch_file:read_header(Fd, ?HEADER_SIG);
-    {error, enoent} -> %
-        {ok, Fd} = couch_file:open(CompactFile, [create]),
-        Header =  #db_header{},
-        ok = couch_file:write_header(Fd, ?HEADER_SIG, Header)
-    end,
-    NewDb = init_db(Name, CompactFile, Fd, Header),
-    NewDb2 = copy_compact_docs(Db, NewDb),
-    NewDb3 =
-    case CopyLocal of
-    true ->
-        % suck up all the local docs into memory and write them to the new db
-        {ok, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
-                fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
-        {ok, NewLocalBtree} = couch_btree:add(NewDb2#db.local_docs_btree, LocalDocs),
-        commit_data(NewDb2#db{local_docs_btree=NewLocalBtree});
-    _ ->
-        NewDb2
-    end,
-    close_db(NewDb3),
-    Db#db.update_pid ! {compact_done, CompactFile}.
     
     
     
\ No newline at end of file

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.hrl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.hrl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_db.hrl Tue Aug 12 08:34:11 2008
@@ -69,3 +69,40 @@
     % couch_db:open_doc(Db, Id, Options).
     meta = []
     }).
+    
+    
+
+
+
+-record(db_header,
+    {write_version = 0,
+     update_seq = 0,
+     summary_stream_state = nil,
+     fulldocinfo_by_id_btree_state = nil,
+     docinfo_by_seq_btree_state = nil,
+     local_docs_btree_state = nil,
+     doc_count=0,
+     doc_del_count=0
+    }).
+
+-record(db,
+    {main_pid=nil,
+    update_pid=nil,
+    compactor_pid=nil,
+    fd,
+    header = #db_header{},
+    summary_stream,
+    fulldocinfo_by_id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    update_seq,
+    doc_count,
+    doc_del_count,
+    name,
+    filepath
+    }).
+    
+    
+
+% small value used in revision trees to indicate the revision isn't stored
+-define(REV_MISSING, []).

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_file.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_file.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_file.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_file.erl Tue Aug 12 08:34:11 2008
@@ -20,6 +20,7 @@
 -export([open/1, open/2, close/1, pread/3, pwrite/3, expand/2, bytes/1, sync/1]).
 -export([append_term/2, pread_term/2,write_header/3, read_header/2, truncate/2]).
 -export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+-export([close_maybe/1,drop_ref/1,drop_ref/2,add_ref/1,add_ref/2,num_refs/1]).
 
 %%----------------------------------------------------------------------
 %% Args:   Valid Options are [create] and [create,overwrite].
@@ -164,7 +165,25 @@
 %%----------------------------------------------------------------------
 close(Fd) ->
     gen_server:cast(Fd, close).
+    
+close_maybe(Fd) ->
+    gen_server:cast(Fd, {close_maybe, self()}).
+
+drop_ref(Fd) ->
+    drop_ref(Fd, self()).
+    
+drop_ref(Fd, Pid) ->
+    gen_server:cast(Fd, {drop_ref, Pid}).
+
+
+add_ref(Fd) ->
+    add_ref(Fd, self()).
 
+add_ref(Fd, Pid) ->
+    gen_server:call(Fd, {add_ref, Pid}).
+
+num_refs(Fd) ->
+    gen_server:call(Fd, num_refs).
 
 write_header(Fd, Prefix, Data) ->
     TermBin = term_to_binary(Data),
@@ -267,7 +286,7 @@
 
 init_status_error(ReturnPid, Error) ->
     ReturnPid ! {self(), Error}, % signal back error status
-    self() ! self_close, % tell ourself to close async
+    gen_server:cast(self(), close), % tell ourself to close async
     {ok, nil}.
 
 % server functions
@@ -342,16 +361,57 @@
     {ok, <<TermLen:32>>}
         = file:pread(Fd, Pos, 4),
     {ok, Bin} = file:pread(Fd, Pos + 4, TermLen),
-    {reply, {ok, Bin}, Fd}.
+    {reply, {ok, Bin}, Fd};
+handle_call({add_ref, Pid},_From, Fd) ->
+    undefined = put(Pid, erlang:monitor(process, Pid)),
+    {reply, ok, Fd};
+handle_call(num_refs, _From, Fd) ->
+    {monitors, Monitors} =  process_info(self(), monitors),
+    {reply, length(Monitors), Fd}.
+
 
 
 handle_cast(close, Fd) ->
-    {stop,normal,Fd}. % causes terminate to be called
+    {stop,normal,Fd};
+handle_cast({close_maybe, Pid}, Fd) ->
+    catch unlink(Pid),
+    maybe_close_async(Fd);
+handle_cast({drop_ref, Pid}, Fd) ->
+    % don't check return of demonitor. The process could have crashed, causing
+    % the {'DOWN', ...} message to be sent and the process unmonitored.
+    erlang:demonitor(erase(Pid), [flush]),
+    maybe_close_async(Fd).
+
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
-handle_info(self_close, State) ->
-    {stop,normal,State};
-handle_info(_Info, State) ->
-    {noreply, State}.
+handle_info({'DOWN', MonitorRef, _Type, Pid, _Info}, Fd) ->
+    MonitorRef = erase(Pid),
+    maybe_close_async(Fd);
+handle_info(Info, Fd) ->
+    exit({error, {Info, Fd}}).
+
+
+
+should_close(Fd) ->
+    case process_info(self(), links) of
+    {links, [_]} ->
+        % no linkers left (except our fd port). What about monitors?
+        case process_info(self(), monitors) of
+        {monitors, []} ->
+            true;
+        _ ->
+            false
+        end;
+    {links,  [_|_]} ->
+        false
+    end.
+
+maybe_close_async(Fd) ->
+    case should_close(Fd) of
+    true ->
+        {stop,normal,Fd};
+    false ->
+        {noreply,Fd}
+    end.

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_httpd.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_httpd.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_httpd.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_httpd.erl Tue Aug 12 08:34:11 2008
@@ -72,15 +72,21 @@
     % alias HEAD to GET as mochiweb takes care of stripping the body
     Method = case Req:get(method) of
         'HEAD' -> 'GET';
-        Other -> Other
+        Other -> 
+          % handling of non-standard HTTP verbs. Should be fixed in gen_tcp:recv()
+          case Other of
+            "COPY" -> 'COPY';
+            "MOVE" -> 'MOVE';
+            StandardMethod -> StandardMethod
+          end
     end,
 
     % for the path, use the raw path with the query string and fragment
     % removed, but URL quoting left intact
     {Path, _, _} = mochiweb_util:urlsplit_path(Req:get(raw_path)),
 
-    ?LOG_DEBUG("~s ~s ~p~nHeaders: ~p", [
-        atom_to_list(Req:get(method)),
+    ?LOG_DEBUG("~p ~s ~p~nHeaders: ~p", [
+        Method,
         Path,
         Req:get(version),
         mochiweb_headers:to_list(Req:get(headers))
@@ -93,9 +99,10 @@
             send_error(Req, Error)
     end,
 
-    ?LOG_INFO("~s - - ~p ~B", [
+    ?LOG_INFO("~s - - ~p ~s ~B", [
         Req:get(peer),
-        atom_to_list(Req:get(method)) ++ " " ++ Path,
+        Method,
+        Path,
         Resp:get(code)
     ]).
     
@@ -115,6 +122,8 @@
             handle_replicate_request(Req, Method);
         "/_restart" ->
             handle_restart_request(Req, Method);
+        "/_uuids" ->
+            handle_uuids_request(Req, Method);
         "/_utils" ->
             {ok, Req:respond({301, [
                 {"Location", "/_utils/"}
@@ -161,12 +170,24 @@
     throw({method_not_allowed, "POST"}).
 
 handle_restart_request(Req, 'POST') ->
-    couch_server:remote_restart(),
-    send_json(Req, {obj, [{ok, true}]});
+    Response = send_json(Req, {obj, [{ok, true}]}),
+    spawn(fun() -> couch_server:remote_restart() end),
+    Response;
 
 handle_restart_request(_Req, _Method) ->
     throw({method_not_allowed, "POST"}).
 
+handle_uuids_request(Req, 'POST') ->
+    Count = list_to_integer(proplists:get_value("count", Req:parse_qs(), "1")),
+    % generate the uuids
+    UUIDs = [ couch_util:new_uuid() || _ <- lists:seq(1,Count)],
+    % send a JSON response
+    send_json(Req, {obj, [{"uuids", list_to_tuple(UUIDs)}]});
+
+handle_uuids_request(_Req, _Method) ->
+    throw({method_not_allowed, "POST"}).
+
+
 % Database request handlers
 
 handle_db_request(Req, Method, {Path}) ->
@@ -176,7 +197,8 @@
 
 handle_db_request(Req, 'PUT', {DbName, []}) ->
     case couch_server:create(DbName, []) of
-        {ok, _Db} ->
+        {ok, Db} ->
+            couch_db:close(Db),
             send_json(Req, 201, {obj, [{ok, true}]});
         {error, database_already_exists} ->
             Msg = io_lib:format("Database ~p already exists.", [DbName]),
@@ -186,20 +208,28 @@
             throw({unknown_error, Msg})
     end;
 
+handle_db_request(Req, 'DELETE', {DbName, []}) ->
+    case couch_server:delete(DbName) of
+    ok ->
+        send_json(Req, 200, {obj, [
+            {ok, true}
+        ]});
+    Error ->
+        throw(Error)
+    end;
+    
 handle_db_request(Req, Method, {DbName, Rest}) ->
-    case couch_server:open(DbName) of
+    case couch_db:open(DbName, []) of
         {ok, Db} ->
-            handle_db_request(Req, Method, {DbName, Db, Rest});
+            try 
+                handle_db_request(Req, Method, {DbName, Db, Rest})
+            after
+                couch_db:close(Db)
+            end;
         Error ->
             throw(Error)
     end;
 
-handle_db_request(Req, 'DELETE', {DbName, _Db, []}) ->
-    ok = couch_server:delete(DbName),
-    send_json(Req, 200, {obj, [
-        {ok, true}
-    ]});
-
 handle_db_request(Req, 'GET', {DbName, Db, []}) ->
     {ok, DbInfo} = couch_db:get_db_info(Db),
     send_json(Req, {obj, [{db_name, DbName} | DbInfo]});
@@ -542,24 +572,7 @@
     } = parse_doc_query(Req),
     case Revs of
     [] ->
-        case Rev of
-        "" -> % open most recent rev
-            case couch_db:open_doc(Db, DocId, Options) of
-                {ok, #doc{revs=[DocRev|_]}=Doc} ->
-                    true;
-                Error ->
-                    Doc = DocRev = undefined,
-                    throw(Error)
-            end;
-        _ -> % open a specific rev (deletions come back as stubs)
-            case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
-                {ok, [{ok, Doc}]} ->
-                    DocRev = Rev;
-                {ok, [Else]} ->
-                    Doc = DocRev = undefined,
-                    throw(Else)
-            end
-        end,
+        {Doc, DocRev} = couch_doc_open(Db, DocId, Rev, Options),
         Etag = none_match(Req, DocRev),
         AdditionalHeaders = case Doc#doc.meta of
             [] -> [{"Etag", Etag}]; % output etag when we have no meta
@@ -622,8 +635,94 @@
         {rev, NewRev}
     ]});
 
+handle_doc_request(Req, 'COPY', _DbName, Db, SourceDocId) ->
+  SourceRev = case extract_header_rev(Req) of
+    missing_rev -> [];
+    Rev -> Rev
+  end,
+  
+  {TargetDocId, TargetRev} = parse_copy_destination_header(Req),
+  
+  % open revision Rev or Current
+  {Doc, _DocRev} = couch_doc_open(Db, SourceDocId, SourceRev, []),
+
+  % save new doc
+  {ok, NewTargetRev} = couch_db:update_doc(Db, Doc#doc{id=TargetDocId, revs=TargetRev}, []),
+
+  send_json(Req, 201, [{"Etag", "\"" ++ NewTargetRev ++ "\""}], {obj, [
+      {ok, true},
+      {id, TargetDocId},
+      {rev, NewTargetRev}
+  ]});
+
+handle_doc_request(Req, 'MOVE', _DbName, Db, SourceDocId) ->
+  SourceRev = case extract_header_rev(Req) of
+    missing_rev -> 
+      throw({
+        bad_request, 
+        "MOVE requires a specified rev parameter for the origin resource."}
+      );
+    Rev -> Rev
+  end,
+  
+  {TargetDocId, TargetRev} = parse_copy_destination_header(Req),
+
+  % open revision Rev or Current
+  {Doc, _DocRev} = couch_doc_open(Db, SourceDocId, SourceRev, []),
+
+  % save new doc & delete old doc in one operation
+  Docs = [
+    Doc#doc{id=TargetDocId, revs=TargetRev},
+    #doc{id=SourceDocId, revs=[SourceRev], deleted=true}
+  ],
+
+  {ok, ResultRevs} = couch_db:update_docs(Db, Docs, []),
+
+  DocResults = lists:zipwith(
+      fun(FDoc, NewRev) ->
+          {obj, [{"id", FDoc#doc.id}, {"rev", NewRev}]}
+      end,
+      Docs, ResultRevs),
+  send_json(Req, 201, {obj, [
+      {ok, true},
+      {new_revs, list_to_tuple(DocResults)}
+  ]});
+
 handle_doc_request(_Req, _Method, _DbName, _Db, _DocId) ->
-    throw({method_not_allowed, "DELETE,GET,HEAD,PUT"}).
+    throw({method_not_allowed, "DELETE,GET,HEAD,PUT,COPY,MOVE"}).
+
+% Useful for debugging
+% couch_doc_open(Db, DocId) ->
+%   couch_doc_open(Db, DocId, [], []).
+  
+couch_doc_open(Db, DocId, Rev, Options) ->
+  case Rev of
+  "" -> % open most recent rev
+      case couch_db:open_doc(Db, DocId, Options) of
+          {ok, #doc{revs=[DocRev|_]}=Doc} ->
+              {Doc, DocRev};
+          Error ->
+              throw(Error)
+      end;
+  _ -> % open a specific rev (deletions come back as stubs)
+      case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+          {ok, [{ok, Doc}]} ->
+              {Doc, Rev};
+          {ok, [Else]} ->
+              throw(Else)
+      end
+  end.
+
+parse_copy_destination_header(Req) ->
+  Destination = Req:get_header_value("Destination"),
+  case regexp:match(Destination, "\\?") of
+    nomatch -> 
+      {Destination, []};
+    {match, _, _} ->
+      {ok, [DocId, RevQueryOptions]} = regexp:split(Destination, "\\?"),
+      {ok, [_RevQueryKey, Rev]} = regexp:split(RevQueryOptions, "="),
+      {DocId, [Rev]}
+  end.
 
 % Attachment request handlers
 

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_rep.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_rep.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_rep.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_rep.erl Tue Aug 12 08:34:11 2008
@@ -43,7 +43,18 @@
 
 replicate(Source, Target, Options) ->
     {ok, DbSrc} = open_db(Source),
-    {ok, DbTgt} = open_db(Target),
+    try
+        {ok, DbTgt} = open_db(Target),
+        try
+            replicate2(Source, DbSrc, Target, DbTgt, Options)
+        after
+            close_db(DbTgt)
+        end        
+    after
+        close_db(DbSrc)
+    end.
+    
+replicate2(Source, DbSrc, Target, DbTgt, Options) ->
     {ok, HostName} = inet:gethostname(),
 
     RepRecKey = ?LOCAL_DOC_PREFIX ++ HostName ++ ":" ++ Source ++ ":" ++ Target,
@@ -237,7 +248,12 @@
         {ok, "http" ++ DbName ++ "/"}
     end;
 open_db(DbName)->
-    couch_server:open(DbName).
+    couch_db:open(DbName, []).
+
+close_db("http" ++ _)->
+    ok;
+close_db(DbName)->
+    couch_db:close(DbName).
 
 
 enum_docs_since(DbUrl, StartSeq, InFun, InAcc) when is_list(DbUrl) ->

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_server.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_server.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_server.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_server.erl Tue Aug 12 08:34:11 2008
@@ -15,7 +15,7 @@
 -behaviour(application).
 
 -export([start/0,start/1,start/2,stop/0,stop/1,restart/0]).
--export([open/1,create/2,delete/1,all_databases/0,get_version/0]).
+-export([open/2,create/2,delete/1,all_databases/0,get_version/0]).
 -export([init/1, handle_call/3,sup_start_link/0]).
 -export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
 -export([dev_start/0,remote_restart/0]).
@@ -25,7 +25,9 @@
 -record(server,{
     root_dir = [],
     dbname_regexp,
-    options=[]
+    remote_restart=[],
+    max_dbs_open=100,
+    current_dbs_open=0
     }).
 
 start() ->
@@ -75,33 +77,41 @@
 
     gen_server:start_link({local, couch_server}, couch_server, {RootDir, Options}, []).
 
-open(Filename) ->
-    gen_server:call(couch_server, {open, Filename}).
+open(DbName, Options) ->
+    gen_server:call(couch_server, {open, DbName, Options}).
 
-create(Filename, Options) ->
-    gen_server:call(couch_server, {create, Filename, Options}).
+create(DbName, Options) ->
+    gen_server:call(couch_server, {create, DbName, Options}).
 
-delete(Filename) ->
-    gen_server:call(couch_server, {delete, Filename}).
+delete(DbName) ->
+    gen_server:call(couch_server, {delete, DbName}).
 
 remote_restart() ->
     gen_server:call(couch_server, remote_restart).
 
-init({RootDir, Options}) ->
-    {ok, RegExp} = regexp:parse("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
-    {ok, #server{root_dir=RootDir, dbname_regexp=RegExp, options=Options}}.
-
-check_filename(#server{dbname_regexp=RegExp}, Filename) ->
-    case regexp:match(Filename, RegExp) of
+check_dbname(#server{dbname_regexp=RegExp}, DbName) ->
+    case regexp:match(DbName, RegExp) of
     nomatch ->
         {error, illegal_database_name};
     _Match ->
         ok
     end.
 
-get_full_filename(Server, Filename) ->
-    filename:join([Server#server.root_dir, "./" ++ Filename ++ ".couch"]).
+get_full_filename(Server, DbName) ->
+    filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
 
+init({RootDir, Options}) ->
+    {ok, RegExp} = regexp:parse("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
+    ets:new(couch_dbs_by_name, [set, private, named_table]),
+    ets:new(couch_dbs_by_pid, [set, private, named_table]),
+    ets:new(couch_dbs_by_lru, [ordered_set, private, named_table]),
+    process_flag(trap_exit, true),
+    MaxDbsOpen = proplists:get_value(max_dbs_open, Options),
+    RemoteRestart = proplists:get_value(remote_restart, Options),
+    {ok, #server{root_dir=RootDir,
+                dbname_regexp=RegExp,
+                max_dbs_open=MaxDbsOpen,
+                remote_restart=RemoteRestart}}.
 
 terminate(_Reason, _Server) ->
     ok.
@@ -120,107 +130,153 @@
     {ok, Filenames}.
 
 
+maybe_close_lru_db(#server{current_dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
+        when NumOpen < MaxOpen ->
+    {ok, Server};
+maybe_close_lru_db(#server{current_dbs_open=NumOpen}=Server) ->
+    % must free up the lru db.
+    case try_close_lru(now()) of
+    ok -> {ok, Server#server{current_dbs_open=NumOpen-1}};
+    Error -> Error
+    end.
+
+try_close_lru(StartTime) ->
+    LruTime = ets:first(couch_dbs_by_lru),
+    if LruTime > StartTime ->
+        % this means we've looped through all our opened dbs and found them
+        % all in use.
+        {error, all_dbs_active};
+    true ->
+        [{_, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
+        [{_, {MainPid, LruTime}}] = ets:lookup(couch_dbs_by_name, DbName),
+        case couch_db:num_refs(MainPid) of
+        0 ->
+            exit(MainPid, kill),
+            receive {'EXIT', MainPid, _Reason} -> ok end,
+            true = ets:delete(couch_dbs_by_lru, LruTime),
+            true = ets:delete(couch_dbs_by_name, DbName),
+            true = ets:delete(couch_dbs_by_pid, MainPid),
+            ok;
+        _NumRefs ->
+            % this still has referrers. Go ahead and give it a current lru time
+            % and try the next one in the table.
+            NewLruTime = now(),
+            true = ets:insert(couch_dbs_by_name, {DbName, {MainPid, NewLruTime}}),
+            true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
+            true = ets:delete(couch_dbs_by_lru, LruTime),
+            true = ets:insert(couch_dbs_by_lru, {NewLruTime, DbName}),
+            try_close_lru(StartTime)
+        end
+    end.
+
+handle_call(get_server, _From, Server) ->
+    {reply, Server, Server};
+
 handle_call(get_root, _From, #server{root_dir=Root}=Server) ->
     {reply, {ok, Root}, Server};
-handle_call({open, Filename}, From, Server) ->
-    case check_filename(Server, Filename) of
-    {error, Error} ->
-        {reply, {error, Error}, Server};
+handle_call({open, DbName, Options}, {FromPid,_}, Server) ->
+    case check_dbname(Server, DbName) of
     ok ->
-        Filepath = get_full_filename(Server, Filename),
-        Result = supervisor:start_child(couch_server_sup,
-            {Filename,
-                {couch_db, open, [Filename, Filepath]},
-                transient ,
-                infinity,
-                supervisor,
-                [couch_db]}),
-        case Result of
-        {ok, Db} ->
-            {reply, {ok, Db}, Server};
-        {error, already_present} ->
-            ok = supervisor:delete_child(couch_server_sup, Filename),
-            % call self recursively
-            handle_call({open, Filename}, From, Server);
-        {error, {already_started, Db}} ->
-            {reply, {ok, Db}, Server};
-        {error, {not_found, _}} ->
-            {reply, not_found, Server};
-        {error, {Error, _}} ->
-            {reply, {error, Error}, Server}
-        end
+        Filepath = get_full_filename(Server, DbName),
+        LruTime = now(),
+        case ets:lookup(couch_dbs_by_name, DbName) of
+        [] ->    
+            case maybe_close_lru_db(Server) of
+            {ok, Server2} ->
+                case couch_db:start_link(DbName, Filepath, Options) of
+                {ok, MainPid} ->
+                    true = ets:insert(couch_dbs_by_name, {DbName, {MainPid, LruTime}}),
+                    true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
+                    true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+                    DbsOpen = Server2#server.current_dbs_open + 1,
+                    {reply,
+                        couch_db:open_ref_counted(MainPid, FromPid),
+                        Server2#server{current_dbs_open=DbsOpen}};
+                Error ->
+                    {reply, Error, Server2}
+                end;
+            CloseError ->
+                {reply, CloseError, Server}
+            end;
+        [{_, {MainPid, PrevLruTime}}] ->
+            true = ets:insert(couch_dbs_by_name, {DbName, {MainPid, LruTime}}),
+            true = ets:delete(couch_dbs_by_lru, PrevLruTime),
+            true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+            {reply, couch_db:open_ref_counted(MainPid, FromPid), Server}
+        end;
+    Error ->
+        {reply, Error, Server}
     end;
-handle_call({create, Filename, Options}, _From, Server) ->
-    case check_filename(Server, Filename) of
-    {error, Error} ->
-        {reply, {error, Error}, Server};
+handle_call({create, DbName, Options}, {FromPid,_}, Server) ->
+    case check_dbname(Server, DbName) of
     ok ->
-        Filepath = get_full_filename(Server, Filename),
-        ChildSpec = {Filename,
-                {couch_db, create, [Filename, Filepath, Options]},
-                transient,
-                infinity,
-                supervisor,
-                [couch_db]},
-        Result =
-        case supervisor:delete_child(couch_server_sup, Filename) of
-        ok ->
-            sup_start_child(couch_server_sup, ChildSpec);
-        {error, not_found} ->
-            sup_start_child(couch_server_sup, ChildSpec);
-        {error, running} ->
-            % a server process for this database already started. Maybe kill it
-            case lists:member(overwrite, Options) of
-            true ->
-                supervisor:terminate_child(couch_server_sup, Filename),
-                ok = supervisor:delete_child(couch_server_sup, Filename),
-                sup_start_child(couch_server_sup, ChildSpec);
-            false ->
-                {error, database_already_exists}
-            end
-        end,
-        case Result of
-        {ok, _Db} -> couch_db_update_notifier:notify({created, Filename});
-        _ -> ok
-        end,
-        {reply, Result, Server}
+        Filepath = get_full_filename(Server, DbName),
+
+        case ets:lookup(couch_dbs_by_name, DbName) of
+        [] ->
+            case couch_db:start_link(DbName, Filepath, [create|Options]) of
+            {ok, MainPid} ->
+                LruTime = now(),
+                true = ets:insert(couch_dbs_by_name, {DbName, {MainPid, LruTime}}),
+                true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
+                true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+                DbsOpen = Server#server.current_dbs_open + 1,
+                {reply,
+                    couch_db:open_ref_counted(MainPid, FromPid), 
+                    Server#server{current_dbs_open=DbsOpen}};
+            Error ->
+                {reply, Error, Server}
+            end;
+        [_AlreadyRunningDb] ->
+            {reply, {error, file_exists}, Server}
+        end;
+    Error ->
+        {reply, Error, Server}
     end;
-handle_call({delete, Filename}, _From, Server) ->
-    FullFilepath = get_full_filename(Server, Filename),
-    supervisor:terminate_child(couch_server_sup, Filename),
-    supervisor:delete_child(couch_server_sup, Filename),
-    case file:delete(FullFilepath) of
+handle_call({delete, DbName}, _From, Server) ->
+    case check_dbname(Server, DbName) of
     ok ->
-        couch_db_update_notifier:notify({deleted, Filename}),
-        {reply, ok, Server};
-    {error, enoent} ->
-        {reply, not_found, Server};
-    Else ->
-        {reply, Else, Server}
+        FullFilepath = get_full_filename(Server, DbName),
+        Server2 = 
+        case ets:lookup(couch_dbs_by_name, DbName) of
+        [] -> Server;
+        [{_, {Pid, LruTime}}] ->
+            exit(Pid, kill),
+            receive {'EXIT', Pid, _Reason} -> ok end,
+            true = ets:delete(couch_dbs_by_name, DbName),
+            true = ets:delete(couch_dbs_by_pid, Pid),
+            true = ets:delete(couch_dbs_by_lru, LruTime),
+            DbsOpen = Server#server.current_dbs_open - 1,
+            Server#server{current_dbs_open=DbsOpen}
+        end,
+        case file:delete(FullFilepath) of
+        ok ->
+            couch_db_update_notifier:notify({deleted, DbName}),
+            {reply, ok, Server2};
+        {error, enoent} ->
+            {reply, not_found, Server2};
+        Else ->
+            {reply, Else, Server2}
+        end;
+    Error ->
+        {reply, Error, Server}
     end;
-handle_call(remote_restart, _From, #server{options=Options}=Server) ->
-    case proplists:get_value(remote_restart, Options) of
-    true ->
-        exit(self(), restart);
-    _ ->
-        ok
-    end,
+handle_call(remote_restart, _From, #server{remote_restart=false}=Server) ->
+    {reply, ok, Server};
+handle_call(remote_restart, _From, #server{remote_restart=true}=Server) ->
+    exit(couch_server_sup, restart),
     {reply, ok, Server}.
 
-% this function is just to strip out the child spec error stuff if hit
-sup_start_child(couch_server_sup, ChildSpec) ->
-    case supervisor:start_child(couch_server_sup, ChildSpec) of
-    {error, {Error, _ChildInfo}} ->
-        {error, Error};
-    Else ->
-        Else
-    end.
-
-handle_cast(_Msg, State) ->
-    {noreply,State}.
+handle_cast(Msg, _Server) ->
+    exit({unknown_cast_message, Msg}).
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
-handle_info(_Info, State) ->
-    {noreply, State}.
+handle_info({'EXIT', Pid, _Reason}, Server) ->
+    [{Pid, DbName}] = ets:lookup(couch_dbs_by_pid, Pid),
+    true = ets:delete(couch_dbs_by_pid, Pid),
+    true = ets:delete(couch_dbs_by_name, DbName),
+    {noreply, Server};
+handle_info(Info, _Server) ->
+    exit({unknown_message, Info}).

Modified: incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_view.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_view.erl?rev=685183&r1=685182&r2=685183&view=diff
==============================================================================
--- incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_view.erl (original)
+++ incubator/couchdb/branches/runtimeconfig/src/couchdb/couch_view.erl Tue Aug 12 08:34:11 2008
@@ -60,22 +60,22 @@
     Pid.
     
 get_updated_group(Pid) ->
-	Mref = erlang:monitor(process, Pid),
+    Mref = erlang:monitor(process, Pid),
     receive
-	{'DOWN', Mref, _, _, Reason} ->
-	    throw(Reason)
+    {'DOWN', Mref, _, _, Reason} ->
+        throw(Reason)
     after 0 ->
-	    Pid ! {self(), get_updated},
-	    receive
-    	{Pid, Response} ->
-    	    erlang:demonitor(Mref),
-    	    receive
-        		{'DOWN', Mref, _, _, _} -> ok
-        	    after 0 -> ok
-    	    end,
-    	    Response;
-    	{'DOWN', Mref, _, _, Reason} ->
-    	    throw(Reason)
+        Pid ! {self(), get_updated},
+        receive
+        {Pid, Response} ->
+            erlang:demonitor(Mref),
+            receive
+                {'DOWN', Mref, _, _, _} -> ok
+                after 0 -> ok
+            end,
+            Response;
+        {'DOWN', Mref, _, _, Reason} ->
+            throw(Reason)
         end
     end.
 
@@ -222,10 +222,7 @@
     terminate(Reason).
 
 terminate(_Reason) ->
-    catch ets:delete(couch_views_by_name),
-    catch ets:delete(couch_views_by_updater),
-    catch ets:delete(couch_views_by_db),
-    catch ets:delete(couch_views_temp_fd_by_db).
+    ok.
 
 
 handle_call({start_temp_updater, DbName, Lang, MapSrc, RedSrc}, _From, #server{root_dir=Root}=Server) ->
@@ -323,7 +320,7 @@
 
 start_temp_update_loop(DbName, Fd, Lang, MapSrc, RedSrc) ->
     NotifyPids = get_notify_pids(1000),
-    case couch_server:open(DbName) of
+    case couch_db:open(DbName, []) of
     {ok, Db} ->
         View = #view{map_names=["_temp"],
             id_num=0,
@@ -337,16 +334,20 @@
             def_lang=Lang,
             id_btree=nil},
         Group2 = init_group(Db, Fd, Group,nil),
-        temp_update_loop(Group2, NotifyPids);
+        couch_db:monitor(Db),
+        couch_db:close(Db),
+        temp_update_loop(DbName, Group2, NotifyPids);
     Else ->
         exit(Else)
     end.
 
-temp_update_loop(Group, NotifyPids) ->
-    {ok, Group2} = update_group(Group),
+temp_update_loop(DbName, Group, NotifyPids) ->
+    {ok, Db} = couch_db:open(DbName, []),
+    {ok, Group2} = update_group(Group#group{db=Db}),
+    couch_db:close(Db),
     [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
     garbage_collect(),
-    temp_update_loop(Group2, get_notify_pids(10000)).
+    temp_update_loop(DbName, Group2, get_notify_pids(10000)).
 
 
 reset_group(#group{views=Views}=Group) ->
@@ -361,21 +362,21 @@
     
 start_update_loop(RootDir, DbName, GroupId, NotifyPids) ->
     {Db, Group} =
-    case (catch couch_server:open(DbName)) of
+    case (catch couch_db:open(DbName, [])) of
     {ok, Db0} ->
         case (catch couch_db:open_doc(Db0, GroupId)) of
         {ok, Doc} ->
             {Db0, design_doc_to_view_group(Doc)};
- 		Else ->
- 		    delete_index_file(RootDir, DbName, GroupId),
- 		    exit(Else)
- 		end;
- 	Else ->
- 	    delete_index_file(RootDir, DbName, GroupId),
- 	    exit(Else)
- 	end,
- 	FileName = RootDir ++ "/." ++ DbName ++ GroupId ++".view",
- 	Group2 =
+        Else ->
+            delete_index_file(RootDir, DbName, GroupId),
+            exit(Else)
+        end;
+    Else ->
+        delete_index_file(RootDir, DbName, GroupId),
+        exit(Else)
+    end,
+    FileName = RootDir ++ "/." ++ DbName ++ GroupId ++".view",
+    Group2 =
     case couch_file:open(FileName) of
     {ok, Fd} ->
         Sig = Group#group.sig,
@@ -392,7 +393,8 @@
         Error    -> throw(Error)
         end
     end,
-    
+    couch_db:monitor(Db),
+    couch_db:close(Db),
     update_loop(RootDir, DbName, GroupId, Group2, NotifyPids).
 
 reset_file(Db, Fd, DbName, #group{sig=Sig,name=Name} = Group) ->
@@ -402,14 +404,22 @@
     init_group(Db, Fd, reset_group(Group), nil).
 
 update_loop(RootDir, DbName, GroupId, #group{sig=Sig,fd=Fd}=Group, NotifyPids) ->
-    try update_group(Group) of
-    {ok, Group2} ->    
+    {ok, Db}= couch_db:open(DbName, []),
+    Result =
+    try
+        update_group(Group#group{db=Db})
+    catch
+        throw: restart -> restart
+    after
+        couch_db:close(Db)
+    end,
+    case Result of
+    {ok, Group2} ->
         HeaderData = {Sig, get_index_header_data(Group2)},
         ok = couch_file:write_header(Fd, <<$r, $c, $k, 0>>, HeaderData),
         [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
         garbage_collect(),
-        update_loop(RootDir, DbName, GroupId, Group2, get_notify_pids(100000))
-    catch
+        update_loop(RootDir, DbName, GroupId, Group2, get_notify_pids(100000));
     restart ->
         couch_file:close(Group#group.fd),
         start_update_loop(RootDir, DbName, GroupId, NotifyPids ++ get_notify_pids())
@@ -420,20 +430,23 @@
     receive
     {Pid, get_updated} ->
         [Pid | get_notify_pids()];
+    {'DOWN', _MonitorRef, _Type, _Pid, _Info} ->
+        ?LOG_DEBUG("View monitor received parent db shutdown notification. Shutting down instance.", []),
+        exit(db_shutdown);
     Else ->
         ?LOG_ERROR("Unexpected message in view updater: ~p", [Else]),
         exit({error, Else})
     after Wait ->
         exit(wait_timeout)
-	end.
+    end.
 % then keep getting all available and return.
 get_notify_pids() ->
     receive
     {Pid, get_updated} ->
         [Pid | get_notify_pids()]
-	after 0 ->
-	    []
-	end.
+    after 0 ->
+        []
+    end.
     
 update_group(#group{db=Db,current_seq=CurrentSeq, views=Views}=Group) ->
     ViewEmptyKVs = [{View, []} || View <- Views],