Posted to commits@couchdb.apache.org by ns...@apache.org on 2009/07/06 02:33:54 UTC

svn commit: r791350 [2/5] - in /couchdb/trunk: etc/couchdb/ share/server/ share/www/ share/www/dialog/ share/www/script/ share/www/script/test/ share/www/style/ src/couchdb/ src/ibrowse/ src/mochiweb/ test/ test/etap/

Modified: couchdb/trunk/share/www/script/test/replication.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/replication.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/replication.js (original)
+++ couchdb/trunk/share/www/script/test/replication.js Mon Jul  6 00:33:50 2009
@@ -35,7 +35,7 @@
     dbA.createDb();
     dbB.deleteDb();
     dbB.createDb();
-    
+
     var repTests = {
       // copy and paste and put your code in. delete unused steps.
       test_template: new function () {
@@ -49,20 +49,20 @@
           // called after replicating src=B  tgt=A first time.
         };
         this.afterAB2 = function(dbA, dbB) {
-          // called after replicating src=A  tgt=B second time. 
+          // called after replicating src=A  tgt=B second time.
         };
         this.afterBA2 = function(dbA, dbB) {
           // etc...
         };
       },
-      
+
       simple_test: new function () {
         this.init = function(dbA, dbB) {
           var docs = makeDocs(0, numDocs);
           dbA.bulkSave(docs);
         };
-      
-        this.afterAB1 = function(dbA, dbB) {          
+
+        this.afterAB1 = function(dbA, dbB) {
           for (var j = 0; j < numDocs; j++) {
             var docA = dbA.open("" + j);
             var docB = dbB.open("" + j);
@@ -70,13 +70,13 @@
           }
         };
       },
-    
+
      deletes_test: new function () {
         // make sure deletes are replicated
         this.init = function(dbA, dbB) {
           T(dbA.save({_id:"foo1",value:"a"}).ok);
         };
-        
+
         this.afterAB1 = function(dbA, dbB) {
           var docA = dbA.open("foo1");
           var docB = dbB.open("foo1");
@@ -84,13 +84,13 @@
 
           dbA.deleteDoc(docA);
         };
-        
+
         this.afterAB2 = function(dbA, dbB) {
           T(dbA.open("foo1") == null);
           T(dbB.open("foo1") == null);
         };
       },
-      
+
       deleted_test : new function() {
         // docs created and deleted on a single node are also replicated
         this.init = function(dbA, dbB) {
@@ -98,7 +98,7 @@
           var docA = dbA.open("del1");
           dbA.deleteDoc(docA);
         };
-        
+
         this.afterAB1 = function(dbA, dbB) {
           var rows = dbB.allDocsBySeq().rows;
           var rowCnt = 0;
@@ -111,13 +111,13 @@
           T(rowCnt == 1);
         };
       },
-      
+
       slashes_in_ids_test: new function () {
         // make sure docs with slashes in id replicate properly
         this.init = function(dbA, dbB) {
           dbA.save({ _id:"abc/def", val:"one" });
         };
-        
+
         this.afterAB1 = function(dbA, dbB) {
           var docA = dbA.open("abc/def");
           var docB = dbB.open("abc/def");
@@ -137,7 +137,7 @@
           T(docA._rev == docB._rev);
         };
       },
-    
+
       attachments_test: new function () {
         // Test attachments
         this.init = function(dbA, dbB) {
@@ -161,34 +161,34 @@
             }
           });
         };
-        
+
         this.afterAB1 = function(dbA, dbB) {
-          var xhr = CouchDB.request("GET", 
+          var xhr = CouchDB.request("GET",
             "/test_suite_db_a/bin_doc/foo%2Bbar.txt");
           T(xhr.responseText == "This is a base64 encoded text")
 
-          xhr = CouchDB.request("GET", 
+          xhr = CouchDB.request("GET",
             "/test_suite_db_b/bin_doc/foo%2Bbar.txt");
           T(xhr.responseText == "This is a base64 encoded text")
 
           // and the design-doc
-          xhr = CouchDB.request("GET", 
+          xhr = CouchDB.request("GET",
             "/test_suite_db_a/_design/with_bin/foo%2Bbar.txt");
           T(xhr.responseText == "This is a base64 encoded text")
 
-          xhr = CouchDB.request("GET", 
+          xhr = CouchDB.request("GET",
             "/test_suite_db_b/_design/with_bin/foo%2Bbar.txt");
           T(xhr.responseText == "This is a base64 encoded text")
         };
       },
-      
+
       conflicts_test: new function () {
         // test conflicts
         this.init = function(dbA, dbB) {
           dbA.save({_id:"foo",value:"a"});
           dbB.save({_id:"foo",value:"b"});
         };
-        
+
         this.afterBA1 = function(dbA, dbB) {
           var docA = dbA.open("foo", {conflicts: true});
           var docB = dbB.open("foo", {conflicts: true});
@@ -202,7 +202,7 @@
           // delete a conflict.
           dbA.deleteDoc({_id:"foo", _rev:docA._conflicts[0]});
         };
-        
+
         this.afterBA2 = function(dbA, dbB) {
           // open documents and include the conflict meta data
           var docA = dbA.open("foo", {conflicts: true});
@@ -223,7 +223,7 @@
     }
 
     var result = CouchDB.replicate(A, B);
-    
+
     var seqA = result.source_last_seq;
     T(0 == result.history[0].start_last_seq);
     T(result.history[1] === undefined)
@@ -233,7 +233,7 @@
     }
 
     result = CouchDB.replicate(B, A);
-    
+
     var seqB = result.source_last_seq;
     T(0 == result.history[0].start_last_seq);
     T(result.history[1] === undefined)
@@ -243,14 +243,14 @@
     }
 
     var result2 = CouchDB.replicate(A, B);
-    
+
     // each successful replication produces a new session id
     T(result2.session_id != result.session_id);
-    
+
     T(seqA < result2.source_last_seq);
     T(seqA == result2.history[0].start_last_seq);
     T(result2.history[1].end_last_seq == seqA)
-    
+
     seqA = result2.source_last_seq;
 
     for(test in repTests) {
@@ -258,17 +258,17 @@
     }
 
     result = CouchDB.replicate(B, A)
-    
+
     T(seqB < result.source_last_seq);
     T(seqB == result.history[0].start_last_seq);
     T(result.history[1].end_last_seq == seqB)
-    
+
     seqB = result.source_last_seq;
 
     for(test in repTests) {
       if(repTests[test].afterBA2) repTests[test].afterBA2(dbA, dbB);
     }
-    
+
    // do a replication where nothing has changed
     result2 = CouchDB.replicate(B, A);
     T(result2.no_changes == true);
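
The repTests harness above drives each entry's hooks in order: init, then
afterAB1/afterBA1 after the first replication in each direction, then
afterAB2/afterBA2 after the second. A new case follows the test_template;
for example (an illustrative sketch, not part of this commit; the names
update_test and upd1 are hypothetical):

    update_test: new function () {
      this.init = function(dbA, dbB) {
        // seed a doc on A before the first A->B replication
        T(dbA.save({_id:"upd1", value:1}).ok);
      };
      this.afterAB1 = function(dbA, dbB) {
        // the doc reached B; edit it on A so the second pass has a change
        T(dbB.open("upd1").value == 1);
        var docA = dbA.open("upd1");
        docA.value = 2;
        T(dbA.save(docA).ok);
      };
      this.afterAB2 = function(dbA, dbB) {
        // the new revision replicated on the second A->B run
        T(dbB.open("upd1").value == 2);
      };
    },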

Modified: couchdb/trunk/share/www/script/test/rev_stemming.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/rev_stemming.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/rev_stemming.js (original)
+++ couchdb/trunk/share/www/script/test/rev_stemming.js Mon Jul  6 00:33:50 2009
@@ -18,11 +18,11 @@
   dbB.deleteDb();
   dbB.createDb();
   if (debug) debugger;
-  
+
   var newLimit = 5;
-  
+
   T(db.getDbProperty("_revs_limit") == 1000);
-  
+
   var doc = {_id:"foo",foo:0}
   for( var i=0; i < newLimit + 1; i++) {
     doc.foo++;
@@ -30,30 +30,30 @@
   }
   var doc0 = db.open("foo", {revs:true});
   T(doc0._revisions.ids.length == newLimit + 1);
-  
+
   var docBar = {_id:"bar",foo:0}
   for( var i=0; i < newLimit + 1; i++) {
     docBar.foo++;
     T(db.save(docBar).ok);
   }
   T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
-  
+
   T(db.setDbProperty("_revs_limit", newLimit).ok);
-  
+
   for( var i=0; i < newLimit + 1; i++) {
     doc.foo++;
     T(db.save(doc).ok);
   }
   doc0 = db.open("foo", {revs:true});
   T(doc0._revisions.ids.length == newLimit);
-  
-  
+
+
   // If you replicate after you make more edits than the limit, you'll
   // cause a spurious edit conflict.
   CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
   var docB1 = dbB.open("foo",{conflicts:true})
   T(docB1._conflicts == null);
-  
+
   for( var i=0; i < newLimit - 1; i++) {
     doc.foo++;
     T(db.save(doc).ok);
@@ -69,30 +69,30 @@
     doc.foo++;
     T(db.save(doc).ok);
   }
-  
+
   CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
-  
+
   var docB2 = dbB.open("foo",{conflicts:true});
-  
+
   // we have a conflict, but the previous replicated rev is always the losing
   // conflict
   T(docB2._conflicts[0] == docB1._rev)
-  
+
  // We had already updated bar before setting the limit, so it still has
  // a long rev history. Compact to stem the revs.
-  
+
   T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
-  
+
   T(db.compact().ok);
-  
+
   // compaction isn't instantaneous, loop until done
   while (db.info().compact_running) {};
-  
+
   // force reload because ETags don't honour compaction
   var req = db.request("GET", "/test_suite_db_a/bar?revs=true", {
     headers:{"if-none-match":"pommes"}
   });
-  
+
   var finalDoc = JSON.parse(req.responseText);
   TEquals(newLimit, finalDoc._revisions.ids.length,
     "should return a truncated revision list");

Modified: couchdb/trunk/share/www/script/test/security_validation.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/security_validation.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/security_validation.js (original)
+++ couchdb/trunk/share/www/script/test/security_validation.js Mon Jul  6 00:33:50 2009
@@ -16,7 +16,7 @@
  // specifically for this testing. It is a WWW-Authenticate scheme named
   // X-Couch-Test-Auth, and the user names and passwords are hard coded
   // on the server-side.
-  // 
+  //
  // We could have used Basic authentication, but the XMLHttpRequest
  // implementations for Firefox and Safari, and probably other browsers,
  // are broken (Firefox always prompts the user on 401 failures, Safari gives
@@ -45,7 +45,7 @@
      {section:"httpd",
       key: "WWW-Authenticate",
       value:  "X-Couch-Test-Auth"}],
-  
+
     function () {
      // try saving a document using the wrong credentials
       var wrongPasswordDb = new CouchDB("test_suite_db",
@@ -60,8 +60,8 @@
         T(wrongPasswordDb.last_req.status == 401);
       }
 
-      // test force_login=true. 
-      var resp = wrongPasswordDb.request("GET", "/_whoami?force_login=true");    
+      // test force_login=true.
+      var resp = wrongPasswordDb.request("GET", "/_whoami?force_login=true");
       var err = JSON.parse(resp.responseText);
       T(err.error == "unauthorized");
       T(resp.status == 401);
@@ -110,7 +110,7 @@
       T(user.name == "Damien Katz");
       // test that the roles are listed properly
       TEquals(user.roles, []);
-      
+
 
       // update the document
       var doc = userDb.open("testdoc");
@@ -126,7 +126,7 @@
         T(userDb.last_req.status == 403);
       }
 
-      // Now attempt to update the document as a different user, Jan 
+      // Now attempt to update the document as a different user, Jan
       var user2Db = new CouchDB("test_suite_db",
         {"WWW-Authenticate": "X-Couch-Test-Auth Jan Lehnardt:apple"}
       );
@@ -161,7 +161,7 @@
       }
 
       // Now delete document
-      T(user2Db.deleteDoc(doc).ok);      
+      T(user2Db.deleteDoc(doc).ok);
 
       // now test bulk docs
       var docs = [{_id:"bahbah",author:"Damien Katz",foo:"bar"},{_id:"fahfah",foo:"baz"}];
@@ -173,11 +173,11 @@
       T(results[0].error == undefined)
       T(results[1].rev === undefined)
       T(results[1].error == "forbidden")
-      
+
       T(db.open("bahbah"));
       T(db.open("fahfah") == null);
-      
-      
+
+
       // now all or nothing with a failure
       var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
 
@@ -188,23 +188,23 @@
       T(results.errors[0].error == "forbidden");
       T(db.open("booboo") == null);
       T(db.open("foofoo") == null);
-      
-      
+
+
       // Now test replication
       var AuthHeaders = {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"};
       var host = CouchDB.host;
       var dbPairs = [
         {source:"test_suite_db_a",
           target:"test_suite_db_b"},
-    
+
         {source:"test_suite_db_a",
           target:{url: "http://" + host + "/test_suite_db_b",
                   headers: AuthHeaders}},
-        
+
         {source:{url:"http://" + host + "/test_suite_db_a",
                  headers: AuthHeaders},
           target:"test_suite_db_b"},
-        
+
         {source:{url:"http://" + host + "/test_suite_db_a",
                  headers: AuthHeaders},
          target:{url:"http://" + host + "/test_suite_db_b",
@@ -225,7 +225,7 @@
         adminDbA.createDb();
         adminDbB.deleteDb();
         adminDbB.createDb();
-  
+
        // save and replicate documents that will and will not pass our design
         // doc validation function.
         dbA.save({_id:"foo1",value:"a",author:"Noah Slater"});
@@ -239,44 +239,44 @@
         T(dbB.open("foo1"));
         T(dbA.open("foo2"));
         T(dbB.open("foo2"));
-  
+
         // save the design doc to dbA
         delete designDoc._rev; // clear rev from previous saves
         adminDbA.save(designDoc);
 
        // no effect on already saved docs
         T(dbA.open("bad1"));
-  
+
         // Update some docs on dbB. Since the design hasn't replicated, anything
         // is allowed.
-  
+
         // this edit will fail validation on replication to dbA (no author)
         T(dbB.save({_id:"bad2",value:"a"}).ok);
-  
+
         // this edit will fail security on replication to dbA (wrong author
         //  replicating the change)
         var foo1 = dbB.open("foo1");
         foo1.value = "b";
         dbB.save(foo1);
-  
+
         // this is a legal edit
         var foo2 = dbB.open("foo2");
         foo2.value = "b";
         dbB.save(foo2);
-  
+
         var results = CouchDB.replicate(B, A, {headers:AuthHeaders});
-  
+
         T(results.ok);
-  
+
         T(results.history[0].docs_written == 1);
         T(results.history[0].doc_write_failures == 2);
-  
+
         // bad2 should not be on dbA
         T(dbA.open("bad2") == null);
-  
+
         // The edit to foo1 should not have replicated.
         T(dbA.open("foo1").value == "a");
-  
+
         // The edit to foo2 should have replicated.
         T(dbA.open("foo2").value == "b");
       }
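
The design doc driving these checks carries a validate_doc_update
function. The two write failures counted above correspond to a missing
author field and an edit replicated by someone other than the author; a
validator along these lines would produce them (an illustrative sketch,
not the commit's exact designDoc):

    validate_doc_update: stringFun(function(newDoc, oldDoc, userCtx) {
      if (!newDoc.author) {
        // rejects docs like bad2 above
        throw {forbidden: "Documents must have an author field"};
      }
      if (oldDoc && oldDoc.author != userCtx.name) {
        // rejects the foo1 edit replicated by the wrong user
        throw {unauthorized: "Only the author may update this document"};
      }
    })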

Modified: couchdb/trunk/share/www/script/test/show_documents.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/show_documents.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/show_documents.js (original)
+++ couchdb/trunk/share/www/script/test/show_documents.js Mon Jul  6 00:33:50 2009
@@ -16,12 +16,12 @@
   db.deleteDb();
   db.createDb();
   if (debug) debugger;
-      
+
   var designDoc = {
     _id:"_design/template",
     language: "javascript",
     shows: {
-      "hello" : stringFun(function(doc, req) { 
+      "hello" : stringFun(function(doc, req) {
         if (doc) {
           return "Hello World";
         } else {
@@ -77,7 +77,7 @@
         if (req.headers["Accept"].match(/image/)) {
           return {
             // a 16x16 px version of the CouchDB logo
-            "base64" : 
+            "base64" :
 ["iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAsV",
 "BMVEUAAAD////////////////////////5ur3rEBn////////////////wDBL/",
 "AADuBAe9EB3IEBz/7+//X1/qBQn2AgP/f3/ilpzsDxfpChDtDhXeCA76AQH/v7",
@@ -129,7 +129,7 @@
     }
   };
   T(db.save(designDoc).ok);
-  
+
   var doc = {"word":"plankton", "name":"Rusty"}
   var resp = db.save(doc);
   T(resp.ok);
@@ -139,7 +139,7 @@
   var xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/");
   T(xhr.status == 404, 'Should be missing');
   T(JSON.parse(xhr.responseText).reason == "Invalid path.");
-  
+
   // hello template world
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/"+docid);
   T(xhr.responseText == "Hello World");
@@ -151,7 +151,7 @@
   // // error stacktraces
   // xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/render-error/"+docid);
   // T(JSON.parse(xhr.responseText).error == "render_error");
- 
+
   // hello template world (no docid)
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello");
   T(xhr.responseText == "Empty World");
@@ -159,21 +159,21 @@
   // // hello template world (non-existing docid)
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/nonExistingDoc");
   T(xhr.responseText == "New World");
-  
+
   // show with doc
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid);
   T(xhr.responseText == "Just Rusty");
-  
+
   // show with missing doc
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/missingdoc");
 
   T(xhr.status == 404, 'Doc should be missing');
   T(xhr.responseText == "No such doc");
-  
+
   // show with missing func
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/missing/"+docid);
   T(xhr.status == 404, "function is missing");
-  
+
   // missing design doc
   xhr = CouchDB.request("GET", "/test_suite_db/_design/missingddoc/_show/just-name/"+docid);
   T(xhr.status == 404);
@@ -200,7 +200,7 @@
   T("Accept" == xhr.getResponseHeader("Vary"));
 
   // accept header switching
-  // different mime has different etag  
+  // different mime has different etag
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/accept-switch/"+docid, {
     headers: {"Accept": "text/html;text/plain;*/*"}
   });
@@ -227,7 +227,7 @@
     headers: {"if-none-match": etag}
   });
   // should be 304
-  T(xhr.status == 304);    
+  T(xhr.status == 304);
 
   // update the doc
   doc.name = "Crusty";
@@ -237,7 +237,7 @@
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
     headers: {"if-none-match": etag}
   });
-  // status is 200    
+  // status is 200
   T(xhr.status == 200);
 
   // get new etag and request again
@@ -251,7 +251,7 @@
   // update design doc (but not function)
   designDoc.isChanged = true;
   T(db.save(designDoc).ok);
-  
+
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
     headers: {"if-none-match": etag}
   });
@@ -269,7 +269,7 @@
   xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
     headers: {"if-none-match": etag}
   });
-  // status is 200    
+  // status is 200
   T(xhr.status == 200);
 
 
@@ -287,7 +287,7 @@
   });
   var ct = xhr.getResponseHeader("Content-Type");
   T(/charset=utf-8/.test(ct))
-  T(/text\/html/.test(ct))  
+  T(/text\/html/.test(ct))
   T(xhr.responseText == "Ha ha, you said \"plankton\".");
 
   // now with xml
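
As the base64 branch above suggests, a show function may return a
response object instead of a bare string; that is how headers and binary
bodies are produced. A minimal illustrative entry for the shows map (the
name "plain" is hypothetical):

    "plain" : stringFun(function(doc, req) {
      return {
        headers: {"Content-Type": "text/plain"},
        body: doc ? "doc " + doc._id : "no doc"
      };
    })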

Modified: couchdb/trunk/share/www/script/test/stats.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/stats.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/stats.js (original)
+++ couchdb/trunk/share/www/script/test/stats.js Mon Jul  6 00:33:50 2009
@@ -76,7 +76,7 @@
         })
     },
  };
-  
+
   var request_count_tests = {
    'should increase the request count for every request': function(name) {
      var requests = requestStatsTest("httpd", "requests").current + 1;
@@ -88,7 +88,7 @@
      TEquals(requests + 1, new_requests, name);
    }
  };
- 
+
  var database_read_count_tests = {
    'should increase database reads counter when a document is read': function(name) {
      var db = new CouchDB("test_suite_db");
@@ -186,7 +186,7 @@
      TEquals(reads + 1 , new_reads, name);
    }
  };
- 
+
  var http_requests_by_method_tests = {
    'should count GET requests': function(name) {
      var requests = requestStatsTest("httpd_request_methods", "GET").current;
@@ -199,7 +199,7 @@
      CouchDB.request("POST", "/");
      var new_requests = requestStatsTest("httpd_request_methods", "GET").current;
 
-     TEquals(requests + 1, new_requests, name);        
+     TEquals(requests + 1, new_requests, name);
    },
    'should count POST requests': function(name) {
      var requests = requestStatsTest("httpd_request_methods", "POST").current;
@@ -229,7 +229,7 @@
 
      var doc = {"_id":"test"};
      db.save(doc);
-     
+
      var updates = requestStatsTest("couchdb", "database_writes").current;
      db.save(doc);
      var new_updates = requestStatsTest("couchdb", "database_writes").current;
@@ -243,7 +243,7 @@
 
      var doc = {"_id":"test"};
      db.save(doc);
-     
+
      var deletes = requestStatsTest("couchdb", "database_writes").current;
      db.deleteDoc(doc);
      var new_deletes = requestStatsTest("couchdb", "database_writes").current;
@@ -275,7 +275,7 @@
 
      var docs = makeDocs(5);
      db.bulkSave(docs);
-     
+
      var new_bulks = requestStatsTest("httpd", "bulk_requests").current;
 
      TEquals(bulks + 1, new_bulks, name);
@@ -378,7 +378,7 @@
      var options = {};
      options.headers = {"Accept": "application/json"};
      var summary = JSON.parse(CouchDB.request("GET", "/_stats", options).responseText);
-     var aggregates = ["mean", "min", "max", "stddev", 
+     var aggregates = ["mean", "min", "max", "stddev",
        "current"];
 
      for(var i in aggregates) {
@@ -386,12 +386,12 @@
      }
    }
  };
- 
+
    var tests = [
      open_databases_tests,
-     request_count_tests, 
-     database_read_count_tests, 
-     view_read_count_tests, 
+     request_count_tests,
+     database_read_count_tests,
+     view_read_count_tests,
      http_requests_by_method_tests,
      document_write_count_tests,
      response_codes_tests,
@@ -404,7 +404,7 @@
        tests[testGroup][test](test);
      }
    };
- 
+
    function createAndRequestView(db) {
      var designDoc = {
        _id:"_design/test", // turn off couch.js id escaping?
@@ -414,7 +414,7 @@
        }
      };
      db.save(designDoc);
- 
+
      db.view("test/all_docs_twice");
    }
 
@@ -422,4 +422,3 @@
      return CouchDB.requestStats(module, key, true);
    }
 }
- 
\ No newline at end of file
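
Every test group above shares one shape: sample a counter, perform the
operation, sample it again, and assert on the delta. Distilled into a
sketch (assumes test_suite_db exists; the doc id "stats_demo" is
hypothetical):

    var db = new CouchDB("test_suite_db");
    var writes = requestStatsTest("couchdb", "database_writes").current;
    db.save({_id:"stats_demo"});
    var new_writes = requestStatsTest("couchdb", "database_writes").current;
    TEquals(writes + 1, new_writes, "one save is one database write");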

Modified: couchdb/trunk/share/www/script/test/uuids.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/uuids.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/uuids.js (original)
+++ couchdb/trunk/share/www/script/test/uuids.js Mon Jul  6 00:33:50 2009
@@ -14,20 +14,20 @@
   var testHashBustingHeaders = function(xhr) {
     T(xhr.getResponseHeader("Cache-Control").match(/no-cache/));
     T(xhr.getResponseHeader("Pragma") == "no-cache");
-    
+
     var currentTime = new Date();
     var expiresHeader = Date.parse(xhr.getResponseHeader("Expires"));
-    var dateHeader = Date.parse(xhr.getResponseHeader("Date")); 
-    
+    var dateHeader = Date.parse(xhr.getResponseHeader("Date"));
+
     T(expiresHeader < currentTime);
     T(currentTime - dateHeader < 3000);
   };
-    
+
   var db = new CouchDB("test_suite_db");
   db.deleteDb();
   db.createDb();
   if (debug) debugger;
-  
+
   // a single UUID without an explicit count
   var xhr = CouchDB.request("GET", "/_uuids");
   T(xhr.status == 200);
@@ -55,7 +55,7 @@
     T(seen[id] === undefined);
     seen[id] = 1;
   }
-  
+
   // ensure we return a 405 on POST
   xhr = CouchDB.request("POST", "/_uuids?count=1000");
   T(xhr.status == 405);
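
For reference, count is the only parameter this endpoint takes; a typical
fetch looks like this (illustrative):

    var xhr = CouchDB.request("GET", "/_uuids?count=3");
    T(xhr.status == 200);
    var uuids = JSON.parse(xhr.responseText).uuids;
    T(uuids.length == 3);
    testHashBustingHeaders(xhr); // same anti-caching checks as above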

Modified: couchdb/trunk/share/www/script/test/view_collation.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_collation.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_collation.js (original)
+++ couchdb/trunk/share/www/script/test/view_collation.js Mon Jul  6 00:33:50 2009
@@ -85,14 +85,14 @@
     rows = db.query(queryFun, null, queryOptions).rows;
     T(rows.length == 1 && equals(rows[0].key, values[i]));
   }
-  
+
   // test inclusive_end=true (the default)
   // the inclusive_end=true functionality is limited to endkey currently
   // if you need inclusive_start=false for startkey, please do implement. ;)
   var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:true}).rows;
   T(rows[rows.length-1].key == "b")
   // descending=true
-  var rows = db.query(queryFun, null, {endkey : "b", 
+  var rows = db.query(queryFun, null, {endkey : "b",
     descending:true, inclusive_end:true}).rows;
   T(rows[rows.length-1].key == "b")
 
@@ -100,13 +100,13 @@
   var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:false}).rows;
   T(rows[rows.length-1].key == "aa")
   // descending=true
-  var rows = db.query(queryFun, null, {endkey : "b", 
+  var rows = db.query(queryFun, null, {endkey : "b",
     descending:true, inclusive_end:false}).rows;
   T(rows[rows.length-1].key == "B")
-  
+
   // inclusive_end=false overrides endkey_docid
   var rows = db.query(queryFun, null, {
-    endkey : "b", endkey_docid: "b", 
+    endkey : "b", endkey_docid: "b",
     inclusive_end:false}).rows;
   T(rows[rows.length-1].key == "aa")
 };

Modified: couchdb/trunk/share/www/script/test/view_errors.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_errors.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_errors.js (original)
+++ couchdb/trunk/share/www/script/test/view_errors.js Mon Jul  6 00:33:50 2009
@@ -15,14 +15,14 @@
   db.deleteDb();
   db.createDb();
   if (debug) debugger;
-  
-  
+
+
 
   run_on_modified_server(
     [{section: "couchdb",
       key: "os_process_timeout",
       value: "500"}],
-    function() {    
+    function() {
       var doc = {integer: 1, string: "1", array: [1, 2, 3]};
       T(db.save(doc).ok);
 
@@ -47,37 +47,37 @@
         emit([doc._id, doc.undef], null);
       });
       T(results.total_rows == 0);
-  
+
      // querying a view with invalid params should give a reasonable error message
       var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view?startkey=foo", {
         headers: {"Content-Type": "application/json"},
-        body: JSON.stringify({language: "javascript", 
+        body: JSON.stringify({language: "javascript",
           map : "function(doc){emit(doc.integer)}"
         })
       });
       T(JSON.parse(xhr.responseText).error == "invalid_json");
-  
+
       // views should ignore Content-Type, like the rest of CouchDB
       var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view", {
         headers: {"Content-Type": "application/x-www-form-urlencoded"},
-        body: JSON.stringify({language: "javascript", 
+        body: JSON.stringify({language: "javascript",
           map : "function(doc){}"
         })
       });
       T(xhr.status == 200);
-  
+
       var map = function (doc) {emit(doc.integer, doc.integer);};
-  
+
       try {
           db.query(map, null, {group: true});
           T(0 == 1);
       } catch(e) {
           T(e.error == "query_parse_error");
       }
-  
+
       // reduce=false on map views doesn't work, so group=true will
       // never throw for temp reduce views.
-  
+
       var designDoc = {
         _id:"_design/test",
         language: "javascript",
@@ -89,7 +89,7 @@
         }
       };
       T(db.save(designDoc).ok);
-  
+
       var designDoc2 = {
         _id:"_design/testbig",
         language: "javascript",
@@ -100,14 +100,14 @@
         }
       };
       T(db.save(designDoc2).ok);
-  
+
       try {
           db.view("test/no_reduce", {group: true});
           T(0 == 1);
       } catch(e) {
           T(e.error == "query_parse_error");
       }
-  
+
       try {
         db.view("test/no_reduce", {reduce: true});
         T(0 == 1);
@@ -122,7 +122,7 @@
       } catch(e) {
           T(e.error == "query_parse_error");
       }
-  
+
       var designDoc3 = {
         _id:"_design/infinite",
         language: "javascript",
@@ -138,7 +138,7 @@
       } catch(e) {
           T(e.error == "os_process_error");
       }
-    
+
       // Check error responses for invalid multi-get bodies.
       var path = "/test_suite_db/_design/test/_view/no_reduce";
       var xhr = CouchDB.request("POST", path, {body: "[]"});

Modified: couchdb/trunk/share/www/script/test/view_multi_key_design.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_multi_key_design.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_multi_key_design.js (original)
+++ couchdb/trunk/share/www/script/test/view_multi_key_design.js Mon Jul  6 00:33:50 2009
@@ -53,7 +53,7 @@
     T(keys.indexOf(rows[i].key) != -1);
     T(rows[i].key == rows[i].value);
   }
-  
+
   var reduce = db.view("test/summate",{group:true},keys).rows;
   T(reduce.length == keys.length);
   for(var i=0; i<reduce.length; i++) {

Modified: couchdb/trunk/share/www/script/test/view_multi_key_temp.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_multi_key_temp.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_multi_key_temp.js (original)
+++ couchdb/trunk/share/www/script/test/view_multi_key_temp.js Mon Jul  6 00:33:50 2009
@@ -28,7 +28,7 @@
     T(keys.indexOf(rows[i].key) != -1);
     T(rows[i].key == rows[i].value);
   }
-  
+
   var reduce = db.query(queryFun, reduceFun, {group:true}, keys).rows;
   for(var i=0; i<reduce.length; i++) {
     T(keys.indexOf(reduce[i].key) != -1);

Modified: couchdb/trunk/share/www/script/test/view_offsets.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_offsets.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_offsets.js (original)
+++ couchdb/trunk/share/www/script/test/view_offsets.js Mon Jul  6 00:33:50 2009
@@ -11,12 +11,12 @@
 // the License.
 
 couchTests.view_offsets = function(debug) {
-  if (debug) debugger;      
+  if (debug) debugger;
 
   var db = new CouchDB("test_suite_db");
   db.deleteDb();
   db.createDb();
-  
+
   var designDoc = {
     _id : "_design/test",
     views : {
@@ -26,7 +26,7 @@
     }
   };
   T(db.save(designDoc).ok);
-  
+
   var docs = [
     {_id : "a1", letter : "a", number : 1, foo: "bar"},
     {_id : "a2", letter : "a", number : 2, foo: "bar"},
@@ -88,8 +88,8 @@
     ];
     db.bulkSave(docs);
 
-    var res = db.view("test/offset", { 
-      startkey: ["b",4], startkey_docid: "b4", endkey: ["b"], 
+    var res = db.view("test/offset", {
+      startkey: ["b",4], startkey_docid: "b4", endkey: ["b"],
       limit: 2, descending: true, skip: 1
     })
 

Modified: couchdb/trunk/share/www/script/test/view_pagination.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_pagination.js?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_pagination.js (original)
+++ couchdb/trunk/share/www/script/test/view_pagination.js Mon Jul  6 00:33:50 2009
@@ -71,7 +71,7 @@
         T(queryResults.rows[j].key == i + j);
       }
     }
-    
+
     // test endkey_docid
     var queryResults = db.query(function(doc) { emit(null, null);}, null, {
       startkey: null,
@@ -79,7 +79,7 @@
       endkey: null,
       endkey_docid: 40
     });
-    
+
     T(queryResults.rows.length == 35)
     T(queryResults.total_rows == docs.length)
     T(queryResults.offset == 1)
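
endkey_docid (and its startkey_docid counterpart) exists so that paging
can resume inside a run of equal keys. A common pattern is to re-issue
the query from the last row seen and skip that row (a sketch, assuming
the same emit(null, null) map as above):

    var page = db.query(function(doc) { emit(null, null); }, null, {limit: 10});
    var last = page.rows[page.rows.length - 1];
    var next = db.query(function(doc) { emit(null, null); }, null, {
      startkey: last.key,
      startkey_docid: last.id,
      skip: 1, // drop the row we already have
      limit: 10
    });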

Modified: couchdb/trunk/share/www/style/layout.css
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/style/layout.css?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/share/www/style/layout.css (original)
+++ couchdb/trunk/share/www/style/layout.css Mon Jul  6 00:33:50 2009
@@ -30,7 +30,7 @@
   line-height: 1.8em; margin: 0 0 1em; padding: 0 0 0 1em; position: relative;
 }
 h1 :link, h1 :visited, h1 strong { padding: .4em .5em; }
-h1 :link, h1 :visited { 
+h1 :link, h1 :visited {
   background: url(../image/path.gif) 100% 50% no-repeat;
   color: #bbb; cursor: pointer; padding-right: 2.2em;
   text-shadow: #333 2px 2px 1px;

Modified: couchdb/trunk/src/couchdb/couch_batch_save.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_batch_save.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_batch_save.erl (original)
+++ couchdb/trunk/src/couchdb/couch_batch_save.erl Mon Jul  6 00:33:50 2009
@@ -46,10 +46,10 @@
 eventually_save_doc(DbName, Doc, UserCtx) ->
     % find or create a process for the {DbName, UserCtx} pair
     {ok, Pid} = batch_pid_for_db_and_user(DbName, UserCtx),
-    % hand it the document 
+    % hand it the document
     ?LOG_DEBUG("sending doc to batch ~p",[Pid]),
     ok = send_doc_to_batch(Pid, Doc).
-    
+
 %%--------------------------------------------------------------------
 %% Function: commit_now(DbName) -> committed
 %% Description: Commits all docs for the DB. Does not reply until
@@ -72,8 +72,8 @@
 %%--------------------------------------------------------------------
 % commit_all() ->
 %     committed = gen_server:call(couch_batch_save, commit_now, infinity).
-%  
-    
+%
+
 %%====================================================================
 %% gen_server callbacks
 %%====================================================================
@@ -102,9 +102,9 @@
         batch_size=BatchSize,
         batch_interval=BatchInterval
     }=State) ->
-    % Create the pid in a serialized process. 
+    % Create the pid in a serialized process.
     % We checked before to see that we need the Pid, but the check is outside
-    % the gen_server for parallelism. We check again here to ensure we don't 
+    % the gen_server for parallelism. We check again here to ensure we don't
     % make a duplicate.
     Resp = case ets:lookup(couch_batch_save_by_db, {DbName,UserCtx}) of
         [{_, Pid}] ->
@@ -114,8 +114,8 @@
             % no match
             % start and record the doc collector process
             ?LOG_DEBUG("making a batch pid ~p",[{DbName, UserCtx}]),
-            Pid = spawn_link(fun() -> 
-                doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) 
+            Pid = spawn_link(fun() ->
+                doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new)
             end),
             true = ets:insert_new(couch_batch_save_by_db, {{DbName, UserCtx}, Pid}),
             {ok, Pid}
@@ -168,7 +168,7 @@
 
 commit_user_docs(_DbName, _UserCtx, []) ->
     {ok, []};
-    
+
 commit_user_docs(DbName, UserCtx, Docs) ->
     ?LOG_INFO("Committing ~p batch docs to ~p",[length(Docs), DbName]),
     case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
@@ -194,7 +194,7 @@
 
 send_commit(Pid) ->
     Pid ! {self(), commit},
-    receive 
+    receive
         {Pid, committed} ->
            ok
     end.
@@ -225,7 +225,7 @@
     end.
 
 % the loop that holds documents between commits
-doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) -> 
+doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) ->
     % start a process that triggers commit every BatchInterval milliseconds
     _IntervalPid = spawn_link(fun() -> commit_every_ms(self(), BatchInterval) end),
     doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, []);
@@ -233,7 +233,7 @@
 doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, Docs) when length(Docs) >= BatchSize->
     collector_commit(DbName, UserCtx, BatchInterval, Docs),
     exit(normal);
-    
+
 doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, Docs) ->
     receive
         {From, add_doc, Doc} ->
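
This module backs the HTTP batch mode: a write sent with ?batch=ok is
handed to the collector above and acknowledged before it is committed.
Over HTTP that looks roughly like the following (a sketch; 202 Accepted
is the expected acknowledgement for a batched write):

    var xhr = CouchDB.request("PUT", "/test_suite_db/batch_doc?batch=ok", {
      body: JSON.stringify({_id:"batch_doc", value:1})
    });
    T(xhr.status == 202); // accepted, not yet durably committed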

Modified: couchdb/trunk/src/couchdb/couch_batch_save_sup.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_batch_save_sup.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_batch_save_sup.erl (original)
+++ couchdb/trunk/src/couchdb/couch_batch_save_sup.erl Mon Jul  6 00:33:50 2009
@@ -27,9 +27,9 @@
             exit(Self, reload_config)
         end),
 
-    BatchSize = list_to_integer(couch_config:get("couchdb", 
+    BatchSize = list_to_integer(couch_config:get("couchdb",
         "batch_save_size","1000")),
-    BatchInterval = list_to_integer(couch_config:get("couchdb", 
+    BatchInterval = list_to_integer(couch_config:get("couchdb",
         "batch_save_interval","1000")),
 
     Batch = {batch, {couch_batch_save, start_link, [BatchSize, BatchInterval]},

Modified: couchdb/trunk/src/couchdb/couch_btree.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_btree.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_btree.erl (original)
+++ couchdb/trunk/src/couchdb/couch_btree.erl Mon Jul  6 00:33:50 2009
@@ -39,7 +39,7 @@
 % pass in 'nil' for State if this is a new Btree.
 open(State, Fd) ->
     {ok, #btree{root=State, fd=Fd}}.
-    
+
 set_options(Bt, []) ->
     Bt;
 set_options(Bt, [{split, Extract}|Rest]) ->
@@ -68,7 +68,7 @@
 final_reduce(Reduce, {KVs, Reductions}) ->
     Red = Reduce(reduce, KVs),
     final_reduce(Reduce, {[], [Red | Reductions]}).
-    
+
 fold_reduce(Bt, StartKey, EndKey, KeyGroupFun, Fun, Acc) ->
     fold_reduce(Bt, fwd, StartKey, EndKey, KeyGroupFun, Fun, Acc).
 
@@ -189,7 +189,7 @@
 
 lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
     {ok, lists:reverse(Output)};
-    
+
 lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when size(NodeTuple) < LowerBound ->
     {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
 
@@ -238,7 +238,7 @@
     {ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs),
     complete_root(Bt2, ResultKeyPointers).
 
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%% 
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
 % It is inaccurate as it does not account for compression when blocks are
 % written. Plus with the "case size(term_to_binary(InList)) of" code it's
 % probably really inefficient.
@@ -277,7 +277,7 @@
         {NodeType, NodeList} = get_node(Bt, Pointer)
     end,
     NodeTuple = list_to_tuple(NodeList),
-    
+
     {ok, NewNodeList, QueryOutput2, Bt2} =
     case NodeType of
     kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
@@ -320,7 +320,7 @@
         ANodeList <- NodeListList
     ],
     {ok, ResultList, Bt}.
-    
+
 modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
     modify_node(Bt, nil, Actions, QueryOutput);
 modify_kpnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
@@ -350,15 +350,15 @@
                 LowerBound, N - 1, ResultNode)),
         modify_kpnode(Bt2, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
     end.
-    
+
 bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
     Tail;
 bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
     bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-        
+
 bounded_tuple_to_list(Tuple, Start, End, Tail) ->
     bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-    
+
 bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
     lists:reverse(Acc, Tail);
 bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
@@ -426,10 +426,10 @@
     end.
 
 
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc, 
+reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc,
         GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
-    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey}; 
-reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc, 
+    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc,
         GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
     case get_node(Bt, P) of
     {kp_node, NodeList} ->
@@ -475,7 +475,7 @@
         reduce_stream_kv_node2(Bt, RestKVs, Key,
                 [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
     _ ->
-    
+
         case KeyGroupFun(GroupedKey, Key) of
         true ->
             reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
@@ -531,7 +531,7 @@
         GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
     {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
         KeyGroupFun(GroupedKey, Key) end, NodeList),
-    {GroupedNodes, UngroupedNodes} = 
+    {GroupedNodes, UngroupedNodes} =
     case Grouped0 of
     [] ->
         {Grouped0, Ungrouped0};
@@ -542,7 +542,7 @@
     GroupedReds = [R || {_, {_,R}} <- GroupedNodes],
     case UngroupedNodes of
     [{_Key, NodeInfo}|RestNodes] ->
-        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = 
+        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
             reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, GroupedKey,
                 GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
         reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, KeyEnd, GroupedKey2,

Modified: couchdb/trunk/src/couchdb/couch_config.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_config.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_config.erl (original)
+++ couchdb/trunk/src/couchdb/couch_config.erl Mon Jul  6 00:33:50 2009
@@ -54,7 +54,7 @@
 
 get(Section, Key) ->
     ?MODULE:get(Section, Key, undefined).
-    
+
 get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
     ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
 get(Section, Key, Default) ->
@@ -194,9 +194,9 @@
                 {ok, [ValueName|LineValues]} -> % yeehaw, got a line!
                     RemainingLine = couch_util:implode(LineValues, "="),
                     % removes comments
-                    {ok, [LineValue | _Rest]} = 
+                    {ok, [LineValue | _Rest]} =
                         regexp:split(RemainingLine, " ;|\t;"),
-                    {AccSectionName, 
+                    {AccSectionName,
                 [{{AccSectionName, ValueName}, LineValue} | AccValues]}
                 end
             end

Modified: couchdb/trunk/src/couchdb/couch_config_writer.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_config_writer.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_config_writer.erl (original)
+++ couchdb/trunk/src/couchdb/couch_config_writer.erl Mon Jul  6 00:33:50 2009
@@ -54,7 +54,7 @@
         _ ->
             NewFileContents2
     end,
-    
+
     ok = file:write_file(File, list_to_binary(NewFileContents)),
     ok.
 
@@ -66,7 +66,7 @@
     NewCurrentSection = parse_module(Line, OldCurrentSection),
     % if the current Section is the one we want to change, try to match
     % each line with the Option
-    NewContents = 
+    NewContents =
     case NewCurrentSection of
     Section ->
         case OldCurrentSection of
@@ -87,21 +87,21 @@
             end;
         _ -> % we got into a new [section]
             {NewLine, DoneOptions2} = append_var_to_section(
-                {{Section, Option}, Value}, 
-                Line, 
-                OldCurrentSection, 
+                {{Section, Option}, Value},
+                Line,
+                OldCurrentSection,
                 DoneOptions),
             NewLine
         end;
     _ -> % we are reading [NewCurrentSection]
         {NewLine, DoneOptions2} = append_var_to_section(
-            {{Section, Option}, Value}, 
-            Line, 
-            OldCurrentSection, 
+            {{Section, Option}, Value},
+            Line,
+            OldCurrentSection,
             DoneOptions),
         NewLine
     end,
-    % clumsy way to only append a newline character if the line is not empty. We need this to 
+    % clumsy way to only append a newline character if the line is not empty. We need this to
     % avoid having a newline inserted at the top of the target file each time we save it.
     Contents2 = case Contents of "" -> ""; _ -> Contents ++ "\n" end,
     % go to next line
@@ -110,7 +110,7 @@
 save_loop({{Section, Option}, Value}, [], OldSection, NewFileContents, DoneOptions) ->
     case lists:member(Option, DoneOptions) of
         % append Deferred Option
-        false when Section == OldSection -> 
+        false when Section == OldSection ->
             {NewFileContents ++ "\n" ++ Option ++ " = " ++ Value ++ "\n", DoneOptions};
         % we're out of new lines, just return the new file's contents
         _ -> {NewFileContents, DoneOptions}
@@ -131,7 +131,7 @@
         _ ->
             {Line, DoneOptions}
         end.
-    
+
 %% @spec parse_module(Line::string(), OldSection::string()) -> string()
 %% @doc Tries to match a line against a pattern specifying an ini module or
 %%      section ("[Section]"). Returns OldSection if no match is found.

Modified: couchdb/trunk/src/couchdb/couch_db.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db.erl (original)
+++ couchdb/trunk/src/couchdb/couch_db.erl Mon Jul  6 00:33:50 2009
@@ -140,7 +140,7 @@
     Else ->
         Else
     end.
-    
+
 %   returns {ok, DocInfo} or not_found
 get_full_doc_info(Db, Id) ->
     [Result] = get_full_doc_infos(Db, [Id]),
@@ -154,13 +154,13 @@
 
 purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
     gen_server:call(UpdatePid, {purge_docs, IdsRevs}).
-    
+
 get_committed_update_seq(#db{committed_update_seq=Seq}) ->
     Seq.
 
 get_update_seq(#db{update_seq=Seq})->
     Seq.
-    
+
 get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
     PurgeSeq.
 
@@ -230,7 +230,7 @@
 
 name(#db{name=Name}) ->
     Name.
-    
+
 update_doc(Db, Doc, Options) ->
     case update_docs(Db, [Doc], Options) of
     {ok, [{ok, NewRev}]} ->
@@ -241,7 +241,7 @@
 
 update_docs(Db, Docs) ->
     update_docs(Db, Docs, []).
-    
+
 % group_alike_docs groups the sorted documents into sublist buckets, by id.
 % ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
 group_alike_docs(Docs) ->
@@ -375,7 +375,7 @@
 
 
 prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
-    Errors2 = [{{Id, {Pos, Rev}}, Error} || 
+    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
             {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
     {lists:reverse(AccPrepped), lists:reverse(Errors2)};
 prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
@@ -406,9 +406,9 @@
             fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
                 case dict:find({Pos, RevId}, LeafRevsFullDict) of
                 {ok, {Start, Path}} ->
-                    % our unflushed doc is a leaf node. Go back on the path 
+                    % our unflushed doc is a leaf node. Go back on the path
                     % to find the previous rev that's on disk.
-                    PrevRevResult = 
+                    PrevRevResult =
                     case couch_doc:has_stubs(Doc) of
                     true ->
                         [_PrevRevFull | [PrevRevFull | _]=PrevPath]  = Path,
@@ -420,14 +420,14 @@
                             Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
                             {ok, Doc2, fun() -> DiskDoc end}
                         end;
-                    false ->    
+                    false ->
                         {ok, Doc,
                             fun() ->
                                 make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
                             end}
                     end,
                     case PrevRevResult of
-                    {ok, NewDoc, LoadPrevRevFun} ->                        
+                    {ok, NewDoc, LoadPrevRevFun} ->
                         case validate_doc_update(Db, NewDoc, LoadPrevRevFun) of
                         ok ->
                             {[NewDoc | AccValidated], AccErrors2};
@@ -450,7 +450,7 @@
 update_docs(Db, Docs, Options, replicated_changes) ->
     couch_stats_collector:increment({couchdb, database_writes}),
     DocBuckets = group_alike_docs(Docs),
-    
+
     case (Db#db.validate_doc_funs /= []) orelse
         lists:any(
             fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
@@ -459,7 +459,7 @@
     true ->
         Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
         ExistingDocs = get_full_doc_infos(Db, Ids),
-    
+
         {DocBuckets2, DocErrors} =
                 prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
         DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
@@ -469,7 +469,7 @@
     end,
     {ok, []} = write_and_commit(Db, DocBuckets3, [merge_conflicts | Options]),
     {ok, DocErrors};
-    
+
 update_docs(Db, Docs, Options, interactive_edit) ->
     couch_stats_collector:increment({couchdb, database_writes}),
     AllOrNothing = lists:member(all_or_nothing, Options),
@@ -485,7 +485,7 @@
             end
         end, Docs),
     DocBuckets = group_alike_docs(Docs2),
-    
+
     case (Db#db.validate_doc_funs /= []) orelse
         lists:any(
             fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
@@ -497,16 +497,16 @@
         % lookup the doc by id and get the most recent
         Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
         ExistingDocInfos = get_full_doc_infos(Db, Ids),
-        
+
         {DocBucketsPrepped, Failures} =
         case AllOrNothing of
         true ->
-            prep_and_validate_replicated_updates(Db, DocBuckets, 
+            prep_and_validate_replicated_updates(Db, DocBuckets,
                     ExistingDocInfos, [], []);
         false ->
             prep_and_validate_updates(Db, DocBuckets, ExistingDocInfos, [], [])
         end,
-        
+
         % strip out any empty buckets
         DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
     false ->
@@ -517,7 +517,7 @@
     if (AllOrNothing) and (Failures /= []) ->
          {aborted, Failures};
     true ->
-        Options2 = if AllOrNothing -> [merge_conflicts]; 
+        Options2 = if AllOrNothing -> [merge_conflicts];
                 true -> [] end ++ Options,
         {ok, CommitFailures} = write_and_commit(Db, DocBuckets2, Options2),
         FailDict = dict:from_list(CommitFailures ++ Failures),
@@ -575,24 +575,24 @@
 flush_binary(Fd, {Fd0, StreamPointer, Len}) when Fd0 == Fd ->
     % already written to our file, nothing to write
     {Fd, StreamPointer, Len};
-  
+
 flush_binary(Fd, {OtherFd, StreamPointer, Len}) when is_tuple(StreamPointer) ->
-    {NewStreamData, Len} = 
+    {NewStreamData, Len} =
             couch_stream:old_copy_to_new_stream(OtherFd, StreamPointer, Len, Fd),
     {Fd, NewStreamData, Len};
 
 flush_binary(Fd, {OtherFd, StreamPointer, Len}) ->
-    {NewStreamData, Len} = 
+    {NewStreamData, Len} =
             couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
     {Fd, NewStreamData, Len};
-                         
+
 flush_binary(Fd, Bin) when is_binary(Bin) ->
     with_stream(Fd, fun(OutputStream) ->
         couch_stream:write(OutputStream, Bin)
     end);
-                 
+
 flush_binary(Fd, {StreamFun, undefined}) when is_function(StreamFun) ->
-    with_stream(Fd, fun(OutputStream) -> 
+    with_stream(Fd, fun(OutputStream) ->
         % StreamFun(MaxChunkSize, WriterFun) must call WriterFun
         % once for each chunk of the attachment,
         StreamFun(4096,
@@ -606,19 +606,19 @@
                 couch_stream:write(OutputStream, Bin)
             end, ok)
     end);
-             
+
 flush_binary(Fd, {Fun, Len}) when is_function(Fun) ->
-    with_stream(Fd, fun(OutputStream) -> 
+    with_stream(Fd, fun(OutputStream) ->
         write_streamed_attachment(OutputStream, Fun, Len)
     end).
-            
+
 with_stream(Fd, Fun) ->
     {ok, OutputStream} = couch_stream:open(Fd),
     Fun(OutputStream),
     {StreamInfo, Len} = couch_stream:close(OutputStream),
     {Fd, StreamInfo, Len}.
 
-    
+
 write_streamed_attachment(_Stream, _F, 0) ->
     ok;
 write_streamed_attachment(Stream, F, LenLeft) ->
@@ -656,14 +656,14 @@
                 Infos = [DocInfo];
             all_docs ->
                % make each rev its own doc info
-                Infos = [DocInfo#doc_info{revs=[RevInfo]} || 
+                Infos = [DocInfo#doc_info{revs=[RevInfo]} ||
                     #rev_info{seq=RevSeq}=RevInfo <- Revs, StartSeq < RevSeq]
             end,
             Fun(Infos, Acc2)
         end, Acc).
 
 count_changes_since(Db, SinceSeq) ->
-    {ok, Changes} = 
+    {ok, Changes} =
     couch_btree:fold_reduce(Db#db.docinfo_by_seq_btree,
         SinceSeq + 1, % startkey
         ok, % endkey
@@ -673,7 +673,7 @@
         end,
         0),
     Changes.
-    
+
 enum_docs_since(Db, SinceSeq, Direction, InFun, Acc) ->
     couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Acc).
 
@@ -698,13 +698,13 @@
 terminate(Reason, _Db) ->
     couch_util:terminate_linked(Reason),
     ok.
-    
+
 handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
     ok = couch_ref_counter:add(RefCntr, OpenerPid),
     {reply, {ok, Db}, Db};
-handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact, 
+handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
             waiting_delayed_commit=Delay}=Db) ->
-    % Idle means no referrers. Unless in the middle of a compaction file switch, 
+    % Idle means no referrers. Unless in the middle of a compaction file switch,
     % there are always at least 2 referrers, couch_db_updater and us.
     {reply, (Delay == nil) and (Compact == nil) and (couch_ref_counter:count(RefCntr) == 2), Db};
 handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
@@ -782,7 +782,7 @@
     Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
     {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
 open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
-    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} = 
+    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
         DocInfo = couch_doc:to_doc_info(FullDocInfo),
     {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
     Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
@@ -799,11 +799,11 @@
     case lists:member(revs_info, Options) of
     false -> [];
     true ->
-        {[{Pos, RevPath}],[]} = 
+        {[{Pos, RevPath}],[]} =
             couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-        
+
         [{revs_info, Pos, lists:map(
-            fun({Rev1, {true, _Sp, _UpdateSeq}}) -> 
+            fun({Rev1, {true, _Sp, _UpdateSeq}}) ->
                 {Rev1, deleted};
             ({Rev1, {false, _Sp, _UpdateSeq}}) ->
                 {Rev1, available};
@@ -849,7 +849,7 @@
 doc_to_tree_simple(Doc, [RevId | Rest]) ->
     [{RevId, ?REV_MISSING, doc_to_tree_simple(Doc, Rest)}].
 
-    
+
 make_doc(#db{fd=Fd}, Id, Deleted, Bp, RevisionPath) ->
     {BodyData, BinValues} =
     case Bp of
@@ -867,6 +867,6 @@
         attachments = BinValues,
         deleted = Deleted
         }.
-    
-    
-    
+
+
+

Modified: couchdb/trunk/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db.hrl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db.hrl (original)
+++ couchdb/trunk/src/couchdb/couch_db.hrl Mon Jul  6 00:33:50 2009
@@ -21,7 +21,7 @@
 -define(l2b(V), list_to_binary(V)).
 
 -define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-        
+
 -define(LOG_DEBUG(Format, Args),
     case couch_log:debug_on() of
         true -> error_logger:info_report(couch_debug, {Format, Args});
@@ -69,7 +69,7 @@
     req_body = undefined,
     design_url_handlers
     }).
-    
+
 
 -record(doc,
     {
@@ -91,7 +91,7 @@
     % couch_db:open_doc(Db, Id, Options).
     meta = []
     }).
-    
+
 
 
 -record(user_ctx,
@@ -112,7 +112,7 @@
 -define(LATEST_DISK_VERSION, 3).
 
 -record(db_header,
-    {disk_version = ?LATEST_DISK_VERSION,  
+    {disk_version = ?LATEST_DISK_VERSION,
      update_seq = 0,
      unused = 0,
      fulldocinfo_by_id_btree_state = nil,
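
The ?LOG_DEBUG macro defined above only builds and sends the report when
couch_log:debug_on() returns true, so debug call sites cost almost nothing
in production. A minimal usage sketch, assuming a module that includes
this header; log_open and its arguments are hypothetical:

    -include("couch_db.hrl").

    %% Emits a couch_debug report only when debug logging is enabled.
    log_open(DbName, UpdateSeq) ->
        ?LOG_DEBUG("opened ~s at update_seq ~p", [DbName, UpdateSeq]).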

Modified: couchdb/trunk/src/couchdb/couch_db_update_notifier_sup.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db_update_notifier_sup.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db_update_notifier_sup.erl (original)
+++ couchdb/trunk/src/couchdb/couch_db_update_notifier_sup.erl Mon Jul  6 00:33:50 2009
@@ -33,11 +33,11 @@
     ok = couch_config:register(
         fun("update_notification", Key, Value) -> reload_config(Key, Value) end
     ),
-    
+
     UpdateNotifierExes = couch_config:get("update_notification"),
-    
+
     {ok,
-        {{one_for_one, 10, 3600}, 
+        {{one_for_one, 10, 3600},
             lists:map(fun({Name, UpdateNotifierExe}) ->
                 {Name,
                 {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
@@ -47,7 +47,7 @@
                     [couch_db_update_notifier]}
                 end, UpdateNotifierExes)}}.
 
-%% @doc when update_notification configuration changes, terminate the process 
+%% @doc when update_notification configuration changes, terminate the process
 %%      for that notifier and start a new one with the updated config
 reload_config(Id, Exe) ->
     ChildSpec = {
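
The @doc comment above describes the reload behaviour: when an
[update_notification] ini key changes, the child for that notifier is
replaced. A hedged sketch of that terminate-and-restart flow; the restart
type, shutdown timeout, and worker tag in the child spec are assumptions,
not taken from this commit:

    %% Replace the notifier child Id with one running the new executable Exe.
    reload_notifier(Id, Exe) ->
        ChildSpec = {Id,
            {couch_db_update_notifier, start_link, [Exe]},
            permanent, 1000, worker, [couch_db_update_notifier]},
        supervisor:terminate_child(couch_db_update_notifier_sup, Id),
        supervisor:delete_child(couch_db_update_notifier_sup, Id),
        supervisor:start_child(couch_db_update_notifier_sup, ChildSpec).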

Modified: couchdb/trunk/src/couchdb/couch_db_updater.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db_updater.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db_updater.erl (original)
+++ couchdb/trunk/src/couchdb/couch_db_updater.erl Mon Jul  6 00:33:50 2009
@@ -32,7 +32,7 @@
         ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE
         {ok, Header} = couch_file:read_header(Fd)
     end,
-    
+
     Db = init_db(DbName, Filepath, Fd, Header),
     Db2 = refresh_validate_doc_funs(Db),
     {ok, Db2#db{main_pid=MainPid}}.
@@ -90,7 +90,7 @@
         } = Db,
     DocLookups = couch_btree:lookup(DocInfoByIdBTree,
             [Id || {Id, _Revs} <- IdRevs]),
-            
+
     NewDocInfos = lists:zipwith(
         fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
             case couch_key_tree:remove_leafs(Tree, Revs) of
@@ -103,17 +103,17 @@
             nil
         end,
         IdRevs, DocLookups),
-        
+
     SeqsToRemove = [Seq
             || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
-    
+
     FullDocInfoToUpdate = [FullInfo
             || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
             <- NewDocInfos, Tree /= []],
-    
+
     IdRevsPurged = [{Id, Revs}
             || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
-    
+
     {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
         fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
             Tree2 = couch_key_tree:map_leafs( fun(RevInfo) ->
@@ -122,27 +122,27 @@
             {couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}),
                 SeqAcc + 1}
         end, LastSeq, FullDocInfoToUpdate),
-    
+
     IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
             <- NewDocInfos],
-    
+
     {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
             DocInfoToUpdate, SeqsToRemove),
     {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
             FullDocInfoToUpdate, IdsToRemove),
     {ok, Pointer} = couch_file:append_term(Fd, IdRevsPurged),
-    
+
     Db2 = commit_data(
         Db#db{
             fulldocinfo_by_id_btree = DocInfoByIdBTree2,
             docinfo_by_seq_btree = DocInfoBySeqBTree2,
             update_seq = NewSeq + 1,
             header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
-    
+
     ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
     couch_db_update_notifier:notify({updated, Db#db.name}),
     {reply, {ok, Db2#db.update_seq, IdRevsPurged}, Db2}.
-    
+
 
 handle_cast(start_compact, Db) ->
     case Db#db.compactor_pid of
@@ -168,10 +168,10 @@
         {ok, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
                 fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
         {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
-        
+
         NewDb2 = commit_data( NewDb#db{local_docs_btree=NewLocalBtree,
                 main_pid = Db#db.main_pid,filepath = Filepath}),
-            
+
         ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
                 [Filepath, CompactFilepath]),
         file:delete(Filepath),
@@ -198,25 +198,25 @@
 
 
 btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
-    RevInfos = [{Rev, Seq, Bp} ||  
+    RevInfos = [{Rev, Seq, Bp} ||
         #rev_info{rev=Rev,seq=Seq,deleted=false,body_sp=Bp} <- Revs],
-    DeletedRevInfos = [{Rev, Seq, Bp} ||  
+    DeletedRevInfos = [{Rev, Seq, Bp} ||
         #rev_info{rev=Rev,seq=Seq,deleted=true,body_sp=Bp} <- Revs],
     {KeySeq,{Id, RevInfos, DeletedRevInfos}}.
-    
+
 btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
     #doc_info{
         id = Id,
         high_seq=KeySeq,
-        revs = 
-            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} || 
-                {Rev, Seq, Bp} <- RevInfos] ++ 
-            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} || 
+        revs =
+            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
+                {Rev, Seq, Bp} <- RevInfos] ++
+            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
                 {Rev, Seq, Bp} <- DeletedRevInfos]};
 btree_by_seq_join(KeySeq,{Id, Rev, Bp, Conflicts, DelConflicts, Deleted}) ->
     % 09 UPGRADE CODE
     % this is the 0.9.0 and earlier by_seq record. It's missing the body pointers
-    % and individual seq nums for conflicts that are currently in the index, 
+    % and individual seq nums for conflicts that are currently in the index,
     % meaning the filtered _changes API will not work except on main docs.
     % Simply compact a 0.9.0 database to upgrade the index.
     #doc_info{
@@ -252,7 +252,7 @@
             % This is fixed by compacting the database.
             {IsDeleted, BodyPointer, HighSeq}
         end, DiskTree),
-    
+
     #full_doc_info{id=Id, update_seq=HighSeq, deleted=Deleted==1, rev_tree=Tree}.
 
 btree_by_id_reduce(reduce, FullDocInfos) ->
@@ -262,7 +262,7 @@
 btree_by_id_reduce(rereduce, Reds) ->
     {lists:sum([Count || {Count,_} <- Reds]),
         lists:sum([DelCount || {_, DelCount} <- Reds])}.
-            
+
 btree_by_seq_reduce(reduce, DocInfos) ->
     % count the number of documents
     length(DocInfos);
@@ -293,16 +293,16 @@
     _ -> throw({database_disk_version_error, "Incorrect disk header version"})
     end,
     Less = fun less_docid/2,
-        
+
     {ok, FsyncOptions} = couch_util:parse_term(
-            couch_config:get("couchdb", "fsync_options", 
+            couch_config:get("couchdb", "fsync_options",
                     "[before_header, after_header, on_file_open]")),
-    
+
     case lists:member(on_file_open, FsyncOptions) of
     true -> ok = couch_file:sync(Fd);
     _ -> ok
     end,
-        
+
     {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
         [{split, fun(X) -> btree_by_id_split(X) end},
         {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
@@ -347,7 +347,7 @@
 
 close_db(#db{fd_ref_counter = RefCntr}) ->
     couch_ref_counter:drop(RefCntr).
-    
+
 
 refresh_validate_doc_funs(Db) ->
     {ok, DesignDocs} = couch_db:get_design_docs(Db),
@@ -424,7 +424,7 @@
             0 -> AccRemoveSeqs;
             _ -> [OldSeq | AccRemoveSeqs]
         end,
-        merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, 
+        merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo,
                 [NewInfo|AccNewInfos], RemoveSeqs, NewConflicts, AccSeq+1)
     end.
 
@@ -443,7 +443,7 @@
 stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
     [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
             #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
-        
+
 
 update_docs_int(Db, DocsList, Options) ->
     #db{
@@ -461,9 +461,9 @@
                 {[Docs | DocsListAcc], NonRepDocsAcc}
             end
         end, {[], []}, DocsList),
-    
-    Ids = [Id || [#doc{id=Id}|_] <- DocsList2], 
-    
+
+    Ids = [Id || [#doc{id=Id}|_] <- DocsList2],
+
     % look up the old documents, if they exist.
     OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
     OldDocInfos = lists:zipwith(
@@ -473,23 +473,23 @@
             #full_doc_info{id=Id}
         end,
         Ids, OldDocLookups),
-        
+
     % Merge the new docs into the revision trees.
     {ok, NewDocInfos0, RemoveSeqs, Conflicts, NewSeq} = merge_rev_trees(
             lists:member(merge_conflicts, Options),
             DocsList2, OldDocInfos, [], [], [], LastSeq),
-    
+
     NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0),
-    
+
     % All documents are now ready to write.
-    
+
     {ok, LocalConflicts, Db2}  = update_local_docs(Db, NonRepDocs),
-    
+
     % Write out the document summaries (the bodies are stored in the nodes of
     % the trees, the attachments are already written to disk)
     {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
-    
-    {IndexFullDocInfos, IndexDocInfos} = 
+
+    {IndexFullDocInfos, IndexDocInfos} =
             new_index_entries(FlushedFullDocInfos, [], []),
 
     % and the indexes
@@ -500,7 +500,7 @@
         fulldocinfo_by_id_btree = DocInfoByIdBTree2,
         docinfo_by_seq_btree = DocInfoBySeqBTree2,
         update_seq = NewSeq},
-    
+
     % Check if we just updated any design documents, and update the validation
     % funs if we did.
     case [1 || <<"_design/",_/binary>> <- Ids] of
@@ -509,8 +509,8 @@
     _ ->
         Db4 = refresh_validate_doc_funs(Db3)
     end,
-    
-    {ok, LocalConflicts ++ Conflicts, 
+
+    {ok, LocalConflicts ++ Conflicts,
             commit_data(Db4, not lists:member(full_commit, Options))}.
 
 
@@ -534,13 +534,13 @@
             false ->
                 {conflict, {Id, {0, RevStr}}}
             end
-            
+
         end, Docs, OldDocLookups),
 
     BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
     BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
     Conflicts = [{conflict, IdRev} || {conflict, IdRev} <- BtreeEntries],
-    
+
     {ok, Btree2} =
         couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
 
@@ -580,14 +580,14 @@
         true -> ok = couch_file:sync(Fd);
         _    -> ok
         end,
-        
+
         ok = couch_file:write_header(Fd, Header),
-        
+
         case lists:member(after_header, FsyncOptions) of
         true -> ok = couch_file:sync(Fd);
         _    -> ok
         end,
-        
+
         Db#db{waiting_delayed_commit=nil,
             header=Header,
             committed_update_seq=Db#db.update_seq}
@@ -622,11 +622,11 @@
     % inner node, only copy info/data from leaf nodes
     [{RevId, ?REV_MISSING, copy_rev_tree_attachments(SrcFd, DestFd, SubTree)} | copy_rev_tree_attachments(SrcFd, DestFd, RestTree)].
 
-    
+
 copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
     Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
     LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
-    
+
     % write out the attachments
     NewFullDocInfos0 = lists:map(
         fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
@@ -656,7 +656,7 @@
         Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids),
         [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
     end,
-        
+
     {ok, DocInfoBTree} = couch_btree:add_remove(
             NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs),
     {ok, FullDocInfoBTree} = couch_btree:add_remove(
@@ -665,14 +665,14 @@
               docinfo_by_seq_btree=DocInfoBTree}.
 
 
-          
+
 copy_compact(Db, NewDb0, Retry) ->
     FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
     NewDb = NewDb0#db{fsync_options=FsyncOptions},
     TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
     EnumBySeqFun =
     fun(#doc_info{high_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied, TotalCopied}) ->
-        couch_task_status:update("Copied ~p of ~p changes (~p%)", 
+        couch_task_status:update("Copied ~p of ~p changes (~p%)",
                 [TotalCopied, TotalChanges, (TotalCopied*100) div TotalChanges]),
         if TotalCopied rem 1000 == 0 ->
             NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
@@ -681,20 +681,20 @@
             true ->
                 {ok, {NewDb2#db{update_seq=Seq}, [], TotalCopied + 1}}
             end;
-        true ->    
+        true ->
             {ok, {AccNewDb, [DocInfo | AccUncopied], TotalCopied + 1}}
         end
     end,
-    
+
     couch_task_status:set_update_frequency(500),
-     
+
     {ok, {NewDb2, Uncopied, TotalChanges}} =
         couch_btree:foldl(Db#db.docinfo_by_seq_btree, NewDb#db.update_seq + 1, EnumBySeqFun, {NewDb, [], 0}),
-    
-    couch_task_status:update("Flushing"), 
-        
+
+    couch_task_status:update("Flushing"),
+
     NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
-    
+
     % copy misc header values
     if NewDb3#db.admins /= Db#db.admins ->
         {ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.admins),
@@ -702,7 +702,7 @@
     true ->
         NewDb4 = NewDb3
     end,
-    
+
     commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
 
 start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
@@ -721,7 +721,7 @@
     end,
     NewDb = init_db(Name, CompactFile, Fd, Header),
     NewDb2 = copy_compact(Db, NewDb, Retry),
-    
+
     gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}),
     close_db(NewDb2).
-    
+
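
The init_db changes above read fsync_options from the ini with
couch_util:parse_term and then consult the resulting atom list at
file-open, before-header, and after-header time. A minimal sketch of that
parse, using the same default string shown in init_db; the function name
is hypothetical:

    %% Parses the fsync_options ini string into a list of atoms.
    default_fsync_options() ->
        {ok, FsyncOptions} = couch_util:parse_term(
            "[before_header, after_header, on_file_open]"),
        %% FsyncOptions = [before_header, after_header, on_file_open]
        FsyncOptions.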

Modified: couchdb/trunk/src/couchdb/couch_doc.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_doc.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_doc.erl (original)
+++ couchdb/trunk/src/couchdb/couch_doc.erl Mon Jul  6 00:33:50 2009
@@ -34,7 +34,7 @@
     case lists:member(revs, Options) of
     false -> [];
     true ->
-        [{<<"_revisions">>, {[{<<"start">>, Start}, 
+        [{<<"_revisions">>, {[{<<"start">>, Start},
                         {<<"ids">>, RevIds}]}}]
     end.
 
@@ -115,10 +115,10 @@
 
 to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
             meta=Meta}=Doc,Options)->
-    {[{<<"_id">>, Id}] 
+    {[{<<"_id">>, Id}]
         ++ to_json_rev(Start, RevIds)
         ++ to_json_body(Del, Body)
-        ++ to_json_revisions(Options, Start, RevIds) 
+        ++ to_json_revisions(Options, Start, RevIds)
         ++ to_json_meta(Meta)
         ++ to_json_attachments(Doc#doc.attachments, Options)
     }.
@@ -133,13 +133,13 @@
     parse_rev(?b2l(Rev));
 parse_rev(Rev) when is_list(Rev) ->
     SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
-    case SplitRev of 
+    case SplitRev of
         {Pos, [$- | RevId]} -> {list_to_integer(Pos), ?l2b(RevId)};
         _Else -> throw({bad_request, <<"Invalid rev format">>})
     end;
 parse_rev(_BadRev) ->
     throw({bad_request, <<"Invalid rev format">>}).
-    
+
 parse_revs([]) ->
     [];
 parse_revs([Rev | Rest]) ->
@@ -161,20 +161,20 @@
 transfer_fields([], #doc{body=Fields}=Doc) ->
     % convert fields back to json object
     Doc#doc{body={lists:reverse(Fields)}};
-    
+
 transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
     validate_docid(Id),
     transfer_fields(Rest, Doc#doc{id=Id});
-    
+
 transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
     {Pos, RevId} = parse_rev(Rev),
     transfer_fields(Rest,
             Doc#doc{revs={Pos, [RevId]}});
-            
+
 transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
     % we already got the rev from the _revisions
     transfer_fields(Rest,Doc);
-    
+
 transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
     Bins = lists:flatmap(fun({Name, {BinProps}}) ->
         case proplists:get_value(<<"stub">>, BinProps) of
@@ -190,7 +190,7 @@
         end
     end, JsonBins),
     transfer_fields(Rest, Doc#doc{attachments=Bins});
-    
+
 transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
     RevIds = proplists:get_value(<<"ids">>, Props),
     Start = proplists:get_value(<<"start">>, Props),
@@ -204,7 +204,7 @@
     [throw({doc_validation, "RevId isn't a string"}) ||
             RevId <- RevIds, not is_binary(RevId)],
     transfer_fields(Rest, Doc#doc{revs={Start, RevIds}});
-    
+
 transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when (B==true) or (B==false) ->
     transfer_fields(Rest, Doc#doc{deleted=B});
 
@@ -222,7 +222,7 @@
 transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
     throw({doc_validation,
             ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-            
+
 transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
     transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
 
@@ -237,11 +237,11 @@
 
 to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) ->
     RevInfosAndPath =
-        [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} || 
-            {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <- 
+        [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} ||
+            {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <-
             couch_key_tree:get_all_leafs(Tree)],
     SortedRevInfosAndPath = lists:sort(
-            fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA}, 
+            fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
                 {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
             % sort descending by {not deleted, rev}
             {not DeletedA, RevA} > {not DeletedB, RevB}
@@ -282,7 +282,7 @@
                     Lang, FunSrc, EditDoc, DiskDoc, Ctx)
         end
     end.
-        
+
 
 has_stubs(#doc{attachments=Bins}) ->
     has_stubs(Bins);
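
parse_rev above splits a revision string at the first "-" into an integer
position and a binary id, and throws bad_request for anything else. A
small sketch of the expected behaviour, assuming parse_rev/1 is exported
from couch_doc; parse_rev_examples is a hypothetical test helper:

    %% Exercises the parse_rev/1 clauses shown above.
    parse_rev_examples() ->
        {1, <<"abc">>} = couch_doc:parse_rev("1-abc"),
        {2, <<"def">>} = couch_doc:parse_rev(<<"2-def">>),
        %% a rev with no "-" separator is rejected
        try couch_doc:parse_rev("bogus") of
            _ -> exit(expected_throw)
        catch
            throw:{bad_request, _} -> ok
        end.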

Modified: couchdb/trunk/src/couchdb/couch_erl_driver.c
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_erl_driver.c?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_erl_driver.c (original)
+++ couchdb/trunk/src/couchdb/couch_erl_driver.c Mon Jul  6 00:33:50 2009
@@ -56,7 +56,7 @@
         return ERL_DRV_ERROR_GENERAL;
 
     pData->port = port;
-    
+
     pData->coll = ucol_open("", &status);
     if (U_FAILURE(status)) {
         couch_drv_stop((ErlDrvData)pData);
@@ -140,7 +140,7 @@
 
         return return_control_result(&response, sizeof(response), rbuf, rlen);
         }
-      
+
     default:
         return -1;
     }

Modified: couchdb/trunk/src/couchdb/couch_external_manager.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_external_manager.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_external_manager.erl (original)
+++ couchdb/trunk/src/couchdb/couch_external_manager.erl Mon Jul  6 00:33:50 2009
@@ -19,7 +19,7 @@
 -include("couch_db.hrl").
 
 start_link() ->
-    gen_server:start_link({local, couch_external_manager}, 
+    gen_server:start_link({local, couch_external_manager},
         couch_external_manager, [], []).
 
 execute(UrlName, JsonReq) ->

Modified: couchdb/trunk/src/couchdb/couch_external_server.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_external_server.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_external_server.erl (original)
+++ couchdb/trunk/src/couchdb/couch_external_server.erl Mon Jul  6 00:33:50 2009
@@ -14,7 +14,7 @@
 -behaviour(gen_server).
 
 -export([start_link/2, stop/1, execute/2]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]). 
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
 
 -include("couch_db.hrl").
 

Modified: couchdb/trunk/src/couchdb/couch_file.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_file.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_file.erl (original)
+++ couchdb/trunk/src/couchdb/couch_file.erl Mon Jul  6 00:33:50 2009
@@ -36,7 +36,7 @@
 
 open(Filepath) ->
     open(Filepath, []).
-    
+
 open(Filepath, Options) ->
     case gen_server:start_link(couch_file,
             {Filepath, Options, self(), Ref = make_ref()}, []) of
@@ -76,7 +76,7 @@
 %%  serialized  term. Use pread_term to read the term back.
 %%  or {error, Reason}.
 %%----------------------------------------------------------------------
-    
+
 append_binary(Fd, Bin) ->
     Size = iolist_size(Bin),
     gen_server:call(Fd, {append_bin, [<<Size:32/integer>>, Bin]}, infinity).
@@ -89,7 +89,7 @@
 %%  or {error, Reason}.
 %%----------------------------------------------------------------------
 
-    
+
 pread_term(Fd, Pos) ->
     {ok, Bin} = pread_binary(Fd, Pos),
     {ok, binary_to_term(Bin)}.
@@ -178,14 +178,14 @@
     Else ->
         Else
     end.
-    
+
 write_header(Fd, Data) ->
     Bin = term_to_binary(Data),
     Md5 = erlang:md5(Bin),
     % now we assemble the final header binary and write to disk
     FinalBin = <<Md5/binary, Bin/binary>>,
     gen_server:call(Fd, {write_header, FinalBin}, infinity).
-    
+
 
 
 
@@ -301,7 +301,7 @@
 handle_call(find_header, _From, #file{fd=Fd}=File) ->
     {ok, Pos} = file:position(Fd, eof),
     {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-        
+
 % 09 UPGRADE CODE
 -define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
 
@@ -349,7 +349,7 @@
     _ ->
         Result
     end.
-    
+
 % 09 UPGRADE CODE
 extract_header(Prefix, Bin) ->
     SizeOfPrefix = size(Prefix),
@@ -373,7 +373,7 @@
     _ ->
         unknown_header_type
     end.
-    
+
 
 % 09 UPGRADE CODE
 write_old_header(Fd, Prefix, Data) ->
@@ -401,7 +401,7 @@
     ok = file:pwrite(Fd, 0, DblWriteBin),
     ok = file:sync(Fd).
 
-    
+
 handle_cast(close, Fd) ->
     {stop,normal,Fd}.
 
@@ -422,14 +422,14 @@
     _Error ->
         find_header(Fd, Block -1)
     end.
-    
+
 load_header(Fd, Block) ->
     {ok, <<1>>} = file:pread(Fd, Block*?SIZE_BLOCK, 1),
     {ok, <<HeaderLen:32/integer>>} = file:pread(Fd, (Block*?SIZE_BLOCK) + 1, 4),
     TotalBytes = calculate_total_read_len(1, HeaderLen),
-    {ok, <<RawBin:TotalBytes/binary>>} = 
+    {ok, <<RawBin:TotalBytes/binary>>} =
             file:pread(Fd, (Block*?SIZE_BLOCK) + 5, TotalBytes),
-    <<Md5Sig:16/binary, HeaderBin/binary>> = 
+    <<Md5Sig:16/binary, HeaderBin/binary>> =
         iolist_to_binary(remove_block_prefixes(1, RawBin)),
     Md5Sig = erlang:md5(HeaderBin),
     {ok, HeaderBin}.
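
write_header above prefixes the serialized header with its MD5 so that
load_header can verify integrity when scanning back from the end of the
file. A minimal append/read round trip using the calls shown in this
module; the file path and open options are assumptions for illustration:

    %% Writes a term, reads it back by its file position, and closes.
    roundtrip() ->
        {ok, Fd} = couch_file:open("/tmp/example.couch", [create, overwrite]),
        {ok, Pos} = couch_file:append_term(Fd, {hello, [1, 2, 3]}),
        {ok, {hello, [1, 2, 3]}} = couch_file:pread_term(Fd, Pos),
        couch_file:close(Fd).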

Modified: couchdb/trunk/src/couchdb/couch_httpd.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_httpd.erl?rev=791350&r1=791349&r2=791350&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_httpd.erl (original)
+++ couchdb/trunk/src/couchdb/couch_httpd.erl Mon Jul  6 00:33:50 2009
@@ -34,17 +34,17 @@
 
     BindAddress = couch_config:get("httpd", "bind_address", any),
     Port = couch_config:get("httpd", "port", "5984"),
-    
+
     DefaultSpec = "{couch_httpd_db, handle_request}",
     DefaultFun = make_arity_1_fun(
         couch_config:get("httpd", "default_handler", DefaultSpec)
     ),
-    
+
     UrlHandlersList = lists:map(
         fun({UrlKey, SpecStr}) ->
             {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
         end, couch_config:get("httpd_global_handlers")),
-        
+
     DbUrlHandlersList = lists:map(
         fun({UrlKey, SpecStr}) ->
             {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
@@ -65,7 +65,7 @@
     end,
 
     % and off we go
-    
+
     {ok, Pid} = case mochiweb_http:start([
         {loop, Loop},
         {name, ?MODULE},
@@ -93,7 +93,7 @@
 
     {ok, Pid}.
 
-% SpecStr is a string like "{my_module, my_fun}" 
+% SpecStr is a string like "{my_module, my_fun}"
 %  or "{my_module, my_fun, <<"my_arg">>}"
 make_arity_1_fun(SpecStr) ->
     case couch_util:parse_term(SpecStr) of
@@ -110,11 +110,11 @@
     {ok, {Mod, Fun}} ->
         fun(Arg1, Arg2) -> apply(Mod, Fun, [Arg1, Arg2]) end
     end.
-    
+
 
 stop() ->
     mochiweb_http:stop(?MODULE).
-    
+
 
 handle_request(MochiReq, DefaultFun,
         UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
@@ -125,7 +125,7 @@
     % removed, but URL quoting left intact
     RawUri = MochiReq:get(raw_path),
     {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-    
+
     HandlerKey =
     case mochiweb_util:partition(Path, "/") of
     {"", "", ""} ->
@@ -139,19 +139,19 @@
         MochiReq:get(version),
         mochiweb_headers:to_list(MochiReq:get(headers))
     ]),
-    
+
     Method1 =
     case MochiReq:get(method) of
         % already an atom
         Meth when is_atom(Meth) -> Meth;
-        
+
        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.), so convert when
        % possible (if any module references the atom, it already exists).
         Meth -> couch_util:to_existing_atom(Meth)
     end,
-    
+
     increment_method_stats(Method1),
-    
+
     % alias HEAD to GET as mochiweb takes care of stripping the body
     Method = case Method1 of
         'HEAD' -> 'GET';
@@ -264,13 +264,13 @@
 
 primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
     MochiReq:get_primary_header_value(Key).
-    
+
 serve_file(#httpd{mochi_req=MochiReq}, RelativePath, DocumentRoot) ->
     {ok, MochiReq:serve_file(RelativePath, DocumentRoot, server_header())}.
 
 qs_value(Req, Key) ->
     qs_value(Req, Key, undefined).
-    
+
 qs_value(Req, Key, Default) ->
     proplists:get_value(Key, qs(Req), Default).
 
@@ -319,7 +319,7 @@
 json_body_obj(Httpd) ->
     case json_body(Httpd) of
         {Props} -> {Props};
-        _Else -> 
+        _Else ->
             throw({bad_request, "Request body must be a JSON object"})
     end.
 
@@ -457,7 +457,7 @@
     end,
     put(jsonp, undefined),
     Resp.
-        
+
 validate_callback(CallBack) when is_binary(CallBack) ->
     validate_callback(binary_to_list(CallBack));
 validate_callback([]) ->
@@ -507,10 +507,10 @@
 
 send_error(_Req, {already_sent, Resp, _Error}) ->
     {ok, Resp};
-    
+
 send_error(Req, Error) ->
     {Code, ErrorStr, ReasonStr} = error_info(Error),
-    if Code == 401 ->     
+    if Code == 401 ->
         case couch_config:get("httpd", "WWW-Authenticate", nil) of
         nil ->
             Headers = [];
@@ -524,7 +524,7 @@
 
 send_error(Req, Code, ErrorStr, ReasonStr) ->
     send_error(Req, Code, [], ErrorStr, ReasonStr).
-    
+
 send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
     send_json(Req, Code, Headers,
         {[{<<"error">>,  ErrorStr},
@@ -538,7 +538,7 @@
 send_chunked_error(Resp, Error) ->
     {Code, ErrorStr, ReasonStr} = error_info(Error),
     JsonError = {[{<<"code">>, Code},
-        {<<"error">>,  ErrorStr}, 
+        {<<"error">>,  ErrorStr},
         {<<"reason">>, ReasonStr}]},
     send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
     send_chunk(Resp, []).
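
make_arity_1_fun above turns an ini spec string such as
"{my_module, my_fun}" into a closure via couch_util:parse_term. A hedged
sketch mirroring that resolution; spec_to_fun is a hypothetical name, and
the three-element spec form follows the comment in this file:

    %% Resolves a "{Mod, Fun}" or "{Mod, Fun, Arg}" spec string to a fun/1.
    spec_to_fun(SpecStr) ->
        case couch_util:parse_term(SpecStr) of
        {ok, {Mod, Fun, SpecArg}} ->
            fun(Arg) -> apply(Mod, Fun, [Arg, SpecArg]) end;
        {ok, {Mod, Fun}} ->
            fun(Arg) -> apply(Mod, Fun, [Arg]) end
        end.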