You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by jc...@apache.org on 2009/03/13 23:15:35 UTC
svn commit: r753448 [1/2] - in /couchdb/trunk: share/ share/www/script/
share/www/script/test/ src/couchdb/
Author: jchris
Date: Fri Mar 13 22:15:34 2009
New Revision: 753448
URL: http://svn.apache.org/viewvc?rev=753448&view=rev
Log:
Commit Damien's rep_security branch to trunk.
Changes bulk_docs conflict checking.
Breaks file format, see mailing list for data upgrade procedure, or
http://wiki.apache.org/couchdb/Breaking_changes
Added:
couchdb/trunk/share/www/script/test/rev_stemming.js (with props)
Modified:
couchdb/trunk/share/Makefile.am
couchdb/trunk/share/www/script/couch.js
couchdb/trunk/share/www/script/couch_tests.js
couchdb/trunk/share/www/script/test/attachment_names.js
couchdb/trunk/share/www/script/test/attachments.js
couchdb/trunk/share/www/script/test/basics.js
couchdb/trunk/share/www/script/test/bulk_docs.js
couchdb/trunk/share/www/script/test/compact.js
couchdb/trunk/share/www/script/test/design_docs.js
couchdb/trunk/share/www/script/test/etags_views.js
couchdb/trunk/share/www/script/test/invalid_docids.js
couchdb/trunk/share/www/script/test/list_views.js
couchdb/trunk/share/www/script/test/lots_of_docs.js
couchdb/trunk/share/www/script/test/purge.js
couchdb/trunk/share/www/script/test/reduce.js
couchdb/trunk/share/www/script/test/reduce_false.js
couchdb/trunk/share/www/script/test/replication.js
couchdb/trunk/share/www/script/test/security_validation.js
couchdb/trunk/share/www/script/test/view_include_docs.js
couchdb/trunk/share/www/script/test/view_multi_key_all_docs.js
couchdb/trunk/share/www/script/test/view_multi_key_design.js
couchdb/trunk/share/www/script/test/view_multi_key_temp.js
couchdb/trunk/share/www/script/test/view_pagination.js
couchdb/trunk/src/couchdb/couch_db.erl
couchdb/trunk/src/couchdb/couch_db.hrl
couchdb/trunk/src/couchdb/couch_db_updater.erl
couchdb/trunk/src/couchdb/couch_doc.erl
couchdb/trunk/src/couchdb/couch_httpd.erl
couchdb/trunk/src/couchdb/couch_httpd_db.erl
couchdb/trunk/src/couchdb/couch_httpd_misc_handlers.erl
couchdb/trunk/src/couchdb/couch_httpd_show.erl
couchdb/trunk/src/couchdb/couch_httpd_view.erl
couchdb/trunk/src/couchdb/couch_key_tree.erl
couchdb/trunk/src/couchdb/couch_rep.erl
couchdb/trunk/src/couchdb/couch_util.erl
Modified: couchdb/trunk/share/Makefile.am
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/Makefile.am?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/Makefile.am (original)
+++ couchdb/trunk/share/Makefile.am Fri Mar 13 22:15:34 2009
@@ -104,6 +104,7 @@
www/script/test/view_sandboxing.js \
www/script/test/view_xml.js \
www/script/test/replication.js \
+ www/script/test/rev_stemming.js \
www/script/test/etags_head.js \
www/script/test/etags_views.js \
www/script/test/show_documents.js \
Modified: couchdb/trunk/share/www/script/couch.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/couch.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/couch.js [utf-8] (original)
+++ couchdb/trunk/share/www/script/couch.js [utf-8] Fri Mar 13 22:15:34 2009
@@ -98,15 +98,26 @@
if (docs[i]._id == undefined)
docs[i]._id = newUuids.pop();
}
- this.last_req = this.request("POST", this.uri + "_bulk_docs" + encodeOptions(options), {
- body: JSON.stringify({"docs": docs})
+ var json = {"docs": docs};
+ // put any options in the json
+ for (var option in options) {
+ json[option] = options[option];
+ }
+ this.last_req = this.request("POST", this.uri + "_bulk_docs", {
+ body: JSON.stringify(json)
});
- CouchDB.maybeThrowError(this.last_req);
- var result = JSON.parse(this.last_req.responseText);
- for (var i = 0; i < docs.length; i++) {
- docs[i]._rev = result.new_revs[i].rev;
+ if (this.last_req.status == 417) {
+ return {errors: JSON.parse(this.last_req.responseText)};
+ }
+ else {
+ CouchDB.maybeThrowError(this.last_req);
+ var results = JSON.parse(this.last_req.responseText);
+ for (var i = 0; i < docs.length; i++) {
+ if(results[i].rev)
+ docs[i]._rev = results[i].rev;
+ }
+ return results;
}
- return result;
}
this.ensureFullCommit = function() {
@@ -203,6 +214,20 @@
return JSON.parse(this.last_req.responseText);
}
+ this.setDbProperty = function(propId, propValue) {
+ this.last_req = this.request("PUT", this.uri + propId,{
+ body:JSON.stringify(propValue)
+ });
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ }
+
+ this.getDbProperty = function(propId) {
+ this.last_req = this.request("GET", this.uri + propId);
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ }
+
this.setAdmins = function(adminsArray) {
this.last_req = this.request("PUT", this.uri + "_admins",{
body:JSON.stringify(adminsArray)
@@ -283,8 +308,11 @@
return JSON.parse(CouchDB.last_req.responseText).version;
}
-CouchDB.replicate = function(source, target) {
+CouchDB.replicate = function(source, target, rep_options) {
+ rep_options = rep_options || {};
+ var headers = rep_options.headers || {};
CouchDB.last_req = CouchDB.request("POST", "/_replicate", {
+ headers: headers,
body: JSON.stringify({source: source, target: target})
});
CouchDB.maybeThrowError(CouchDB.last_req);
Modified: couchdb/trunk/share/www/script/couch_tests.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/couch_tests.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/couch_tests.js [utf-8] (original)
+++ couchdb/trunk/share/www/script/couch_tests.js [utf-8] Fri Mar 13 22:15:34 2009
@@ -68,6 +68,7 @@
loadTest("config.js");
loadTest("security_validation.js");
loadTest("stats.js");
+loadTest("rev_stemming.js");
function makeDocs(start, end, templateDoc) {
var templateDocSrc = templateDoc ? JSON.stringify(templateDoc) : "{}"
Modified: couchdb/trunk/share/www/script/test/attachment_names.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/attachment_names.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/attachment_names.js (original)
+++ couchdb/trunk/share/www/script/test/attachment_names.js Fri Mar 13 22:15:34 2009
@@ -10,7 +10,7 @@
// License for the specific language governing permissions and limitations under
// the License.
-couchTests.attatchment_names = function(debug) {
+couchTests.attachment_names = function(debug) {
var db = new CouchDB("test_suite_db");
db.deleteDb();
db.createDb();
Modified: couchdb/trunk/share/www/script/test/attachments.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/attachments.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/attachments.js (original)
+++ couchdb/trunk/share/www/script/test/attachments.js Fri Mar 13 22:15:34 2009
@@ -32,8 +32,8 @@
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
T(xhr.responseText == "This is a base64 encoded text");
T(xhr.getResponseHeader("Content-Type") == "text/plain");
- T(xhr.getResponseHeader("Etag") == save_response.rev);
-
+ T(xhr.getResponseHeader("Etag") == '"' + save_response.rev + '"');
+
// empty attachment
var binAttDoc2 = {
_id: "bin_doc2",
Modified: couchdb/trunk/share/www/script/test/basics.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/basics.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/basics.js (original)
+++ couchdb/trunk/share/www/script/test/basics.js Fri Mar 13 22:15:34 2009
@@ -14,14 +14,14 @@
couchTests.basics = function(debug) {
var result = JSON.parse(CouchDB.request("GET", "/").responseText);
T(result.couchdb == "Welcome");
-
+
var db = new CouchDB("test_suite_db");
db.deleteDb();
// bug COUCHDB-100: DELETE on non-existent DB returns 500 instead of 404
db.deleteDb();
-
-db.createDb();
+
+ db.createDb();
// PUT on existing DB should return 412 instead of 500
xhr = CouchDB.request("PUT", "/test_suite_db/");
@@ -122,14 +122,14 @@
// make sure we can still open the old rev of the deleted doc
T(db.open(existingDoc._id, {rev: existingDoc._rev}) != null);
-
+
// make sure restart works
T(db.ensureFullCommit().ok);
restartServer();
-
+
// make sure we can still open
T(db.open(existingDoc._id, {rev: existingDoc._rev}) != null);
-
+
// test that the POST response has a Location header
var xhr = CouchDB.request("POST", "/test_suite_db", {
body: JSON.stringify({"foo":"bar"})
Modified: couchdb/trunk/share/www/script/test/bulk_docs.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/bulk_docs.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/bulk_docs.js (original)
+++ couchdb/trunk/share/www/script/test/bulk_docs.js Fri Mar 13 22:15:34 2009
@@ -19,38 +19,73 @@
var docs = makeDocs(5);
// Create the docs
- var result = db.bulkSave(docs);
- T(result.ok);
- T(result.new_revs.length == 5);
+ var results = db.bulkSave(docs);
+
+ T(results.length == 5);
for (var i = 0; i < 5; i++) {
- T(result.new_revs[i].id == docs[i]._id);
- T(result.new_revs[i].rev);
+ T(results[i].id == docs[i]._id);
+ T(results[i].rev);
+ // Update the doc
docs[i].string = docs[i].string + ".00";
}
- // Update the docs
- result = db.bulkSave(docs);
- T(result.ok);
- T(result.new_revs.length == 5);
+ // Save the docs
+ results = db.bulkSave(docs);
+ T(results.length == 5);
for (i = 0; i < 5; i++) {
- T(result.new_revs[i].id == i.toString());
+ T(results[i].id == i.toString());
+
+ // set the delete flag to delete the docs in the next step
docs[i]._deleted = true;
}
+
+ // now test a bulk update with a conflict
+ // open and save
+ var doc = db.open("0");
+ db.save(doc);
- // Delete the docs
- result = db.bulkSave(docs);
- T(result.ok);
- T(result.new_revs.length == 5);
- for (i = 0; i < 5; i++) {
+ // Now bulk delete the docs
+ results = db.bulkSave(docs);
+
+ // doc "0" should be a conflict
+ T(results.length == 5);
+ T(results[0].id == "0");
+ T(results[0].error == "conflict");
+ T(results[0].rev === undefined); // no rev member when a conflict
+
+ // but the rest are not
+ for (i = 1; i < 5; i++) {
+ T(results[i].id == i.toString());
+ T(results[i].rev)
T(db.open(docs[i]._id) == null);
}
+
+ // now force a conflict to save
+
+ // save doc 0, this will cause a conflict when we save docs[0]
+ var doc = db.open("0");
+ docs[0] = db.open("0")
+ db.save(doc);
+
+ docs[0].shooby = "dooby";
+
+ // Now save the bulk docs, When we use all_or_nothing, we don't get conflict
+ // checking, all docs are saved regardless of conflict status, or none are
+ // saved.
+ results = db.bulkSave(docs,{all_or_nothing:true});
+ T(results.error === undefined);
+
+ var doc = db.open("0", {conflicts:true});
+ var docConflict = db.open("0", {rev:doc._conflicts[0]});
+
+ T(doc.shooby == "dooby" || docConflict.shooby == "dooby");
// verify creating a document with no id returns a new id
var req = CouchDB.request("POST", "/test_suite_db/_bulk_docs", {
body: JSON.stringify({"docs": [{"foo":"bar"}]})
});
- result = JSON.parse(req.responseText);
+ results = JSON.parse(req.responseText);
- T(result.new_revs[0].id != "");
- T(result.new_revs[0].rev != "");
+ T(results[0].id != "");
+ T(results[0].rev != "");
};
Modified: couchdb/trunk/share/www/script/test/compact.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/compact.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/compact.js (original)
+++ couchdb/trunk/share/www/script/test/compact.js Fri Mar 13 22:15:34 2009
@@ -16,8 +16,7 @@
db.createDb();
if (debug) debugger;
var docs = makeDocs(0, 10);
- var saveResult = db.bulkSave(docs);
- T(saveResult.ok);
+ db.bulkSave(docs);
var binAttDoc = {
_id: "bin_doc",
@@ -36,12 +35,11 @@
for(var i in docs) {
db.deleteDoc(docs[i]);
}
- db.setAdmins(["Foo bar"]);
var deletesize = db.info().disk_size;
T(deletesize > originalsize);
- var xhr = CouchDB.request("POST", "/test_suite_db/_compact");
- T(xhr.status == 202);
+ T(db.compact().ok);
+ T(db.last_req.status == 202);
// compaction isn't instantaneous, loop until done
while (db.info().compact_running) {};
Modified: couchdb/trunk/share/www/script/test/design_docs.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/design_docs.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/design_docs.js (original)
+++ couchdb/trunk/share/www/script/test/design_docs.js Fri Mar 13 22:15:34 2009
@@ -43,7 +43,7 @@
}
T(db.save(designDoc).ok);
- T(db.bulkSave(makeDocs(1, numDocs + 1)).ok);
+ db.bulkSave(makeDocs(1, numDocs + 1));
// test that the _all_docs view returns correctly with keys
var results = db.allDocs({startkey:"_design", endkey:"_design0"});
Modified: couchdb/trunk/share/www/script/test/etags_views.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/etags_views.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/etags_views.js (original)
+++ couchdb/trunk/share/www/script/test/etags_views.js Fri Mar 13 22:15:34 2009
@@ -42,8 +42,7 @@
T(db.save(designDoc).ok);
var xhr;
var docs = makeDocs(0, 10);
- var saveResult = db.bulkSave(docs);
- T(saveResult.ok);
+ db.bulkSave(docs);
// verify get w/Etag on map view
xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
Modified: couchdb/trunk/share/www/script/test/invalid_docids.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/invalid_docids.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/invalid_docids.js (original)
+++ couchdb/trunk/share/www/script/test/invalid_docids.js Fri Mar 13 22:15:34 2009
@@ -23,32 +23,32 @@
//Test non-string
try {
db.save({"_id": 1});
- T(1 == 0);
+ T(1 == 0, "doc id must be string");
} catch(e) {
T(db.last_req.status == 400);
- T(e.error == "invalid_doc");
+ T(e.error == "bad_request");
}
// Test invalid _prefix
try {
db.save({"_id": "_invalid"});
- T(1 == 0);
+ T(1 == 0, "doc id may not start with underscore");
} catch(e) {
T(db.last_req.status == 400);
- T(e.error == "invalid_doc");
+ T(e.error == "bad_request");
}
// Test _bulk_docs explicitly.
var docs = [{"_id": "_design/foo"}, {"_id": "_local/bar"}];
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
docs.forEach(function(d) {T(db.open(d._id)._id == d._id);});
docs = [{"_id": "_invalid"}];
try {
db.bulkSave(docs);
- T(1 == 0);
+ T(1 == 0, "doc id may not start with underscore, even in bulk docs");
} catch(e) {
T(db.last_req.status == 400);
- T(e.error == "invalid_doc");
+ T(e.error == "bad_request");
}
};
Modified: couchdb/trunk/share/www/script/test/list_views.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/list_views.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/list_views.js (original)
+++ couchdb/trunk/share/www/script/test/list_views.js Fri Mar 13 22:15:34 2009
@@ -137,8 +137,7 @@
T(db.save(designDoc).ok);
var docs = makeDocs(0, 10);
- var saveResult = db.bulkSave(docs);
- T(saveResult.ok);
+ db.bulkSave(docs);
var view = db.view('lists/basicView');
T(view.total_rows == 10);
@@ -207,8 +206,7 @@
// verify the etags expire correctly
var docs = makeDocs(11, 12);
- var saveResult = db.bulkSave(docs);
- T(saveResult.ok);
+ db.bulkSave(docs);
xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true", {
headers: {"if-none-match": etag}
Modified: couchdb/trunk/share/www/script/test/lots_of_docs.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/lots_of_docs.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/lots_of_docs.js (original)
+++ couchdb/trunk/share/www/script/test/lots_of_docs.js Fri Mar 13 22:15:34 2009
@@ -25,7 +25,7 @@
for(var i=0; i < numDocsToCreate; i += 100) {
var createNow = Math.min(numDocsToCreate - i, 100);
var docs = makeDocs(i, i + createNow);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
}
// query all documents, and return the doc.integer member as a key.
Modified: couchdb/trunk/share/www/script/test/purge.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/purge.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/purge.js (original)
+++ couchdb/trunk/share/www/script/test/purge.js Fri Mar 13 22:15:34 2009
@@ -34,7 +34,7 @@
T(db.save(designDoc).ok);
- T(db.bulkSave(makeDocs(1, numDocs + 1)).ok);
+ db.bulkSave(makeDocs(1, numDocs + 1));
// go ahead and validate the views before purging
var rows = db.view("test/all_docs_twice").rows;
Modified: couchdb/trunk/share/www/script/test/reduce.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/reduce.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/reduce.js (original)
+++ couchdb/trunk/share/www/script/test/reduce.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var numDocs = 500
var docs = makeDocs(1,numDocs + 1);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var summate = function(N) {return (N+1)*N/2;};
var map = function (doc) {
@@ -65,7 +65,7 @@
docs.push({keys:["d", "a"]});
docs.push({keys:["d", "b"]});
docs.push({keys:["d", "c"]});
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
T(db.info().doc_count == ((i - 1) * 10 * 11) + ((j + 1) * 11));
}
@@ -157,7 +157,7 @@
docs.push({val:80});
docs.push({val:90});
docs.push({val:100});
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
}
var results = db.query(map, reduceCombine);
Modified: couchdb/trunk/share/www/script/test/reduce_false.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/reduce_false.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/reduce_false.js (original)
+++ couchdb/trunk/share/www/script/test/reduce_false.js Fri Mar 13 22:15:34 2009
@@ -18,7 +18,7 @@
var numDocs = 5;
var docs = makeDocs(1,numDocs + 1);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var summate = function(N) {return (N+1)*N/2;};
var designDoc = {
Modified: couchdb/trunk/share/www/script/test/replication.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/replication.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/replication.js (original)
+++ couchdb/trunk/share/www/script/test/replication.js Fri Mar 13 22:15:34 2009
@@ -59,7 +59,7 @@
simple_test: new function () {
this.init = function(dbA, dbB) {
var docs = makeDocs(0, numDocs);
- T(dbA.bulkSave(docs).ok);
+ dbA.bulkSave(docs);
};
this.afterAB1 = function(dbA, dbB) {
@@ -160,7 +160,7 @@
dbA.deleteDoc({_id:"foo", _rev:docA._conflicts[0]});
};
- this.afterBA2 = function(dbA, dbB) {
+ this.afterBA2 = function(dbA, dbB) {
// open documents and include the conflict meta data
var docA = dbA.open("foo", {conflicts: true});
var docB = dbB.open("foo", {conflicts: true});
@@ -179,29 +179,56 @@
}
}
- T(CouchDB.replicate(A, B).ok);
+ var result = CouchDB.replicate(A, B);
+
+ var seqA = result.source_last_seq;
+ T(0 == result.history[0].start_last_seq);
+ T(result.history[1] === undefined)
for(test in repTests) {
if(repTests[test].afterAB1) repTests[test].afterAB1(dbA, dbB);
}
- T(CouchDB.replicate(B, A).ok);
+ result = CouchDB.replicate(B, A);
+
+ var seqB = result.source_last_seq;
+ T(0 == result.history[0].start_last_seq);
+ T(result.history[1] === undefined)
for(test in repTests) {
if(repTests[test].afterBA1) repTests[test].afterBA1(dbA, dbB);
}
- T(CouchDB.replicate(A, B).ok);
+ var result2 = CouchDB.replicate(A, B);
+
+ // each successful replication produces a new session id
+ T(result2.session_id != result.session_id);
+
+ T(seqA < result2.source_last_seq);
+ T(seqA == result2.history[0].start_last_seq);
+ T(result2.history[1].end_last_seq == seqA)
+
+ seqA = result2.source_last_seq;
for(test in repTests) {
if(repTests[test].afterAB2) repTests[test].afterAB2(dbA, dbB);
}
- T(CouchDB.replicate(B, A).ok);
+ result = CouchDB.replicate(B, A)
+
+ T(seqB < result.source_last_seq);
+ T(seqB == result.history[0].start_last_seq);
+ T(result.history[1].end_last_seq == seqB)
+
+ seqB = result.source_last_seq;
for(test in repTests) {
if(repTests[test].afterBA2) repTests[test].afterBA2(dbA, dbB);
}
-
+
+ // do a replication where nothing has changed
+ result2 = CouchDB.replicate(B, A);
+ T(result2.no_changes == true);
+ T(result2.session_id == result.session_id);
}
};
Added: couchdb/trunk/share/www/script/test/rev_stemming.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/rev_stemming.js?rev=753448&view=auto
==============================================================================
--- couchdb/trunk/share/www/script/test/rev_stemming.js (added)
+++ couchdb/trunk/share/www/script/test/rev_stemming.js Fri Mar 13 22:15:34 2009
@@ -0,0 +1,93 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.rev_stemming = function(debug) {
+ var db = new CouchDB("test_suite_db_a");
+ db.deleteDb();
+ db.createDb();
+ var dbB = new CouchDB("test_suite_db_b");
+ dbB.deleteDb();
+ dbB.createDb();
+ if (debug) debugger;
+
+ var newLimit = 5;
+
+ T(db.getDbProperty("_revs_limit") == 1000);
+
+ var doc = {_id:"foo",foo:0}
+ for( var i=0; i < newLimit + 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+ var doc0 = db.open("foo", {revs:true});
+ T(doc0._revisions.ids.length == newLimit + 1);
+
+ var docBar = {_id:"bar",foo:0}
+ for( var i=0; i < newLimit + 1; i++) {
+ docBar.foo++;
+ T(db.save(docBar).ok);
+ }
+ T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
+
+ T(db.setDbProperty("_revs_limit", newLimit).ok);
+
+ for( var i=0; i < newLimit + 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+ doc0 = db.open("foo", {revs:true});
+ T(doc0._revisions.ids.length == newLimit);
+
+
+ // If you replicate after you make more edits than the limit, you'll
+ // cause a spurious edit conflict.
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+ var docB1 = dbB.open("foo",{conflicts:true})
+ T(docB1._conflicts == null);
+
+ for( var i=0; i < newLimit - 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+
+ // one less edit than limit, no conflict
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+ var docB1 = dbB.open("foo",{conflicts:true})
+ T(docB1._conflicts == null);
+
+ //now we hit the limit
+ for( var i=0; i < newLimit; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+
+ var docB2 = dbB.open("foo",{conflicts:true});
+
+ // we have a conflict, but the previous replicated rev is always the losing
+ // conflict
+ T(docB2._conflicts[0] == docB1._rev)
+
+ // We have already updated bar before setting the limit, so it's still got
+ // a long rev history. compact to stem the revs.
+
+ T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
+
+ T(db.compact().ok);
+
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+
+ T(db.open("bar", {revs:true})._revisions.ids.length == newLimit);
+
+};
Propchange: couchdb/trunk/share/www/script/test/rev_stemming.js
------------------------------------------------------------------------------
svn:eol-style = native
Modified: couchdb/trunk/share/www/script/test/security_validation.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/security_validation.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/security_validation.js (original)
+++ couchdb/trunk/share/www/script/test/security_validation.js Fri Mar 13 22:15:34 2009
@@ -31,28 +31,27 @@
// the "username:password" string, it's sent completely plain text.
// Firefox and Safari both deal with this correctly (which is to say
// they correctly do nothing special).
-
-
+
+
var db = new CouchDB("test_suite_db");
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
run_on_modified_server(
[{section: "httpd",
key: "authentication_handler",
value: "{couch_httpd, special_test_authentication_handler}"},
- {section:"httpd",
+ {section:"httpd",
key: "WWW-Authenticate",
value: "X-Couch-Test-Auth"}],
-
- function () {
+ function () {
// try saving a document using the wrong credentials
var wrongPasswordDb = new CouchDB("test_suite_db",
{"WWW-Authenticate": "X-Couch-Test-Auth Damien Katz:foo"}
);
-
+
try {
wrongPasswordDb.save({foo:1,author:"Damien Katz"});
T(false && "Can't get here. Should have thrown an error 1");
@@ -60,8 +59,8 @@
T(e.error == "unauthorized");
T(wrongPasswordDb.last_req.status == 401);
}
-
-
+
+
// Create the design doc that will run custom validation code
var designDoc = {
_id:"_design/test",
@@ -83,9 +82,9 @@
var userDb = new CouchDB("test_suite_db",
{"WWW-Authenticate": "X-Couch-Test-Auth Damien Katz:pecan pie"}
);
-
+
T(userDb.save({_id:"testdoc", foo:1, author:"Damien Katz"}).ok);
-
+
// Attempt to save the design as a non-admin
try {
userDb.save(designDoc);
@@ -94,17 +93,17 @@
T(e.error == "unauthorized");
T(userDb.last_req.status == 401);
}
-
- // add user as admin
- db.setAdmins(["Damien Katz"]);
-
+
+ // set user as the admin
+ T(db.setDbProperty("_admins", ["Damien Katz"]).ok);
+
T(userDb.save(designDoc).ok);
-
+
// update the document
var doc = userDb.open("testdoc");
doc.foo=2;
T(userDb.save(doc).ok);
-
+
// Save a document that's missing an author field.
try {
userDb.save({foo:1});
@@ -113,12 +112,12 @@
T(e.error == "forbidden");
T(userDb.last_req.status == 403);
}
-
+
// Now attempt to update the document as a different user, Jan
var user2Db = new CouchDB("test_suite_db",
{"WWW-Authenticate": "X-Couch-Test-Auth Jan Lehnardt:apple"}
);
-
+
var doc = user2Db.open("testdoc");
doc.foo=3;
try {
@@ -128,17 +127,17 @@
T(e.error == "unauthorized");
T(user2Db.last_req.status == 401);
}
-
+
// Now have Damien change the author to Jan
doc = userDb.open("testdoc");
doc.author="Jan Lehnardt";
T(userDb.save(doc).ok);
-
+
// Now update the document as Jan
doc = user2Db.open("testdoc");
doc.foo = 3;
T(user2Db.save(doc).ok);
-
+
// Damien can't delete it
try {
userDb.deleteDoc(doc);
@@ -147,8 +146,126 @@
T(e.error == "unauthorized");
T(userDb.last_req.status == 401);
}
-
+
// Now delete document
- T(user2Db.deleteDoc(doc).ok);
+ T(user2Db.deleteDoc(doc).ok);
+
+ // now test bulk docs
+ var docs = [{_id:"bahbah",author:"Damien Katz",foo:"bar"},{_id:"fahfah",foo:"baz"}];
+
+ // Create the docs
+ var results = db.bulkSave(docs);
+
+ T(results[0].rev)
+ T(results[0].error == undefined)
+ T(results[1].rev === undefined)
+ T(results[1].error == "forbidden")
+
+ T(db.open("bahbah"));
+ T(db.open("fahfah") == null);
+
+
+ // now all or nothing with a failure
+ var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
+
+ // Create the docs
+ var results = db.bulkSave(docs, {all_or_nothing:true});
+
+ T(results.errors.length == 1);
+ T(results.errors[0].error == "forbidden");
+ T(db.open("booboo") == null);
+ T(db.open("foofoo") == null);
+
+
+ // Now test replication
+ var AuthHeaders = {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"};
+ var host = CouchDB.host;
+ var dbPairs = [
+ {source:"test_suite_db_a",
+ target:"test_suite_db_b"},
+
+ {source:"test_suite_db_a",
+ target:{url: "http://" + host + "/test_suite_db_b",
+ headers: AuthHeaders}},
+
+ {source:{url:"http://" + host + "/test_suite_db_a",
+ headers: AuthHeaders},
+ target:"test_suite_db_b"},
+
+ {source:{url:"http://" + host + "/test_suite_db_a",
+ headers: AuthHeaders},
+ target:{url:"http://" + host + "/test_suite_db_b",
+ headers: AuthHeaders}},
+ ]
+ var adminDbA = new CouchDB("test_suite_db_a");
+ var adminDbB = new CouchDB("test_suite_db_b");
+ var dbA = new CouchDB("test_suite_db_a",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"});
+ var dbB = new CouchDB("test_suite_db_b",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"});
+ var xhr;
+ for (var testPair = 0; testPair < dbPairs.length; testPair++) {
+ var A = dbPairs[testPair].source
+ var B = dbPairs[testPair].target
+
+ adminDbA.deleteDb();
+ adminDbA.createDb();
+ adminDbB.deleteDb();
+ adminDbB.createDb();
+
+ // save and replicate documents that will and will not pass our design
+ // doc validation function.
+ dbA.save({_id:"foo1",value:"a",author:"Noah Slater"});
+ dbA.save({_id:"foo2",value:"a",author:"Christopher Lenz"});
+ dbA.save({_id:"bad1",value:"a"});
+
+ T(CouchDB.replicate(A, B, {headers:AuthHeaders}).ok);
+ T(CouchDB.replicate(B, A, {headers:AuthHeaders}).ok);
+
+ T(dbA.open("foo1"));
+ T(dbB.open("foo1"));
+ T(dbA.open("foo2"));
+ T(dbB.open("foo2"));
+
+ // save the design doc to dbA
+ delete designDoc._rev; // clear rev from previous saves
+ adminDbA.save(designDoc);
+
+ // no effect on already saved docs
+ T(dbA.open("bad1"));
+
+ // Update some docs on dbB. Since the design hasn't replicated, anything
+ // is allowed.
+
+ // this edit will fail validation on replication to dbA (no author)
+ T(dbB.save({_id:"bad2",value:"a"}).ok);
+
+ // this edit will fail security on replication to dbA (wrong author
+ // replicating the change)
+ var foo1 = dbB.open("foo1");
+ foo1.value = "b";
+ dbB.save(foo1);
+
+ // this is a legal edit
+ var foo2 = dbB.open("foo2");
+ foo2.value = "b";
+ dbB.save(foo2);
+
+ var results = CouchDB.replicate(B, A, {headers:AuthHeaders});
+
+ T(results.ok);
+
+ T(results.history[0].docs_written == 1);
+ T(results.history[0].doc_write_failures == 2);
+
+ // bad2 should not be on dbA
+ T(dbA.open("bad2") == null);
+
+ // The edit to foo1 should not have replicated.
+ T(dbA.open("foo1").value == "a");
+
+ // The edit to foo2 should have replicated.
+ T(dbA.open("foo2").value == "b");
+ }
});
};
Modified: couchdb/trunk/share/www/script/test/view_include_docs.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_include_docs.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_include_docs.js (original)
+++ couchdb/trunk/share/www/script/test/view_include_docs.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var docs = makeDocs(0, 100);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var designDoc = {
_id:"_design/test",
Modified: couchdb/trunk/share/www/script/test/view_multi_key_all_docs.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_multi_key_all_docs.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_multi_key_all_docs.js (original)
+++ couchdb/trunk/share/www/script/test/view_multi_key_all_docs.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var docs = makeDocs(0, 100);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var keys = ["10","15","30","37","50"];
var rows = db.allDocs({},keys).rows;
Modified: couchdb/trunk/share/www/script/test/view_multi_key_design.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_multi_key_design.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_multi_key_design.js (original)
+++ couchdb/trunk/share/www/script/test/view_multi_key_design.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var docs = makeDocs(0, 100);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var designDoc = {
_id:"_design/test",
Modified: couchdb/trunk/share/www/script/test/view_multi_key_temp.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_multi_key_temp.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_multi_key_temp.js (original)
+++ couchdb/trunk/share/www/script/test/view_multi_key_temp.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var docs = makeDocs(0, 100);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var queryFun = function(doc) { emit(doc.integer, doc.integer) };
var reduceFun = function (keys, values) { return sum(values); };
Modified: couchdb/trunk/share/www/script/test/view_pagination.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/view_pagination.js?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/view_pagination.js (original)
+++ couchdb/trunk/share/www/script/test/view_pagination.js Fri Mar 13 22:15:34 2009
@@ -17,7 +17,7 @@
if (debug) debugger;
var docs = makeDocs(0, 100);
- T(db.bulkSave(docs).ok);
+ db.bulkSave(docs);
var queryFun = function(doc) { emit(doc.integer, null) };
var i;
Modified: couchdb/trunk/src/couchdb/couch_db.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db.erl?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db.erl (original)
+++ couchdb/trunk/src/couchdb/couch_db.erl Fri Mar 13 22:15:34 2009
@@ -17,6 +17,7 @@
-export([open_ref_counted/2,is_idle/1,monitor/1,count_changes_since/2]).
-export([update_doc/3,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
-export([get_doc_info/2,open_doc/2,open_doc/3,open_doc_revs/4]).
+-export([set_revs_limit/2,get_revs_limit/1]).
-export([get_missing_revs/2,name/1,doc_to_tree/1,get_update_seq/1,get_committed_update_seq/1]).
-export([enum_docs/4,enum_docs/5,enum_docs_since/4,enum_docs_since/5]).
-export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
@@ -184,22 +185,42 @@
],
{ok, InfoList}.
+check_is_admin(#db{admins=Admins, user_ctx=#user_ctx{name=Name,roles=Roles}}) ->
+ DbAdmins = [<<"_admin">> | Admins],
+ case DbAdmins -- [Name | Roles] of
+ DbAdmins -> % same list, not an admin
+ throw({unauthorized, <<"You are not a db or server admin.">>});
+ _ ->
+ ok
+ end.
+
get_admins(#db{admins=Admins}) ->
Admins.
-set_admins(#db{update_pid=UpdatePid,user_ctx=Ctx},
- Admins) when is_list(Admins) ->
- case gen_server:call(UpdatePid, {set_admins, Admins, Ctx}, infinity) of
- ok -> ok;
- Error -> throw(Error)
- end.
+set_admins(#db{update_pid=Pid}=Db, Admins) when is_list(Admins) ->
+ check_is_admin(Db),
+ gen_server:call(Pid, {set_admins, Admins}, infinity).
+
+
+get_revs_limit(#db{revs_limit=Limit}) ->
+ Limit.
+
+set_revs_limit(#db{update_pid=Pid}=Db, Limit) when Limit > 0 ->
+ check_is_admin(Db),
+ gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
+set_revs_limit(_Db, _Limit) ->
+ throw(invalid_revs_limit).
name(#db{name=Name}) ->
Name.
update_doc(Db, Doc, Options) ->
- {ok, [NewRev]} = update_docs(Db, [Doc], Options),
- {ok, NewRev}.
+ case update_docs(Db, [Doc], Options) of
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {ok, [Error]} ->
+ throw(Error)
+ end.
update_docs(Db, Docs) ->
update_docs(Db, Docs, []).
@@ -227,146 +248,283 @@
validate_doc_update(#db{user_ctx=UserCtx, admins=Admins},
- #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
+ #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
UserNames = [UserCtx#user_ctx.name | UserCtx#user_ctx.roles],
% if the user is a server admin or db admin, allow the save
case length(UserNames -- [<<"_admin">> | Admins]) == length(UserNames) of
true ->
% not an admin
- throw({unauthorized, <<"You are not a server or database admin.">>});
+ {unauthorized, <<"You are not a server or database admin.">>};
false ->
- Doc
+ ok
end;
-validate_doc_update(#db{validate_doc_funs=[]}, Doc, _GetDiskDocFun) ->
- Doc;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}=Doc, _GetDiskDocFun) ->
- Doc;
+validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
+ ok;
+validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
+ ok;
validate_doc_update(#db{name=DbName,user_ctx=Ctx}=Db, Doc, GetDiskDocFun) ->
DiskDoc = GetDiskDocFun(),
JsonCtx = {[{<<"db">>, DbName},
{<<"name">>,Ctx#user_ctx.name},
{<<"roles">>,Ctx#user_ctx.roles}]},
- [case Fun(Doc, DiskDoc, JsonCtx) of
- ok -> ok;
- Error -> throw(Error)
- end || Fun <- Db#db.validate_doc_funs],
- Doc.
+ try [case Fun(Doc, DiskDoc, JsonCtx) of
+ ok -> ok;
+ Error -> throw(Error)
+ end || Fun <- Db#db.validate_doc_funs],
+ ok
+ catch
+ throw:Error ->
+ Error
+ end.
-prep_and_validate_new_edit(Db, #doc{id=Id,revs=[NewRev|PrevRevs]}=Doc,
+prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, [_NewRev|PrevRevs]}}=Doc,
OldFullDocInfo, LeafRevsDict) ->
case PrevRevs of
[PrevRev|_] ->
- case dict:find(PrevRev, LeafRevsDict) of
- {ok, {Deleted, Sp, DiskRevs}} ->
- Doc2 = Doc#doc{revs=[NewRev|DiskRevs]},
- case couch_doc:has_stubs(Doc2) of
+ case dict:find({RevStart-1, PrevRev}, LeafRevsDict) of
+ {ok, {Deleted, DiskSp, DiskRevs}} ->
+ case couch_doc:has_stubs(Doc) of
true ->
- DiskDoc = make_doc(Db, Id, Deleted, Sp, DiskRevs),
- Doc3 = couch_doc:merge_stubs(Doc2, DiskDoc),
- validate_doc_update(Db, Doc3, fun() -> DiskDoc end);
+ DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
false ->
- LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,Sp,DiskRevs) end,
- validate_doc_update(Db, Doc2, LoadDiskDoc)
+ LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
+ {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
end;
error ->
- throw(conflict)
+ {conflict, Doc}
end;
[] ->
% new doc, and we have existing revs.
if OldFullDocInfo#full_doc_info.deleted ->
% existing docs are deletions
- validate_doc_update(Db, Doc, nil);
+ {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
true ->
- throw(conflict)
+ {conflict, Doc}
end
end.
+
+
+prep_and_validate_updates(_Db, [], [], AccPrepped, AccFatalErrors) ->
+ {AccPrepped, AccFatalErrors};
+prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups], AccPrepped, AccErrors) ->
+ [#doc{id=Id}|_]=DocBucket,
+ % no existing revs are known,
+ {PreppedBucket, AccErrors3} = lists:foldl(
+ fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
+ case Revs of
+ {Pos, [NewRev,_OldRev|_]} ->
+ % old revs specified but none exist, a conflict
+ {AccBucket, [{{Id, {Pos, NewRev}}, conflict} | AccErrors2]};
+ {Pos, [NewRev]} ->
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccBucket], AccErrors2};
+ Error ->
+ {AccBucket, [{{Id, {Pos, NewRev}}, Error} | AccErrors2]}
+ end
+ end
+ end,
+ {[], AccErrors}, DocBucket),
+
+ prep_and_validate_updates(Db, RestBuckets, RestLookups,
+ [PreppedBucket | AccPrepped], AccErrors3);
+prep_and_validate_updates(Db, [DocBucket|RestBuckets],
+ [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
+ AccPrepped, AccErrors) ->
+ Leafs = couch_key_tree:get_all_leafs(OldRevTree),
+ LeafRevsDict = dict:from_list([{{Start, RevId}, {Deleted, Sp, Revs}} ||
+ {{Deleted, Sp}, {Start, [RevId|_]}=Revs} <- Leafs]),
+ {PreppedBucket, AccErrors3} = lists:foldl(
+ fun(Doc, {Docs2Acc, AccErrors2}) ->
+ case prep_and_validate_update(Db, Doc, OldFullDocInfo,
+ LeafRevsDict) of
+ {ok, Doc} ->
+ {[Doc | Docs2Acc], AccErrors2};
+ {Error, #doc{id=Id,revs={Pos, [NewRev|_]}}} ->
+ % Record the error
+ {Docs2Acc, [{{Id, {Pos, NewRev}}, Error} |AccErrors2]}
+ end
+ end,
+ {[], AccErrors}, DocBucket),
+ prep_and_validate_updates(Db, RestBuckets, RestLookups, [PreppedBucket | AccPrepped], AccErrors3).
+
+
update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options) ->
- update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options, true).
+ update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options, interactive_edit).
-update_docs(Db, Docs, Options, false) ->
+
+prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
+ Errors2 = [{{Id, {Pos, Rev}}, Error} ||
+ {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
+ {lists:reverse(AccPrepped), lists:reverse(Errors2)};
+prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
+ case OldInfo of
+ not_found ->
+ {ValidatedBucket, AccErrors3} = lists:foldl(
+ fun(Doc, {AccPrepped2, AccErrors2}) ->
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccPrepped2], AccErrors2};
+ Error ->
+ {AccPrepped2, [{Doc, Error} | AccErrors2]}
+ end
+ end,
+ {[], AccErrors}, Bucket),
+ prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
+ {ok, #full_doc_info{rev_tree=OldTree}} ->
+ NewRevTree = lists:foldl(
+ fun(NewDoc, AccTree) ->
+ {NewTree, _} = couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]),
+ NewTree
+ end,
+ OldTree, Bucket),
+ Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
+ LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
+ {ValidatedBucket, AccErrors3} =
+ lists:foldl(
+ fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
+ case dict:find({Pos, RevId}, LeafRevsFullDict) of
+ {ok, {Start, Path}} ->
+ % our unflushed doc is a leaf node. Go back on the path
+ % to find the previous rev that's on disk.
+ PrevRevResult =
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ [_PrevRevFull | [PrevRevFull | _]=PrevPath] = Path,
+ case PrevRevFull of
+ {_RevId, ?REV_MISSING} ->
+ conflict;
+ {RevId, {IsDel, DiskSp}} ->
+ DiskDoc = make_doc(Db, Id, IsDel, DiskSp, PrevPath),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ {ok, Doc2, fun() -> DiskDoc end}
+ end;
+ false ->
+ {ok, Doc,
+ fun() ->
+ make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
+ end}
+ end,
+ case PrevRevResult of
+ {ok, NewDoc, LoadPrevRevFun} ->
+ case validate_doc_update(Db, NewDoc, LoadPrevRevFun) of
+ ok ->
+ {[NewDoc | AccValidated], AccErrors2};
+ Error ->
+ {AccValidated, [{NewDoc, Error} | AccErrors2]}
+ end;
+ Error ->
+ {AccValidated, [{Doc, Error} | AccErrors2]}
+ end;
+ _ ->
+ % this doc isn't a leaf or already exists in the tree.
+ % ignore but consider it a success.
+ {AccValidated, AccErrors2}
+ end
+ end,
+ {[], AccErrors}, Bucket),
+ prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3)
+ end.
+
+update_docs(Db, Docs, Options, replicated_changes) ->
couch_stats_collector:increment({couchdb, database_writes}),
DocBuckets = group_alike_docs(Docs),
- Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
- ExistingDocs = get_full_doc_infos(Db, Ids),
+ case (Db#db.validate_doc_funs /= []) orelse
+ lists:any(
+ fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
+ (_) -> false
+ end, Docs) of
+ true ->
+ Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+ ExistingDocs = get_full_doc_infos(Db, Ids),
- DocBuckets2 = lists:zipwith(
- fun(Bucket, not_found) ->
- [validate_doc_update(Db, Doc, fun()-> nil end) || Doc <- Bucket];
- (Bucket, {ok, #full_doc_info{rev_tree=OldRevTree}}) ->
- NewTree = lists:foldl(
- fun(Doc, RevTreeAcc) ->
- couch_key_tree:merge(RevTreeAcc, doc_to_tree(Doc))
- end,
- OldRevTree, Bucket),
- Leafs = couch_key_tree:get_all_leafs_full(NewTree),
- LeafRevsFullDict = dict:from_list( [{Rev, FullPath} || [{Rev, _}|_]=FullPath <- Leafs]),
- lists:flatmap(
- fun(#doc{revs=[Rev|_]}=Doc) ->
- case dict:find(Rev, LeafRevsFullDict) of
- {ok, [{Rev, #doc{id=Id}}|_]=Path} ->
- % our unflushed doc is a leaf node. Go back on the path
- % to find the previous rev that's on disk.
- LoadPrevRev = fun() ->
- make_first_doc_on_disk(Db, Id, Path)
- end,
- [validate_doc_update(Db, Doc, LoadPrevRev)];
- _ ->
- % this doc isn't a leaf or is already exists in the tree. ignore
- []
- end
- end, Bucket)
- end,
- DocBuckets, ExistingDocs),
- write_and_commit(Db, DocBuckets2, Options);
+ {DocBuckets2, DocErrors} =
+ prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
+ DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
+ false ->
+ DocErrors = [],
+ DocBuckets3 = DocBuckets
+ end,
+ {ok, []} = write_and_commit(Db, DocBuckets3, [merge_conflicts | Options]),
+ {ok, DocErrors};
-update_docs(Db, Docs, Options, true) ->
+update_docs(Db, Docs, Options, interactive_edit) ->
couch_stats_collector:increment({couchdb, database_writes}),
-
+ AllOrNothing = lists:member(all_or_nothing, Options),
% go ahead and generate the new revision ids for the documents.
Docs2 = lists:map(
- fun(#doc{id=Id,revs=Revs}=Doc) ->
+ fun(#doc{id=Id,revs={Start, RevIds}}=Doc) ->
case Id of
<<?LOCAL_DOC_PREFIX, _/binary>> ->
- Rev = case Revs of [] -> 0; [Rev0|_] -> list_to_integer(binary_to_list(Rev0)) end,
- Doc#doc{revs=[list_to_binary(integer_to_list(Rev + 1))]};
+ Rev = case RevIds of [] -> 0; [Rev0|_] -> list_to_integer(?b2l(Rev0)) end,
+ Doc#doc{revs={Start, [?l2b(integer_to_list(Rev + 1))]}};
_ ->
- Doc#doc{revs=[list_to_binary(integer_to_list(couch_util:rand32())) | Revs]}
+ Doc#doc{revs={Start+1, [?l2b(integer_to_list(couch_util:rand32())) | RevIds]}}
end
end, Docs),
DocBuckets = group_alike_docs(Docs2),
- Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
-
- % lookup the doc by id and get the most recent
-
- ExistingDocs = get_full_doc_infos(Db, Ids),
- DocBuckets2 = lists:zipwith(
- fun(Bucket, not_found) ->
- % no existing revs on disk, make sure no old revs specified.
- [throw(conflict) || #doc{revs=[_NewRev, _OldRev | _]} <- Bucket],
- [validate_doc_update(Db, Doc, fun()-> nil end) || Doc <- Bucket];
- (Bucket, {ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}) ->
- Leafs = couch_key_tree:get_all_leafs(OldRevTree),
- LeafRevsDict = dict:from_list([{Rev, {Deleted, Sp, Revs}} || {Rev, {Deleted, Sp}, Revs} <- Leafs]),
- [prep_and_validate_new_edit(Db, Doc, OldFullDocInfo, LeafRevsDict) || Doc <- Bucket]
+ case (Db#db.validate_doc_funs /= []) orelse
+ lists:any(
+ fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
+ true;
+ (#doc{attachments=Atts}) ->
+ Atts /= []
+ end, Docs) of
+ true ->
+ % lookup the doc by id and get the most recent
+ Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+ ExistingDocInfos = get_full_doc_infos(Db, Ids),
+
+ {DocBucketsPrepped, Failures} =
+ case AllOrNothing of
+ true ->
+ prep_and_validate_replicated_updates(Db, DocBuckets,
+ ExistingDocInfos, [], []);
+ false ->
+ prep_and_validate_updates(Db, DocBuckets, ExistingDocInfos, [], [])
end,
- DocBuckets, ExistingDocs),
- ok = write_and_commit(Db, DocBuckets2, [new_edits | Options]),
- {ok, [NewRev ||#doc{revs=[NewRev|_]} <- Docs2]}.
+
+ % strip out any empty buckets
+ DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
+ false ->
+ Failures = [],
+ DocBuckets2 = DocBuckets
+ end,
+ if (AllOrNothing) and (Failures /= []) ->
+ {aborted, Failures};
+ true ->
+ Options2 = if AllOrNothing -> [merge_conflicts];
+ true -> [] end ++ Options,
+ {ok, CommitFailures} = write_and_commit(Db, DocBuckets2, Options2),
+ FailDict = dict:from_list(CommitFailures ++ Failures),
+ % the output for each is either {ok, NewRev} or Error
+ {ok, lists:map(
+ fun(#doc{id=Id,revs={Pos, [NewRevId|_]}}) ->
+ case dict:find({Id, {Pos, NewRevId}}, FailDict) of
+ {ok, Error} ->
+ Error;
+ error ->
+ {ok, {Pos, NewRevId}}
+ end
+ end, Docs2)}
+ end.
% Returns the first available document on disk. Input list is a full rev path
% for the doc.
-make_first_doc_on_disk(_Db, _Id, []) ->
+make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
nil;
-make_first_doc_on_disk(Db, Id, [{_Rev, ?REV_MISSING}|RestPath]) ->
- make_first_doc_on_disk(Db, Id, RestPath);
-make_first_doc_on_disk(Db, Id, [{_Rev, {IsDel, Sp}} |_]=DocPath) ->
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
+ make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, {IsDel, Sp}} |_]=DocPath) ->
Revs = [Rev || {Rev, _} <- DocPath],
- make_doc(Db, Id, IsDel, Sp, Revs).
+ make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
write_and_commit(#db{update_pid=UpdatePid, user_ctx=Ctx}=Db, DocBuckets,
@@ -374,20 +532,18 @@
% flush unwritten binaries to disk.
DocBuckets2 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
case gen_server:call(UpdatePid, {update_docs, DocBuckets2, Options}, infinity) of
- ok -> ok;
+ {ok, Conflicts} -> {ok, Conflicts};
retry ->
% This can happen if the db file we wrote to was swapped out by
- % compaction. Retry writing to the current file
+ % compaction. Retry by reopening the db and writing to the current file
{ok, Db2} = open_ref_counted(Db#db.main_pid, Ctx),
DocBuckets3 = [[doc_flush_binaries(Doc, Db2#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
% We only retry once
close(Db2),
case gen_server:call(UpdatePid, {update_docs, DocBuckets3, Options}, infinity) of
- ok -> ok;
- Else -> throw(Else)
- end;
- Else->
- throw(Else)
+ {ok, Conflicts} -> {ok, Conflicts};
+ retry -> throw({update_error, compaction_retry})
+ end
end.
@@ -506,7 +662,7 @@
{Count, _DelCount} = couch_btree:final_reduce(
fun couch_db_updater:btree_by_id_reduce/2, Reds),
Count.
-
+
count_changes_since(Db, SinceSeq) ->
{ok, Changes} =
couch_btree:fold_reduce(Db#db.docinfo_by_seq_btree,
@@ -518,7 +674,7 @@
end,
ok),
Changes.
-
+
enum_docs_since(Db, SinceSeq, Direction, InFun, Ctx) ->
couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Ctx).
@@ -594,11 +750,11 @@
end
end,
FoundResults =
- lists:map(fun({Rev, Value, FoundRevPath}) ->
+ lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
case Value of
?REV_MISSING ->
% we have the rev in our list but know nothing about it
- {{not_found, missing}, Rev};
+ {{not_found, missing}, {Pos, Rev}};
{IsDeleted, SummaryPtr} ->
{ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
end
@@ -616,18 +772,18 @@
open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, _Options) ->
case couch_btree:lookup(Db#db.local_docs_btree, [Id]) of
[{ok, {_, {Rev, BodyData}}}] ->
- {ok, #doc{id=Id, revs=[list_to_binary(integer_to_list(Rev))], body=BodyData}};
+ {ok, #doc{id=Id, revs={0, [list_to_binary(integer_to_list(Rev))]}, body=BodyData}};
[not_found] ->
{not_found, missing}
end;
-open_doc_int(Db, #doc_info{id=Id,rev=Rev,deleted=IsDeleted,summary_pointer=Sp}=DocInfo, Options) ->
- Doc = make_doc(Db, Id, IsDeleted, Sp, [Rev]),
+open_doc_int(Db, #doc_info{id=Id,rev={Pos,RevId},deleted=IsDeleted,summary_pointer=Sp}=DocInfo, Options) ->
+ Doc = make_doc(Db, Id, IsDeleted, Sp, {Pos,[RevId]}),
{ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
#doc_info{deleted=IsDeleted,rev=Rev,summary_pointer=Sp} = DocInfo =
couch_doc:to_doc_info(FullDocInfo),
- {[{_Rev,_Value, Revs}], []} = couch_key_tree:get(RevTree, [Rev]),
- Doc = make_doc(Db, Id, IsDeleted, Sp, Revs),
+ {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
+ Doc = make_doc(Db, Id, IsDeleted, Sp, RevPath),
{ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}};
open_doc_int(Db, Id, Options) ->
case get_full_doc_info(Db, Id) of
@@ -641,9 +797,10 @@
case lists:member(revs_info, Options) of
false -> [];
true ->
- {[RevPath],[]} =
+ {[{Pos, RevPath}],[]} =
couch_key_tree:get_full_key_paths(RevTree, [DocInfo#doc_info.rev]),
- [{revs_info, lists:map(
+
+ [{revs_info, Pos, lists:map(
fun({Rev, {true, _Sp}}) ->
{Rev, deleted};
({Rev, {false, _Sp}}) ->
@@ -670,13 +827,15 @@
end.
-doc_to_tree(Doc) ->
- doc_to_tree(Doc, lists:reverse(Doc#doc.revs)).
+doc_to_tree(#doc{revs={Start, RevIds}}=Doc) ->
+ [Tree] = doc_to_tree_simple(Doc, lists:reverse(RevIds)),
+ {Start - length(RevIds) + 1, Tree}.
+
-doc_to_tree(Doc, [RevId]) ->
+doc_to_tree_simple(Doc, [RevId]) ->
[{RevId, Doc, []}];
-doc_to_tree(Doc, [RevId | Rest]) ->
- [{RevId, ?REV_MISSING, doc_to_tree(Doc, Rest)}].
+doc_to_tree_simple(Doc, [RevId | Rest]) ->
+ [{RevId, ?REV_MISSING, doc_to_tree_simple(Doc, Rest)}].
make_doc(Db, FullDocInfo) ->
{#doc_info{id=Id,deleted=Deleted,summary_pointer=Sp}, RevPath}
@@ -703,4 +862,4 @@
}.
-
\ No newline at end of file
+
Modified: couchdb/trunk/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db.hrl?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db.hrl (original)
+++ couchdb/trunk/src/couchdb/couch_db.hrl Fri Mar 13 22:15:34 2009
@@ -69,7 +69,7 @@
-record(doc,
{
id = <<"">>,
- revs = [],
+ revs = {0, []},
% the json body object.
body = {[]},
@@ -104,7 +104,7 @@
% if the disk revision is incremented, then new upgrade logic will need to be
% added to couch_db_updater:init_db.
--define(LATEST_DISK_VERSION, 0).
+-define(LATEST_DISK_VERSION, 1).
-record(db_header,
{disk_version = ?LATEST_DISK_VERSION,
@@ -115,13 +115,14 @@
local_docs_btree_state = nil,
purge_seq = 0,
purged_docs = nil,
- admins_ptr = nil
+ admins_ptr = nil,
+ revs_limit = 1000
}).
-record(db,
- {main_pid=nil,
- update_pid=nil,
- compactor_pid=nil,
+ {main_pid = nil,
+ update_pid = nil,
+ compactor_pid = nil,
instance_start_time, % number of microsecs since jan 1 1970 as a binary string
fd,
fd_ref_counter,
@@ -133,11 +134,12 @@
update_seq,
name,
filepath,
- validate_doc_funs=[],
- admins=[],
- admins_ptr=nil,
- user_ctx=#user_ctx{},
- waiting_delayed_commit=nil
+ validate_doc_funs = [],
+ admins = [],
+ admins_ptr = nil,
+ user_ctx = #user_ctx{},
+ waiting_delayed_commit = nil,
+ revs_limit = 1000
}).
Modified: couchdb/trunk/src/couchdb/couch_db_updater.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db_updater.erl?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db_updater.erl (original)
+++ couchdb/trunk/src/couchdb/couch_db_updater.erl Fri Mar 13 22:15:34 2009
@@ -44,15 +44,13 @@
{reply, {ok, Db}, Db};
handle_call({update_docs, DocActions, Options}, _From, Db) ->
try update_docs_int(Db, DocActions, Options) of
- {ok, Db2} ->
+ {ok, Conflicts, Db2} ->
ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
couch_db_update_notifier:notify({updated, Db2#db.name}),
- {reply, ok, Db2}
+ {reply, {ok, Conflicts}, Db2}
catch
throw: retry ->
- {reply, retry, Db};
- throw: conflict ->
- {reply, conflict, Db}
+ {reply, retry, Db}
end;
handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
{reply, ok, Db}; % no data waiting, return ok immediately
@@ -64,18 +62,18 @@
couch_db_update_notifier:notify({updated, Db#db.name}),
{reply, {ok, Db2#db.update_seq}, Db2};
-handle_call({set_admins, NewAdmins, #user_ctx{roles=Roles}}, _From, Db) ->
- DbAdmins = [<<"_admin">> | Db#db.admins],
- case length(DbAdmins -- Roles) == length(DbAdmins) of
- true ->
- {reply, {unauthorized, <<"You are not a db or server admin.">>}, Db};
- false ->
- {ok, Ptr} = couch_file:append_term(Db#db.fd, NewAdmins),
- Db2 = commit_data(Db#db{admins=NewAdmins, admins_ptr=Ptr,
- update_seq=Db#db.update_seq+1}),
- ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
- {reply, ok, Db2}
- end;
+handle_call({set_admins, NewAdmins}, _From, Db) ->
+ {ok, Ptr} = couch_file:append_term(Db#db.fd, NewAdmins),
+ Db2 = commit_data(Db#db{admins=NewAdmins, admins_ptr=Ptr,
+ update_seq=Db#db.update_seq+1}),
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ {reply, ok, Db2};
+
+handle_call({set_revs_limit, Limit}, _From, Db) ->
+ Db2 = commit_data(Db#db{revs_limit=Limit,
+ update_seq=Db#db.update_seq+1}),
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ {reply, ok, Db2};
handle_call({purge_docs, _IdRevs}, _From,
#db{compactor_pid=Pid}=Db) when Pid /= nil ->
@@ -298,7 +296,8 @@
filepath = Filepath,
admins = Admins,
admins_ptr = AdminsPtr,
- instance_start_time = StartTime
+ instance_start_time = StartTime,
+ revs_limit = Header#db_header.revs_limit
}.
@@ -358,40 +357,31 @@
end, Unflushed),
flush_trees(Db, RestUnflushed, [InfoUnflushed#full_doc_info{rev_tree=Flushed} | AccFlushed]).
-merge_rev_trees(_NoConflicts, [], [], AccNewInfos, AccSeq) ->
- {ok, lists:reverse(AccNewInfos), AccSeq};
-merge_rev_trees(NoConflicts, [NewDocs|RestDocsList],
- [OldDocInfo|RestOldInfo], AccNewInfos, AccSeq) ->
- #full_doc_info{id=Id,rev_tree=OldTree}=OldDocInfo,
- UpdatesRevTree = lists:foldl(
- fun(NewDoc, AccTree) ->
- couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc))
+merge_rev_trees(_MergeConflicts, [], [], AccNewInfos, AccConflicts, AccSeq) ->
+ {ok, lists:reverse(AccNewInfos), AccConflicts, AccSeq};
+merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
+ [OldDocInfo|RestOldInfo], AccNewInfos, AccConflicts, AccSeq) ->
+ #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted}=OldDocInfo,
+ {NewRevTree, NewConflicts} = lists:foldl(
+ fun(#doc{revs={Pos,[Rev|_]}}=NewDoc, {AccTree, AccConflicts2}) ->
+ case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of
+ {_NewTree, conflicts}
+ when (not OldDeleted) and (not MergeConflicts) ->
+ {AccTree, [{{Id, {Pos,Rev}}, conflict} | AccConflicts2]};
+ {NewTree, _} ->
+ {NewTree, AccConflicts2}
+ end
end,
- [], NewDocs),
- NewRevTree = couch_key_tree:merge(OldTree, UpdatesRevTree),
+ {OldTree, AccConflicts}, NewDocs),
if NewRevTree == OldTree ->
% nothing changed
- merge_rev_trees(NoConflicts, RestDocsList, RestOldInfo, AccNewInfos, AccSeq);
+ merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, AccNewInfos,
+ NewConflicts, AccSeq);
true ->
- if NoConflicts andalso OldTree /= [] ->
- OldConflicts = couch_key_tree:count_leafs(OldTree),
- NewConflicts = couch_key_tree:count_leafs(NewRevTree),
- if NewConflicts > OldConflicts ->
- % if all the old docs are deletions, allow this new conflict
- case [1 || {_Rev,{IsDel,_Sp},_Path} <-
- couch_key_tree:get_all_leafs(OldTree), IsDel==false] of
- [] ->
- ok;
- _ ->
- throw(conflict)
- end;
- true -> ok
- end;
- true -> ok
- end,
+ % we have updated the document, give it a new seq #
NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
- merge_rev_trees(NoConflicts, RestDocsList,RestOldInfo,
- [NewInfo|AccNewInfos],AccSeq+1)
+ merge_rev_trees(MergeConflicts, RestDocsList,RestOldInfo,
+ [NewInfo|AccNewInfos], NewConflicts, AccSeq+1)
end.
new_index_entries([], AccById, AccBySeq) ->
@@ -402,19 +392,23 @@
[FullDocInfo#full_doc_info{deleted=Deleted}|AccById],
[DocInfo|AccBySeq]).
+
+stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
+ [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
+ #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
+
+
update_docs_int(Db, DocsList, Options) ->
#db{
fulldocinfo_by_id_btree = DocInfoByIdBTree,
docinfo_by_seq_btree = DocInfoBySeqBTree,
update_seq = LastSeq
} = Db,
-
% separate out the NonRep documents from the rest of the documents
{DocsList2, NonRepDocs} = lists:foldl(
- fun([#doc{id=Id}=Doc | Rest]=Docs, {DocsListAcc, NonRepDocsAcc}) ->
+ fun([#doc{id=Id}=Doc | _]=Docs, {DocsListAcc, NonRepDocsAcc}) ->
case Id of
- <<?LOCAL_DOC_PREFIX, _/binary>> when Rest==[] ->
- % when saving NR (non rep) documents, you can only save a single rev
+ <<?LOCAL_DOC_PREFIX, _/binary>> ->
{DocsListAcc, [Doc | NonRepDocsAcc]};
Id->
{[Docs | DocsListAcc], NonRepDocsAcc}
@@ -434,23 +428,26 @@
Ids, OldDocLookups),
% Merge the new docs into the revision trees.
- NoConflicts = lists:member(new_edits, Options),
- {ok, NewDocInfos, NewSeq} = merge_rev_trees(NoConflicts, DocsList2, OldDocInfos, [], LastSeq),
+ {ok, NewDocInfos0, Conflicts, NewSeq} = merge_rev_trees(
+ lists:member(merge_conflicts, Options),
+ DocsList2, OldDocInfos, [], [], LastSeq),
+
+ NewDocInfos = stem_full_doc_infos(Db, NewDocInfos0),
RemoveSeqs =
- [ OldSeq || {ok, #full_doc_info{update_seq=OldSeq}} <- OldDocLookups],
+ [OldSeq || {ok, #full_doc_info{update_seq=OldSeq}} <- OldDocLookups],
- % All regular documents are now ready to write.
+ % All documents are now ready to write.
- % Try to write the local documents first, a conflict might be generated
- {ok, Db2} = update_local_docs(Db, NonRepDocs),
+ {ok, LocalConflicts, Db2} = update_local_docs(Db, NonRepDocs),
- % Write out the document summaries (they are stored in the nodes of the rev trees)
+ % Write out the document summaries (the bodies are stored in the nodes of
+ % the trees, the attachments are already written to disk)
{ok, FlushedDocInfos} = flush_trees(Db2, NewDocInfos, []),
{ok, InfoById, InfoBySeq} = new_index_entries(FlushedDocInfos, [], []),
- % and the indexes to the documents
+ % and the indexes
{ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, InfoBySeq, RemoveSeqs),
{ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, InfoById, []),
@@ -459,6 +456,8 @@
docinfo_by_seq_btree = DocInfoBySeqBTree2,
update_seq = NewSeq},
+ % Check if we just updated any design documents, and update the validation
+ % funs if we did.
case [1 || <<"_design/",_/binary>> <- Ids] of
[] ->
Db4 = Db3;
@@ -466,18 +465,15 @@
Db4 = refresh_validate_doc_funs(Db3)
end,
- {ok, commit_data(Db4, not lists:member(full_commit, Options))}.
-
+ {ok, LocalConflicts ++ Conflicts,
+ commit_data(Db4, not lists:member(full_commit, Options))}.
+
update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
Ids = [Id || #doc{id=Id} <- Docs],
OldDocLookups = couch_btree:lookup(Btree, Ids),
BtreeEntries = lists:zipwith(
- fun(#doc{id=Id,deleted=Delete,revs=Revs,body=Body}, OldDocLookup) ->
- NewRev =
- case Revs of
- [] -> 0;
- [RevStr|_] -> list_to_integer(binary_to_list(RevStr))
- end,
+ fun(#doc{id=Id,deleted=Delete,revs={0,[RevStr]},body=Body}, OldDocLookup) ->
+ NewRev = list_to_integer(?b2l(RevStr)),
OldRev =
case OldDocLookup of
{ok, {_, {OldRev0, _}}} -> OldRev0;
@@ -490,18 +486,19 @@
true -> {remove, Id}
end;
false ->
- throw(conflict)
+ {conflict, {Id, {0, RevStr}}}
end
end, Docs, OldDocLookups),
BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
-
+ Conflicts = [{conflict, IdRev} || {conflict, IdRev} <- BtreeEntries],
+
{ok, Btree2} =
couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
- {ok, Db#db{local_docs_btree = Btree2}}.
+ {ok, Conflicts, Db#db{local_docs_btree = Btree2}}.
commit_data(Db) ->
@@ -515,7 +512,8 @@
docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
- admins_ptr = Db#db.admins_ptr
+ admins_ptr = Db#db.admins_ptr,
+ revs_limit = Db#db.revs_limit
},
if Header == Header2 ->
Db;
@@ -549,6 +547,10 @@
copy_rev_tree(_SrcFd, _DestFd, _DestStream, []) ->
[];
+copy_rev_tree(SrcFd, DestFd, DestStream, [{Start, Tree} | RestTree]) ->
+ % root inner node, only copy info/data from leaf nodes
+ [Tree2] = copy_rev_tree(SrcFd, DestFd, DestStream, [Tree]),
+ [{Start, Tree2} | copy_rev_tree(SrcFd, DestFd, DestStream, RestTree)];
copy_rev_tree(SrcFd, DestFd, DestStream, [{RevId, {IsDel, Sp}, []} | RestTree]) ->
% This is a leaf node, copy it over
NewSp = copy_raw_doc(SrcFd, Sp, DestFd, DestStream),
@@ -560,10 +562,11 @@
copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd,summary_stream=DestStream}=NewDb, InfoBySeq, Retry) ->
Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
- NewFullDocInfos = lists:map(
+ NewFullDocInfos0 = lists:map(
fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
Info#full_doc_info{rev_tree=copy_rev_tree(SrcFd, DestFd, DestStream, RevTree)}
end, LookupResults),
+ NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos0),
NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
RemoveSeqs =
case Retry of
Modified: couchdb/trunk/src/couchdb/couch_doc.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_doc.erl?rev=753448&r1=753447&r2=753448&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_doc.erl (original)
+++ couchdb/trunk/src/couchdb/couch_doc.erl Fri Mar 13 22:15:34 2009
@@ -12,41 +12,53 @@
-module(couch_doc).
--export([to_doc_info/1,to_doc_info_path/1]).
+-export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,rev_to_strs/1]).
-export([bin_foldl/3,bin_size/1,bin_to_binary/1,get_validate_doc_fun/1]).
-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
-include("couch_db.hrl").
% helpers used by to_json_obj
-to_json_rev([]) ->
+to_json_rev(0, []) ->
[];
-to_json_rev(Revs) ->
- [{<<"_rev">>, lists:nth(1, Revs)}].
+to_json_rev(Start, [FirstRevId|_]) ->
+ [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",FirstRevId])}].
to_json_body(true, _Body) ->
[{<<"_deleted">>, true}];
to_json_body(false, {Body}) ->
Body.
-to_json_revs(Options, Revs) ->
+to_json_revisions(Options, Start, RevIds) ->
case lists:member(revs, Options) of
false -> [];
true ->
- [{<<"_revs">>, Revs}]
+ [{<<"_revisions">>, {[{<<"start">>, Start},
+ {<<"ids">>, RevIds}]}}]
end.
-to_json_revs_info(Meta) ->
+rev_to_str({Pos, RevId}) ->
+ ?l2b([integer_to_list(Pos),"-",RevId]).
+
+rev_to_strs([]) ->
+ [];
+rev_to_strs([{Pos, RevId}| Rest]) ->
+ [rev_to_str({Pos, RevId}) | rev_to_strs(Rest)].
+
+to_json_meta(Meta) ->
lists:map(
- fun({revs_info, RevsInfo}) ->
- JsonRevsInfo =
- [{[{rev, Rev}, {status, list_to_binary(atom_to_list(Status))}]} ||
- {Rev, Status} <- RevsInfo],
+ fun({revs_info, Start, RevsInfo}) ->
+ {JsonRevsInfo, _Pos} = lists:mapfoldl(
+ fun({RevId, Status}, PosAcc) ->
+ JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
+ {<<"status">>, ?l2b(atom_to_list(Status))}]},
+ {JsonObj, PosAcc - 1}
+ end, Start, RevsInfo),
{<<"_revs_info">>, JsonRevsInfo};
({conflicts, Conflicts}) ->
- {<<"_conflicts">>, Conflicts};
- ({deleted_conflicts, Conflicts}) ->
- {<<"_deleted_conflicts">>, Conflicts}
+ {<<"_conflicts">>, rev_to_strs(Conflicts)};
+ ({deleted_conflicts, DConflicts}) ->
+ {<<"_deleted_conflicts">>, rev_to_strs(DConflicts)}
end, Meta).
to_json_attachment_stubs(Attachments) ->
@@ -98,17 +110,62 @@
to_json_attachment_stubs(Attachments)
end.
-to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs=Revs,meta=Meta}=Doc,Options)->
+to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
+ meta=Meta}=Doc,Options)->
{[{<<"_id">>, Id}]
- ++ to_json_rev(Revs)
+ ++ to_json_rev(Start, RevIds)
++ to_json_body(Del, Body)
- ++ to_json_revs(Options, Revs)
- ++ to_json_revs_info(Meta)
+ ++ to_json_revisions(Options, Start, RevIds)
+ ++ to_json_meta(Meta)
++ to_json_attachments(Doc#doc.attachments, Options)
}.
from_json_obj({Props}) ->
- {JsonBins} = proplists:get_value(<<"_attachments">>, Props, {[]}),
+ transfer_fields(Props, #doc{body=[]});
+
+from_json_obj(_Other) ->
+ throw({bad_request, "Document must be a JSON object"}).
+
+parse_rev(Rev) when is_binary(Rev) ->
+ parse_rev(?b2l(Rev));
+parse_rev(Rev) ->
+ {Pos, [$- | RevId]} = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
+ {list_to_integer(Pos), ?l2b(RevId)}.
+
+parse_revs([]) ->
+ [];
+parse_revs([Rev | Rest]) ->
+ [parse_rev(Rev) | parse_revs(Rest)].
+
+
+transfer_fields([], #doc{body=Fields}=Doc) ->
+ % convert fields back to json object
+ Doc#doc{body={lists:reverse(Fields)}};
+
+transfer_fields([{<<"_id">>, Id} | Rest], Doc) when is_binary(Id) ->
+ case Id of
+ <<"_design/", _/binary>> -> ok;
+ <<"_local/", _/binary>> -> ok;
+ <<"_", _/binary>> ->
+ throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
+ _Else -> ok
+ end,
+ transfer_fields(Rest, Doc#doc{id=Id});
+
+transfer_fields([{<<"_id">>, Id} | _Rest], _Doc) ->
+ ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
+ throw({bad_request, <<"Document id must be a string">>});
+
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
+ {Pos, RevId} = parse_rev(Rev),
+ transfer_fields(Rest,
+ Doc#doc{revs={Pos, [RevId]}});
+
+transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
+ % we already got the rev from the _revisions
+ transfer_fields(Rest,Doc);
+
+transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
Bins = lists:flatmap(fun({Name, {BinProps}}) ->
case proplists:get_value(<<"stub">>, BinProps) of
true ->
@@ -122,51 +179,40 @@
[{Name, {Type, couch_util:decodeBase64(Value)}}]
end
end, JsonBins),
- AllowedSpecialMembers = [<<"id">>, <<"revs">>, <<"rev">>, <<"attachments">>, <<"revs_info">>,
- <<"conflicts">>, <<"deleted_conflicts">>, <<"deleted">>],
- % collect all the doc-members that start with "_"
- % if any aren't in the AllowedSpecialMembers list
- % then throw a invalid_doc error
- [case lists:member(Name, AllowedSpecialMembers) of
- true ->
- ok;
- false ->
- throw({invalid_doc, io_lib:format("Bad special document member: _~s", [Name])})
- end
- || {<<$_,Name/binary>>, _Value} <- Props],
- Revs =
- case proplists:get_value(<<"_revs">>, Props, []) of
- [] ->
- case proplists:get_value(<<"_rev">>, Props) of
- undefined -> [];
- Rev -> [Rev]
- end;
- Revs0 ->
- Revs0
- end,
- case proplists:get_value(<<"_id">>, Props, <<>>) of
- <<"_design/", _/binary>> = Id -> ok;
- <<"_local/", _/binary>> = Id -> ok;
- <<"_", _/binary>> = Id ->
- throw({invalid_doc, "Document Ids must not start with underscore."});
- Id when is_binary(Id) -> ok;
- Id ->
- ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
- throw({invalid_doc, "Document id is not a string"})
+ transfer_fields(Rest, Doc#doc{attachments=Bins});
+
+transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
+ RevIds = proplists:get_value(<<"ids">>, Props),
+ Start = proplists:get_value(<<"start">>, Props),
+ if not is_integer(Start) ->
+ throw({doc_validation, "_revisions.start isn't an integer."});
+ not is_list(RevIds) ->
+ throw({doc_validation, "_revisions.ids isn't an array."});
+ true ->
+ ok
end,
+ [throw({doc_validation, "RevId isn't a string"}) ||
+ RevId <- RevIds, not is_binary(RevId)],
+ transfer_fields(Rest, Doc#doc{revs={Start, RevIds}});
- % strip out the all props beginning with _
- NewBody = {[{K, V} || {<<First,_/binary>>=K, V} <- Props, First /= $_]},
- #doc{
- id = Id,
- revs = Revs,
- deleted = proplists:get_value(<<"_deleted">>, Props, false),
- body = NewBody,
- attachments = Bins
- };
+transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when (B==true) or (B==false) ->
+ transfer_fields(Rest, Doc#doc{deleted=B});
-from_json_obj(_Other) ->
- throw({invalid_doc, "Document must be a JSON object"}).
+% ignored fields
+transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+
+% unknown special field
+transfer_fields([{<<"_",Name/binary>>, Start} | _], _) when is_integer(Start) ->
+ throw({doc_validation,
+ ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
+
+transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
to_doc_info(FullDocInfo) ->
{DocInfo, _Path} = to_doc_info_path(FullDocInfo),
@@ -175,27 +221,26 @@
to_doc_info_path(#full_doc_info{id=Id,update_seq=Seq,rev_tree=Tree}) ->
LeafRevs = couch_key_tree:get_all_leafs(Tree),
SortedLeafRevs =
- lists:sort(fun({RevIdA, {IsDeletedA, _}, PathA}, {RevIdB, {IsDeletedB, _}, PathB}) ->
+ lists:sort(fun({{IsDeletedA, _}, {StartA, [RevIdA|_]}}, {{IsDeletedB, _}, {StartB, [RevIdB|_]}}) ->
% sort descending by {not deleted, then Depth, then RevisionId}
- A = {not IsDeletedA, length(PathA), RevIdA},
- B = {not IsDeletedB, length(PathB), RevIdB},
+ A = {not IsDeletedA, StartA, RevIdA},
+ B = {not IsDeletedB, StartB, RevIdB},
A > B
end,
LeafRevs),
- [{RevId, {IsDeleted, SummaryPointer}, Path} | Rest] = SortedLeafRevs,
-
+ [{{IsDeleted, SummaryPointer}, {Start, [RevId|_]}=Path} | Rest] = SortedLeafRevs,
{ConflictRevTuples, DeletedConflictRevTuples} =
- lists:splitwith(fun({_ConflictRevId, {IsDeleted1, _Sp}, _}) ->
+ lists:splitwith(fun({{IsDeleted1, _Sp}, _}) ->
not IsDeleted1
end, Rest),
- ConflictRevs = [RevId1 || {RevId1, _, _} <- ConflictRevTuples],
- DeletedConflictRevs = [RevId2 || {RevId2, _, _} <- DeletedConflictRevTuples],
+ ConflictRevs = [{Start1, RevId1} || {_, {Start1, [RevId1|_]}} <- ConflictRevTuples],
+ DeletedConflictRevs = [{Start1, RevId1} || {_, {Start1, [RevId1|_]}} <- DeletedConflictRevTuples],
DocInfo = #doc_info{
id=Id,
update_seq=Seq,
- rev = RevId,
+ rev = {Start, RevId},
summary_pointer = SummaryPointer,
conflict_revs = ConflictRevs,
deleted_conflict_revs = DeletedConflictRevs,