Posted to commits@couchdb.apache.org by da...@apache.org on 2009/09/17 06:04:47 UTC

svn commit: r816043 - in /couchdb/trunk: etc/couchdb/ share/www/script/ share/www/script/test/ src/couchdb/ test/etap/

Author: davisp
Date: Thu Sep 17 04:04:46 2009
New Revision: 816043

URL: http://svn.apache.org/viewvc?rev=816043&view=rev
Log:
Fixes COUCHDB-396

Makes the stats calculation use a moving window instead of non-overlapping timeframes. This should make trend monitoring more robust.

Thanks once again to Bob Dionne for double checking this.
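
For context, the new aggregator timestamps every sample, folds new values into a
running mean and variance, and folds expired samples back out, so each aggregate
always describes the last N seconds rather than a fixed interval. A minimal sketch
of that idea (not part of the commit; module and function names are illustrative,
and only the mean is tracked):

    -module(window_sketch).
    -export([add/3, expire/3]).

    %% State is {Count, Mean, [{Timestamp, Value}]}.
    add(Now, Value, {Count, Mean, Samples}) ->
        NewCount = Count + 1,
        %% Incremental mean update, as in couch_stats_aggregator:add_value/3.
        NewMean = Mean + (Value - Mean) / NewCount,
        {NewCount, NewMean, [{Now, Value} | Samples]}.

    %% Drop samples older than WindowSecs and undo their effect on the mean,
    %% mirroring rem_values/2 in the aggregator below.
    expire(Now, WindowSecs, {Count, Mean, Samples}) ->
        InWindow = fun({When, _}) ->
            timer:now_diff(Now, When) =< WindowSecs * 1000000
        end,
        {Fresh, Stale} = lists:partition(InWindow, Samples),
        {C2, M2} = lists:foldl(fun
            ({_, V}, {C, M}) when C > 1 -> {C - 1, (M * C - V) / (C - 1)};
            ({_, _}, {_, _}) -> {0, 0.0}
        end, {Count, Mean}, Stale),
        {C2, M2, Fresh}.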


Added:
    couchdb/trunk/test/etap/120-stats-collect.t
    couchdb/trunk/test/etap/121-stats-aggregates.ini
    couchdb/trunk/test/etap/121-stats-aggregates.t
Removed:
    couchdb/trunk/src/couchdb/couch_stats.hrl
Modified:
    couchdb/trunk/etc/couchdb/default.ini.tpl.in
    couchdb/trunk/share/www/script/couch.js
    couchdb/trunk/share/www/script/test/stats.js
    couchdb/trunk/src/couchdb/Makefile.am
    couchdb/trunk/src/couchdb/couch_db.hrl
    couchdb/trunk/src/couchdb/couch_httpd_stats_handlers.erl
    couchdb/trunk/src/couchdb/couch_stats_aggregator.erl
    couchdb/trunk/src/couchdb/couch_stats_collector.erl

Modified: couchdb/trunk/etc/couchdb/default.ini.tpl.in
URL: http://svn.apache.org/viewvc/couchdb/trunk/etc/couchdb/default.ini.tpl.in?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/etc/couchdb/default.ini.tpl.in (original)
+++ couchdb/trunk/etc/couchdb/default.ini.tpl.in Thu Sep 17 04:04:46 2009
@@ -105,3 +105,47 @@
 ;   utc_random - Time since Jan 1, 1970 UTC with microseconds
 ;     First 14 characters are the time in hex. Last 18 are random.
 algorithm = random
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+; Style guide for descriptions: Start with a lowercase letter & do not add 
+; a trailing full-stop / period.
+; Please keep this in alphabetical order.
+[stats_descriptions]
+{couchdb, database_writes} = number of times a database was changed
+{couchdb, database_reads} = number of times a document was read from a database
+{couchdb, open_databases} = number of open databases
+{couchdb, open_os_files} = number of file descriptors CouchDB has open
+{couchdb, request_time} = length of a request inside CouchDB without MochiWeb
+
+{httpd, bulk_requests} = number of bulk requests
+{httpd, requests} = number of HTTP requests
+{httpd, temporary_view_reads} = number of temporary view reads
+{httpd, view_reads} = number of view reads
+{httpd, clients_requesting_changes} = number of clients for continuous _changes
+
+{httpd_request_methods, 'COPY'} = number of HTTP COPY requests
+{httpd_request_methods, 'DELETE'} = number of HTTP DELETE requests
+{httpd_request_methods, 'GET'} = number of HTTP GET requests
+{httpd_request_methods, 'HEAD'} = number of HTTP HEAD requests
+{httpd_request_methods, 'MOVE'} = number of HTTP MOVE requests
+{httpd_request_methods, 'POST'} = number of HTTP POST requests
+{httpd_request_methods, 'PUT'} = number of HTTP PUT requests
+
+{httpd_status_codes, '200'} = number of HTTP 200 OK responses
+{httpd_status_codes, '201'} = number of HTTP 201 Created responses
+{httpd_status_codes, '202'} = number of HTTP 202 Accepted responses
+{httpd_status_codes, '301'} = number of HTTP 301 Moved Permanently responses
+{httpd_status_codes, '304'} = number of HTTP 304 Not Modified responses
+{httpd_status_codes, '400'} = number of HTTP 400 Bad Request responses
+{httpd_status_codes, '401'} = number of HTTP 401 Unauthorized responses
+{httpd_status_codes, '403'} = number of HTTP 403 Forbidden responses
+{httpd_status_codes, '404'} = number of HTTP 404 Not Found responses
+{httpd_status_codes, '405'} = number of HTTP 405 Method Not Allowed responses
+{httpd_status_codes, '409'} = number of HTTP 409 Conflict responses
+{httpd_status_codes, '412'} = number of HTTP 412 Precondition Failed responses
+{httpd_status_codes, '500'} = number of HTTP 500 Internal Server Error responses
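
The new [stats] section drives collection: "rate" is how often, in milliseconds, a
sample is collected, and every interval listed in "samples" gets its own moving-window
aggregate (0 keeps values since the server started). A hypothetical local.ini
override, not part of this commit, that samples twice per second and adds a one-hour
window would look like:

    [stats]
    rate = 500
    samples = [0, 60, 300, 900, 3600]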

Modified: couchdb/trunk/share/www/script/couch.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/couch.js?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/couch.js [utf-8] (original)
+++ couchdb/trunk/share/www/script/couch.js [utf-8] Thu Sep 17 04:04:46 2009
@@ -426,7 +426,8 @@
     query_arg = "?flush=true";
   }
 
-  var stat = CouchDB.request("GET", "/_stats/" + module + "/" + key + query_arg).responseText;
+  var url = "/_stats/" + module + "/" + key + query_arg;
+  var stat = CouchDB.request("GET", url).responseText;
   return JSON.parse(stat)[module][key];
 }
 

Modified: couchdb/trunk/share/www/script/test/stats.js
URL: http://svn.apache.org/viewvc/couchdb/trunk/share/www/script/test/stats.js?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/share/www/script/test/stats.js (original)
+++ couchdb/trunk/share/www/script/test/stats.js Thu Sep 17 04:04:46 2009
@@ -11,414 +11,303 @@
 // the License.
 
 couchTests.stats = function(debug) {
-  if (debug) debugger;
-
-  var open_databases_tests = {
-    'should increment the number of open databases when creating a db': function(name) {
-       var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-       db.deleteDb();
-       var open_databases = requestStatsTest("couchdb", "open_databases").current;
-       db.createDb();
-
-       var new_open_databases = requestStatsTest("couchdb", "open_databases").current;
-       TEquals(open_databases + 1, new_open_databases, name);
-     },
-    'should increment the number of open databases when opening a db': function(name) {
-       var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-       db.deleteDb();
-       db.createDb();
-
-       restartServer();
-
-       var open_databases = requestStatsTest("couchdb", "open_databases").current;
 
-       db.open("123");
+  function newDb(name, doSetup) {
+    var db = new CouchDB(name, {"X-Couch-Full-Commit": "false"});
+    if(doSetup) {
+      db.deleteDb();
+      db.createDb();
+    }
+    return db;
+  };
+
+  function getStat(mod, key) {
+    return CouchDB.requestStats(mod, key, true);
+  };
+
+  function doView(db) {
+    var designDoc = {
+      _id:"_design/test", // turn off couch.js id escaping?
+      language: "javascript",
+      views: {
+        all_docs: {map: "function(doc) {emit(doc.integer, null);}"},
+      }
+    };
+    db.save(designDoc);
+    db.view("test/all_docs");
+  };
+
+  function runTest(mod, key, funcs) {
+    var db = newDb("test_suite_db", true);
+    if(funcs.setup) funcs.setup(db);
+    var before = getStat(mod, key).current;
+    if(funcs.run) funcs.run(db);
+    var after = getStat(mod, key).current;
+    if(funcs.test) funcs.test(before, after);
+  }
 
-       var new_open_databases = requestStatsTest("couchdb", "open_databases").current;
-       TEquals(open_databases + 1, new_open_databases, name);
-     },
-       'should decrement the number of open databases when deleting': function(name) {
-       var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-       db.deleteDb();
-       db.createDb();
-       var open_databases = requestStatsTest("couchdb", "open_databases").current;
-
-       db.deleteDb();
-       var new_open_databases = requestStatsTest("couchdb", "open_databases").current;
-       TEquals(open_databases - 1, new_open_databases, name);
-     },
-    'should keep the same number of open databases when reaching the max_dbs_open limit': function(name) {
-      restartServer();
-      var max = 5;
-      run_on_modified_server(
-        [{section: "couchdb",
-          key: "max_dbs_open",
-          value: max.toString()}],
-
-        function () {
-          var dbs_open = requestStatsTest("couchdb", "open_databases").current;
-          var files_open = requestStatsTest("couchdb", "open_os_files").current;
-          for(var i=0; i<max+1; i++) {
-            var db = new CouchDB("test_suite_db" + i);
-            db.deleteDb();
-            db.createDb();
-          }
-
-          var open_databases = requestStatsTest("couchdb", "open_databases").current;
-          T(open_databases > 0 && max >= open_databases, name);
+  if (debug) debugger;
 
-          for(var i=0; i<max+1; i++) {
-            var db = new CouchDB("test_suite_db" + i);
-            db.deleteDb();
-          }
-          T(dbs_open == requestStatsTest("couchdb", "open_databases").current);
-          T(files_open == requestStatsTest("couchdb", "open_os_files").current);
-        })
+  (function() {
+    var db = newDb("test_suite_db");
+    db.deleteDb();
+  
+    var before = getStat("couchdb", "open_databases").current;
+    db.createDb();
+    var after = getStat("couchdb", "open_databases").current;
+    TEquals(before+1, after, "Creating a db increments open db count.");
+  })();
+  
+  runTest("couchdb", "open_databases", {
+    setup: function() {restartServer();},
+    run: function(db) {db.open("123");},
+    test: function(before, after) {
+      TEquals(before+1, after, "Opening a db increments open db count.");
+    }
+  });
+  
+  runTest("couchdb", "open_databases", {
+    run: function(db) {db.deleteDb();},
+    test: function(before, after) {
+      TEquals(before-1, after, "Deleting a db decrements open db count.");
+    }
+  });
+  
+  (function() {
+    restartServer();
+    var max = 5;
+    
+    var testFun = function() {
+      var pre_dbs = getStat("couchdb", "open_databases").current || 0;
+      var pre_files = getStat("couchdb", "open_os_files").current || 0;
+      
+      for(var i = 0; i < max*2; i++) {
+        newDb("test_suite_db_" + i, true);
+      }
+      
+      var open_dbs = getStat("couchdb", "open_databases").current;
+      TEquals(open_dbs > 0, true, "We actually opened some dbs.");
+      TEquals(open_dbs, max, "We only have max dbs open.");
+      
+      for(var i = 0; i < max * 2; i++) {
+        newDb("test_suite_db_" + i).deleteDb();
+      }
+      
+      var post_dbs = getStat("couchdb", "open_databases").current;
+      var post_files = getStat("couchdb", "open_os_files").current;
+      TEquals(pre_dbs, post_dbs, "We have the same number of open dbs.");
+      TEquals(pre_files, post_files, "We have the same number of open files.");
+    };
+    
+    run_on_modified_server(
+      [{section: "couchdb", key: "max_dbs_open", value: "5"}],
+      testFun
+    );
+  })();
+  
+  // The "before" stat fetch is itself a request, hence the expected +2.
+  runTest("httpd", "requests", {
+    run: function() {CouchDB.request("GET", "/");},
+    test: function(before, after) {
+      TEquals(before+2, after, "Request counts are incremented properly.");
+    }
+  });
+  
+  runTest("couchdb", "database_reads", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {db.open("test");},
+    test: function(before, after) {
+      TEquals(before+1, after, "Reading a doc increments docs reads.");
+    }
+  });
+  
+  runTest("couchdb", "database_reads", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {db.request("GET", "/");},
+    test: function(before, after) {
+      TEquals(before, after, "Only doc reads increment doc reads.");
+    }
+  });
+  
+  runTest("couchdb", "database_reads", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {db.open("test", {"open_revs": "all"});},
+    test: function(before, after) {
+      TEquals(before+1, after, "Reading doc revs increments docs reads.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    run: function(db) {db.save({"a": "1"});},
+    test: function(before, after) {
+      TEquals(before+1, after, "Saving docs incrememnts doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    run: function(db) {
+      CouchDB.request("POST", "/test_suite_db", {body: '{"a": "1"}'})
     },
- };
-
-  var request_count_tests = {
-   'should increase the request count for every request': function(name) {
-     var requests = requestStatsTest("httpd", "requests").current + 1;
-
-     CouchDB.request("GET", "/");
-
-     var new_requests = requestStatsTest("httpd", "requests").current;
-
-     TEquals(requests + 1, new_requests, name);
-   }
- };
-
- var database_read_count_tests = {
-   'should increase database reads counter when a document is read': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-     db.save({"_id":"test"});
-
-     var reads = requestStatsTest("couchdb", "database_reads").current;
-     db.open("test");
-     var new_reads = requestStatsTest("couchdb", "database_reads").current;
-
-     TEquals(reads + 1 , new_reads, name);
-   },
-   'should not increase database read counter when a non-document is read': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-     db.save({"_id":"test"});
-
-     var reads = requestStatsTest("couchdb", "database_reads").current;
-     CouchDB.request("GET", "/");
-     var new_reads = requestStatsTest("couchdb", "database_reads").current;
-
-     TEquals(reads, new_reads, name);
-   },
-   'should increase database read counter when a document\'s revisions are read': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-     db.save({"_id":"test"});
-
-     var reads = requestStatsTest("couchdb", "database_reads").current;
-     db.open("test", {"open_revs":"all"});
-     var new_reads = requestStatsTest("couchdb", "database_reads").current;
-
-     TEquals(reads + 1 , new_reads, name);
-   }
- };
-
- var view_read_count_tests = {
-   'should increase the permanent view read counter': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var reads = requestStatsTest("httpd", "view_reads").current;
-     createAndRequestView(db);
-     var new_reads = requestStatsTest("httpd", "view_reads").current;
-
-     TEquals(reads + 1 , new_reads, name);
-   },
-   'should not increase the permanent view read counter when a document is read': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-     db.save({"_id":"test"});
-
-     var reads = requestStatsTest("httpd", "view_reads").current;
-     db.open("test");
-     var new_reads = requestStatsTest("httpd", "view_reads").current;
-
-     TEquals(reads, new_reads, name);
-   },
-   'should not increase the permanent view read counter when a temporary view is read': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var reads = requestStatsTest("httpd", "view_reads").current;
-     db.query(function(doc) { emit(doc._id)});
-     var new_reads = requestStatsTest("httpd", "view_reads").current;
-
-     TEquals(reads, new_reads, name);
-   },
-   'should increase the temporary view read counter': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var reads = requestStatsTest("httpd", "temporary_view_reads").current;
-     db.query(function(doc) { emit(doc._id)});
-     var new_reads = requestStatsTest("httpd", "temporary_view_reads").current;
-
-     TEquals(reads + 1, new_reads, name);
-   },
-   'should increase the temporary view read counter when querying a permanent view': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var reads = requestStatsTest("httpd", "view_reads").current;
-     createAndRequestView(db);
-     var new_reads = requestStatsTest("httpd", "view_reads").current;
-
-     TEquals(reads + 1 , new_reads, name);
-   }
- };
-
- var http_requests_by_method_tests = {
-   'should count GET requests': function(name) {
-     var requests = requestStatsTest("httpd_request_methods", "GET").current;
-     var new_requests = requestStatsTest("httpd_request_methods", "GET").current;
-
-     TEquals(requests + 1, new_requests, name);
-   },
-   'should not count GET requests for POST request': function(name) {
-     var requests = requestStatsTest("httpd_request_methods", "GET").current;
-     CouchDB.request("POST", "/");
-     var new_requests = requestStatsTest("httpd_request_methods", "GET").current;
-
-     TEquals(requests + 1, new_requests, name);
-   },
-   'should count POST requests': function(name) {
-     var requests = requestStatsTest("httpd_request_methods", "POST").current;
-     CouchDB.request("POST", "/");
-     var new_requests = requestStatsTest("httpd_request_methods", "POST").current;
-
-     TEquals(requests + 1, new_requests, name);
-   }
- };
-
- var document_write_count_tests = {
-   'should increment database changes counter for document creates': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var creates = requestStatsTest("couchdb", "database_writes").current;
-     db.save({"a":"1"});
-     var new_creates = requestStatsTest("couchdb", "database_writes").current;
-
-     TEquals(creates + 1, new_creates, name);
-   },
-   'should increment database changes counter for document updates': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var doc = {"_id":"test"};
-     db.save(doc);
-
-     var updates = requestStatsTest("couchdb", "database_writes").current;
-     db.save(doc);
-     var new_updates = requestStatsTest("couchdb", "database_writes").current;
-
-     TEquals(updates + 1, new_updates, name);
-   },
-   'should increment database changes counter for document deletes': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var doc = {"_id":"test"};
-     db.save(doc);
-
-     var deletes = requestStatsTest("couchdb", "database_writes").current;
-     db.deleteDoc(doc);
-     var new_deletes = requestStatsTest("couchdb", "database_writes").current;
-
-     TEquals(deletes + 1, new_deletes, name);
-   },
-   'should increment database changes counter for document copies': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var doc = {"_id":"test"};
-     db.save(doc);
-
-     var copies = requestStatsTest("couchdb", "database_writes").current;
-     CouchDB.request("COPY", "/test_suite_db/test", {
-       headers: {"Destination":"copy_of_test"}
-     });
-     var new_copies = requestStatsTest("couchdb", "database_writes").current;
-
-     TEquals(copies + 1, new_copies, name);
-   },
-   'should increase the bulk doc counter': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var bulks = requestStatsTest("httpd", "bulk_requests").current;
-
-     var docs = makeDocs(5);
-     db.bulkSave(docs);
-
-     var new_bulks = requestStatsTest("httpd", "bulk_requests").current;
-
-     TEquals(bulks + 1, new_bulks, name);
-   },
-   'should increment database changes counter for document creates using POST': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var creates = requestStatsTest("couchdb", "database_writes").current;
-     CouchDB.request("POST", "/test_suite_db", {body:'{"a":"1"}'});
-     var new_creates = requestStatsTest("couchdb", "database_writes").current;
-
-     TEquals(creates + 1, new_creates, name);
-   },
-   'should increment database changes counter when adding attachment': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var creates = requestStatsTest("couchdb", "database_writes").current;
-     CouchDB.request("PUT", "/test_suite_db/bin_doc2/foo2.txt", {
-           body:"This is no base64 encoded text",
-           headers:{"Content-Type": "text/plain;charset=utf-8"}
-     });
-     var new_creates = requestStatsTest("couchdb", "database_writes").current;
-     TEquals(creates + 1, new_creates, name);
-   },
-   'should increment database changes counter when adding attachment to existing doc': function(name) {
-     var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-     db.createDb();
-
-     var doc = {_id:"test"};
-     db.save(doc);
-
-     var updates = requestStatsTest("couchdb", "database_writes").current;
-     CouchDB.request("PUT", "/test_suite_db/test/foo2.txt?rev=" + doc._rev, {
-           body:"This is no base64 encoded text",
-           headers:{"Content-Type": "text/plain;charset=utf-8"}
-     });
-     var new_updates = requestStatsTest("couchdb", "database_writes").current;
-     TEquals(updates + 1, new_updates, name);
-   }
-
- };
- var response_codes_tests = {
-   'should increment the response code counter': function(name) {
-     var db = new CouchDB("nonexistant_db", {"X-Couch-Full-Commit":"false"});
-     db.deleteDb();
-
-     var not_founds = requestStatsTest("httpd_status_codes", "404").current;
-     CouchDB.request("GET", "/nonexistant_db");
-     var new_not_founds = requestStatsTest("httpd_status_codes", "404").current;
-
-     TEquals(not_founds + 1, new_not_founds, name);
-   },
-   'should not increment respinse code counter for other response code': function(name) {
-     var not_founds = requestStatsTest("http_status_codes", "404").current;
-     CouchDB.request("GET", "/");
-     var new_not_founds = requestStatsTest("http_status_codes", "404").current;
-
-     TEquals(not_founds, new_not_founds, name);
-   }
- };
-
- var aggregation_tests = {
-   'should return the mean': function(name) {
-     CouchDB.request("GET", "/");
-
-     var mean = requestStatsTest("httpd", "requests").mean;
-
-     T(mean >= 0, name);
-   },
-   'should return the maximum': function(name) {
-     CouchDB.request("GET", "/");
-
-     var maximum = requestStatsTest("httpd", "requests").max;
-
-     T(maximum >= 0, name);
-   },
-   'should return the minimum': function(name) {
-     CouchDB.request("GET", "/");
-
-     var minimum = requestStatsTest("httpd", "requests", "min").min;
-
-     T(minimum >= 0, name);
-   },
-   'should return the stddev': function(name) {
-     CouchDB.request("GET", "/");
-
-     var stddev = requestStatsTest("httpd", "stddev_requests").current;
-
-     T(stddev >= 0, name);
-   }
- };
-
- var summary_tests = {
-   'should show a summary of all counters with aggregated values': function(name) {
-     var options = {};
-     options.headers = {"Accept": "application/json"};
-     var summary = JSON.parse(CouchDB.request("GET", "/_stats", options).responseText);
-     var aggregates = ["mean", "min", "max", "stddev",
-       "current"];
-
-     for(var i in aggregates) {
-       T(summary.httpd.requests[aggregates[i]] >= 0, aggregates[i] + " >= 0", name);
-     }
-   }
- };
-
-   var tests = [
-     open_databases_tests,
-     request_count_tests,
-     database_read_count_tests,
-     view_read_count_tests,
-     http_requests_by_method_tests,
-     document_write_count_tests,
-     response_codes_tests,
-     aggregation_tests,
-     summary_tests
-   ];
-
-   for(var testGroup in tests) {
-     for(var test in tests[testGroup]) {
-       tests[testGroup][test](test);
-     }
-   };
-
-   function createAndRequestView(db) {
-     var designDoc = {
-       _id:"_design/test", // turn off couch.js id escaping?
-       language: "javascript",
-       views: {
-         all_docs_twice: {map: "function(doc) { emit(doc.integer, null); emit(doc.integer, null) }"},
-       }
-     };
-     db.save(designDoc);
-
-     db.view("test/all_docs_twice");
-   }
-
-   function requestStatsTest(module, key) {
-     return CouchDB.requestStats(module, key, true);
-   }
-}
+    test: function(before, after) {
+      TEquals(before+1, after, "POST'ing new docs increments doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {var doc = db.open("test"); db.save(doc);},
+    test: function(before, after) {
+      TEquals(before+1, after, "Updating docs incrememnts doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {var doc = db.open("test"); db.deleteDoc(doc);},
+    test: function(before, after) {
+      TEquals(before+1, after, "Deleting docs increments doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {
+      CouchDB.request("COPY", "/test_suite_db/test", {
+        headers: {"Destination": "copy_of_test"}
+      });
+    },
+    test: function(before, after) {
+      TEquals(before+1, after, "Copying docs increments doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    run: function() {
+      CouchDB.request("PUT", "/test_suite_db/bin_doc2/foo2.txt", {
+        body: "This is no base64 encoded test",
+        headers: {"Content-Type": "text/plain;charset=utf-8"}
+      });
+    },
+    test: function(before, after) {
+      TEquals(before+1, after, "Create with attachment increments doc writes.");
+    }
+  });
+  
+  runTest("couchdb", "database_writes", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {
+      var doc = db.open("test");
+      CouchDB.request("PUT", "/test_suite_db/test/foo2.txt?rev=" + doc._rev, {
+        body: "This is no base64 encoded text",
+        headers: {"Content-Type": "text/plainn;charset=utf-8"}
+      });
+    },
+    test: function(before, after) {
+      TEquals(before+1, after, "Adding attachment increments doc writes.");
+    }
+  });
+  
+  runTest("httpd", "bulk_requests", {
+    run: function(db) {db.bulkSave(makeDocs(5));},
+    test: function(before, after) {
+      TEquals(before+1, after, "The bulk_requests counter is incremented.");
+    }
+  });
+  
+  runTest("httpd", "view_reads", {
+    run: function(db) {doView(db);},
+    test: function(before, after) {
+      TEquals(before+1, after, "Reading a view increments view reads.");
+    }
+  });
+  
+  runTest("httpd", "view_reads", {
+    setup: function(db) {db.save({"_id": "test"});},
+    run: function(db) {db.open("test");},
+    test: function(before, after) {
+      TEquals(before, after, "Reading a doc doesn't increment view reads.");
+    }
+  });
+  
+  runTest("httpd", "temporary_view_reads", {
+    run: function(db) {db.query(function(doc) {emit(doc._id)})},
+    test: function(before, after) {
+      TEquals(before+1, after, "Temporary views have their own counter.");
+    }
+  });
+  
+  runTest("httpd", "temporary_view_reads", {
+    run: function(db) {doView(db);},
+    test: function(before, after) {
+      TEquals(before, after, "Permanent views don't affect temporary views.");
+    }
+  });
+  
+  runTest("httpd", "view_reads", {
+    run: function(db) {db.query(function(doc) {emit(doc._id)});},
+    test: function(before, after) {
+      TEquals(before, after, "Temporary views don't affect permanent views.");
+    }
+  });
+  
+  // Relies on the fact that fetching the stats values is itself a GET request.
+  runTest("httpd_request_methods", "GET", {
+    test: function(before, after) {
+      TEquals(before+1, after, "Get requests are incremented properly.");
+    }
+  });
+  
+  runTest("httpd_request_methods", "GET", {
+    run: function() {CouchDB.request("POST", "/");},
+    test: function(before, after) {
+      TEquals(before+1, after, "POST requests don't affect GET counter.");
+    }
+  });
+  
+  runTest("httpd_request_methods", "POST", {
+    run: function() {CouchDB.request("POST", "/");},
+    test: function(before, after) {
+      TEquals(before+1, after, "POST requests are incremented properly.");
+    }
+  });
+  
+  runTest("httpd_status_codes", "404", {
+    run: function() {CouchDB.request("GET", "/nonexistant_db");},
+    test: function(before, after) {
+      TEquals(before+1, after, "Increments 404 counter on db not found.");
+    }
+  });
+  
+  runTest("httpd_status_codes", "404", {
+    run: function() {CouchDB.request("GET", "/");},
+    test: function(before, after) {
+      TEquals(before, after, "Getting DB info doesn't increment 404's");
+    }
+  });
+
+  (function() {
+    var aggregates = [
+      "current",
+      "description",
+      "mean",
+      "min",
+      "max",
+      "stddev",
+      "sum"
+    ];
+    var summary = JSON.parse(CouchDB.request("GET", "/_stats", {
+      headers: {"Accept": "application/json"}
+    }).responseText);
+    for(var i in summary) {
+      for(var j in summary[i]) {
+        for(var k in summary[i][j]) {
+          T(aggregates.indexOf(k) >= 0, "Unknown property name: " + k);
+        }
+        for(var k in aggregates) {
+          var mesg = "Missing required property: " + aggregates[k];
+          T(summary[i][j][aggregates[k]] !== undefined, mesg);
+        }
+      }
+    }
+  })();
+};

Modified: couchdb/trunk/src/couchdb/Makefile.am
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/Makefile.am?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/Makefile.am (original)
+++ couchdb/trunk/src/couchdb/Makefile.am Thu Sep 17 04:04:46 2009
@@ -109,7 +109,7 @@
     couch_db_updater.erl \
     couch_work_queue.erl
 
-EXTRA_DIST = $(source_files) couch_db.hrl couch_stats.hrl
+EXTRA_DIST = $(source_files) couch_db.hrl
 
 compiled_files = \
     couch.app \

Modified: couchdb/trunk/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_db.hrl?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_db.hrl (original)
+++ couchdb/trunk/src/couchdb/couch_db.hrl Thu Sep 17 04:04:46 2009
@@ -20,6 +20,7 @@
 -define(JSON_ENCODE(V), mochijson2:encode(V)).
 -define(JSON_DECODE(V), mochijson2:decode(V)).
 
+-define(b2a(V), list_to_atom(binary_to_list(V))).
 -define(b2l(V), binary_to_list(V)).
 -define(l2b(V), list_to_binary(V)).
 

Modified: couchdb/trunk/src/couchdb/couch_httpd_stats_handlers.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_httpd_stats_handlers.erl?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_httpd_stats_handlers.erl (original)
+++ couchdb/trunk/src/couchdb/couch_httpd_stats_handlers.erl Thu Sep 17 04:04:46 2009
@@ -12,51 +12,44 @@
 
 -module(couch_httpd_stats_handlers).
 -include("couch_db.hrl").
--include("couch_stats.hrl").
 
 -export([handle_stats_req/1]).
--import(couch_httpd,
-    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
-    start_json_response/2,send_chunk/2,end_json_response/1,
-    start_chunked_response/3, send_error/4]).
-
--define(b2a(V), list_to_atom(binary_to_list(V))).
-
--record(stats_query_args, {
-    range='0',
-    flush=false
-}).
+-import(couch_httpd, [
+    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
+    start_json_response/2, send_chunk/2, end_json_response/1,
+    start_chunked_response/3, send_error/4
+]).
 
 handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
-    send_json(Req, couch_stats_aggregator:all());
+    flush(Req),
+    send_json(Req, couch_stats_aggregator:all(range(Req)));
 
-handle_stats_req(#httpd{method='GET', path_parts=[_Stats, Module, Key]}=Req) ->
-    #stats_query_args{
-        range=Range,
-        flush=Flush
-    } = parse_stats_query(Req),
-
-    case Flush of
-        true ->
-            couch_stats_aggregator:time_passed();
-        _ -> ok
-    end,
-
-    Stats = couch_stats_aggregator:get_json({?b2a(Module), ?b2a(Key)}, Range),
-    Response = {[{Module, {[{Key, Stats}]}}]},
-    send_json(Req, Response);
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
+    flush(Req),
+    Stats = couch_stats_aggregator:get_json({?b2a(Mod), ?b2a(Key)}, range(Req)),
+    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
 
 handle_stats_req(Req) ->
     send_method_not_allowed(Req, "GET").
 
-parse_stats_query(Req) ->
-    lists:foldl(fun({Key,Value}, Args) ->
-        case {Key, Value} of
-        {"range", Range} ->
-            Args#stats_query_args{range=list_to_atom(Range)};
-        {"flush", "true"} ->
-            Args#stats_query_args{flush=true};
-        _Else -> % unknown key value pair, ignore.
-            Args
-        end
-    end, #stats_query_args{}, couch_httpd:qs(Req)).
+range(Req) ->
+    case proplists:get_value("range", couch_httpd:qs(Req)) of
+        undefined ->
+            0;
+        Value ->
+            list_to_integer(Value)
+    end.
+
+flush(Req) ->
+    case proplists:get_value("flush", couch_httpd:qs(Req)) of
+        "true" ->
+            couch_stats_aggregator:collect_sample();
+        _Else ->
+            ok
+    end.
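
With these handlers, the "range" query parameter is now an integer number of seconds
that should match one of the configured sample intervals, and "flush=true" forces an
immediate collect_sample before the response is built. An illustrative request and
response shape (the values are made up; the keys come from
couch_stats_aggregator:to_json_term/1 in the next file):

    GET /_stats/httpd/requests?range=60&flush=true

    {"httpd": {"requests": {
        "description": "number of HTTP requests",
        "current": 1243, "sum": 1243, "mean": 22.385,
        "stddev": 10.423, "min": 0, "max": 78
    }}}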

Modified: couchdb/trunk/src/couchdb/couch_stats_aggregator.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_stats_aggregator.erl?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_stats_aggregator.erl (original)
+++ couchdb/trunk/src/couchdb/couch_stats_aggregator.erl Thu Sep 17 04:04:46 2009
@@ -11,375 +11,283 @@
 % the License.
 
 -module(couch_stats_aggregator).
--include("couch_stats.hrl").
-
 -behaviour(gen_server).
 
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-        terminate/2, code_change/3]).
+-export([start/0, stop/0]).
+-export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]).
 
--export([start/0, stop/0,
-         get/1, get/2, get_json/1, get_json/2, all/0,
-         time_passed/0, clear_aggregates/1]).
-
--record(state, {
-    aggregates = [],
-    descriptions = []
-}).
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
 
--define(COLLECTOR, couch_stats_collector).
--define(QUEUE_MAX_LENGTH, 900). % maximimum number of seconds
+-record(aggregate, {
+    description = <<"">>,
+    seconds = 0,
+    count = 0,
+    current = null,
+    sum = null,
+    mean = null,
+    variance = null,
+    stddev = null,
+    min = null,
+    max = null,
+    samples = []
+}).
 
-% PUBLIC API
 
 start() ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
 
 stop() ->
-    gen_server:call(?MODULE, stop).
+    gen_server:cast(?MODULE, stop).
+
+all() ->
+    ?MODULE:all(0).
+all(Time) when is_binary(Time) ->
+    ?MODULE:all(list_to_integer(binary_to_list(Time)));
+all(Time) when is_atom(Time) ->
+    ?MODULE:all(list_to_integer(atom_to_list(Time)));
+all(Time) when is_integer(Time) ->
+    Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}),
+    Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs),
+    case Stats of
+        [] ->
+            {[]};
+        _ ->
+            Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) ->
+                CurrKeys = case proplists:lookup(Mod, Acc) of
+                    none -> [];
+                    {Mod, {Keys}} -> Keys
+                end,
+                NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]},
+                [{Mod, NewMod} | proplists:delete(Mod, Acc)]
+            end, [], Stats),
+            {Ret}
+    end.
 
 get(Key) ->
-    gen_server:call(?MODULE, {get, Key}).
-get(Key, Time) ->
-    gen_server:call(?MODULE, {get, Key, Time}).
+    ?MODULE:get(Key, 0).
+get(Key, Time) when is_binary(Time) ->
+    ?MODULE:get(Key, list_to_integer(binary_to_list(Time)));
+get(Key, Time) when is_atom(Time) ->
+    ?MODULE:get(Key, list_to_integer(atom_to_list(Time)));
+get(Key, Time) when is_integer(Time) ->
+    case ets:lookup(?MODULE, {make_key(Key), Time}) of
+        [] -> #aggregate{seconds=Time};
+        [{_, Agg}] -> Agg
+    end.
 
 get_json(Key) ->
-    gen_server:call(?MODULE, {get_json, Key}).
+    get_json(Key, 0).
 get_json(Key, Time) ->
-    gen_server:call(?MODULE, {get_json, Key, Time}).
+    to_json_term(?MODULE:get(Key, Time)).
 
-time_passed() ->
-    gen_server:call(?MODULE, time_passed).
+collect_sample() ->
+    gen_server:call(?MODULE, collect_sample).
 
-clear_aggregates(Time) ->
-    gen_server:call(?MODULE, {clear_aggregates, Time}).
-
-all() ->
-    gen_server:call(?MODULE, all).
-
-% GEN_SERVER
 
 init(_) ->
+    % Create one aggregate entry per {metric key, sample interval} pair.
     ets:new(?MODULE, [named_table, set, protected]),
-    init_timers(),
-    init_descriptions(),
-    {ok, #state{}}.
-
-handle_call({get, Key}, _, State) ->
-    Value = get_aggregate(Key, State),
-    {reply, Value, State};
-
-handle_call({get, Key, Time}, _, State) ->
-    Value = get_aggregate(Key, State, Time),
-    {reply, Value, State};
-
-handle_call({get_json, Key}, _, State) ->
-    Value = aggregate_to_json_term(get_aggregate(Key, State)),
-    {reply, Value, State};
-
-handle_call({get_json, Key, Time}, _, State) ->
-    Value = aggregate_to_json_term(get_aggregate(Key, State, Time)),
-    {reply, Value, State};
-
-handle_call(time_passed, _, OldState) ->
-
-    % the foldls below could probably be refactored into a less code-duping form
-
-    % update aggregates on incremental counters
-    NextState = lists:foldl(fun(Counter, State) ->
-        {Key, Value} = Counter,
-        update_aggregates_loop(Key, Value, State, incremental)
-    end, OldState, ?COLLECTOR:all(incremental)),
-
-    % update aggregates on absolute value counters
-    NewState = lists:foldl(fun(Counter, State) ->
-        {Key, Value} = Counter,
-        % clear the counter, we've got the important bits in State
-        ?COLLECTOR:clear(Key),
-        update_aggregates_loop(Key, Value, State, absolute)
-    end, NextState, ?COLLECTOR:all(absolute)),
-
-    {reply, ok, NewState};
-
-handle_call({clear_aggregates, Time}, _, State) ->
-    {reply, ok, do_clear_aggregates(Time, State)};
-
-handle_call(all, _ , State) ->
-    Results = do_get_all(State),
-    {reply, Results, State};
-
-handle_call(stop, _, State) ->
-    {stop, normal, stopped, State}.
-
-
-% PRIVATE API
-
-% Stats = [{Key, TimesProplist}]
-% TimesProplist = [{Time, Aggrgates}]
-% Aggregates = #aggregates{}
-%
-% [
-%  {Key, [
-%             {TimeA, #aggregates{}},
-%             {TimeB, #aggregates{}},
-%             {TimeC, #aggregates{}},
-%             {TimeD, #aggregates{}}
-%        ]
-%  },
-%
-% ]
-
-%% clear the aggregats record for a specific Time = 60 | 300 | 900
-do_clear_aggregates(Time, #state{aggregates=Stats}) ->
-    NewStats = lists:map(fun({Key, TimesProplist}) ->
-        {Key, case proplists:lookup(Time, TimesProplist) of
-            % do have stats for this key, if we don't, return Stat unmodified
+    SampleStr = couch_config:get("stats", "samples", "[0]"),
+    {ok, Samples} = couch_util:parse_term(SampleStr),
+    lists:foreach(fun({KeyStr, Value}) ->
+        {ok, Key} = couch_util:parse_term(KeyStr),
+        lists:foreach(fun(Secs) ->
+            Agg = #aggregate{
+                description=list_to_binary(Value),
+                seconds=Secs
+            },
+            ets:insert(?MODULE, {{Key, Secs}, Agg})
+        end, Samples)
+    end, couch_config:get("stats_descriptions")),
+    
+    Self = self(),
+    ok = couch_config:register(
+        fun("stats", _) -> exit(Self, config_change) end
+    ),
+    ok = couch_config:register(
+        fun("stats_descriptions", _) -> exit(Self, config_change) end
+    ),
+    
+    Rate = list_to_integer(couch_config:get("stats", "rate", "1000")),
+    % TODO: Add timer_start to kernel start options.
+    timer:apply_interval(Rate, ?MODULE, collect_sample, []).
+    
+terminate(_Reason, TRef) ->
+    timer:cancel(TRef),
+    ok.
+
+handle_call(collect_sample, _, State) ->
+    % Gather new stats values to add.
+    Incs = lists:map(fun({Key, Value}) ->
+        {Key, {incremental, Value}}
+    end, couch_stats_collector:all(incremental)),
+    Abs = lists:map(fun({Key, Values}) ->
+        couch_stats_collector:clear(Key),
+        Values2 = case Values of
+            X when is_list(X) -> X;
+            Else -> [Else]
+        end,
+        {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) ->
+            {Count+1, Curr + (Val - Curr) / (Count+1)}
+        end, {0, 0}, Values2),
+        {Key, {absolute, Mean}}
+    end, couch_stats_collector:all(absolute)),
+    
+    Values = Incs ++ Abs,
+    Now = erlang:now(),
+    lists:foreach(fun({{Key, Rate}, Agg}) ->
+        NewAgg = case proplists:lookup(Key, Values) of
             none ->
-                TimesProplist;
-            % there are stats, let's unset the Time one
-            {_Time, _Stat} ->
-                [{Time, #aggregates{}} | proplists:delete(Time, TimesProplist)]
-        end}
-    end, Stats),
-    #state{aggregates=NewStats}.
-
-get_aggregate(Key, State) ->
-    %% default Time is 0, which is when CouchDB started
-    get_aggregate(Key, State, '0').
-get_aggregate(Key, #state{aggregates=StatsList}, Time) ->
-    Description = get_description(Key),
-    Aggregates = case proplists:lookup(Key, StatsList) of
-        % if we don't have any data here, return an empty record
-        none -> #aggregates{description=Description};
-        {Key, Stats} ->
-            case proplists:lookup(Time, Stats) of
-                none -> #aggregates{description=Description}; % empty record again
-                {Time, Stat} -> Stat#aggregates{description=Description}
-            end
-    end,
-    Aggregates.
+                rem_values(Now, Agg);
+            {Key, {Type, Value}} ->
+                NewValue = new_value(Type, Value, Agg#aggregate.current),
+                Agg2 = add_value(Now, NewValue, Agg),
+                rem_values(Now, Agg2)
+        end,
+        ets:insert(?MODULE, {{Key, Rate}, NewAgg})
+    end, ets:tab2list(?MODULE)),
+    {reply, ok, State}.
 
-get_description(Key) ->
-    case ets:lookup(?MODULE, Key) of
-        [] -> <<"No description yet.">>;
-        [{_Key, Description}] -> Description
-    end.
+handle_cast(stop, State) ->
+    {stop, normal, State}.
 
-%% updates all aggregates for Key
-update_aggregates_loop(Key, Values, State, CounterType) ->
-    #state{aggregates=AllStats} = State,
-    % if we don't have any aggregates yet, put a list of empty atoms in
-    % so we can loop over them in update_aggregates().
-    % [{{httpd,requests},
-    %              [{'0',{aggregates,1,1,1,0,0,1,1}},
-    %               {'60',{aggregates,1,1,1,0,0,1,1}},
-    %               {'300',{aggregates,1,1,1,0,0,1,1}},
-    %               {'900',{aggregates,1,1,1,0,0,1,1}}]}]
-    [{_Key, StatsList}] = case proplists:lookup(Key, AllStats) of
-        none -> [{Key, [
-                {'0', empty},
-                {'60', empty},
-                {'300', empty},
-                {'900', empty}
-             ]}];
-        AllStatsMatch ->
-        [AllStatsMatch]
-    end,
+handle_info(_Info, State) ->
+    {noreply, State}.
 
-    % if we  get called with a single value, wrap in in a list
-    ValuesList = case is_list(Values) of
-        false -> [Values];
-        _True -> Values
-    end,
+code_change(_OldVersion, State, _Extra) ->
+    {ok, State}.
 
-    % loop over all Time's
-    NewStats = lists:map(fun({Time, Stats}) ->
-        % loop over all values for Key
-        lists:foldl(fun(Value, Stat) ->
-            {Time, update_aggregates(Value, Stat, CounterType)}
-        end, Stats, ValuesList)
-    end, StatsList),
-
-    % put the newly calculated aggregates into State and delete the previous
-    % entry
-    #state{
-        aggregates=[{Key, NewStats} | proplists:delete(Key, AllStats)]
-    }.
 
-% does the actual updating of the aggregate record
-update_aggregates(Value, Stat, CounterType) ->
-    case Stat of
-        % the first time this is called, we don't have to calculate anything
-        % we just populate the record with Value
-        empty -> #aggregates{
-            min=Value,
-            max=Value,
-            mean=Value,
-            variance=0,
-            stddev=0,
-            count=1,
-            current=Value
-        };
-        % this sure could look nicer -- any ideas?
-        StatsRecord ->
-            #aggregates{
-                min=Min,
-                max=Max,
-                mean=Mean,
-                variance=Variance,
-                count=Count,
-                current=Current
-            } = StatsRecord,
-
-            % incremental counters need to keep track of the last update's value
-            NewValue = case CounterType of
-                incremental -> Value - Current;
-                absolute -> Value
-            end,
-                % Knuth, The Art of Computer Programming, vol. 2, p. 232.
-                NewCount = Count + 1,
-                NewMean = Mean + (NewValue - Mean) / NewCount, % NewCount is never 0.
-                NewVariance = Variance + (NewValue - Mean) * (NewValue - NewMean),
-                #aggregates{
-                    min=lists:min([NewValue, Min]),
-                    max=lists:max([NewValue, Max]),
-                    mean=NewMean,
-                    variance=NewVariance,
-                    stddev=math:sqrt(NewVariance / NewCount),
-                    count=NewCount,
-                    current=Value
-                }
+new_value(incremental, Value, null) ->
+    Value;
+new_value(incremental, Value, Current) ->
+    Value - Current;
+new_value(absolute, Value, _Current) ->
+    Value.
+
+add_value(Time, Value, #aggregate{count=Count}=Agg) when Count < 1 ->
+    Samples = case Agg#aggregate.seconds of
+        0 -> [];
+        _ -> [{Time, Value}]
+    end,
+    Agg#aggregate{
+        count=1,
+        current=Value,
+        sum=Value,
+        mean=Value,
+        variance=0.0,
+        stddev=null,
+        min=Value,
+        max=Value,
+        samples=Samples
+    };
+add_value(Time, Value, Agg) ->
+    #aggregate{
+        count=Count,
+        current=Current,
+        sum=Sum,
+        mean=Mean,
+        variance=Variance,
+        samples=Samples
+    } = Agg,
+    
+    NewCount = Count + 1,
+    NewMean = Mean + (Value - Mean) / NewCount,
+    NewVariance = Variance + (Value - Mean) * (Value - NewMean),
+    StdDev = case NewCount > 1 of
+        false -> null;
+        _ -> math:sqrt(NewVariance / (NewCount - 1))
+    end,
+    Agg2 = Agg#aggregate{
+        count=NewCount,
+        current=Current + Value,
+        sum=Sum + Value,
+        mean=NewMean,
+        variance=NewVariance,
+        stddev=StdDev,
+        min=lists:min([Agg#aggregate.min, Value]),
+        max=lists:max([Agg#aggregate.max, Value])
+    },
+    case Agg2#aggregate.seconds of
+        0 -> Agg2;
+        _ -> Agg2#aggregate{samples=[{Time, Value} | Samples]}
     end.
 
+rem_values(Time, Agg) ->
+    Seconds = Agg#aggregate.seconds,
+    Samples = Agg#aggregate.samples,
+    Pred = fun({When, _Value}) ->
+        timer:now_diff(Time, When) =< (Seconds * 1000000)
+    end,
+    {Keep, Remove} = lists:splitwith(Pred, Samples),
+    Agg2 = lists:foldl(fun({_, Value}, Acc) ->
+        rem_value(Value, Acc)
+    end, Agg, Remove),
+    Agg2#aggregate{samples=Keep}.
+
+rem_value(_Value, #aggregate{count=Count}=Agg) when Count =< 1 ->
+    #aggregate{seconds=Agg#aggregate.seconds};
+rem_value(Value, Agg) ->
+    #aggregate{
+        count=Count,
+        sum=Sum,
+        mean=Mean,
+        variance=Variance
+    } = Agg,
+
+    OldMean = (Mean * Count - Value) / (Count - 1),
+    OldVariance = Variance - (Value - OldMean) * (Value - Mean),
+    OldCount = Count - 1,
+    StdDev = case OldCount > 1 of
+        false -> null;
+        _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
+    end,
+    Agg#aggregate{
+        count=OldCount,
+        sum=Sum-Value,
+        mean=clamp_value(OldMean),
+        variance=clamp_value(OldVariance),
+        stddev=StdDev
+    }.
 
-aggregate_to_json_term(#aggregates{min=Min,max=Max,mean=Mean,stddev=Stddev,count=Count,current=Current,description=Description}) ->
+to_json_term(Agg) ->
+    {Min, Max} = case Agg#aggregate.seconds > 0 of
+        false ->
+            {Agg#aggregate.min, Agg#aggregate.max};
+        _ ->
+            case length(Agg#aggregate.samples) > 0 of
+                true ->
+                    Extract = fun({_Time, Value}) -> Value end,
+                    Samples = lists:map(Extract, Agg#aggregate.samples),
+                    {lists:min(Samples), lists:max(Samples)};
+                _ ->
+                    {null, null}
+            end
+    end,
     {[
-        {current, Current},
-        {count, Count},
-        {mean, Mean},
+        {description, Agg#aggregate.description},
+        {current, round_value(Agg#aggregate.sum)},
+        {sum, round_value(Agg#aggregate.sum)},
+        {mean, round_value(Agg#aggregate.mean)},
+        {stddev, round_value(Agg#aggregate.stddev)},
         {min, Min},
-        {max, Max},
-        {stddev, Stddev},
-        {description, Description}
+        {max, Max}
     ]}.
 
-get_stats(Key, State) ->
-    aggregate_to_json_term(get_aggregate(Key, State)).
-
-% convert ets2list() list into JSON-erlang-terms.
-% Thanks to Paul Davis
-do_get_all(#state{aggregates=Stats}=State) ->
-    case Stats of
-        [] -> {[]};
-        _ ->
-        [{LastMod, LastVals} | LastRestMods] = lists:foldl(fun({{Module, Key}, _Count}, AccIn) ->
-              case AccIn of
-                  [] ->
-                      [{Module, [{Key, get_stats({Module, Key}, State)}]}];
-                  [{Module, PrevVals} | RestMods] ->
-                      [{Module, [{Key, get_stats({Module, Key}, State)} | PrevVals]} | RestMods];
-                  [{OtherMod, ModVals} | RestMods] ->
-                      [{Module, [{Key, get_stats({Module, Key}, State)}]}, {OtherMod, {lists:reverse(ModVals)}} | RestMods]
-              end
-          end, [], lists:sort(Stats)),
-          {[{LastMod, {lists:sort(LastVals)}} | LastRestMods]}
-    end.
-
-
-init_descriptions() ->
-
-    % ets is probably overkill here, but I didn't manage to keep the
-    % descriptions in the gen_server state. Which means there is probably
-    % a bug in one of the handle_call() functions most likely the one that
-    % handles the time_passed message. But don't tell anyone, the math is
-    % correct :) -- Jan
-
-
-    % Style guide for descriptions: Start with a lowercase letter & do not add
-    % a trailing full-stop / period.
-
-    % please keep this in alphabetical order
-    ets:insert(?MODULE, {{couchdb, database_writes}, <<"number of times a database was changed">>}),
-    ets:insert(?MODULE, {{couchdb, database_reads}, <<"number of times a document was read from a database">>}),
-    ets:insert(?MODULE, {{couchdb, open_databases}, <<"number of open databases">>}),
-    ets:insert(?MODULE, {{couchdb, open_os_files}, <<"number of file descriptors CouchDB has open">>}),
-    ets:insert(?MODULE, {{couchdb, request_time}, <<"length of a request inside CouchDB without MochiWeb">>}),
-
-    ets:insert(?MODULE, {{httpd, bulk_requests}, <<"number of bulk requests">>}),
-    ets:insert(?MODULE, {{httpd, requests}, <<"number of HTTP requests">>}),
-    ets:insert(?MODULE, {{httpd, temporary_view_reads}, <<"number of temporary view reads">>}),
-    ets:insert(?MODULE, {{httpd, view_reads}, <<"number of view reads">>}),
-    ets:insert(?MODULE, {{httpd, clients_requesting_changes}, <<"Number of clients currently requesting continuous _changes">>}),
-
-    ets:insert(?MODULE, {{httpd_request_methods, 'COPY'}, <<"number of HTTP COPY requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'DELETE'}, <<"number of HTTP DELETE requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'GET'}, <<"number of HTTP GET requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'HEAD'}, <<"number of HTTP HEAD requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'MOVE'}, <<"number of HTTP MOVE requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'POST'}, <<"number of HTTP POST requests">>}),
-    ets:insert(?MODULE, {{httpd_request_methods, 'PUT'}, <<"number of HTTP PUT requests">>}),
-
-    ets:insert(?MODULE, {{httpd_status_codes, '200'}, <<"number of HTTP 200 OK responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '201'}, <<"number of HTTP 201 Created responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '202'}, <<"number of HTTP 202 Accepted responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '301'}, <<"number of HTTP 301 Moved Permanently responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '304'}, <<"number of HTTP 304 Not Modified responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '400'}, <<"number of HTTP 400 Bad Request responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '401'}, <<"number of HTTP 401 Unauthorized responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '403'}, <<"number of HTTP 403 Forbidden responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '404'}, <<"number of HTTP 404 Not Found responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '405'}, <<"number of HTTP 405 Method Not Allowed responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '409'}, <<"number of HTTP 409 Conflict responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '412'}, <<"number of HTTP 412 Precondition Failed responses">>}),
-    ets:insert(?MODULE, {{httpd_status_codes, '500'}, <<"number of HTTP 500 Internal Server Error responses">>}).
-    % please keep this in alphabetical order
-
-
-% Timer
-
-init_timers() ->
-
-    % OTP docs on timer: http://erlang.org/doc/man/timer.html
-    %   start() -> ok
-    %   Starts the timer server. Normally, the server does not need to be
-    %   started explicitly. It is started dynamically if it is needed. This is
-    %   useful during development, but in a target system the server should be
-    %   started explicitly. Use configuration parameters for kernel for this.
-    %
-    % TODO: Add timer_start to kernel start options.
-
-
-    % start timers every second, minute, five minutes and fifteen minutes
-    % in the rare event of a timer death, couch_stats_aggregator will die,
-    % too and restarted by the supervision tree, all stats (for the last
-    % fifteen minutes) are gone.
-
-    {ok, _} = timer:apply_interval(1000, ?MODULE, time_passed, []),
-    {ok, _} = timer:apply_interval(60000, ?MODULE, clear_aggregates, ['60']),
-    {ok, _} = timer:apply_interval(300000, ?MODULE, clear_aggregates, ['300']),
-    {ok, _} = timer:apply_interval(900000, ?MODULE, clear_aggregates, ['900']).
-
-
-% Unused gen_server behaviour API functions that we need to declare.
-
-%% @doc Unused
-handle_cast(foo, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-%% @doc Unused
-terminate(_Reason, _State) -> ok.
-
-%% @doc Unused
-code_change(_OldVersion, State, _Extra) -> {ok, State}.
-
-
-%% Tests
-
--ifdef(TEST).
-% Internal API unit tests go here
-
-
--endif.
+make_key({Mod, Val}) when is_integer(Val) ->
+    {Mod, list_to_atom(integer_to_list(Val))};
+make_key(Key) ->
+    Key.
+
+round_value(Val) when not is_number(Val) ->
+    Val;
+round_value(Val) when Val == 0 ->
+    Val;
+round_value(Val) ->
+    erlang:round(Val * 1000.0) / 1000.0.
+
+clamp_value(Val) when Val > 0.00000000000001 ->
+    Val;
+clamp_value(_) ->
+    0.0.

Modified: couchdb/trunk/src/couchdb/couch_stats_collector.erl
URL: http://svn.apache.org/viewvc/couchdb/trunk/src/couchdb/couch_stats_collector.erl?rev=816043&r1=816042&r2=816043&view=diff
==============================================================================
--- couchdb/trunk/src/couchdb/couch_stats_collector.erl (original)
+++ couchdb/trunk/src/couchdb/couch_stats_collector.erl Thu Sep 17 04:04:46 2009
@@ -18,23 +18,15 @@
 
 -behaviour(gen_server).
 
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-        terminate/2, code_change/3]).
+-export([start/0, stop/0]).
+-export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
+-export([track_process_count/1, track_process_count/2]).
 
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
 
--export([start/0, stop/0, get/1,
-        increment/1, decrement/1,
-        track_process_count/1, track_process_count/2,
-        record/2, clear/1,
-        all/0, all/1]).
-
--record(state, {}).
-
--define(ABSOLUTE_VALUE_COUNTER_TABLE, abs_table).
--define(HIT_COUNTER_TABLE, hit_table).
-
-
-% PUBLIC API
+-define(HIT_TABLE, stats_hit_table).
+-define(ABS_TABLE, stats_abs_table).
 
 start() ->
     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -42,106 +34,103 @@
 stop() ->
     gen_server:call(?MODULE, stop).
 
+all() ->
+    ets:tab2list(?HIT_TABLE) ++ abs_to_list().
+
+all(Type) ->
+    case Type of
+        incremental -> ets:tab2list(?HIT_TABLE);
+        absolute -> abs_to_list()
+    end.
+
 get(Key) ->
-    case ets:lookup(?HIT_COUNTER_TABLE, Key) of
+    case ets:lookup(?HIT_TABLE, Key) of
         [] ->
-            case ets:lookup(?ABSOLUTE_VALUE_COUNTER_TABLE, Key) of
+            case ets:lookup(?ABS_TABLE, Key) of
                 [] ->
-                    0;
-                Result2 -> extract_value_from_ets_result(Key, Result2)
+                    nil;
+                AbsVals ->
+                    lists:map(fun({_, Value}) -> Value end, AbsVals)
             end;
-        [{_,Result1}] -> Result1
+        [{_, Counter}] ->
+            Counter
     end.
 
-increment({Module, Key}) when is_integer(Key) ->
-    increment({Module, list_to_atom(integer_to_list(Key))});
 increment(Key) ->
-    case catch ets:update_counter(?HIT_COUNTER_TABLE, Key, 1) of
+    Key2 = make_key(Key),
+    case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
         {'EXIT', {badarg, _}} ->
-            true = ets:insert(?HIT_COUNTER_TABLE, {Key, 1}),
+            true = ets:insert(?HIT_TABLE, {Key2, 1}),
             ok;
-        _ -> ok
+        _ ->
+            ok
     end.
 
 decrement(Key) ->
-    case catch ets:update_counter(?HIT_COUNTER_TABLE, Key, -1) of
+    Key2 = make_key(Key),
+    case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
         {'EXIT', {badarg, _}} ->
-            true = ets:insert(?HIT_COUNTER_TABLE, {Key, -1}),
+            true = ets:insert(?HIT_TABLE, {Key2, -1}),
             ok;
         _ -> ok
     end.
 
 record(Key, Value) ->
-    ets:insert(?ABSOLUTE_VALUE_COUNTER_TABLE, {Key, Value}).
+    true = ets:insert(?ABS_TABLE, {make_key(Key), Value}).
 
 clear(Key) ->
-    true = ets:delete(?ABSOLUTE_VALUE_COUNTER_TABLE, Key).
-
-all() ->
-    lists:append(ets:tab2list(?HIT_COUNTER_TABLE),
-        ets:tab2list(?ABSOLUTE_VALUE_COUNTER_TABLE)).
-
-all(Type) ->
-    case Type of
-        incremental -> ets:tab2list(?HIT_COUNTER_TABLE);
-        absolute -> ets:tab2list(?ABSOLUTE_VALUE_COUNTER_TABLE)
-    end.
+    true = ets:delete(?ABS_TABLE, make_key(Key)).
 
 track_process_count(Stat) ->
     track_process_count(self(), Stat).
 
 track_process_count(Pid, Stat) ->
+    MonitorFun = fun() ->
+        Ref = erlang:monitor(process, Pid),
+        receive {'DOWN', Ref, _, _, _} -> ok end,
+        couch_stats_collector:decrement(Stat)
+    end,
     case (catch couch_stats_collector:increment(Stat)) of
-    ok ->
-        spawn(
-            fun() ->
-                erlang:monitor(process, Pid),
-                receive {'DOWN', _, _, _, _} -> ok end,
-                couch_stats_collector:decrement(Stat)
-            end);
-     _ -> ok
+        ok -> spawn(MonitorFun);
+        _ -> ok
     end.
 
 
-% GEN_SERVER
-
-
 init(_) ->
-    ets:new(?HIT_COUNTER_TABLE, [named_table, set, public]),
-    ets:new(?ABSOLUTE_VALUE_COUNTER_TABLE, [named_table, duplicate_bag, public]),
-    {ok, #state{}}.
+    ets:new(?HIT_TABLE, [named_table, set, public]),
+    ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
+    {ok, nil}.
 
+terminate(_Reason, _State) ->
+    ok.
 
 handle_call(stop, _, State) ->
     {stop, normal, stopped, State}.
 
-
-% PRIVATE API
-
-extract_value_from_ets_result(_Key, Result) ->
-    lists:map(fun({_, Value}) -> Value end, Result).
-
-
-% Unused gen_server behaviour API functions that we need to declare.
-
-%% @doc Unused
 handle_cast(foo, State) ->
     {noreply, State}.
 
 handle_info(_Info, State) ->
     {noreply, State}.
 
-%% @doc Unused
-terminate(_Reason, _State) -> ok.
-
-%% @doc Unused
-code_change(_OldVersion, State, _Extra) -> {ok, State}.
-
-
-%% Tests
-
--ifdef(TEST).
-% Internal API unit tests go here
+code_change(_OldVersion, State, _Extra) ->
+    {ok, State}.
 
 
--endif.
+make_key({Module, Key}) when is_integer(Key) ->
+    {Module, list_to_atom(integer_to_list(Key))};
+make_key(Key) ->
+    Key.
+
+abs_to_list() ->
+    SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
+    lists:foldl(fun({Key, Val}, Acc) ->
+        case Acc of
+            [] ->
+                [{Key, [Val]}];
+            [{Key, Prev} | Rest] ->
+                [{Key, [Val | Prev]} | Rest];
+            Others ->
+                [{Key, [Val]} | Others]
+        end
+    end, [], SortedKVs).
\ No newline at end of file
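
A minimal usage sketch of the reworked collector API (an Erlang shell session,
illustrative only; the behaviour mirrors the etap test added below): counters
come back as plain integers, absolute stats as the list of recorded values in
insertion order, all(absolute) groups them via abs_to_list/0, and a missing
key now yields nil rather than 0.

    1> couch_stats_collector:start().
    {ok,<0.35.0>}
    2> couch_stats_collector:increment(foo).
    ok
    3> couch_stats_collector:get(foo).
    1
    4> couch_stats_collector:record(bar, 0.0), couch_stats_collector:record(bar, 1.0).
    true
    5> couch_stats_collector:get(bar).
    [0.0,1.0]
    6> couch_stats_collector:all(absolute).
    [{bar,[1.0,0.0]}]
    7> couch_stats_collector:get(unknown_stat).
    nil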

Added: couchdb/trunk/test/etap/120-stats-collect.t
URL: http://svn.apache.org/viewvc/couchdb/trunk/test/etap/120-stats-collect.t?rev=816043&view=auto
==============================================================================
--- couchdb/trunk/test/etap/120-stats-collect.t (added)
+++ couchdb/trunk/test/etap/120-stats-collect.t Thu Sep 17 04:04:46 2009
@@ -0,0 +1,150 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+    code:add_pathz("src/couchdb"),
+    etap:plan(11),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail()
+    end,
+    ok.
+
+test() ->
+    couch_stats_collector:start(),
+    ok = test_counters(),
+    ok = test_abs_values(),
+    ok = test_proc_counting(),
+    ok = test_all(),
+    ok.
+
+test_counters() ->
+    AddCount = fun() -> couch_stats_collector:increment(foo) end,
+    RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+    repeat(AddCount, 100),
+    repeat(RemCount, 25),
+    repeat(AddCount, 10),
+    repeat(RemCount, 5),
+    etap:is(
+        couch_stats_collector:get(foo),
+        80,
+        "Incrememnt tracks correctly."
+    ),
+
+    repeat(RemCount, 80),
+    etap:is(
+        couch_stats_collector:get(foo),
+        0,
+        "Decremented to zaro."
+    ),
+    ok.
+
+test_abs_values() ->
+    lists:map(fun(Val) ->
+        couch_stats_collector:record(bar, Val)
+    end, lists:seq(1, 15)),
+    etap:is(
+        couch_stats_collector:get(bar),
+        lists:seq(1, 15),
+        "Absolute values are recorded correctly."
+    ),
+    
+    couch_stats_collector:clear(bar),
+    etap:is(
+        couch_stats_collector:get(bar),
+        nil,
+        "Absolute values are cleared correctly."
+    ),
+    ok.
+
+test_proc_counting() ->
+    Self = self(),
+    OnePid = spawn(fun() ->
+        couch_stats_collector:track_process_count(hoopla),
+        Self ! reporting,
+        receive sepuku -> ok end
+    end),
+    R1 = erlang:monitor(process, OnePid),
+    receive reporting -> ok end,
+    etap:is(
+        couch_stats_collector:get(hoopla),
+        1,
+        "track_process_count incrememnts the counter."
+    ),
+    
+    TwicePid = spawn(fun() ->
+        couch_stats_collector:track_process_count(hoopla),
+        couch_stats_collector:track_process_count(hoopla),
+        Self ! reporting,
+        receive sepuku -> ok end
+    end),
+    R2 = erlang:monitor(process, TwicePid),
+    receive reporting -> ok end,
+    etap:is(
+        couch_stats_collector:get(hoopla),
+        3,
+        "track_process_count allows more than one incrememnt per Pid"
+    ),
+    
+    OnePid ! sepuku,
+    receive {'DOWN', R1, _, _, _} -> ok end,
+    timer:sleep(250),
+    etap:is(
+        couch_stats_collector:get(hoopla),
+        2,
+        "Process count is decremented when process exits."
+    ),
+    
+    TwicePid ! sepuku,
+    receive {'DOWN', R2, _, _, _} -> ok end,
+    timer:sleep(250),
+    etap:is(
+        couch_stats_collector:get(hoopla),
+        0,
+        "Process count is decremented for each call to track_process_count."
+    ),
+    ok.
+
+test_all() ->
+    couch_stats_collector:record(bar, 0.0),
+    couch_stats_collector:record(bar, 1.0),
+    etap:is(
+        couch_stats_collector:all(),
+        [{foo, 0}, {hoopla, 0}, {bar, [1.0, 0.0]}],
+        "all/0 returns all counters and absolute values."
+    ),
+    
+    etap:is(
+        couch_stats_collector:all(incremental),
+        [{foo, 0}, {hoopla, 0}],
+        "all/1 returns only the specified type."
+    ),
+    
+    couch_stats_collector:record(zing, 90),
+    etap:is(
+        couch_stats_collector:all(absolute),
+        [{zing, [90]}, {bar, [1.0, 0.0]}],
+        "all/1 returns only the specified type."
+    ),
+    ok.
+
+repeat(_, 0) ->
+    ok;
+repeat(Fun, Count) ->
+    Fun(),
+    repeat(Fun, Count-1).

Added: couchdb/trunk/test/etap/121-stats-aggregates.ini
URL: http://svn.apache.org/viewvc/couchdb/trunk/test/etap/121-stats-aggregates.ini?rev=816043&view=auto
==============================================================================
--- couchdb/trunk/test/etap/121-stats-aggregates.ini (added)
+++ couchdb/trunk/test/etap/121-stats-aggregates.ini Thu Sep 17 04:04:46 2009
@@ -0,0 +1,8 @@
+[stats]
+rate = 10000000 ; We call collect_sample in testing
+samples = [0, 1]
+
+[stats_descriptions]
+{testing, stuff} = yay description
+{number, '11'} = randomosity
+
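
With rate set this high the timer-driven collection effectively never fires,
so the aggregator test below drives sampling by hand. The pattern it uses
(every call here appears verbatim in 121-stats-aggregates.t) looks like:

    %% Illustrative: manual sampling as done by the test below.
    couch_config:start_link(["test/etap/121-stats-aggregates.ini"]),
    couch_stats_collector:start(),
    couch_stats_aggregator:start(),
    couch_stats_collector:increment({testing, stuff}),
    couch_stats_aggregator:collect_sample(),
    couch_stats_aggregator:get_json({testing, stuff}).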

Added: couchdb/trunk/test/etap/121-stats-aggregates.t
URL: http://svn.apache.org/viewvc/couchdb/trunk/test/etap/121-stats-aggregates.t?rev=816043&view=auto
==============================================================================
--- couchdb/trunk/test/etap/121-stats-aggregates.t (added)
+++ couchdb/trunk/test/etap/121-stats-aggregates.t Thu Sep 17 04:04:46 2009
@@ -0,0 +1,165 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+    code:add_pathz("src/couchdb"),
+    etap:plan(unknown),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail()
+    end,
+    ok.
+
+test() ->
+    couch_config:start_link(["test/etap/121-stats-aggregates.ini"]),
+    couch_stats_collector:start(),
+    couch_stats_aggregator:start(),
+    ok = test_all_empty(),
+    ok = test_get_empty(),
+    ok = test_count_stats(),
+    ok = test_abs_stats(),
+    ok.
+
+test_all_empty() ->
+    {Aggs} = couch_stats_aggregator:all(),
+
+    etap:is(length(Aggs), 2, "There are only two aggregate types in testing."),
+    etap:is(
+        proplists:get_value(testing, Aggs),
+        {[{stuff, make_agg(<<"yay description">>,
+            null, null, null, null, null)}]},
+        "{testing, stuff} is empty at start."
+    ),
+    etap:is(
+        proplists:get_value(number, Aggs),
+        {[{'11', make_agg(<<"randomosity">>,
+            null, null, null, null, null)}]},
+        "{number, '11'} is empty at start."
+    ),
+    ok.
+    
+test_get_empty() ->
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}),
+        make_agg(<<"yay description">>, null, null, null, null, null),
+        "Getting {testing, stuff} returns an empty aggregate."
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({number, '11'}),
+        make_agg(<<"randomosity">>, null, null, null, null, null),
+        "Getting {number, '11'} returns an empty aggregate."
+    ),
+    ok.
+
+test_count_stats() ->
+    lists:foreach(fun(_) ->
+        couch_stats_collector:increment({testing, stuff})
+    end, lists:seq(1, 100)),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}),
+        make_agg(<<"yay description">>, 100, 100, null, 100, 100),
+        "COUNT: Adding values changes the stats."
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}, 1),
+        make_agg(<<"yay description">>, 100, 100, null, 100, 100),
+        "COUNT: Adding values changes stats for all times."
+    ),
+
+    timer:sleep(500),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}),
+        make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
+        "COUNT: Removing values changes stats."
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}, 1),
+        make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
+        "COUNT: Removing values changes stats for all times."
+    ),
+
+    timer:sleep(600),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}),
+        make_agg(<<"yay description">>, 100, 33.333, 57.735, 0, 100),
+        "COUNT: Letting time passes doesn't remove data from time 0 aggregates"
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({testing, stuff}, 1),
+        make_agg(<<"yay description">>, 0, 0, 0, 0, 0),
+        "COUNT: Letting time pass removes data from other time aggregates."
+    ),
+    ok.
+
+test_abs_stats() ->
+    lists:foreach(fun(X) ->
+        couch_stats_collector:record({number, 11}, X)
+    end, lists:seq(0, 10)),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}),
+        make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
+        "ABS: Adding values changes the stats."
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}, 1),
+        make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
+        "ABS: Adding values changes stats for all times."
+    ),
+
+    timer:sleep(500),
+    couch_stats_collector:record({number, 11}, 15),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}),
+        make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+        "ABS: New values changes stats"
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}, 1),
+        make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+        "ABS: Removing values changes stats for all times."
+    ),
+
+    timer:sleep(600),
+    couch_stats_aggregator:collect_sample(),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}),
+        make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+        "ABS: Letting time passes doesn't remove data from time 0 aggregates"
+    ),
+    etap:is(
+        couch_stats_aggregator:get_json({number, 11}, 1),
+        make_agg(<<"randomosity">>, 15, 15, null, 15, 15),
+        "ABS: Letting time pass removes data from other time aggregates."
+    ),
+    ok.
+
+make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
+    {[
+        {description, Desc},
+        {current, Sum},
+        {sum, Sum},
+        {mean, Mean},
+        {stddev, StdDev},
+        {min, Min},
+        {max, Max}
+    ]}.
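
make_agg/6 above just builds the EJSON structure that
couch_stats_aggregator:get_json/1,2 is expected to return, so a single
aggregate, once JSON-encoded, would look roughly like the snippet below. How
the HTTP stats handler serialises it is an assumption here; the field set and
the sample values are taken from the expectations in this test, where the
helper reuses Sum for the current field.

    {
        "description": "yay description",
        "current": 100,
        "sum": 100,
        "mean": 50,
        "stddev": 70.711,
        "min": 0,
        "max": 100
    }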