Posted to commits@couchdb.apache.org by da...@apache.org on 2019/07/31 16:55:50 UTC

[couchdb] branch prototype/fdb-layer updated (264ddae -> 9ff8fa1)

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a change to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


 discard 264ddae  CouchDB map indexes on FDB
 discard e8d22f2  Fix default key ranges for fold_range
 discard 6b90c68  Expose the is_replicator_db and is_user_db logic
 discard 730cf27  Make fabric2.hrl public
    omit 4786766  Fix more elixir tests
    omit f4e75ed  Expose ICU ucol_getSortKey
    omit d99d182  Disable broken couch_att tests
    omit 4317652  Reinitialize chttpd_auth_cache on config change
    omit ca33862  Fix formatting of all_docs_test.exs
    omit c040623  Implement `POST /_dbs_info`
    omit cd24f02  Fix revision tree extensions
    omit bbad907  Fix `COPY` method
    omit 8d167af  Fix bulk docs error reporting
    omit 563dd67  Implement _all_dbs/_all_docs API parameters
    omit cc24a93  Remove tests for deprecated features.
    omit 753f3a7  CouchDB background jobs
    omit 6e27b7d  Fix exception in cache auth doc update
    omit 2cbc7f0  Fix arity in changes timeout callback
    omit cc2d64d  Update get security to use fabric2
    omit 21af513  Implement `_users` db authentication
    omit 7073798  Database config changes should bump the db version
    omit 5331966  Fix validate_doc_update when recreating a document
    omit 4933c08  Allow for previously configured filters
    omit 09f4489  Convert attachment info to disk terms correctly
    omit e3f24aa  Fix revision generation on attachment upload
    omit 4fbabcc  Fix fabric2_txids:terminate/2
    omit 6835e18  Implement attachment compression
    omit e0b2dc1  Remove debug logging
    omit 767c83d  Start switching chttpd HTTP endpoints to fabric2
    omit fc1ffeb  Update ddoc_cache to use fabric2
    omit 1db8a43  Initial test suite for the fabric2 implementation
    omit ee2e4c8  Initial fabric2 implementation on FoundationDB
    omit d7b015c  Disable eunit test suite in fabric
    omit 51a3960  Update build system for FoundationDB
     add e2dd274  First public release of Dreyfus
     add 4b90276  Match new couchdb naming convention
     add c729966  properly rename this time
     add d5c5509  Plug into couchdb 2.0 with epi handlers
     add 9c563b5  fix error handling for _search_analyze
     add 0e1f788  Add ejson_body
     add f69a572  Merge pull request #1 from cloudant-labs/50663-add-ejson-body
     add 1cb551a  remove cloudant_util function call
     add 29ba95c  Allow binaries for _search_analyze
     add cee7626  Handle errors when opening an index
     add dd5f000  Merge pull request #3 from cloudant-labs/2-handle-open-errors
     add 80ba863  Use couch_crypto
     add 008051d  Merge pull request #4 from cloudant-labs/52884-use-couch-crypto
     add 574cb44  Update to new couch_epi API
     add 5f11337  Merge pull request #5 from cloudant-labs/simplify_couch_epi
     add d125b71  Add new metric to track the search request time. This metric is different from what we track in clouseau. This basically will track the overall time it took for the search request, where as the one in Clouseau will only track the search latency at the shard level.
     add 95df984  Merge pull request #6 from cloudant-labs/66031-add-metrics-dreyfus-httpd-search
     add dd49ff9  Remove trailing spaces
     add fb8c85f  Make handle_search_req return proper response
     add 6c6d5ab  Merge pull request #7 from cloudant-labs/67924-fix-handlers-response
     add 944389b  Don't crash on invalid ddoc
     add 2356160  Merge pull request #8 from cloudant-labs/69570-fix-invalid-ddoc-crash
     add 4390bb5  Improve search logging
     add a8ea666  Merge pull request #9 from cloudant-labs/75421-improve-search-logging
     add a018d1f  Tolerate open_int failure
     add 9607250  Merge pull request #10 from cloudant-labs/77650-open-int-crash
     add 3c1001d  Fix shard replacement
     add 5fbbe3e  Merge pull request #15 from cloudant-labs/84146_fix_shard_replacement
     add bc2f94b7 Add new end point to get disk size information for search index
     add d838881  Merge pull request #17 from cloudant-labs/87336-add-disk_size-end-point
     add ad3f55a  Remove couch_crypto
     add fb7b680  Merge pull request #20 from cloudant-labs/remove-couch_crypto
     add e29303c  Remove public db record
     add 3f5ba2b  Merge pull request #23 from cloudant-labs/COUCHDB-3288-remove-public-db-record
     add dde3f04  Rename search index directory in place when database is deleted
     add 30b0556  Merge pull request #22 from cloudant-labs/86318-rename-search-indexes-when-dbdeleted
     add 5eef719  Update to use pluggable storage engine APIs
     add df88b1c  Merge pull request #26 from cloudant-labs/COUCHDB-3287-pluggable-storage-engines
     add b9805be  Add ability to black list indexes (#27)
     add a4f3ca3  fix typo to noreply (#29)
     add 2712350  Improve search blacklist
     add 9ab3010  Merge pull request #31 from cloudant-labs/109229-improve-blacklist
     add bf4b2cd  Update to use new purge API
     add 0f1be2f  Merge pull request #14 from cloudant-labs/COUCHDB-3326-clustered-purge
     add 10c1015  use updated_on instead of timestamp_utc in local purge doc
     add 89368b2  Merge pull request #32 from cloudant-labs/COUCHDB-3326-use-updated_on
     add e1730b8  Avoid calls to `fabric:design_docs/1`
     add b755e72  Merge pull request #37 from cloudant-labs/fix-get-minimum-purge-seq
     add 1c18b79  Fix function_clause caused by malformed accumulator
     add 1bdca62  Merge pull request #39 from cloudant-labs/fix-function-clause-load-docs
     add 96877dc  Remove deprecated clauses
     add 092194d  Support partitioned queries
     add 74e246d  Add partition search tests
     add 3893301  Implement separate limits for partitioned queries
     add 0c59190  Merge pull request #34 from cloudant-labs/feature/database-partitions
     add 5c1bb7e  Support search with limit using POST method
     add 0413baa  Merge pull request #40 from cloudant-labs/search-using-limit-with-post
     add 8c7f5f5  Reject multiple conflicting values of `partition`
     add 5b8ecb3  Merge pull request #41 from cloudant-labs/partition-parameters
     add 57ea522  Fixed typo
     add 228ae8a  fix function_clause (#42)
     add 1ba7e98  Always send a binary when calling clouseau_rpc:delete
     add 03d8331  Merge pull request #43 from cloudant-labs/116712-fix-delete
     add 7df7402  Avoid dreyfus_index_manager for _search_disk_size.
     add 33ee6c2  Merge pull request #44 from cloudant-labs/avoid-dreyfus-index-manager-for-disk-size
     add 383c96a  add stats for search
     add 7083d1e  Merge pull request #45 from cloudant-labs/add-pricing-for-pq
     add 75d86c4  Use dedicated search IOQ channel
     add 48edbf3  Add IOQ2 metric for search traffic
     add 2fcfb6b  Merge pull request #46 from cloudant-labs/use-dedicated-search-ioq-channel
     add 996f1cc  adjust metric for io_queue-search
     add 45469b9  Merge pull request #47 from cloudant-labs/adjust-io_queue-search
     add 80e3cd8  Address EPI's startup use of dreyfus_config
     add 6f1b8d3  Add 'src/dreyfus/' from commit '80e3cd8111bda643686d9165ea6afa99d0d33cd4'
     add 8e6fa8b  Add dreyfus.js from cloudant/couchdb@c323f1943
     add b1e0037  Ensure Dreyfus JS code is included in build
     add 1513d48  Add Dreyfus to Erlang release
     add 64eb390  Make mem3_rep:go work when target shards are not yet present in shard map
     add 2650981  Merge branch 'master' into dreyfus-by-default
     add 93275c3  Improve detection of Search subsystem
     add 6f95fb3  Improve error message on Clouseau connection fail
     add 7dbd4d5  Further improve detection of Search system
     add 0d32708  Document config settings related to search system
     add 6e75355  Improve PR template with @kocolosk feedback
     add c517618  Merge branch 'master' into dreyfus-by-default
     add cbf8804  Merge pull request #2037 from kocolosk/dreyfus-by-default
     add 915a6e4  Update ioq to 2.1.2
     add e923840  Merge pull request #2062 from cloudant/update-ioq-2.1.2
     add 9d143ba  Add erlang 22 support
     add 85dc624  Fix max_document_id_length value in default.ini
     add 9d09878  Add missing purge settings to default.ini
     add 3505281  Make sure that fsync errors are raised
     add a6c0da1  Increase timeouts on two slow btree tests
     add ec2a963  Fix flaky mem3_sync_event_listener EUnit test
     add 29d484e  Fix EUnit timeouts (#2087)
     add f33378b  Fix credo complains for dreyfus
     add d584962  Minimal ExUnit setup
     add cf60cff  Move eunit tests into test/eunit directory
     add d0ccfa2  Add chained setups
     add d427f35  Unify runners for unit and integration tests
     add 25ad74a  Update .travis.yml
     add f37e1e7  Merge pull request #2039 from cloudant/exunit-simplified
     add 220462a  Retry EUnit tests on failure
     add e67903b  Fix mem3_sync_event_listener EUnit test
     add 0a5b11f  Remove local replication endpoints in CouchDB 3.x
     new 609a45d  Update build system for FoundationDB
     new 9178462  Disable eunit test suite in fabric
     new 373b42e  Initial fabric2 implementation on FoundationDB
     new 29df909  Initial test suite for the fabric2 implementation
     new 9083da6  Update ddoc_cache to use fabric2
     new 0cf5f46  Start switching chttpd HTTP endpoints to fabric2
     new 716d5b3  Remove debug logging
     new c4f1182  Implement attachment compression
     new ad31f51  Fix fabric2_txids:terminate/2
     new 1876962  Fix revision generation on attachment upload
     new bc8007b  Convert attachment info to disk terms correctly
     new f7a790e  Allow for previously configured filters
     new da85a5c  Fix validate_doc_update when recreating a document
     new 5e12e06  Database config changes should bump the db version
     new 3931685  Implement `_users` db authentication
     new d16cb14  Update get security to use fabric2
     new 920e1ff  Fix arity in changes timeout callback
     new b9ee168  Fix exception in cache auth doc update
     new 0c2d674  CouchDB background jobs
     new 40561bc  Remove tests for deprecated features.
     new a8e306d  Implement _all_dbs/_all_docs API parameters
     new 633d894  Fix bulk docs error reporting
     new bf9fa0a  Fix `COPY` method
     new e5fefbe  Fix revision tree extensions
     new 7696999  Implement `POST /_dbs_info`
     new 79ea59e  Fix formatting of all_docs_test.exs
     new 858c947  Reinitialize chttpd_auth_cache on config change
     new 8e574e9  Disable broken couch_att tests
     new d42d9b7  Expose ICU ucol_getSortKey
     new 7a3bfe6  Fix more elixir tests
     new 24c864d  Make fabric2.hrl public
     new d5a5426  Expose the is_replicator_db and is_user_db logic
     new a545b49  Fix default key ranges for fold_range
     new 9ff8fa1  CouchDB map indexes on FDB

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (264ddae)
            \
             N -- N -- N   refs/heads/prototype/fdb-layer (9ff8fa1)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 34 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
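
For example, assuming the pre-rewrite objects are still fetchable from the
mirror, the taxonomy above can be checked from a clone (a sketch using the
tips named in this email):

    # commits reachable from the old tip but not the new one (the O revisions)
    git log --oneline 9ff8fa1..264ddae
    # list refs that still retain an "omit" commit, e.g. 4786766
    git branch -a --contains 4786766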


Summary of changes:
 test/elixir/.credo.exs => .credo.exs               |  11 +-
 .formatter.exs                                     |   9 +
 .github/PULL_REQUEST_TEMPLATE.md                   |   7 +-
 .gitignore                                         |   7 +
 .travis.yml                                        |   8 +-
 Makefile                                           |  49 +-
 Makefile.win                                       |  22 +-
 {test/elixir/config => config}/config.exs          |   6 +-
 config/dev.exs                                     |   1 +
 config/integration.exs                             |  12 +
 config/prod.exs                                    |   1 +
 config/test.exs                                    |  12 +
 mix.exs                                            |  60 ++
 test/elixir/mix.lock => mix.lock                   |   6 +-
 rebar.config.script                                |   9 +-
 rel/apps/couch_epi.config                          |   1 +
 rel/overlay/etc/default.ini                        |  44 +-
 rel/reltool.config                                 |   2 +
 share/server/dreyfus.js                            |  62 ++
 share/server/loop.js                               |   2 +
 src/chttpd/test/{ => eunit}/chttpd_cors_test.erl   |   0
 src/chttpd/test/{ => eunit}/chttpd_csp_tests.erl   |   0
 .../chttpd_db_attachment_size_tests.erl            |   0
 .../chttpd_db_bulk_get_multipart_test.erl          |   0
 .../test/{ => eunit}/chttpd_db_bulk_get_test.erl   |   0
 .../test/{ => eunit}/chttpd_db_doc_size_tests.erl  |   0
 src/chttpd/test/{ => eunit}/chttpd_db_test.erl     |   0
 .../test/{ => eunit}/chttpd_dbs_info_test.erl      |   0
 .../test/{ => eunit}/chttpd_error_info_tests.erl   |   0
 .../test/{ => eunit}/chttpd_handlers_tests.erl     |   0
 .../{ => eunit}/chttpd_open_revs_error_test.erl    |   0
 .../test/{ => eunit}/chttpd_plugin_tests.erl       |   0
 .../test/{ => eunit}/chttpd_prefer_header_test.erl |   0
 src/chttpd/test/{ => eunit}/chttpd_purge_tests.erl |   0
 .../test/{ => eunit}/chttpd_security_tests.erl     |   0
 .../{ => eunit}/chttpd_socket_buffer_size_test.erl |   0
 src/chttpd/test/{ => eunit}/chttpd_view_test.erl   |   0
 .../test/{ => eunit}/chttpd_welcome_test.erl       |   0
 src/chttpd/test/{ => eunit}/chttpd_xframe_test.erl |   0
 src/couch/include/couch_eunit.hrl                  |   2 +-
 src/couch/src/couch_file.erl                       |  29 +-
 .../test/{ => eunit}/chttpd_endpoints_tests.erl    |   0
 .../test/{ => eunit}/couch_auth_cache_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_base32_tests.erl  |   0
 .../couch_bt_engine_compactor_tests.erl            |   0
 .../test/{ => eunit}/couch_bt_engine_tests.erl     |   0
 .../{ => eunit}/couch_bt_engine_upgrade_tests.erl  |   0
 src/couch/test/{ => eunit}/couch_btree_tests.erl   |   9 +-
 src/couch/test/{ => eunit}/couch_changes_tests.erl |   0
 .../test/{ => eunit}/couch_compress_tests.erl      |   0
 src/couch/test/{ => eunit}/couch_db_doc_tests.erl  |   0
 src/couch/test/{ => eunit}/couch_db_mpr_tests.erl  |   0
 .../test/{ => eunit}/couch_db_plugin_tests.erl     |   0
 .../{ => eunit}/couch_db_props_upgrade_tests.erl   |   0
 .../test/{ => eunit}/couch_db_split_tests.erl      |   0
 src/couch/test/{ => eunit}/couch_db_tests.erl      |   0
 .../test/{ => eunit}/couch_doc_json_tests.erl      |   0
 src/couch/test/{ => eunit}/couch_doc_tests.erl     |   0
 .../test/{ => eunit}/couch_ejson_size_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_etag_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_file_tests.erl    |  33 +
 .../test/{ => eunit}/couch_flags_config_tests.erl  |   0
 src/couch/test/{ => eunit}/couch_flags_tests.erl   |   0
 src/couch/test/{ => eunit}/couch_hotp_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_index_tests.erl   |   0
 .../test/{ => eunit}/couch_key_tree_prop_tests.erl |   0
 .../test/{ => eunit}/couch_key_tree_tests.erl      |   0
 .../test/{ => eunit}/couch_passwords_tests.erl     |   2 +-
 .../test/{ => eunit}/couch_query_servers_tests.erl |   0
 src/couch/test/{ => eunit}/couch_server_tests.erl  |   0
 src/couch/test/{ => eunit}/couch_stream_tests.erl  |   0
 .../test/{ => eunit}/couch_task_status_tests.erl   |   0
 src/couch/test/{ => eunit}/couch_totp_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_util_tests.erl    |   0
 src/couch/test/{ => eunit}/couch_uuids_tests.erl   |   0
 .../test/{ => eunit}/couch_work_queue_tests.erl    |   0
 .../test/{ => eunit}/couchdb_attachments_tests.erl |   0
 src/couch/test/{ => eunit}/couchdb_auth_tests.erl  |   0
 .../{ => eunit}/couchdb_cookie_domain_tests.erl    |   0
 src/couch/test/{ => eunit}/couchdb_cors_tests.erl  |   0
 src/couch/test/{ => eunit}/couchdb_db_tests.erl    |   0
 .../test/{ => eunit}/couchdb_design_doc_tests.erl  |   0
 .../{ => eunit}/couchdb_file_compression_tests.erl |   0
 .../{ => eunit}/couchdb_location_header_tests.erl  |   0
 .../test/{ => eunit}/couchdb_mrview_cors_tests.erl |   0
 .../test/{ => eunit}/couchdb_mrview_tests.erl      |   0
 .../test/{ => eunit}/couchdb_os_proc_pool.erl      |   0
 .../{ => eunit}/couchdb_update_conflicts_tests.erl |   0
 .../test/{ => eunit}/couchdb_vhosts_tests.erl      |   0
 src/couch/test/{ => eunit}/couchdb_views_tests.erl |   0
 .../fixtures/3b835456c235b1827e012e25666152f3.view | Bin
 .../fixtures/couch_stats_aggregates.cfg            |   0
 .../fixtures/couch_stats_aggregates.ini            |   0
 .../{ => eunit}/fixtures/db_non_partitioned.couch  | Bin
 .../fixtures/db_v6_with_1_purge_req.couch          | Bin
 .../db_v6_with_1_purge_req_for_2_docs.couch        | Bin
 .../fixtures/db_v6_with_2_purge_req.couch          | Bin
 .../fixtures/db_v6_without_purge_req.couch         | Bin
 .../fixtures/db_v7_with_1_purge_req.couch          | Bin
 .../db_v7_with_1_purge_req_for_2_docs.couch        | Bin
 .../fixtures/db_v7_with_2_purge_req.couch          | Bin
 .../fixtures/db_v7_without_purge_req.couch         | Bin
 src/couch/test/{ => eunit}/fixtures/logo.png       | Bin
 src/couch/test/{ => eunit}/fixtures/multipart.http |   0
 .../{ => eunit}/fixtures/os_daemon_bad_perm.sh     |   0
 .../{ => eunit}/fixtures/os_daemon_can_reboot.sh   |   0
 .../fixtures/os_daemon_configer.escript            |   0
 .../{ => eunit}/fixtures/os_daemon_die_on_boot.sh  |   0
 .../{ => eunit}/fixtures/os_daemon_die_quickly.sh  |   0
 .../{ => eunit}/fixtures/os_daemon_looper.escript  |   0
 src/couch/test/{ => eunit}/fixtures/test.couch     | Bin
 .../test/{ => eunit}/global_changes_tests.erl      |   0
 .../test/{ => eunit}/json_stream_parse_tests.erl   |   0
 src/couch/test/{ => eunit}/test_web.erl            |   0
 src/couch/test/exunit/test_helper.exs              |   2 +
 .../test/{ => eunit}/couch_epi_basic_test.erl      |   0
 src/couch_epi/test/{ => eunit}/couch_epi_tests.erl |   4 +-
 .../test/{ => eunit}/fixtures/app_data1.cfg        |   0
 .../test/{ => eunit}/fixtures/app_data2.cfg        |   0
 .../{ => eunit}/couch_index_compaction_tests.erl   |   0
 .../{ => eunit}/couch_index_ddoc_updated_tests.erl |   0
 .../{ => eunit}/couch_log_config_listener_test.erl |   0
 .../test/{ => eunit}/couch_log_config_test.erl     |   0
 .../{ => eunit}/couch_log_error_logger_h_test.erl  |   0
 .../test/{ => eunit}/couch_log_formatter_test.erl  |   0
 .../test/{ => eunit}/couch_log_monitor_test.erl    |   0
 .../test/{ => eunit}/couch_log_server_test.erl     |   0
 src/couch_log/test/{ => eunit}/couch_log_test.erl  |   0
 .../test/{ => eunit}/couch_log_test_util.erl       |   0
 .../{ => eunit}/couch_log_trunc_io_fmt_test.erl    |   0
 .../test/{ => eunit}/couch_log_util_test.erl       |   0
 .../test/{ => eunit}/couch_log_writer_ets.erl      |   0
 .../{ => eunit}/couch_log_writer_file_test.erl     |   0
 .../{ => eunit}/couch_log_writer_stderr_test.erl   |   0
 .../{ => eunit}/couch_log_writer_syslog_test.erl   |   0
 .../test/{ => eunit}/couch_log_writer_test.erl     |   0
 .../{ => eunit}/couch_mrview_all_docs_tests.erl    |   0
 .../couch_mrview_changes_since_tests.erl           |   0
 .../{ => eunit}/couch_mrview_collation_tests.erl   |   0
 .../{ => eunit}/couch_mrview_compact_tests.erl     |   0
 .../couch_mrview_ddoc_updated_tests.erl            |   0
 .../couch_mrview_ddoc_validation_tests.erl         |   0
 .../{ => eunit}/couch_mrview_design_docs_tests.erl |   0
 .../test/{ => eunit}/couch_mrview_http_tests.erl   |   0
 .../couch_mrview_index_changes_tests.erl           |   0
 .../{ => eunit}/couch_mrview_index_info_tests.erl  |   0
 .../{ => eunit}/couch_mrview_local_docs_tests.erl  |   0
 .../{ => eunit}/couch_mrview_map_views_tests.erl   |   0
 .../couch_mrview_purge_docs_fabric_tests.erl       |   0
 .../{ => eunit}/couch_mrview_purge_docs_tests.erl  |   0
 .../{ => eunit}/couch_mrview_red_views_tests.erl   |   0
 .../test/{ => eunit}/couch_mrview_util_tests.erl   |   0
 .../test/{ => eunit}/couch_peruser_test.erl        |   0
 .../src/cpse_test_purge_replication.erl            |  16 +-
 src/couch_replicator/src/couch_replicator.erl      |   1 -
 .../src/couch_replicator_api_wrap.erl              | 150 +---
 .../src/couch_replicator_doc_processor.erl         |  12 +-
 .../src/couch_replicator_doc_processor_worker.erl  |   6 +-
 src/couch_replicator/src/couch_replicator_docs.erl |  29 +-
 .../src/couch_replicator_filters.erl               |  13 +-
 src/couch_replicator/src/couch_replicator_ids.erl  |  44 +-
 .../src/couch_replicator_scheduler_job.erl         |  31 +-
 .../src/couch_replicator_utils.erl                 |  56 +-
 .../src/couch_replicator_worker.erl                | 143 +---
 .../couch_replicator_attachments_too_large.erl     |   4 +-
 .../{ => eunit}/couch_replicator_compact_tests.erl |   5 +-
 .../couch_replicator_connection_tests.erl          |   0
 ...replicator_create_target_with_options_tests.erl |   0
 .../couch_replicator_filtered_tests.erl            |  10 +-
 .../couch_replicator_httpc_pool_tests.erl          |   0
 .../couch_replicator_id_too_long_tests.erl         |   5 +-
 .../couch_replicator_large_atts_tests.erl          |   5 +-
 .../couch_replicator_many_leaves_tests.erl         |   6 +-
 .../couch_replicator_missing_stubs_tests.erl       |   5 +-
 .../{ => eunit}/couch_replicator_proxy_tests.erl   |   0
 .../couch_replicator_rate_limiter_tests.erl        |   0
 ...ch_replicator_retain_stats_between_job_runs.erl |   0
 .../couch_replicator_selector_tests.erl            |   5 +-
 ...ch_replicator_small_max_request_size_target.erl |   5 +-
 .../{ => eunit}/couch_replicator_test_helper.erl   |   0
 .../couch_replicator_use_checkpoints_tests.erl     |   5 +-
 .../test/{ => eunit}/ddoc_cache_basic_test.erl     |   0
 .../test/{ => eunit}/ddoc_cache_coverage_test.erl  |   0
 .../test/{ => eunit}/ddoc_cache_disabled_test.erl  |   0
 .../test/{ => eunit}/ddoc_cache_entry_test.erl     |   0
 src/ddoc_cache/test/{ => eunit}/ddoc_cache_ev.erl  |   0
 .../test/{ => eunit}/ddoc_cache_eviction_test.erl  |   0
 .../test/{ => eunit}/ddoc_cache_lru_test.erl       |   0
 .../test/{ => eunit}/ddoc_cache_no_cache_test.erl  |   0
 .../{ => eunit}/ddoc_cache_open_error_test.erl     |   0
 .../test/{ => eunit}/ddoc_cache_open_test.erl      |   0
 .../test/{ => eunit}/ddoc_cache_opener_test.erl    |   0
 .../test/{ => eunit}/ddoc_cache_refresh_test.erl   |   0
 .../test/{ => eunit}/ddoc_cache_remove_test.erl    |   0
 .../test/{ => eunit}/ddoc_cache_test.hrl           |   0
 .../test/{ => eunit}/ddoc_cache_tutil.erl          |   0
 src/dreyfus/.gitignore                             |   4 +
 src/{mango => dreyfus}/LICENSE.txt                 |   2 +-
 src/dreyfus/README.md                              |  78 ++
 src/dreyfus/include/dreyfus.hrl                    |  74 ++
 src/dreyfus/priv/stats_descriptions.cfg            |  65 ++
 src/dreyfus/src/clouseau_rpc.erl                   | 114 +++
 .../mango.app.src => dreyfus/src/dreyfus.app.src}  |  20 +-
 .../src/dreyfus_app.erl}                           |  11 +-
 src/dreyfus/src/dreyfus_bookmark.erl               |  90 +++
 src/dreyfus/src/dreyfus_config.erl                 |  15 +
 src/dreyfus/src/dreyfus_epi.erl                    |  46 ++
 src/dreyfus/src/dreyfus_fabric.erl                 | 108 +++
 src/dreyfus/src/dreyfus_fabric_cleanup.erl         |  74 ++
 src/dreyfus/src/dreyfus_fabric_group1.erl          | 126 +++
 src/dreyfus/src/dreyfus_fabric_group2.erl          | 155 ++++
 src/dreyfus/src/dreyfus_fabric_info.erl            | 108 +++
 src/dreyfus/src/dreyfus_fabric_search.erl          | 265 +++++++
 src/dreyfus/src/dreyfus_httpd.erl                  | 600 ++++++++++++++
 src/dreyfus/src/dreyfus_httpd_handlers.erl         |  29 +
 src/dreyfus/src/dreyfus_index.erl                  | 367 +++++++++
 src/dreyfus/src/dreyfus_index_manager.erl          | 153 ++++
 src/dreyfus/src/dreyfus_index_updater.erl          | 181 +++++
 .../src/dreyfus_plugin_couch_db.erl}               |   6 +-
 src/dreyfus/src/dreyfus_rpc.erl                    | 130 +++
 .../src/dreyfus_sup.erl}                           |  21 +-
 src/dreyfus/src/dreyfus_util.erl                   | 418 ++++++++++
 src/dreyfus/test/dreyfus_blacklist_await_test.erl  |  76 ++
 .../test/dreyfus_blacklist_request_test.erl        |  96 +++
 src/dreyfus/test/dreyfus_config_test.erl           |  71 ++
 src/dreyfus/test/dreyfus_purge_test.erl            | 867 +++++++++++++++++++++
 src/dreyfus/test/dreyfus_test_util.erl             |  13 +
 {test => src/dreyfus/test}/elixir/mix.exs          |  13 +-
 src/dreyfus/test/elixir/mix.lock                   |   5 +
 .../elixir/run-only => src/dreyfus/test/elixir/run |   3 +-
 .../test/elixir/test/partition_search_test.exs     | 219 ++++++
 src/dreyfus/test/elixir/test/test_helper.exs       |   4 +
 .../{ => eunit}/global_changes_hooks_tests.erl     |   0
 src/mango/src/mango_idx.erl                        |   8 +-
 src/mango/src/mango_native_proc.erl                |   2 +-
 src/mem3/src/mem3_rep.erl                          |  34 +-
 src/mem3/src/mem3_sync_event_listener.erl          |  29 +-
 src/mem3/test/{ => eunit}/mem3_cluster_test.erl    |   0
 src/mem3/test/{ => eunit}/mem3_hash_test.erl       |   0
 src/mem3/test/{ => eunit}/mem3_rep_test.erl        |  17 +-
 .../test/{ => eunit}/mem3_reshard_api_test.erl     | 121 +--
 .../{ => eunit}/mem3_reshard_changes_feed_test.erl |   9 +-
 src/mem3/test/{ => eunit}/mem3_reshard_test.erl    |  37 +-
 src/mem3/test/{ => eunit}/mem3_ring_prop_tests.erl |   0
 src/mem3/test/{ => eunit}/mem3_seeds_test.erl      |   0
 .../test/eunit/mem3_sync_security_test.erl}        |  49 +-
 src/mem3/test/{ => eunit}/mem3_util_test.erl       |   0
 src/mem3/test/mem3_sync_security_test.erl          |  32 -
 support/build_js.escript                           |   2 +
 test/elixir/Makefile                               |   4 +-
 test/elixir/README.md                              | 143 ++++
 test/elixir/lib/ex_unit.ex                         |  44 ++
 test/elixir/lib/setup.ex                           |  97 +++
 test/elixir/lib/setup/common.ex                    |  22 +
 test/elixir/lib/step.ex                            |  44 ++
 test/elixir/lib/step/config.ex                     |  33 +
 test/elixir/lib/step/create_db.ex                  |  53 ++
 test/elixir/lib/step/start.ex                      |  85 ++
 test/elixir/lib/step/user.ex                       | 104 +++
 test/elixir/lib/utils.ex                           |  61 ++
 test/elixir/run                                    |   6 -
 test/elixir/test/replication_test.exs              |  40 +-
 test/elixir/test/test_helper.exs                   |   8 +-
 263 files changed, 5960 insertions(+), 719 deletions(-)
 rename test/elixir/.credo.exs => .credo.exs (95%)
 create mode 100644 .formatter.exs
 copy {test/elixir/config => config}/config.exs (89%)
 create mode 100644 config/dev.exs
 create mode 100644 config/integration.exs
 create mode 100644 config/prod.exs
 create mode 100644 config/test.exs
 create mode 100644 mix.exs
 rename test/elixir/mix.lock => mix.lock (61%)
 create mode 100644 share/server/dreyfus.js
 rename src/chttpd/test/{ => eunit}/chttpd_cors_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_csp_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_db_attachment_size_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_db_bulk_get_multipart_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_db_bulk_get_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_db_doc_size_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_db_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_dbs_info_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_error_info_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_handlers_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_open_revs_error_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_plugin_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_prefer_header_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_purge_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_security_tests.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_socket_buffer_size_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_view_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_welcome_test.erl (100%)
 rename src/chttpd/test/{ => eunit}/chttpd_xframe_test.erl (100%)
 rename src/couch/test/{ => eunit}/chttpd_endpoints_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_auth_cache_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_base32_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_bt_engine_compactor_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_bt_engine_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_bt_engine_upgrade_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_btree_tests.erl (98%)
 rename src/couch/test/{ => eunit}/couch_changes_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_compress_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_doc_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_mpr_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_plugin_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_props_upgrade_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_split_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_db_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_doc_json_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_doc_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_ejson_size_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_etag_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_file_tests.erl (96%)
 rename src/couch/test/{ => eunit}/couch_flags_config_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_flags_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_hotp_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_index_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_key_tree_prop_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_key_tree_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_passwords_tests.erl (97%)
 rename src/couch/test/{ => eunit}/couch_query_servers_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_server_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_stream_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_task_status_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_totp_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_util_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_uuids_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couch_work_queue_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_attachments_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_auth_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_cookie_domain_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_cors_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_db_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_design_doc_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_file_compression_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_location_header_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_mrview_cors_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_mrview_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_os_proc_pool.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_update_conflicts_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_vhosts_tests.erl (100%)
 rename src/couch/test/{ => eunit}/couchdb_views_tests.erl (100%)
 rename src/couch/test/{ => eunit}/fixtures/3b835456c235b1827e012e25666152f3.view (100%)
 rename src/couch/test/{ => eunit}/fixtures/couch_stats_aggregates.cfg (100%)
 rename src/couch/test/{ => eunit}/fixtures/couch_stats_aggregates.ini (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_non_partitioned.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v6_with_1_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v6_with_1_purge_req_for_2_docs.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v6_with_2_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v6_without_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v7_with_1_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v7_with_1_purge_req_for_2_docs.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v7_with_2_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/db_v7_without_purge_req.couch (100%)
 rename src/couch/test/{ => eunit}/fixtures/logo.png (100%)
 rename src/couch/test/{ => eunit}/fixtures/multipart.http (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_bad_perm.sh (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_can_reboot.sh (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_configer.escript (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_die_on_boot.sh (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_die_quickly.sh (100%)
 rename src/couch/test/{ => eunit}/fixtures/os_daemon_looper.escript (100%)
 rename src/couch/test/{ => eunit}/fixtures/test.couch (100%)
 rename src/couch/test/{ => eunit}/global_changes_tests.erl (100%)
 rename src/couch/test/{ => eunit}/json_stream_parse_tests.erl (100%)
 rename src/couch/test/{ => eunit}/test_web.erl (100%)
 create mode 100644 src/couch/test/exunit/test_helper.exs
 rename src/couch_epi/test/{ => eunit}/couch_epi_basic_test.erl (100%)
 rename src/couch_epi/test/{ => eunit}/couch_epi_tests.erl (99%)
 rename src/couch_epi/test/{ => eunit}/fixtures/app_data1.cfg (100%)
 rename src/couch_epi/test/{ => eunit}/fixtures/app_data2.cfg (100%)
 rename src/couch_index/test/{ => eunit}/couch_index_compaction_tests.erl (100%)
 rename src/couch_index/test/{ => eunit}/couch_index_ddoc_updated_tests.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_config_listener_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_config_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_error_logger_h_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_formatter_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_monitor_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_server_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_test_util.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_trunc_io_fmt_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_util_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_writer_ets.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_writer_file_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_writer_stderr_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_writer_syslog_test.erl (100%)
 rename src/couch_log/test/{ => eunit}/couch_log_writer_test.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_all_docs_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_changes_since_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_collation_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_compact_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_ddoc_updated_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_ddoc_validation_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_design_docs_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_http_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_index_changes_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_index_info_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_local_docs_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_map_views_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_purge_docs_fabric_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_purge_docs_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_red_views_tests.erl (100%)
 rename src/couch_mrview/test/{ => eunit}/couch_mrview_util_tests.erl (100%)
 rename src/couch_peruser/test/{ => eunit}/couch_peruser_test.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_attachments_too_large.erl (96%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_compact_tests.erl (99%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_connection_tests.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_create_target_with_options_tests.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_filtered_tests.erl (96%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_httpc_pool_tests.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_id_too_long_tests.erl (95%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_large_atts_tests.erl (96%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_many_leaves_tests.erl (98%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_missing_stubs_tests.erl (97%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_proxy_tests.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_rate_limiter_tests.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_retain_stats_between_job_runs.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_selector_tests.erl (96%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_small_max_request_size_target.erl (98%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_test_helper.erl (100%)
 rename src/couch_replicator/test/{ => eunit}/couch_replicator_use_checkpoints_tests.erl (97%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_basic_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_coverage_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_disabled_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_entry_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_ev.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_eviction_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_lru_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_no_cache_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_open_error_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_open_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_opener_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_refresh_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_remove_test.erl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_test.hrl (100%)
 rename src/ddoc_cache/test/{ => eunit}/ddoc_cache_tutil.erl (100%)
 create mode 100644 src/dreyfus/.gitignore
 copy src/{mango => dreyfus}/LICENSE.txt (99%)
 create mode 100644 src/dreyfus/README.md
 create mode 100644 src/dreyfus/include/dreyfus.hrl
 create mode 100644 src/dreyfus/priv/stats_descriptions.cfg
 create mode 100644 src/dreyfus/src/clouseau_rpc.erl
 copy src/{mango/src/mango.app.src => dreyfus/src/dreyfus.app.src} (64%)
 copy src/{couch_epi/src/couch_epi_app.erl => dreyfus/src/dreyfus_app.erl} (81%)
 create mode 100644 src/dreyfus/src/dreyfus_bookmark.erl
 create mode 100644 src/dreyfus/src/dreyfus_config.erl
 create mode 100644 src/dreyfus/src/dreyfus_epi.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric_cleanup.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric_group1.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric_group2.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric_info.erl
 create mode 100644 src/dreyfus/src/dreyfus_fabric_search.erl
 create mode 100644 src/dreyfus/src/dreyfus_httpd.erl
 create mode 100644 src/dreyfus/src/dreyfus_httpd_handlers.erl
 create mode 100644 src/dreyfus/src/dreyfus_index.erl
 create mode 100644 src/dreyfus/src/dreyfus_index_manager.erl
 create mode 100644 src/dreyfus/src/dreyfus_index_updater.erl
 copy src/{couch_index/src/couch_index_plugin_couch_db.erl => dreyfus/src/dreyfus_plugin_couch_db.erl} (80%)
 create mode 100644 src/dreyfus/src/dreyfus_rpc.erl
 copy src/{couch_peruser/src/couch_peruser_sup.erl => dreyfus/src/dreyfus_sup.erl} (62%)
 create mode 100644 src/dreyfus/src/dreyfus_util.erl
 create mode 100644 src/dreyfus/test/dreyfus_blacklist_await_test.erl
 create mode 100644 src/dreyfus/test/dreyfus_blacklist_request_test.erl
 create mode 100644 src/dreyfus/test/dreyfus_config_test.erl
 create mode 100644 src/dreyfus/test/dreyfus_purge_test.erl
 create mode 100644 src/dreyfus/test/dreyfus_test_util.erl
 rename {test => src/dreyfus/test}/elixir/mix.exs (56%)
 create mode 100644 src/dreyfus/test/elixir/mix.lock
 copy test/elixir/run-only => src/dreyfus/test/elixir/run (54%)
 create mode 100644 src/dreyfus/test/elixir/test/partition_search_test.exs
 create mode 100644 src/dreyfus/test/elixir/test/test_helper.exs
 rename src/global_changes/test/{ => eunit}/global_changes_hooks_tests.erl (100%)
 rename src/mem3/test/{ => eunit}/mem3_cluster_test.erl (100%)
 rename src/mem3/test/{ => eunit}/mem3_hash_test.erl (100%)
 rename src/mem3/test/{ => eunit}/mem3_rep_test.erl (97%)
 rename src/mem3/test/{ => eunit}/mem3_reshard_api_test.erl (95%)
 rename src/mem3/test/{ => eunit}/mem3_reshard_changes_feed_test.erl (98%)
 rename src/mem3/test/{ => eunit}/mem3_reshard_test.erl (98%)
 rename src/mem3/test/{ => eunit}/mem3_ring_prop_tests.erl (100%)
 rename src/mem3/test/{ => eunit}/mem3_seeds_test.erl (100%)
 copy src/{fabric/test/fabric2_trace_db_delete_tests.erl => mem3/test/eunit/mem3_sync_security_test.erl} (52%)
 rename src/mem3/test/{ => eunit}/mem3_util_test.erl (100%)
 delete mode 100644 src/mem3/test/mem3_sync_security_test.erl
 create mode 100644 test/elixir/lib/ex_unit.ex
 create mode 100644 test/elixir/lib/setup.ex
 create mode 100644 test/elixir/lib/setup/common.ex
 create mode 100644 test/elixir/lib/step.ex
 create mode 100644 test/elixir/lib/step/config.ex
 create mode 100644 test/elixir/lib/step/create_db.ex
 create mode 100644 test/elixir/lib/step/start.ex
 create mode 100644 test/elixir/lib/step/user.ex
 create mode 100644 test/elixir/lib/utils.ex
 delete mode 100755 test/elixir/run


[couchdb] 07/34: Remove debug logging

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 716d5b31cc14f92d404feb612059f140aa4e8f9d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jun 6 11:56:58 2019 -0500

    Remove debug logging
---
 src/chttpd/src/chttpd_changes.erl | 8 --------
 src/fabric/src/fabric2_events.erl | 6 +-----
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 30caab2..620f68d 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -61,7 +61,6 @@ handle_db_changes(Args, Req, Db) ->
     handle_changes(Args, Req, Db, db).
 
 handle_changes(Args1, Req, Db, Type) ->
-    ReqPid = chttpd:header_value(Req, "XKCD", "<unknown>"),
     #changes_args{
         style = Style,
         filter = FilterName,
@@ -69,7 +68,6 @@ handle_changes(Args1, Req, Db, Type) ->
         dir = Dir,
         since = Since
     } = Args1,
-    couch_log:error("XKCD: STARTING CHANGES FEED ~p for ~s : ~p", [self(), ReqPid, Since]),
     Filter = configure_filter(FilterName, Style, Req, Db),
     Args = Args1#changes_args{filter_fun = Filter},
     % The type of changes feed depends on the supplied filter. If the query is
@@ -820,7 +818,6 @@ changes_enumerator(Change0, Acc) ->
             stop -> stop
         end,
         reset_heartbeat(),
-        couch_log:error("XKCD: CHANGE SEQ: ~p", [Seq]),
         {RealGo, Acc#changes_acc{
             seq = Seq,
             user_acc = UserAcc2,
@@ -919,22 +916,17 @@ deleted_item(_) -> [].
 
 % waits for a updated msg, if there are multiple msgs, collects them.
 wait_updated(Timeout, TimeoutFun, UserAcc) ->
-    couch_log:error("XKCD: WAITING FOR UPDATE", []),
     receive
     updated ->
-        couch_log:error("XKCD: GOT UPDATED", []),
         get_rest_updated(UserAcc);
     deleted ->
-        couch_log:error("XKCD: DB DELETED", []),
         {stop, UserAcc}
     after Timeout ->
         {Go, UserAcc2} = TimeoutFun(UserAcc),
         case Go of
         ok ->
-            couch_log:error("XKCD: WAIT UPDATED TIMEOUT, RETRY", []),
             ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
         stop ->
-            couch_log:error("XKCD: WAIT UPDATED TIMEOUT STOP", []),
             {stop, UserAcc2}
         end
     end.
diff --git a/src/fabric/src/fabric2_events.erl b/src/fabric/src/fabric2_events.erl
index a571714..094ca2f 100644
--- a/src/fabric/src/fabric2_events.erl
+++ b/src/fabric/src/fabric2_events.erl
@@ -43,11 +43,9 @@ stop_listener(Pid) ->
 init(Parent, DbName, Mod, Fun, St) ->
     {ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
     Since = fabric2_db:get_update_seq(Db),
-    couch_log:error("XKCD: START LISTENER: ~s : ~p for ~p", [DbName, Since, Parent]),
     erlang:monitor(process, Parent),
     Parent ! {self(), initialized},
-    poll(DbName, Since, Mod, Fun, St),
-    couch_log:error("XKCD: STOP LISTENER for ~p", [Parent]).
+    poll(DbName, Since, Mod, Fun, St).
 
 
 poll(DbName, Since, Mod, Fun, St) ->
@@ -56,10 +54,8 @@ poll(DbName, Since, Mod, Fun, St) ->
             {ok, Db} ->
                 case fabric2_db:get_update_seq(Db) of
                     Since ->
-                        couch_log:error("XKCD: NO UPDATE: ~s :: ~p", [DbName, Since]),
                         {{ok, St}, Since};
                     Other ->
-                        couch_log:error("XKCD: UPDATED: ~s :: ~p -> ~p", [DbName, Since, Other]),
                         {Mod:Fun(DbName, updated, St), Other}
                 end;
             Error ->


[couchdb] 16/34: Update get security to use fabric2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d16cb1490e9f8aaefafebeb282103879a6d0230c
Author: Eric Avdey <ei...@eiri.ca>
AuthorDate: Thu Jun 20 10:41:30 2019 -0300

    Update get security to use fabric2
---
 src/chttpd/src/chttpd_db.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 4337041..c0ac1ca 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -734,7 +734,7 @@ db_req(#httpd{method = 'PUT',path_parts = [_, <<"_security">>]} = Req, Db) ->
     end;
 
 db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
-    send_json(Req, fabric:get_security(Db));
+    send_json(Req, fabric2_db:get_security(Db));
 
 db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
     send_method_not_allowed(Req, "PUT,GET");


[couchdb] 18/34: Fix exception in cache auth doc update

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit b9ee168426308db1f3212795fba48ee66edbec42
Author: Eric Avdey <ei...@eiri.ca>
AuthorDate: Thu Jun 20 10:44:07 2019 -0300

    Fix exception in cache auth doc update
---
 src/chttpd/src/chttpd_auth_cache.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index fc1ee62..e986af6 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -231,7 +231,7 @@ update_doc_ignoring_conflict(DbName, Doc) ->
     try
         fabric2_db:update_doc(DbName, Doc)
     catch
-        throw:conflict ->
+        error:conflict ->
             ok
     end.
 

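The one-character change above is the whole fix: fabric2_db:update_doc/2
evidently signals a write conflict with erlang:error(conflict) rather than
throw(conflict), and a catch clause only matches the class it names. A
minimal sketch of the distinction, illustrative rather than the committed
code:

    classify(F) ->
        try F()
        catch
            throw:conflict -> caught_throw;
            error:conflict -> caught_error
        end.

    %% classify(fun() -> throw(conflict) end)        -> caught_throw
    %% classify(fun() -> erlang:error(conflict) end) -> caught_error
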

[couchdb] 34/34: CouchDB map indexes on FDB

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9ff8fa1fad512b3ecfa867db6a9c82e16446f8b0
Author: Garren Smith <ga...@gmail.com>
AuthorDate: Mon Jun 17 15:45:10 2019 +0200

    CouchDB map indexes on FDB
    
    This adds couch_views which builds map indexes and stores them in FDB.
    
    Co-authored-by: Paul J. Davis <pa...@gmail.com>
---
 rebar.config.script                                |   1 +
 rel/overlay/etc/default.ini                        |   4 +
 rel/reltool.config                                 |   2 +
 src/chttpd/src/chttpd_db.erl                       |   3 +-
 src/chttpd/src/chttpd_view.erl                     |   5 +-
 src/couch_mrview/src/couch_mrview_util.erl         |   2 +-
 src/couch_views/.gitignore                         |  19 +
 src/couch_views/README.md                          |  15 +
 src/couch_views/include/couch_views.hrl            |  26 ++
 src/couch_views/rebar.config                       |  14 +
 src/couch_views/src/couch_views.app.src            |  31 ++
 src/couch_views/src/couch_views.erl                | 140 ++++++
 src/couch_views/src/couch_views_app.erl            |  31 ++
 src/couch_views/src/couch_views_encoding.erl       | 105 +++++
 src/couch_views/src/couch_views_fdb.erl            | 438 +++++++++++++++++
 src/couch_views/src/couch_views_indexer.erl        | 261 +++++++++++
 src/couch_views/src/couch_views_jobs.erl           | 109 +++++
 src/couch_views/src/couch_views_reader.erl         | 208 +++++++++
 src/couch_views/src/couch_views_server.erl         | 103 ++++
 src/couch_views/src/couch_views_sup.erl            |  46 ++
 src/couch_views/src/couch_views_util.erl           |  84 ++++
 src/couch_views/test/couch_views_encoding_test.erl |  94 ++++
 src/couch_views/test/couch_views_indexer_test.erl  | 456 ++++++++++++++++++
 src/couch_views/test/couch_views_map_test.erl      | 517 +++++++++++++++++++++
 src/fabric/include/fabric2.hrl                     |   1 +
 test/elixir/test/basics_test.exs                   |  24 +-
 test/elixir/test/map_test.exs                      | 450 ++++++++++++++++++
 test/elixir/test/view_collation_test.exs           |  28 +-
 28 files changed, 3189 insertions(+), 28 deletions(-)

diff --git a/rebar.config.script b/rebar.config.script
index 14fdf28..c1d519f 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -84,6 +84,7 @@ SubDirs = [
     "src/couch_stats",
     "src/couch_peruser",
     "src/couch_tests",
+    "src/couch_views",
     "src/ddoc_cache",
     "src/dreyfus",
     "src/fabric",
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 69f57ff..59c89b0 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -234,6 +234,10 @@ iterations = 10 ; iterations for password hashing
 ; users_db_public = false
 ; cookie_domain = example.com
 
+; Settings for view indexing
+[couch_views]
+; max_workers = 100
+
 ; CSP (Content Security Policy) Support for _utils
 [csp]
 enable = true
diff --git a/rel/reltool.config b/rel/reltool.config
index 2f03e61..907b241 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -42,6 +42,7 @@
         couch_stats,
         couch_event,
         couch_peruser,
+        couch_views,
         ddoc_cache,
         dreyfus,
         ets_lru,
@@ -101,6 +102,7 @@
     {app, couch_stats, [{incl_cond, include}]},
     {app, couch_event, [{incl_cond, include}]},
     {app, couch_peruser, [{incl_cond, include}]},
+    {app, couch_views, [{incl_cond, include}]},
     {app, ddoc_cache, [{incl_cond, include}]},
     {app, dreyfus, [{incl_cond, include}]},
     {app, ets_lru, [{incl_cond, include}]},
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 0c7e4d5..785ca3f 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -334,7 +334,8 @@ handle_design_req(#httpd{
         path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
     }=Req, Db) ->
     DbName = fabric2_db:name(Db),
-    case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
+%%    case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
+    case fabric2_db:open_doc(Db, <<"_design/", Name/binary>>) of
     {ok, DDoc} ->
         Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
         Handler(Req, Db, DDoc);
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 26107d7..6765cca 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -43,10 +43,9 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
 design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
     Args = couch_mrview_http:parse_params(Req, Keys),
     Max = chttpd:chunked_response_buffer_size(),
+    Fun = fun view_cb/2,
     VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    Options = [{user_ctx, Req#httpd.user_ctx}],
-    {ok, Resp} = fabric:query_view(Db, Options, DDoc, ViewName,
-            fun view_cb/2, VAcc, Args),
+    {ok, Resp} = couch_views:query(Db, DDoc, ViewName, Fun, VAcc, Args),
     {ok, Resp#vacc.resp}.
 
 
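A minimal sketch of driving the couch_views:query/6 call shown above outside
of chttpd; the callback events ({meta, _}, {row, _}, complete) are assumed
from the couch_mrview fold convention rather than confirmed by this patch:

    collect({meta, _Meta}, Acc) -> {ok, Acc};
    collect({row, Row}, Acc) -> {ok, [Row | Acc]};
    collect(complete, Acc) -> {ok, lists:reverse(Acc)}.

    %% {ok, Rows} = couch_views:query(Db, DDoc, ViewName, fun collect/2, [], Args)
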
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index eb68124..18a4be1 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -497,7 +497,7 @@ fold_reduce({NthRed, Lang, View}, Fun,  Acc, Options) ->
 
 
 validate_args(Db, DDoc, Args0) ->
-    {ok, State} = couch_mrview_index:init(Db, DDoc),
+    {ok, State} = couch_mrview_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
     Args1 = apply_limit(State#mrst.partitioned, Args0),
     validate_args(State, Args1).
 
diff --git a/src/couch_views/.gitignore b/src/couch_views/.gitignore
new file mode 100644
index 0000000..f1c4554
--- /dev/null
+++ b/src/couch_views/.gitignore
@@ -0,0 +1,19 @@
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+*.iml
+rebar3.crashdump
+*~
diff --git a/src/couch_views/README.md b/src/couch_views/README.md
new file mode 100644
index 0000000..49cd82b
--- /dev/null
+++ b/src/couch_views/README.md
@@ -0,0 +1,15 @@
+CouchDB Views
+=====
+
+This is the new application that builds and runs Map/reduce views against FoundationDB.
+Currently only map indexes are supported and it will always return the full index.
+
+Code layout:
+
+* `couch_views` - Main entry point to query a view
+* `couch_views_reader` - Reads from the index for queries
+* `couch_views_indexer` - `couch_jobs` worker that builds an index from the changes feed.
+* `couch_vews_jobs` - `couch_views` interactions with `couch_jobs`. It handles adding index jobs and subscribes to jobs.
+* `couch_views_fdb` - Maps view operations to FoundationDB logic.
+* `couch_views_encoding` - Encodes view keys that are byte comparable following CouchDB view sort order.
+* `couch_views_server` - Spawns `couch_views_indexer` workers to handle index update jobs.
diff --git a/src/couch_views/include/couch_views.hrl b/src/couch_views/include/couch_views.hrl
new file mode 100644
index 0000000..2e443eb
--- /dev/null
+++ b/src/couch_views/include/couch_views.hrl
@@ -0,0 +1,26 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% indexing
+-define(VIEW_UPDATE_SEQ, 0).
+-define(VIEW_ID_INFO, 1).
+-define(VIEW_ID_RANGE, 2).
+-define(VIEW_MAP_RANGE, 3).
+
+-define(VIEW_ROW_COUNT, 0).
+-define(VIEW_KV_SIZE, 1).
+
+-define(VIEW_ROW_KEY, 0).
+-define(VIEW_ROW_VALUE, 1).
+
+% jobs api
+-define(INDEX_JOB_TYPE, <<"views">>).
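+
+% Illustrative FDB key layout built from the macros above (see
+% couch_views_fdb.erl for the actual construction; DbPrefix and Sig
+% come from fabric2 and the view signature):
+%
+%   (DbPrefix, ?DB_VIEWS, Sig, ?VIEW_UPDATE_SEQ) = Seq
+%   (DbPrefix, ?DB_VIEWS, Sig, ?VIEW_ID_INFO, ViewId, ?VIEW_ROW_COUNT | ?VIEW_KV_SIZE) = Counter
+%   (DbPrefix, ?DB_VIEWS, Sig, ?VIEW_ID_RANGE, DocId, ViewId) = KeyData
+%   (DbPrefix, ?DB_VIEWS, Sig, ?VIEW_MAP_RANGE, ViewId, ...) = MapRows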
diff --git a/src/couch_views/rebar.config b/src/couch_views/rebar.config
new file mode 100644
index 0000000..362c878
--- /dev/null
+++ b/src/couch_views/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_views/src/couch_views.app.src b/src/couch_views/src/couch_views.app.src
new file mode 100644
index 0000000..c80c30b
--- /dev/null
+++ b/src/couch_views/src/couch_views.app.src
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_views, [
+    {description, "CouchDB Views on FDB"},
+    {vsn, git},
+    {mod, {couch_views_app, []}},
+    {registered, [
+        couch_views_sup,
+        couch_views_server
+    ]},
+    {applications, [
+        kernel,
+        stdlib,
+        erlfdb,
+        couch_log,
+        config,
+        couch_stats,
+        fabric,
+        couch_jobs
+    ]}
+]}.
diff --git a/src/couch_views/src/couch_views.erl b/src/couch_views/src/couch_views.erl
new file mode 100644
index 0000000..7c7588c
--- /dev/null
+++ b/src/couch_views/src/couch_views.erl
@@ -0,0 +1,140 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views).
+
+-export([
+    query/6
+]).
+
+
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
+    case fabric2_db:is_users_db(Db) of
+        true ->
+            fabric2_users_db:after_doc_read(DDoc, Db);
+        false ->
+            ok
+    end,
+
+    DbName = fabric2_db:name(Db),
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+    #mrst{
+        views = Views
+    } = Mrst,
+
+    Args1 = to_mrargs(Args0),
+    Args2 = couch_mrview_util:set_view_type(Args1, ViewName, Views),
+    Args3 = couch_mrview_util:validate_args(Args2),
+    ok = check_range(Args3),
+    case is_reduce_view(Args3) of
+        true -> throw({not_implemented});
+        false -> ok
+    end,
+
+    ok = maybe_update_view(Db, Mrst, Args3),
+
+    try
+        couch_views_reader:read(Db, Mrst, ViewName, Callback, Acc0, Args3)
+    after
+        UpdateAfter = Args3#mrargs.update == lazy,
+        if UpdateAfter == false -> ok; true ->
+            couch_views_jobs:build_view_async(Db, Mrst)
+        end
+    end.
+
+
+maybe_update_view(_Db, _Mrst, #mrargs{update = false}) ->
+    ok;
+
+maybe_update_view(_Db, _Mrst, #mrargs{update = lazy}) ->
+    ok;
+
+maybe_update_view(Db, Mrst, _Args) ->
+    WaitSeq = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        DbSeq = fabric2_db:get_update_seq(TxDb),
+        ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
+        case DbSeq == ViewSeq of
+            true -> ready;
+            false -> DbSeq
+        end
+    end),
+
+    if WaitSeq == ready -> ok; true ->
+        couch_views_jobs:build_view(Db, Mrst, WaitSeq)
+    end.
+
+
+is_reduce_view(#mrargs{view_type = ViewType}) ->
+    ViewType =:= red;
+is_reduce_view({Reduce, _, _}) ->
+    Reduce =:= red.
+
+
+to_mrargs(#mrargs{} = Args) ->
+    Args;
+
+to_mrargs(#{} = Args) ->
+    Fields = record_info(fields, mrargs),
+    Indexes = lists:seq(2, record_info(size, mrargs)),
+    LU = lists:zip(Fields, Indexes),
+
+    maps:fold(fun(Key, Value, Acc) ->
+        Index = fabric2_util:get_value(couch_util:to_existing_atom(Key), LU),
+        setelement(Index, Acc, Value)
+    end, #mrargs{}, Args).
+
+
+check_range(#mrargs{start_key = undefined}) ->
+    ok;
+
+check_range(#mrargs{end_key = undefined}) ->
+    ok;
+
+check_range(#mrargs{start_key = K, end_key = K}) ->
+    ok;
+
+check_range(Args) ->
+    #mrargs{
+        direction = Dir,
+        start_key = SK,
+        start_key_docid = SKD,
+        end_key = EK,
+        end_key_docid = EKD
+    } = Args,
+
+    case {Dir, view_cmp(SK, SKD, EK, EKD)} of
+        {fwd, false} ->
+            throw(check_range_error(<<"true">>));
+        {rev, true} ->
+            throw(check_range_error(<<"false">>));
+        _ ->
+            ok
+    end.
+
+
+check_range_error(Descending) ->
+    {query_parse_error,
+        <<"No rows can match your key range, reverse your ",
+            "start_key and end_key or set descending=",
+            Descending/binary>>}.
+
+
+view_cmp(SK, SKD, EK, EKD) ->
+    BinSK = couch_views_encoding:encode(SK, key),
+    BinEK = couch_views_encoding:encode(EK, key),
+    PackedSK = erlfdb_tuple:pack({BinSK, SKD}),
+    PackedEK = erlfdb_tuple:pack({BinEK, EKD}),
+    PackedSK =< PackedEK.
diff --git a/src/couch_views/src/couch_views_app.erl b/src/couch_views/src/couch_views_app.erl
new file mode 100644
index 0000000..5ede5ef
--- /dev/null
+++ b/src/couch_views/src/couch_views_app.erl
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_app).
+
+
+-behaviour(application).
+
+
+-export([
+    start/2,
+    stop/1
+]).
+
+
+start(_StartType, StartArgs) ->
+    couch_views_sup:start_link(StartArgs).
+
+
+stop(_State) ->
+    ok.
diff --git a/src/couch_views/src/couch_views_encoding.erl b/src/couch_views/src/couch_views_encoding.erl
new file mode 100644
index 0000000..ef5fed9
--- /dev/null
+++ b/src/couch_views/src/couch_views_encoding.erl
@@ -0,0 +1,105 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_encoding).
+
+
+-export([
+    encode/1,
+    encode/2,
+    decode/1
+]).
+
+
+-define(NULL, 0).
+-define(FALSE, 1).
+-define(TRUE, 2).
+-define(NUMBER, 3).
+-define(STRING, 4).
+-define(LIST, 5).
+-define(OBJECT, 6).
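+
+% These tags are the first element of every packed tuple, so they fix
+% the cross-type collation order: null < false < true < numbers <
+% strings < lists < objects. E.g. encode(true, key) sorts before
+% encode(0, key) because ?TRUE < ?NUMBER.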
+
+
+encode(X) ->
+    encode(X, value).
+
+
+encode(X, Type) when Type == key; Type == value ->
+    erlfdb_tuple:pack(encode_int(X, Type)).
+
+
+decode(Encoded) ->
+    Val = erlfdb_tuple:unpack(Encoded),
+    decode_int(Val).
+
+
+encode_int(null, _Type) ->
+    {?NULL};
+
+encode_int(false, _Type) ->
+    {?FALSE};
+
+encode_int(true, _Type) ->
+    {?TRUE};
+
+encode_int(Num, key) when is_number(Num) ->
+    {?NUMBER, float(Num)};
+
+encode_int(Num, value) when is_number(Num) ->
+    {?NUMBER, Num};
+
+encode_int(Bin, key) when is_binary(Bin) ->
+    {?STRING, couch_util:get_sort_key(Bin)};
+
+encode_int(Bin, value) when is_binary(Bin) ->
+    {?STRING, Bin};
+
+encode_int(List, Type) when is_list(List) ->
+    Encoded = lists:map(fun(Item) ->
+        encode_int(Item, Type)
+    end, List),
+    {?LIST, list_to_tuple(Encoded)};
+
+encode_int({Props}, Type) when is_list(Props) ->
+    Encoded = lists:map(fun({K, V}) ->
+        EK = encode_int(K, Type),
+        EV = encode_int(V, Type),
+        {EK, EV}
+    end, Props),
+    {?OBJECT, list_to_tuple(Encoded)}.
+
+
+decode_int({?NULL}) ->
+    null;
+
+decode_int({?FALSE}) ->
+    false;
+
+decode_int({?TRUE}) ->
+    true;
+
+decode_int({?STRING, Bin}) ->
+    Bin;
+
+decode_int({?NUMBER, Num}) ->
+    Num;
+
+decode_int({?LIST, List}) ->
+    lists:map(fun decode_int/1, tuple_to_list(List));
+
+decode_int({?OBJECT, Object}) ->
+    Props = lists:map(fun({EK, EV}) ->
+        K = decode_int(EK),
+        V = decode_int(EV),
+        {K, V}
+    end, tuple_to_list(Object)),
+    {Props}.
diff --git a/src/couch_views/src/couch_views_fdb.erl b/src/couch_views/src/couch_views_fdb.erl
new file mode 100644
index 0000000..60ce300
--- /dev/null
+++ b/src/couch_views/src/couch_views_fdb.erl
@@ -0,0 +1,438 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_fdb).
+
+-export([
+    get_update_seq/2,
+    set_update_seq/3,
+
+    get_row_count/3,
+    get_kv_size/3,
+
+    fold_map_idx/6,
+
+    write_doc/4
+]).
+
+-ifdef(TEST).
+-compile(export_all).
+-compile(nowarn_export_all).
+-endif.
+
+-define(LIST_VALUE, 0).
+-define(JSON_VALUE, 1).
+-define(VALUE, 2).
+
+
+-include("couch_views.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+
+% View Build Sequence Access
+% (<db>, ?DB_VIEWS, Sig, ?VIEW_UPDATE_SEQ) = Sequence
+
+
+get_update_seq(TxDb, #mrst{sig = Sig}) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    case erlfdb:wait(erlfdb:get(Tx, seq_key(DbPrefix, Sig))) of
+        not_found -> <<>>;
+        UpdateSeq -> UpdateSeq
+    end.
+
+
+set_update_seq(TxDb, Sig, Seq) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+    ok = erlfdb:set(Tx, seq_key(DbPrefix, Sig), Seq).
+
+
+get_row_count(TxDb, #mrst{sig = Sig}, ViewId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    case erlfdb:wait(erlfdb:get(Tx, row_count_key(DbPrefix, Sig, ViewId))) of
+        not_found -> 0; % Can this happen?
+        CountBin -> ?bin2uint(CountBin)
+    end.
+
+
+get_kv_size(TxDb, #mrst{sig = Sig}, ViewId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    case erlfdb:wait(erlfdb:get(Tx, kv_size_key(DbPrefix, Sig, ViewId))) of
+        not_found -> 0; % Can this happen?
+        SizeBin -> ?bin2uint(SizeBin)
+    end.
+
+
+fold_map_idx(TxDb, Sig, ViewId, Options, Callback, Acc0) ->
+    #{
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    MapIdxPrefix = map_idx_prefix(DbPrefix, Sig, ViewId),
+    FoldAcc = #{
+        prefix => MapIdxPrefix,
+        sort_key => undefined,
+        docid => undefined,
+        dupe_id => undefined,
+        callback => Callback,
+        acc => Acc0
+    },
+
+    {Fun, Acc} = case fabric2_util:get_value(dir, Options, fwd) of
+        fwd ->
+            FwdAcc = FoldAcc#{
+                next => key,
+                key => undefined
+            },
+            {fun fold_fwd/2, FwdAcc};
+        rev ->
+            RevAcc = FoldAcc#{
+                next => value,
+                value => undefined
+            },
+            {fun fold_rev/2, RevAcc}
+    end,
+
+    #{
+        acc := Acc1
+    } = fabric2_fdb:fold_range(TxDb, MapIdxPrefix, Fun, Acc, Options),
+
+    Acc1.
+
+
+write_doc(TxDb, Sig, _ViewIds, #{deleted := true} = Doc) ->
+    #{
+        id := DocId
+    } = Doc,
+
+    ExistingViewKeys = get_view_keys(TxDb, Sig, DocId),
+
+    clear_id_idx(TxDb, Sig, DocId),
+    lists:foreach(fun({ViewId, TotalKeys, TotalSize, UniqueKeys}) ->
+        clear_map_idx(TxDb, Sig, ViewId, DocId, UniqueKeys),
+        update_row_count(TxDb, Sig, ViewId, -TotalKeys),
+        update_kv_size(TxDb, Sig, ViewId, -TotalSize)
+    end, ExistingViewKeys);
+
+write_doc(TxDb, Sig, ViewIds, Doc) ->
+    #{
+        id := DocId,
+        results := Results
+    } = Doc,
+
+    ExistingViewKeys = get_view_keys(TxDb, Sig, DocId),
+
+    clear_id_idx(TxDb, Sig, DocId),
+
+    lists:foreach(fun({ViewId, NewRows}) ->
+        update_id_idx(TxDb, Sig, ViewId, DocId, NewRows),
+
+        ExistingKeys = case lists:keyfind(ViewId, 1, ExistingViewKeys) of
+            {ViewId, TotalRows, TotalSize, EKeys} ->
+                RowChange = length(NewRows) - TotalRows,
+                SizeChange = calculate_row_size(NewRows) - TotalSize,
+                update_row_count(TxDb, Sig, ViewId, RowChange),
+                update_kv_size(TxDb, Sig, ViewId, SizeChange),
+                EKeys;
+            false ->
+                RowChange = length(NewRows),
+                SizeChange = calculate_row_size(NewRows),
+                update_row_count(TxDb, Sig, ViewId, RowChange),
+                update_kv_size(TxDb, Sig, ViewId, SizeChange),
+                []
+        end,
+        update_map_idx(TxDb, Sig, ViewId, DocId, ExistingKeys, NewRows)
+    end, lists:zip(ViewIds, Results)).
+
+
+% For each row in a map view there are two rows stored in
+% FoundationDB:
+%
+%   `(EncodedSortKey, EncodedKey)`
+%   `(EncodedSortKey, EncodedValue)`
+%
+% The difference between `EncodedSortKey` and `EncodedKey` is
+% the use of `couch_util:get_sort_key/1`, which turns UTF-8
+% strings into binaries that are byte comparable. Given a sort
+% key binary we cannot recover the input, so to return unmodified
+% user data we are forced to store the original as well.
+%
+% These two fold functions exist so that we can be fairly
+% forceful in our assertions about which rows we see: when
+% folding forward we see the key row first, and when
+% `descending=true` and we fold in reverse we see the value
+% row first.
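+%
+% For example (layout from update_map_idx/6 below), a doc <<"d1">>
+% emitting K -> V once is stored as two rows with DupeId 0:
+%
+%   {{encode(K, key), <<"d1">>}, 0, ?VIEW_ROW_KEY}   => encode(K, value)
+%   {{encode(K, key), <<"d1">>}, 0, ?VIEW_ROW_VALUE} => encode(V, value)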
+
+fold_fwd({RowKey, EncodedOriginalKey}, #{next := key} = Acc) ->
+    #{
+        prefix := Prefix
+    } = Acc,
+
+    {{SortKey, DocId}, DupeId, ?VIEW_ROW_KEY} =
+            erlfdb_tuple:unpack(RowKey, Prefix),
+    Acc#{
+        next := value,
+        key := couch_views_encoding:decode(EncodedOriginalKey),
+        sort_key := SortKey,
+        docid := DocId,
+        dupe_id := DupeId
+    };
+
+fold_fwd({RowKey, EncodedValue}, #{next := value} = Acc) ->
+    #{
+        prefix := Prefix,
+        key := Key,
+        sort_key := SortKey,
+        docid := DocId,
+        dupe_id := DupeId,
+        callback := UserCallback,
+        acc := UserAcc0
+    } = Acc,
+
+    % We're asserting here that this row is paired
+    % correctly with the previous row by relying on
+    % a badmatch if any of these values don't match.
+    {{SortKey, DocId}, DupeId, ?VIEW_ROW_VALUE} =
+            erlfdb_tuple:unpack(RowKey, Prefix),
+
+    Value = couch_views_encoding:decode(EncodedValue),
+    UserAcc1 = UserCallback(DocId, Key, Value, UserAcc0),
+
+    Acc#{
+        next := key,
+        key := undefined,
+        sort_key := undefined,
+        docid := undefined,
+        dupe_id := undefined,
+        acc := UserAcc1
+    }.
+
+
+fold_rev({RowKey, EncodedValue}, #{next := value} = Acc) ->
+    #{
+        prefix := Prefix
+    } = Acc,
+
+    {{SortKey, DocId}, DupeId, ?VIEW_ROW_VALUE} =
+            erlfdb_tuple:unpack(RowKey, Prefix),
+    Acc#{
+        next := key,
+        value := couch_views_encoding:decode(EncodedValue),
+        sort_key := SortKey,
+        docid := DocId,
+        dupe_id := DupeId
+    };
+
+fold_rev({RowKey, EncodedOriginalKey}, #{next := key} = Acc) ->
+    #{
+        prefix := Prefix,
+        value := Value,
+        sort_key := SortKey,
+        docid := DocId,
+        dupe_id := DupeId,
+        callback := UserCallback,
+        acc := UserAcc0
+    } = Acc,
+
+    % We're asserting here that this row is paired
+    % correctly with the previous row by relying on
+    % a badmatch if any of these values don't match.
+    {{SortKey, DocId}, DupeId, ?VIEW_ROW_KEY} =
+            erlfdb_tuple:unpack(RowKey, Prefix),
+
+    Key = couch_views_encoding:decode(EncodedOriginalKey),
+    UserAcc1 = UserCallback(DocId, Key, Value, UserAcc0),
+
+    Acc#{
+        next := value,
+        value := undefined,
+        sort_key := undefined,
+        docid := undefined,
+        dupe_id := undefined,
+        acc := UserAcc1
+    }.
+
+
+clear_id_idx(TxDb, Sig, DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    {Start, End} = id_idx_range(DbPrefix, Sig, DocId),
+    ok = erlfdb:clear_range(Tx, Start, End).
+
+
+clear_map_idx(TxDb, Sig, ViewId, DocId, ViewKeys) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    lists:foreach(fun(ViewKey) ->
+        {Start, End} = map_idx_range(DbPrefix, Sig, ViewId, ViewKey, DocId),
+        ok = erlfdb:clear_range(Tx, Start, End)
+    end, ViewKeys).
+
+
+update_id_idx(TxDb, Sig, ViewId, DocId, NewRows) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    Unique = lists:usort([K || {K, _V} <- NewRows]),
+
+    Key = id_idx_key(DbPrefix, Sig, DocId, ViewId),
+    RowSize = calculate_row_size(NewRows),
+    Val = couch_views_encoding:encode([length(NewRows), RowSize, Unique]),
+    ok = erlfdb:set(Tx, Key, Val).
+
+
+update_map_idx(TxDb, Sig, ViewId, DocId, ExistingKeys, NewRows) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+
+    Unique = lists:usort([K || {K, _V} <- NewRows]),
+
+    KeysToRem = ExistingKeys -- Unique,
+    lists:foreach(fun(RemKey) ->
+        {Start, End} = map_idx_range(DbPrefix, Sig, ViewId, RemKey, DocId),
+        ok = erlfdb:clear_range(Tx, Start, End)
+    end, KeysToRem),
+
+    KVsToAdd = process_rows(NewRows),
+    MapIdxPrefix = map_idx_prefix(DbPrefix, Sig, ViewId),
+
+    lists:foreach(fun({DupeId, Key1, Key2, Val}) ->
+        KK = map_idx_key(MapIdxPrefix, {Key1, DocId}, DupeId, ?VIEW_ROW_KEY),
+        VK = map_idx_key(MapIdxPrefix, {Key1, DocId}, DupeId, ?VIEW_ROW_VALUE),
+        ok = erlfdb:set(Tx, KK, Key2),
+        ok = erlfdb:set(Tx, VK, Val)
+    end, KVsToAdd).
+
+
+get_view_keys(TxDb, Sig, DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+    {Start, End} = id_idx_range(DbPrefix, Sig, DocId),
+    lists:map(fun({K, V}) ->
+        {?DB_VIEWS, Sig, ?VIEW_ID_RANGE, DocId, ViewId} =
+                erlfdb_tuple:unpack(K, DbPrefix),
+        [TotalKeys, TotalSize, UniqueKeys] = couch_views_encoding:decode(V),
+        {ViewId, TotalKeys, TotalSize, UniqueKeys}
+    end, erlfdb:get_range(Tx, Start, End, [])).
+
+
+update_row_count(TxDb, Sig, ViewId, Increment) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+    Key = row_count_key(DbPrefix, Sig, ViewId),
+    erlfdb:add(Tx, Key, Increment).
+
+
+update_kv_size(TxDb, Sig, ViewId, Increment) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = TxDb,
+    Key = kv_size_key(DbPrefix, Sig, ViewId),
+    erlfdb:add(Tx, Key, Increment).
+
+
+seq_key(DbPrefix, Sig) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_UPDATE_SEQ},
+    erlfdb_tuple:pack(Key, DbPrefix).
+
+
+row_count_key(DbPrefix, Sig, ViewId) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_ID_INFO, ViewId, ?VIEW_ROW_COUNT},
+    erlfdb_tuple:pack(Key, DbPrefix).
+
+
+kv_size_key(DbPrefix, Sig, ViewId) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_ID_INFO, ViewId, ?VIEW_KV_SIZE},
+    erlfdb_tuple:pack(Key, DbPrefix).
+
+
+id_idx_key(DbPrefix, Sig, DocId, ViewId) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_ID_RANGE, DocId, ViewId},
+    erlfdb_tuple:pack(Key, DbPrefix).
+
+
+id_idx_range(DbPrefix, Sig, DocId) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_ID_RANGE, DocId},
+    erlfdb_tuple:range(Key, DbPrefix).
+
+
+map_idx_prefix(DbPrefix, Sig, ViewId) ->
+    Key = {?DB_VIEWS, Sig, ?VIEW_MAP_RANGE, ViewId},
+    erlfdb_tuple:pack(Key, DbPrefix).
+
+
+map_idx_key(MapIdxPrefix, MapKey, DupeId, Type) ->
+    Key = {MapKey, DupeId, Type},
+    erlfdb_tuple:pack(Key, MapIdxPrefix).
+
+
+map_idx_range(DbPrefix, Sig, ViewId, MapKey, DocId) ->
+    Encoded = couch_views_encoding:encode(MapKey, key),
+    Key = {?DB_VIEWS, Sig, ?VIEW_MAP_RANGE, ViewId, {Encoded, DocId}},
+    erlfdb_tuple:range(Key, DbPrefix).
+
+
+process_rows(Rows) ->
+    Encoded = lists:map(fun({K, V}) ->
+        EK1 = couch_views_encoding:encode(K, key),
+        EK2 = couch_views_encoding:encode(K, value),
+        EV = couch_views_encoding:encode(V, value),
+        {EK1, EK2, EV}
+    end, Rows),
+
+    Grouped = lists:foldl(fun({K1, K2, V}, Acc) ->
+        dict:append(K1, {K2, V}, Acc)
+    end, dict:new(), Encoded),
+
+    dict:fold(fun(K1, Vals, DAcc) ->
+        Vals1 = lists:keysort(2, Vals),
+        {_, Labeled} = lists:foldl(fun({K2, V}, {Count, Acc}) ->
+            {Count + 1, [{Count, K1, K2, V} | Acc]}
+        end, {0, []}, Vals1),
+        Labeled ++ DAcc
+    end, [], Grouped).
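+
+
+% E.g. a doc emitting the same key K twice with values V1 =< V2 yields
+% [{1, EK1, EK2, EV2}, {0, EK1, EK2, EV1}]: each duplicate emit of a
+% key gets a distinct DupeId so the rows don't clobber each other.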
+
+
+calculate_row_size(Rows) ->
+    lists:foldl(fun({K, V}, Acc) ->
+        Acc + erlang:external_size(K) + erlang:external_size(V)
+    end, 0, Rows).
diff --git a/src/couch_views/src/couch_views_indexer.erl b/src/couch_views/src/couch_views_indexer.erl
new file mode 100644
index 0000000..a317936
--- /dev/null
+++ b/src/couch_views/src/couch_views_indexer.erl
@@ -0,0 +1,261 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_indexer).
+
+-export([
+    spawn_link/0
+]).
+
+
+-export([
+    init/0
+]).
+
+-include("couch_views.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+% TODO:
+%  * Handle timeouts of transaction and other errors
+
+
+spawn_link() ->
+    proc_lib:spawn_link(?MODULE, init, []).
+
+
+init() ->
+    {ok, Job, Data} = couch_jobs:accept(?INDEX_JOB_TYPE, #{}),
+    #{
+        <<"db_name">> := DbName,
+        <<"ddoc_id">> := DDocId,
+        <<"sig">> := JobSig
+    } = Data,
+
+    {ok, Db} = fabric2_db:open(DbName, []),
+    {ok, DDoc} = fabric2_db:open_doc(Db, DDocId),
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+    HexSig = fabric2_util:to_hex(Mrst#mrst.sig),
+
+    if  HexSig == JobSig -> ok; true ->
+        couch_jobs:finish(undefined, Job, Data#{
+            error => sig_changed,
+            reason => <<"Design document was modified">>
+        }),
+        exit(normal)
+    end,
+
+    State = #{
+        tx_db => undefined,
+        db_seq => undefined,
+        view_seq => undefined,
+        last_seq => undefined,
+        job => Job,
+        job_data => Data,
+        count => 0,
+        limit => num_changes(),
+        doc_acc => [],
+        design_opts => Mrst#mrst.design_opts
+    },
+
+    update(Db, Mrst, State).
+
+
+update(#{} = Db, Mrst0, State0) ->
+    {Mrst2, State3} = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        % In the first iteration of update we need
+        % to populate our db and view sequences
+        State1 = case State0 of
+            #{db_seq := undefined} ->
+                ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst0),
+                State0#{
+                    tx_db := TxDb,
+                    db_seq := fabric2_db:get_update_seq(TxDb),
+                    view_seq := ViewSeq,
+                    last_seq := ViewSeq
+                };
+            _ ->
+                State0#{
+                    tx_db := TxDb
+                }
+        end,
+
+        {ok, State2} = fold_changes(State1),
+
+        #{
+            count := Count,
+            limit := Limit,
+            doc_acc := DocAcc,
+            last_seq := LastSeq
+        } = State2,
+
+        {Mrst1, MappedDocs} = map_docs(Mrst0, DocAcc),
+        write_docs(TxDb, Mrst1, MappedDocs, State2),
+
+        case Count < Limit of
+            true ->
+                report_progress(State2, finished),
+                {Mrst1, finished};
+            false ->
+                report_progress(State2, update),
+                {Mrst1, State2#{
+                    tx_db := undefined,
+                    count := 0,
+                    doc_acc := [],
+                    view_seq := LastSeq
+                }}
+        end
+    end),
+
+    case State3 of
+        finished ->
+            couch_query_servers:stop_doc_map(Mrst2#mrst.qserver);
+        _ ->
+            update(Db, Mrst2, State3)
+    end.
+
+
+fold_changes(State) ->
+    #{
+        view_seq := SinceSeq,
+        limit := Limit,
+        tx_db := TxDb
+    } = State,
+
+    Fun = fun process_changes/2,
+    fabric2_db:fold_changes(TxDb, SinceSeq, Fun, State, [{limit, Limit}]).
+
+
+process_changes(Change, Acc) ->
+    #{
+        doc_acc := DocAcc,
+        count := Count,
+        tx_db := TxDb,
+        design_opts := DesignOpts
+    } = Acc,
+
+    #{
+        id := Id,
+        sequence := LastSeq,
+        deleted := Deleted
+    } = Change,
+
+    IncludeDesign = lists:keymember(<<"include_design">>, 1, DesignOpts),
+
+    Acc1 = case {Id, IncludeDesign} of
+        {<<?DESIGN_DOC_PREFIX, _/binary>>, false} ->
+            maps:merge(Acc, #{
+                count => Count + 1,
+                last_seq => LastSeq
+            });
+        _ ->
+            % Note: fetching all the docs here should eventually be
+            % a parallel fdb operation.
+            {ok, Doc} = case Deleted of
+                true -> {ok, []};
+                false -> fabric2_db:open_doc(TxDb, Id)
+            end,
+
+            Change1 = maps:put(doc, Doc, Change),
+            Acc#{
+                doc_acc := DocAcc ++ [Change1],
+                count := Count + 1,
+                last_seq := LastSeq
+            }
+    end,
+    {ok, Acc1}.
+
+
+map_docs(Mrst, Docs) ->
+    % Run all the non-deleted docs through the view engine and
+    % collect the map results for each view.
+    Mrst1 = start_query_server(Mrst),
+    QServer = Mrst1#mrst.qserver,
+    MapFun = fun
+        (#{deleted := true} = Change) ->
+            Change#{results => []};
+        (#{deleted := false} = Change) ->
+            #{doc := Doc} = Change,
+            couch_stats:increment_counter([couchdb, mrview, map_doc]),
+            {ok, RawResults} = couch_query_servers:map_doc_raw(QServer, Doc),
+            JsonResults = couch_query_servers:raw_to_ejson(RawResults),
+            ListResults = lists:map(fun(ViewResults) ->
+                [list_to_tuple(Res) || Res <- ViewResults]
+            end, JsonResults),
+            Change#{results => ListResults}
+    end,
+    {Mrst1, lists:map(MapFun, Docs)}.
+
+
+write_docs(TxDb, Mrst, Docs, State) ->
+    #mrst{
+        views = Views,
+        sig = Sig
+    } = Mrst,
+
+    #{
+        last_seq := LastSeq
+    } = State,
+
+    ViewIds = [View#mrview.id_num || View <- Views],
+
+    lists:foreach(fun(Doc) ->
+        couch_views_fdb:write_doc(TxDb, Sig, ViewIds, Doc)
+    end, Docs),
+
+    couch_views_fdb:set_update_seq(TxDb, Sig, LastSeq).
+
+
+start_query_server(#mrst{} = Mrst) ->
+    #mrst{
+        language = Language,
+        lib = Lib,
+        views = Views
+    } = Mrst,
+    Defs = [View#mrview.def || View <- Views],
+    {ok, QServer} = couch_query_servers:start_doc_map(Language, Defs, Lib),
+    Mrst#mrst{qserver = QServer}.
+
+
+report_progress(State, UpdateType) ->
+    #{
+        tx_db := TxDb,
+        job := Job,
+        job_data := JobData,
+        last_seq := LastSeq
+    } = State,
+
+    #{
+        <<"db_name">> := DbName,
+        <<"ddoc_id">> := DDocId,
+        <<"sig">> := Sig
+    } = JobData,
+
+    % Reconstruct from scratch to remove any
+    % possible existing error state.
+    NewData = #{
+        <<"db_name">> => DbName,
+        <<"ddoc_id">> => DDocId,
+        <<"sig">> => Sig,
+        <<"view_seq">> => LastSeq
+    },
+
+    case UpdateType of
+        update ->
+            couch_jobs:update(TxDb, Job, NewData);
+        finished ->
+            couch_jobs:finish(TxDb, Job, NewData)
+    end.
+
+
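+% The indexer batch size is read from the `couch_views` config
+% section, e.g.:
+%
+%   [couch_views]
+%   change_limit = 100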
+num_changes() ->
+    config:get_integer("couch_views", "change_limit", 100).
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
new file mode 100644
index 0000000..16fc410
--- /dev/null
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -0,0 +1,109 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_jobs).
+
+-export([
+    set_timeout/0,
+    build_view/3,
+    build_view_async/2
+]).
+
+-ifdef(TEST).
+-compile(export_all).
+-compile(nowarn_export_all).
+-endif.
+
+
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include("couch_views.hrl").
+
+
+set_timeout() ->
+    couch_jobs:set_type_timeout(?INDEX_JOB_TYPE, 6 * 1000).
+
+
+build_view(TxDb, Mrst, UpdateSeq) ->
+    {ok, JobId} = build_view_async(TxDb, Mrst),
+    case wait_for_job(JobId, UpdateSeq) of
+        ok -> ok;
+        retry -> build_view(TxDb, Mrst, UpdateSeq)
+    end.
+
+
+build_view_async(TxDb, Mrst) ->
+    JobId = job_id(TxDb, Mrst),
+    JobData = job_data(TxDb, Mrst),
+    ok = couch_jobs:add(undefined, ?INDEX_JOB_TYPE, JobId, JobData),
+    {ok, JobId}.
+
+
+wait_for_job(JobId, UpdateSeq) ->
+    case couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId) of
+        {ok, Subscription, _State, _Data} ->
+            wait_for_job(JobId, Subscription, UpdateSeq);
+        {ok, finished, Data} ->
+            case Data of
+                #{<<"view_seq">> := ViewSeq} when ViewSeq >= UpdateSeq ->
+                    ok;
+                _ ->
+                    retry
+            end
+    end.
+
+
+wait_for_job(JobId, Subscription, UpdateSeq) ->
+    case wait(Subscription) of
+        {error, Error} ->
+            erlang:error(Error);
+        {finished, #{<<"error">> := Error, <<"reason">> := Reason}} ->
+            erlang:error({binary_to_existing_atom(Error, latin1), Reason});
+        {finished, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
+            ok;
+        {finished, _} ->
+            wait_for_job(JobId, UpdateSeq);
+        {_State, #{<<"view_seq">> := ViewSeq}} when ViewSeq >= UpdateSeq ->
+            couch_jobs:unsubscribe(Subscription),
+            ok;
+        {_, _} ->
+            wait_for_job(JobId, Subscription, UpdateSeq)
+    end.
+
+
+job_id(#{name := DbName}, #mrst{sig = Sig}) ->
+    job_id(DbName, Sig);
+
+job_id(DbName, Sig) ->
+    HexSig = fabric2_util:to_hex(Sig),
+    <<DbName/binary, "-", HexSig/binary>>.
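+
+
+% E.g. job_id(<<"db1">>, Sig) yields <<"db1-", HexSig/binary>>, giving
+% one build job per database/view-signature pair.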
+
+
+job_data(Db, Mrst) ->
+    #mrst{
+        idx_name = DDocId,
+        sig = Sig
+    } = Mrst,
+
+    #{
+        db_name => fabric2_db:name(Db),
+        ddoc_id => DDocId,
+        sig => fabric2_util:to_hex(Sig)
+    }.
+
+
+wait(Subscription) ->
+    case couch_jobs:wait(Subscription, infinity) of
+        {?INDEX_JOB_TYPE, _JobId, JobState, JobData} ->
+            {JobState, JobData};
+        timeout ->
+            {error, timeout}
+    end.
diff --git a/src/couch_views/src/couch_views_reader.erl b/src/couch_views/src/couch_views_reader.erl
new file mode 100644
index 0000000..c7989d8
--- /dev/null
+++ b/src/couch_views/src/couch_views_reader.erl
@@ -0,0 +1,208 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_reader).
+
+-export([
+    read/6
+]).
+
+
+-include("couch_views.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric2.hrl").
+
+
+read(Db, Mrst, ViewName, UserCallback, UserAcc0, Args) ->
+    #mrst{
+        language = Lang,
+        sig = Sig,
+        views = Views
+    } = Mrst,
+
+    ViewId = get_view_id(Lang, Args, ViewName, Views),
+    Fun = fun handle_row/4,
+
+    try
+        fabric2_fdb:transactional(Db, fun(TxDb) ->
+            TotalRows = couch_views_fdb:get_row_count(TxDb, Mrst, ViewId),
+
+            Meta = {meta, [{total, TotalRows}, {offset, null}]},
+            UserAcc1 = maybe_stop(UserCallback(Meta, UserAcc0)),
+
+            Acc0 = #{
+                db => TxDb,
+                skip => Args#mrargs.skip,
+                mrargs => undefined,
+                callback => UserCallback,
+                acc => UserAcc1
+            },
+
+            Acc1 = lists:foldl(fun(KeyArgs, KeyAcc0) ->
+                Opts = mrargs_to_fdb_options(KeyArgs),
+                KeyAcc1 = KeyAcc0#{
+                    mrargs := KeyArgs
+                },
+                couch_views_fdb:fold_map_idx(
+                        TxDb,
+                        Sig,
+                        ViewId,
+                        Opts,
+                        Fun,
+                        KeyAcc1
+                    )
+            end, Acc0, expand_keys_args(Args)),
+
+            #{
+                acc := UserAcc2
+            } = Acc1,
+            {ok, maybe_stop(UserCallback(complete, UserAcc2))}
+        end)
+    catch throw:{done, Out} ->
+        {ok, Out}
+    end.
+
+
+handle_row(_DocId, _Key, _Value, #{skip := Skip} = Acc) when Skip > 0 ->
+    Acc#{skip := Skip - 1};
+
+handle_row(DocId, Key, Value, Acc) ->
+    #{
+        db := TxDb,
+        mrargs := Args,
+        callback := UserCallback,
+        acc := UserAcc0
+    } = Acc,
+
+    BaseRow = [
+        {id, DocId},
+        {key, Key},
+        {value, Value}
+    ],
+
+    Row = BaseRow ++ if not Args#mrargs.include_docs -> []; true ->
+        DocOpts0 = Args#mrargs.doc_options,
+        DocOpts1 = DocOpts0 ++ case Args#mrargs.conflicts of
+            true -> [conflicts];
+            _ -> []
+        end,
+
+        {TargetDocId, Rev} = get_doc_id(DocId, Value),
+        DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1),
+        [{doc, DocObj}]
+    end,
+
+    UserAcc1 = maybe_stop(UserCallback({row, Row}, UserAcc0)),
+    Acc#{acc := UserAcc1}.
+
+
+get_view_id(Lang, Args, ViewName, Views) ->
+    case couch_mrview_util:extract_view(Lang, Args, ViewName, Views) of
+        {map, View, _Args} -> View#mrview.id_num;
+        {red, {_Idx, _Lang, View}} -> View#mrview.id_num
+    end.
+
+
+expand_keys_args(#mrargs{keys = undefined} = Args) ->
+    [Args];
+
+expand_keys_args(#mrargs{keys = Keys} = Args) ->
+    lists:map(fun(Key) ->
+        Args#mrargs{
+            start_key = Key,
+            end_key = Key
+        }
+    end, Keys).
+
+
+mrargs_to_fdb_options(Args) ->
+    #mrargs{
+        start_key = StartKey0,
+        start_key_docid = StartKeyDocId,
+        end_key = EndKey0,
+        end_key_docid = EndKeyDocId,
+        direction = Direction,
+        limit = Limit,
+        skip = Skip,
+        inclusive_end = InclusiveEnd
+    } = Args,
+
+    StartKey1 = if StartKey0 == undefined -> undefined; true ->
+        couch_views_encoding:encode(StartKey0, key)
+    end,
+
+    StartKeyOpts = case {StartKey1, StartKeyDocId} of
+        {undefined, _} ->
+            [];
+        {StartKey1, StartKeyDocId} ->
+            [{start_key, {StartKey1, StartKeyDocId}}]
+    end,
+
+    EndKey1 = if EndKey0 == undefined -> undefined; true ->
+        couch_views_encoding:encode(EndKey0, key)
+    end,
+
+    EndKeyOpts = case {EndKey1, EndKeyDocId, Direction} of
+        {undefined, _, _} ->
+            [];
+        {EndKey1, <<>>, rev} when not InclusiveEnd ->
+            % When we iterate in reverse with
+            % inclusive_end=false we have to set the
+            % EndKeyDocId to <<255>> so that we don't
+            % include matching rows.
+            [{end_key_gt, {EndKey1, <<255>>}}];
+        {EndKey1, <<255>>, _} when not InclusiveEnd ->
+            % When inclusive_end=false we need to
+            % elide the default end_key_docid so as
+            % to not sort past the docids with the
+            % given end key.
+            [{end_key_gt, {EndKey1}}];
+        {EndKey1, EndKeyDocId, _} when not InclusiveEnd ->
+            [{end_key_gt, {EndKey1, EndKeyDocId}}];
+        {EndKey1, EndKeyDocId, _} when InclusiveEnd ->
+            [{end_key, {EndKey1, EndKeyDocId}}]
+    end,
+
+    [
+        {dir, Direction},
+        {limit, Limit * 2 + Skip * 2},
+        {streaming_mode, want_all}
+    ] ++ StartKeyOpts ++ EndKeyOpts.
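+
+
+% Note: each view row is stored as two fdb KVs (a key row and a value
+% row, see couch_views_fdb), hence the `Limit * 2 + Skip * 2` bound.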
+
+
+maybe_stop({ok, Acc}) -> Acc;
+maybe_stop({stop, Acc}) -> throw({done, Acc}).
+
+
+get_doc_id(Id, {Props}) ->
+    DocId = couch_util:get_value(<<"_id">>, Props, Id),
+    Rev = couch_util:get_value(<<"_rev">>, Props, null),
+    {DocId, Rev};
+
+get_doc_id(Id, _Value) ->
+    {Id, null}.
+
+
+load_doc(TxDb, Id, null, DocOpts) ->
+    case fabric2_db:open_doc(TxDb, Id, DocOpts) of
+        {ok, Doc} -> couch_doc:to_json_obj(Doc, DocOpts);
+        {not_found, _} -> null
+    end;
+
+load_doc(TxDb, Id, Rev, DocOpts) ->
+    Rev1 = couch_doc:parse_rev(Rev),
+    case (catch fabric2_db:open_doc_revs(TxDb, Id, [Rev1], DocOpts)) of
+        {ok, [{ok, Doc}]} -> couch_doc:to_json_obj(Doc, DocOpts);
+        {ok, [{{not_found, missing}, _}]} -> null;
+        {ok, [_Else]} -> null
+    end.
diff --git a/src/couch_views/src/couch_views_server.erl b/src/couch_views/src/couch_views_server.erl
new file mode 100644
index 0000000..d14216e
--- /dev/null
+++ b/src/couch_views/src/couch_views_server.erl
@@ -0,0 +1,103 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_server).
+
+
+-behaviour(gen_server).
+
+
+-export([
+    start_link/0
+]).
+
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-define(MAX_WORKERS, 100).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+    process_flag(trap_exit, true),
+    couch_views_jobs:set_timeout(),
+    St = #{
+        workers => #{},
+        max_workers => max_workers()
+    },
+    {ok, spawn_workers(St)}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({'EXIT', Pid, Reason}, St) ->
+    #{workers := Workers} = St,
+    case maps:is_key(Pid, Workers) of
+        true ->
+            if Reason == normal -> ok; true ->
+                LogMsg = "~p : indexer process ~p exited with ~p",
+                couch_log:error(LogMsg, [?MODULE, Pid, Reason])
+            end,
+            NewWorkers = maps:remove(Pid, Workers),
+            {noreply, spawn_workers(St#{workers := NewWorkers})};
+        false ->
+            LogMsg = "~p : unknown process ~p exited with ~p",
+            couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+            {stop, {unknown_pid_exit, Pid}, St}
+    end;
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+spawn_workers(St) ->
+    #{
+        workers := Workers,
+        max_workers := MaxWorkers
+    } = St,
+    case maps:size(Workers) < MaxWorkers of
+        true ->
+            Pid = couch_views_indexer:spawn_link(),
+            NewSt = St#{workers := Workers#{Pid => true}},
+            spawn_workers(NewSt);
+        false ->
+            St
+    end.
+
+
+max_workers() ->
+    config:get_integer("couch_views", "max_workers", ?MAX_WORKERS).
diff --git a/src/couch_views/src/couch_views_sup.erl b/src/couch_views/src/couch_views_sup.erl
new file mode 100644
index 0000000..7650fdf
--- /dev/null
+++ b/src/couch_views/src/couch_views_sup.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-module(couch_views_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+    start_link/1
+]).
+
+
+-export([
+    init/1
+]).
+
+
+start_link(Args) ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
+
+
+init([]) ->
+    Flags = #{
+        strategy => one_for_one,
+        intensity => 1,
+        period => 5
+    },
+    Children = [
+        #{
+            id => couch_views_server,
+            start => {couch_views_server, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
new file mode 100644
index 0000000..b88cfcd
--- /dev/null
+++ b/src/couch_views/src/couch_views_util.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_util).
+
+
+-export([
+    ddoc_to_mrst/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include("couch_views.hrl").
+
+
+ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
+    MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
+        case couch_util:get_value(<<"map">>, MRFuns) of
+            MapSrc when MapSrc /= undefined ->
+                RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+                {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+                View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
+                    {ok, View0} -> View0;
+                    error -> #mrview{def=MapSrc, options=ViewOpts}
+                end,
+                {MapNames, RedSrcs} = case RedSrc of
+                    null ->
+                        MNames = [Name | View#mrview.map_names],
+                        {MNames, View#mrview.reduce_funs};
+                    _ ->
+                        RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
+                        {View#mrview.map_names, RedFuns}
+                end,
+                View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
+                dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
+            undefined ->
+                DictBySrcAcc
+        end;
+        ({Name, Else}, DictBySrcAcc) ->
+            couch_log:error("design_doc_to_view_group ~s views ~p",
+                [Name, Else]),
+            DictBySrcAcc
+    end,
+    {DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
+    SeqIndexed = proplists:get_value(<<"seq_indexed">>, DesignOpts, false),
+    KeySeqIndexed = proplists:get_value(<<"keyseq_indexed">>,
+        DesignOpts, false),
+    Partitioned = proplists:get_value(<<"partitioned">>, DesignOpts, false),
+
+    {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
+    BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
+
+    NumViews = fun({_, View}, N) ->
+            {View#mrview{id_num=N, seq_indexed=SeqIndexed,
+                keyseq_indexed=KeySeqIndexed}, N+1}
+    end,
+    {Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
+
+    Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+    Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
+
+    IdxState = #mrst{
+        db_name=DbName,
+        idx_name=Id,
+        lib=Lib,
+        views=Views,
+        language=Language,
+        design_opts=DesignOpts,
+        seq_indexed=SeqIndexed,
+        keyseq_indexed=KeySeqIndexed,
+        partitioned=Partitioned
+    },
+    SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
+    {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
diff --git a/src/couch_views/test/couch_views_encoding_test.erl b/src/couch_views/test/couch_views_encoding_test.erl
new file mode 100644
index 0000000..7c26583
--- /dev/null
+++ b/src/couch_views/test/couch_views_encoding_test.erl
@@ -0,0 +1,94 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_encoding_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
+val_encoding_test() ->
+    Values = [
+        null,
+        true,
+        1.0,
+        <<"a">>,
+        {[{<<"a">>, 1.0}, {<<"b">>, <<"hello">>}]}
+    ],
+    lists:foreach(fun (Val) ->
+        EncVal = couch_views_encoding:encode(Val),
+        ?assertEqual(Val, couch_views_encoding:decode(EncVal))
+    end, Values).
+
+
+correct_ordering_test() ->
+    % Load the ICU driver for couch_util:get_sort_key/1
+    {ok, CfgPid} = gen_server:start_link(config, [], []),
+    {ok, DrvPid} = gen_server:start_link(couch_drv, [], []),
+
+    Ordered = [
+        %  Special values sort before all other types
+        null,
+        false,
+        true,
+
+        % Then numbers
+        1,
+        2,
+        3.0,
+        4,
+
+        % Then text, case sensitive
+        <<"a">>,
+        <<"A">>,
+        <<"aa">>,
+        <<"b">>,
+        <<"B">>,
+        <<"ba">>,
+        <<"bb">>,
+
+        % Then arrays, compared element by element until different.
+        % Longer arrays sort after their prefixes
+        [<<"a">>],
+        [<<"b">>],
+        [<<"b">>, <<"c">>],
+        [<<"b">>, <<"c">>, <<"a">>],
+        [<<"b">>, <<"d">>],
+        [<<"b">>, <<"d">>, <<"e">>],
+
+        % Then objects, compared each key value in the list until different.
+        % Larger objects sort after their subset objects
+        {[{<<"a">>, 1}]},
+        {[{<<"a">>, 2}]},
+        {[{<<"b">>, 1}]},
+        {[{<<"b">>, 2}]},
+
+        % Member order does matter for collation
+        {[{<<"b">>, 2}, {<<"a">>, 1}]},
+        {[{<<"b">>, 2}, {<<"c">>, 2}]}
+    ],
+
+    Encoded = lists:map(fun(Elem) ->
+        K = couch_views_encoding:encode(Elem, key),
+        V = couch_views_encoding:encode(Elem, value),
+        {K, V}
+    end, Ordered),
+    Shuffled = shuffle(Encoded),
+    Reordered = lists:sort(Shuffled),
+
+    lists:foreach(fun({Original, {_K, ViewEncoded}}) ->
+        ?assertEqual(Original, couch_views_encoding:decode(ViewEncoded))
+    end, lists:zip(Ordered, Reordered)).
+
+
+shuffle(List) when is_list(List) ->
+    Tagged = [{rand:uniform(), Item} || Item <- List],
+    {_, Randomized} = lists:unzip(lists:sort(Tagged)),
+    Randomized.
diff --git a/src/couch_views/test/couch_views_indexer_test.erl b/src/couch_views/test/couch_views_indexer_test.erl
new file mode 100644
index 0000000..02c8cee
--- /dev/null
+++ b/src/couch_views/test/couch_views_indexer_test.erl
@@ -0,0 +1,456 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_indexer_test).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+-define(I_HEART_EUNIT(Tests), [{with, [T]} || T <- Tests]).
+
+
+indexer_test_() ->
+    {
+        "Test view indexing",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {
+                foreach,
+                fun foreach_setup/0,
+                fun foreach_teardown/1,
+                ?I_HEART_EUNIT([
+                    fun indexed_empty_db/1,
+                    fun indexed_single_doc/1,
+                    fun updated_docs_are_reindexed/1,
+                    fun updated_docs_without_changes_are_reindexed/1,
+                    fun deleted_docs_not_indexed/1,
+                    fun deleted_docs_are_unindexed/1,
+                    fun multipe_docs_with_same_key/1,
+                    fun multipe_keys_from_same_doc/1,
+                    fun multipe_identical_keys_from_same_doc/1
+                ])
+            }
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([
+            fabric,
+            couch_jobs,
+            couch_views
+        ]),
+    Ctx.
+
+
+cleanup(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+
+foreach_setup() ->
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    Db.
+
+
+foreach_teardown(Db) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []).
+
+
+indexed_empty_db(Db) ->
+    DDoc = create_ddoc(),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([], Out).
+
+
+indexed_single_doc(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, _} = fabric2_db:update_doc(Db, Doc1, []),
+
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 0},
+            {value, 0}
+        ]}], Out).
+
+
+updated_docs_are_reindexed(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+    {ok, Out1} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 0},
+            {value, 0}
+        ]}], Out1),
+
+    Doc2 = Doc1#doc{
+        revs = {Pos, [Rev]},
+        body = {[{<<"val">>, 1}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+    {ok, Out2} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 1},
+            {value, 1}
+        ]}], Out2),
+
+    % Check that our id index is updated properly
+    % as well.
+    DbName = fabric2_db:name(Db),
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+    Sig = Mrst#mrst.sig,
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        ?assertMatch(
+                [{0, 1, _, [1]}, {1, 0, 0, []}],
+                couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>)
+            )
+    end).
+
+
+updated_docs_without_changes_are_reindexed(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+    {ok, Out1} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 0},
+            {value, 0}
+        ]}], Out1),
+
+    Doc2 = Doc1#doc{
+        revs = {Pos, [Rev]},
+        body = {[{<<"val">>, 0}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+    {ok, Out2} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 0},
+            {value, 0}
+        ]}], Out2),
+
+    % Check fdb directly to make sure the id index was
+    % rewritten cleanly and still holds the same key.
+    DbName = fabric2_db:name(Db),
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+    Sig = Mrst#mrst.sig,
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        ?assertMatch(
+                [{0, 1, _, [0]}, {1, 0, 0, []}],
+                couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>)
+            )
+    end).
+
+
+deleted_docs_not_indexed(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+    Doc2 = Doc1#doc{
+        revs = {Pos, [Rev]},
+        deleted = true,
+        body = {[{<<"val">>, 1}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([], Out).
+
+
+deleted_docs_are_unindexed(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, {Pos, Rev}} = fabric2_db:update_doc(Db, Doc1, []),
+
+    {ok, Out1} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([{row, [
+            {id, <<"0">>},
+            {key, 0},
+            {value, 0}
+        ]}], Out1),
+
+    Doc2 = Doc1#doc{
+        revs = {Pos, [Rev]},
+        deleted = true,
+        body = {[{<<"val">>, 1}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2, []),
+
+    {ok, Out2} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([], Out2),
+
+    % Check fdb directly to make sure we've also
+    % removed the id idx keys properly.
+    DbName = fabric2_db:name(Db),
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+    Sig = Mrst#mrst.sig,
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        ?assertEqual([], couch_views_fdb:get_view_keys(TxDb, Sig, <<"0">>))
+    end).
+
+
+multipe_docs_with_same_key(Db) ->
+    DDoc = create_ddoc(),
+    Doc1 = doc(0, 1),
+    Doc2 = doc(1, 1),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, _} = fabric2_db:update_docs(Db, [Doc1, Doc2], []),
+
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([
+            {row, [
+                {id, <<"0">>},
+                {key, 1},
+                {value, 1}
+            ]},
+            {row, [
+                {id, <<"1">>},
+                {key, 1},
+                {value, 1}
+            ]}
+        ], Out).
+
+
+multipe_keys_from_same_doc(Db) ->
+    DDoc = create_ddoc(multi_emit_different),
+    Doc = doc(0, 1),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([
+            {row, [
+                {id, <<"0">>},
+                {key, 1},
+                {value, 1}
+            ]},
+            {row, [
+                {id, <<"0">>},
+                {key, <<"0">>},
+                {value, <<"0">>}
+            ]}
+        ], Out).
+
+
+multipe_identical_keys_from_same_doc(Db) ->
+    DDoc = create_ddoc(multi_emit_same),
+    Doc = doc(0, 1),
+
+    {ok, _} = fabric2_db:update_doc(Db, DDoc, []),
+    {ok, _} = fabric2_db:update_doc(Db, Doc, []),
+
+    {ok, Out} = couch_views:query(
+            Db,
+            DDoc,
+            <<"map_fun1">>,
+            fun fold_fun/2,
+            [],
+            #mrargs{}
+        ),
+
+    ?assertEqual([
+            {row, [
+                {id, <<"0">>},
+                {key, 1},
+                {value, 1}
+            ]},
+            {row, [
+                {id, <<"0">>},
+                {key, 1},
+                {value, 2}
+            ]}
+        ], Out).
+
+
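+% Minimal couch_views:query/6 callback: ignores {meta, _}, accumulates
+% {row, _} tuples, and reverses the accumulator on complete so rows
+% come back in emit order.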
+fold_fun({meta, _Meta}, Acc) ->
+    {ok, Acc};
+fold_fun({row, _} = Row, Acc) ->
+    {ok, [Row | Acc]};
+fold_fun(complete, Acc) ->
+    {ok, lists:reverse(Acc)}.
+
+
+create_ddoc() ->
+    create_ddoc(simple).
+
+
+create_ddoc(simple) ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, <<"_design/bar">>},
+        {<<"views">>, {[
+            {<<"map_fun1">>, {[
+                {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+            ]}},
+            {<<"map_fun2">>, {[
+                {<<"map">>, <<"function(doc) {}">>}
+            ]}}
+        ]}}
+    ]});
+
+create_ddoc(multi_emit_different) ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, <<"_design/bar">>},
+        {<<"views">>, {[
+            {<<"map_fun1">>, {[
+                {<<"map">>, <<"function(doc) { "
+                    "emit(doc._id, doc._id); "
+                    "emit(doc.val, doc.val); "
+                "}">>}
+            ]}},
+            {<<"map_fun2">>, {[
+                {<<"map">>, <<"function(doc) {}">>}
+            ]}}
+        ]}}
+    ]});
+
+create_ddoc(multi_emit_same) ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, <<"_design/bar">>},
+        {<<"views">>, {[
+            {<<"map_fun1">>, {[
+                {<<"map">>, <<"function(doc) { "
+                    "emit(doc.val, doc.val * 2); "
+                    "emit(doc.val, doc.val); "
+                "}">>}
+            ]}},
+            {<<"map_fun2">>, {[
+                {<<"map">>, <<"function(doc) {}">>}
+            ]}}
+        ]}}
+    ]}).
+
+
+doc(Id) ->
+    doc(Id, Id).
+
+
+doc(Id, Val) ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, list_to_binary(integer_to_list(Id))},
+        {<<"val">>, Val}
+    ]}).
diff --git a/src/couch_views/test/couch_views_map_test.erl b/src/couch_views/test/couch_views_map_test.erl
new file mode 100644
index 0000000..0b0ab68
--- /dev/null
+++ b/src/couch_views/test/couch_views_map_test.erl
@@ -0,0 +1,517 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_views_map_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+setup() ->
+    test_util:start_couch([fabric, couch_jobs, couch_views]).
+
+
+teardown(State) ->
+    test_util:stop_couch(State).
+
+
+map_views_test_() ->
+    {
+        "Map views",
+        {
+            setup,
+            fun setup/0,
+            fun teardown/1,
+            [
+                ?TDEF(should_map),
+                ?TDEF(should_map_with_startkey),
+                ?TDEF(should_map_with_endkey),
+                ?TDEF(should_map_with_endkey_not_inclusive),
+                ?TDEF(should_map_reverse_and_limit),
+                ?TDEF(should_map_with_range_reverse),
+                ?TDEF(should_map_with_limit_and_skip),
+                ?TDEF(should_map_with_limit_and_skip_reverse),
+                ?TDEF(should_map_with_include_docs),
+                ?TDEF(should_map_with_include_docs_reverse),
+                ?TDEF(should_map_with_startkey_with_key_array),
+                ?TDEF(should_map_with_startkey_and_endkey_with_key_array),
+                ?TDEF(should_map_empty_views),
+                ?TDEF(should_map_duplicate_keys),
+                ?TDEF(should_map_with_doc_emit),
+                ?TDEF(should_map_update_is_false),
+                ?TDEF(should_map_update_is_lazy)
+                % fun should_give_ext_size_seq_indexed_test/1
+            ]
+        }
+    }.
+
+
+should_map() ->
+    Result = run_query(<<"baz">>, #{}),
+    Expect = {ok, [
+        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_startkey() ->
+    Result = run_query(<<"baz">>, #{start_key => 4}),
+    Expect = {ok, [
+        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_endkey() ->
+    Result = run_query(<<"baz">>, #{end_key => 5}),
+    Expect = {ok, [
+        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_endkey_not_inclusive() ->
+    Result = run_query(<<"baz">>, #{
+        end_key => 5,
+        inclusive_end => false
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+        {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_reverse_and_limit() ->
+    Result = run_query(<<"baz">>, #{
+        direction => rev,
+        limit => 3
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_range_reverse() ->
+    Result = run_query(<<"baz">>, #{
+        direction => rev,
+        start_key => 5,
+        end_key => 3,
+        inclusive_end => true
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+        {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+        {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_limit_and_skip() ->
+    Result = run_query(<<"baz">>, #{
+        start_key => 2,
+        limit => 3,
+        skip => 3
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_limit_and_skip_reverse() ->
+    Result = run_query(<<"baz">>, #{
+        start_key => 10,
+        limit => 3,
+        skip => 3,
+        direction => rev
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+        {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+        {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_include_docs() ->
+    Result = run_query(<<"baz">>, #{
+        start_key => 8,
+        end_key => 8,
+        include_docs => true
+    }),
+    Doc = {[
+        {<<"_id">>, <<"8">>},
+        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+        {<<"val">>, 8}
+    ]},
+    Expect = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_include_docs_reverse() ->
+    Result = run_query(<<"baz">>, #{
+        start_key => 8,
+        end_key => 8,
+        include_docs => true,
+        direction => rev
+    }),
+    Doc = {[
+        {<<"_id">>, <<"8">>},
+        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+        {<<"val">>, 8}
+    ]},
+    Expect = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_startkey_with_key_array() ->
+    Rows = [
+        {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+        {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, [<<"9">>, 9]}, {value, 9}]}
+    ],
+
+    Result = run_query(<<"boom">>, #{
+        start_key => [<<"4">>]
+    }),
+
+    ?assertEqual({ok, Rows}, Result),
+
+    ResultRev = run_query(<<"boom">>, #{
+        start_key => [<<"9">>, 9],
+        direction => rev,
+        limit => 6
+    }),
+
+    ?assertEqual({ok, lists:reverse(Rows)}, ResultRev).
+
+
+should_map_with_startkey_and_endkey_with_key_array() ->
+    Rows1 = [
+        {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+        {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]}
+    ],
+
+    Rows2 = [
+        {row, [{id, <<"4">>}, {key, [<<"4">>, 4]}, {value, 4}]},
+        {row, [{id, <<"5">>}, {key, [<<"5">>, 5]}, {value, 5}]},
+        {row, [{id, <<"6">>}, {key, [<<"6">>, 6]}, {value, 6}]},
+        {row, [{id, <<"7">>}, {key, [<<"7">>, 7]}, {value, 7}]},
+        {row, [{id, <<"8">>}, {key, [<<"8">>, 8]}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, [<<"9">>, 9]}, {value, 9}]}
+    ],
+
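+    % In view key collation arrays sort after numbers and strings, so
+    % [<<"8">>, []] is greater than [<<"8">>, 8] and the ranges below
+    % take in every row whose key starts with <<"8">>.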
+    Result = run_query(<<"boom">>, #{
+        start_key => [<<"4">>],
+        end_key => [<<"8">>, []]
+    }),
+
+    ?assertEqual({ok, Rows1}, Result),
+
+    ResultRev = run_query(<<"boom">>, #{
+        start_key => [<<"8">>, []],
+        end_key => [<<"4">>],
+        direction => rev
+    }),
+
+    ?assertEqual({ok, lists:reverse(Rows1)}, ResultRev),
+
+    ResultRev2 = run_query(<<"boom">>, #{
+        start_key => [<<"9">>, 9],
+        end_key => [<<"4">>],
+        direction => rev,
+        inclusive_end => false
+    }),
+
+    % Here, [<<"4">>] collates before [<<"4">>, 4], so even with
+    % inclusive_end=false no row is excluded and we expect rows 9-4
+    ?assertEqual({ok, lists:reverse(Rows2)}, ResultRev2),
+
+    ResultRev3 = run_query(<<"boom">>, #{
+        start_key => [<<"9">>, 9],
+        end_key => [<<"4">>, 4],
+        direction => rev,
+        inclusive_end => false
+    }),
+
+    % Here, specifying [<<"4">>, 4] as the end key excludes that
+    % exact row, which leaves rows 9-5
+    ?assertEqual({ok, lists:reverse(lists:nthtail(1, Rows2))}, ResultRev3).
+
+
+should_map_empty_views() ->
+    Result = run_query(<<"bing">>, #{}),
+    Expect = {ok, []},
+    ?assertEqual(Expect, Result).
+
+
+should_map_with_doc_emit() ->
+    Result = run_query(<<"doc_emit">>, #{
+        start_key => 8,
+        limit => 1
+    }),
+    Doc = {[
+        {<<"_id">>, <<"8">>},
+        {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+        {<<"val">>, 8}
+    ]},
+    Expect = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, Doc}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_duplicate_keys() ->
+    Result = run_query(<<"duplicate_keys">>, #{
+        limit => 6
+    }),
+    Expect = {ok, [
+        {row, [{id, <<"1">>}, {key, <<"1">>}, {value, 1}]},
+        {row, [{id, <<"1">>}, {key, <<"1">>}, {value, 2}]},
+        {row, [{id, <<"10">>}, {key, <<"10">>}, {value, 10}]},
+        {row, [{id, <<"10">>}, {key, <<"10">>}, {value, 11}]},
+        {row, [{id, <<"2">>}, {key, <<"2">>}, {value, 2}]},
+        {row, [{id, <<"2">>}, {key, <<"2">>}, {value, 3}]}
+    ]},
+    ?assertEqual(Expect, Result).
+
+
+should_map_update_is_false() ->
+    Expect = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+    ]},
+
+    Expect1 = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]},
+        {row, [{id, <<"11">>}, {key, 11}, {value, 11}]}
+    ]},
+
+    Idx = <<"baz">>,
+    DbName = ?tempdb(),
+
+    {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+    DDoc = create_ddoc(),
+    Docs = make_docs(10),
+    fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+    Args1 = #{
+        start_key => 8
+    },
+
+    Result1 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+        [], Args1),
+    ?assertEqual(Expect, Result1),
+
+    Doc = doc(11),
+    fabric2_db:update_doc(Db, Doc),
+
+    Args2 = #{
+        start_key => 8,
+        update => false
+    },
+
+    Result2 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+        [], Args2),
+    ?assertEqual(Expect, Result2),
+
+    Result3 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+        [], Args1),
+    ?assertEqual(Expect1, Result3).
+
+
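+% update=lazy should return no rows immediately while scheduling a
+% background index build; once the couch_views job completes, the
+% rows are visible even with update=false.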
+should_map_update_is_lazy() ->
+    Expect = {ok, [
+        {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+        {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+        {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+    ]},
+
+    Idx = <<"baz">>,
+    DbName = ?tempdb(),
+
+    {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+
+    DDoc = create_ddoc(),
+    Docs = make_docs(10),
+
+    fabric2_db:update_docs(Db, [DDoc | Docs]),
+
+    Args1 = #{
+        start_key => 8,
+        update => lazy
+    },
+
+    Result1 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+        [], Args1),
+    ?assertEqual({ok, []}, Result1),
+
+    {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+    JobId = couch_views_jobs:job_id(Db, Mrst),
+    UpdateSeq = fabric2_db:get_update_seq(Db),
+    ok = couch_views_jobs:wait_for_job(JobId, UpdateSeq),
+
+    Args2 = #{
+        start_key => 8,
+        update => false
+    },
+
+    Result2 = couch_views:query(Db, DDoc, Idx, fun default_cb/2,
+        [], Args2),
+    ?assertEqual(Expect, Result2).
+
+
+% should_give_ext_size_seq_indexed_test(Db) ->
+%     DDoc = couch_doc:from_json_obj({[
+%         {<<"_id">>, <<"_design/seqdoc">>},
+%         {<<"options">>, {[{<<"seq_indexed">>, true}]}},
+%         {<<"views">>, {[
+%                 {<<"view1">>, {[
+%                     {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
+%                 ]}}
+%             ]}
+%         }
+%     ]}),
+%     {ok, _} = couch_db:update_doc(Db, DDoc, []),
+%     {ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
+%     {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
+%     couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]),
+%     {ok, Info} = couch_mrview:get_info(Db1, DDoc),
+%     Size = couch_util:get_nested_json_value({Info}, [sizes, external]),
+%     ok = couch_db:close(Db1),
+%     ?assert(is_number(Size)).
+
+
+run_query(Idx, Args) ->
+    run_query(Idx, Args, false).
+
+
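+% Passing DebugCluster=true runs the query once and then dumps the raw
+% FDB keyspace via fabric2_fdb:debug_cluster/0 before the real query,
+% which helps when a test's expectations don't match the index.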
+run_query(Idx, Args, DebugCluster) ->
+    DbName = ?tempdb(),
+    {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+    DDoc = create_ddoc(),
+    Docs = make_docs(10),
+    fabric2_db:update_docs(Db, [DDoc | Docs]),
+    if not DebugCluster -> ok; true ->
+        couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], #{}),
+        fabric2_fdb:debug_cluster(),
+        ok
+    end,
+    couch_views:query(Db, DDoc, Idx, fun default_cb/2, [], Args).
+
+
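+% Query callback shared by the tests above: rows accumulate in reverse
+% and are flipped on complete; {final, Info} with an empty accumulator
+% returns just the info term.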
+default_cb(complete, Acc) ->
+    {ok, lists:reverse(Acc)};
+default_cb({final, Info}, []) ->
+    {ok, [Info]};
+default_cb({final, _}, Acc) ->
+    {ok, Acc};
+default_cb({meta, _}, Acc) ->
+    {ok, Acc};
+default_cb(ok, ddoc_updated) ->
+    {ok, ddoc_updated};
+default_cb(Row, Acc) ->
+    {ok, [Row | Acc]}.
+
+
+create_ddoc() ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, <<"_design/bar">>},
+        {<<"views">>, {[
+            {<<"baz">>, {[
+                {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+            ]}},
+            {<<"boom">>, {[
+                {<<"map">>, <<
+                    "function(doc) {\n"
+                    "   emit([doc.val.toString(), doc.val], doc.val);\n"
+                    "}"
+                >>}
+            ]}},
+            {<<"bing">>, {[
+                {<<"map">>, <<"function(doc) {}">>}
+            ]}},
+            {<<"doc_emit">>, {[
+                {<<"map">>, <<"function(doc) {emit(doc.val, doc)}">>}
+            ]}},
+            {<<"duplicate_keys">>, {[
+                {<<"map">>, <<
+                    "function(doc) {\n"
+                    "   emit(doc._id, doc.val);\n"
+                    "   emit(doc._id, doc.val + 1);\n"
+                    "}">>}
+            ]}},
+            {<<"zing">>, {[
+                {<<"map">>, <<
+                    "function(doc) {\n"
+                    "  if(doc.foo !== undefined)\n"
+                    "    emit(doc.foo, 0);\n"
+                    "}"
+                >>}
+            ]}}
+        ]}}
+    ]}).
+
+
+make_docs(Count) ->
+    [doc(I) || I <- lists:seq(1, Count)].
+
+
+doc(Id) ->
+    couch_doc:from_json_obj({[
+        {<<"_id">>, list_to_binary(integer_to_list(Id))},
+        {<<"val">>, Id}
+    ]}).
diff --git a/src/fabric/include/fabric2.hrl b/src/fabric/include/fabric2.hrl
index de1d3d1..6392d12 100644
--- a/src/fabric/include/fabric2.hrl
+++ b/src/fabric/include/fabric2.hrl
@@ -46,6 +46,7 @@
 -define(DB_DOCS, 21).
 -define(DB_LOCAL_DOCS, 22).
 -define(DB_ATTS, 23).
+-define(DB_VIEWS, 24).
 
 
 % Versions
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index c28c78c..363972b 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -178,21 +178,33 @@ defmodule BasicsTest do
 
     assert Couch.get("/#{db_name}").body["doc_count"] == 8
 
+    # Disabling until we figure out reduce functions
+    # # Test reduce function
+    # resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
+    # assert hd(resp.body["rows"])["value"] == 33
+
     # Test reduce function
-    resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
-    assert hd(resp.body["rows"])["value"] == 33
+    resp = Couch.get("/#{db_name}/_design/bar/_view/baz", query: %{:reduce => false})
+    assert resp.body["total_rows"] == 3
 
     # Delete doc and test for updated view results
     doc0 = Couch.get("/#{db_name}/0").body
     assert Couch.delete("/#{db_name}/0?rev=#{doc0["_rev"]}").body["ok"]
 
-    retry_until(fn ->
-      Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
-    end)
+    # Disabling until we figure out reduce functions
+    # retry_until(fn ->
+    #  Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
+    # end)
+
+    resp = Couch.get("/#{db_name}/_design/bar/_view/baz", query: %{:reduce => false})
+    assert resp.body["total_rows"] == 2
 
     assert Couch.get("/#{db_name}").body["doc_count"] == 7
     assert Couch.get("/#{db_name}/0").status_code == 404
-    refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
+
+    # No longer true. Old revisions are not stored after
+    # an update.
+    # refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
   end
 
   @tag :with_db
diff --git a/test/elixir/test/map_test.exs b/test/elixir/test/map_test.exs
new file mode 100644
index 0000000..04361ba
--- /dev/null
+++ b/test/elixir/test/map_test.exs
@@ -0,0 +1,450 @@
+defmodule ViewMapTest do
+  use CouchTestCase
+
+  @moduledoc """
+  Test Map functionality for views
+  """
+  def get_ids(resp) do
+    %{:body => %{"rows" => rows}} = resp
+    Enum.map(rows, fn row -> row["id"] end)
+  end
+
+  def get_keys(resp) do
+    %{:body => %{"rows" => rows}} = resp
+    Enum.map(rows, fn row -> row["key"] end)
+  end
+
+  defp create_map_docs(db_name) do
+    docs =
+      for i <- 1..10 do
+        group =
+          if rem(i, 3) == 0 do
+            "one"
+          else
+            "two"
+          end
+
+        %{
+          :_id => "doc-id-#{i}",
+          :value => i,
+          :some => "field",
+          :group => group
+        }
+      end
+
+    resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+    assert resp.status_code == 201
+  end
+
+  setup do
+    db_name = random_db_name()
+    {:ok, _} = create_db(db_name)
+    on_exit(fn -> delete_db(db_name) end)
+
+    create_map_docs(db_name)
+
+    map_fun1 = """
+      function(doc) {
+        if (doc.some) {
+            emit(doc.value , doc.value);
+        }
+
+        if (doc._id.indexOf("_design") > -1) {
+            emit(0, "ddoc")
+        }
+      }
+    """
+
+    map_fun2 = """
+      function(doc) {
+        if (doc.group) {
+          emit([doc.some, doc.group], 1);
+        }
+      }
+    """
+
+    map_fun3 = """
+      function(doc) {
+        if (doc.group) {
+            emit(doc.group, 1);
+        }
+      }
+    """
+
+    body = %{
+      :w => 3,
+      :docs => [
+        %{
+          _id: "_design/map",
+          views: %{
+            some: %{map: map_fun1},
+            map_some: %{map: map_fun2},
+            map_group: %{map: map_fun3}
+          }
+        },
+        %{
+          _id: "_design/include_ddocs",
+          views: %{some: %{map: map_fun1}},
+          options: %{include_design: true}
+        }
+      ]
+    }
+
+    resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
+    Enum.each(resp.body, &assert(&1["ok"]))
+
+    {:ok, [db_name: db_name]}
+  end
+
+  def get_reduce_result(resp) do
+    %{:body => %{"rows" => rows}} = resp
+    rows
+  end
+
+  test "query returns docs", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/map/_view/some"
+    resp = Couch.get(url)
+    assert resp.status_code == 200
+
+    ids = get_ids(resp)
+
+    assert ids == [
+             "doc-id-1",
+             "doc-id-2",
+             "doc-id-3",
+             "doc-id-4",
+             "doc-id-5",
+             "doc-id-6",
+             "doc-id-7",
+             "doc-id-8",
+             "doc-id-9",
+             "doc-id-10"
+           ]
+
+    url = "/#{db_name}/_design/map/_view/map_some"
+    resp = Couch.get(url)
+    assert resp.status_code == 200
+
+    ids = get_ids(resp)
+
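+    # map_some keys are [doc.some, doc.group]: group "one" collates
+    # before "two", and within a group rows with equal keys sort by
+    # doc id as a string ("doc-id-10" before "doc-id-2").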
+    assert ids == [
+             "doc-id-3",
+             "doc-id-6",
+             "doc-id-9",
+             "doc-id-1",
+             "doc-id-10",
+             "doc-id-2",
+             "doc-id-4",
+             "doc-id-5",
+             "doc-id-7",
+             "doc-id-8"
+           ]
+  end
+
+  test "updated docs rebuilds index", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/map/_view/some"
+    resp = Couch.get(url)
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+
+    assert ids == [
+             "doc-id-1",
+             "doc-id-2",
+             "doc-id-3",
+             "doc-id-4",
+             "doc-id-5",
+             "doc-id-6",
+             "doc-id-7",
+             "doc-id-8",
+             "doc-id-9",
+             "doc-id-10"
+           ]
+
+    update_doc_value(db_name, "doc-id-5", 0)
+    update_doc_value(db_name, "doc-id-6", 100)
+
+    resp = Couch.get("/#{db_name}/doc-id-3")
+    doc3 = convert(resp.body)
+    resp = Couch.delete("/#{db_name}/#{doc3["_id"]}", query: %{rev: doc3["_rev"]})
+    assert resp.status_code == 200
+
+    resp = Couch.get("/#{db_name}/doc-id-4")
+    doc4 = convert(resp.body)
+    doc4 = Map.delete(doc4, "some")
+    resp = Couch.put("/#{db_name}/#{doc4["_id"]}", body: doc4)
+    assert resp.status_code == 201
+
+    resp = Couch.get("/#{db_name}/doc-id-1")
+    doc1 = convert(resp.body)
+    doc1 = Map.put(doc1, "another", "value")
+    resp = Couch.put("/#{db_name}/#{doc1["_id"]}", body: doc1)
+    assert resp.status_code == 201
+
+    url = "/#{db_name}/_design/map/_view/some"
+    resp = Couch.get(url)
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+
+    assert ids == [
+             "doc-id-5",
+             "doc-id-1",
+             "doc-id-2",
+             "doc-id-7",
+             "doc-id-8",
+             "doc-id-9",
+             "doc-id-10",
+             "doc-id-6"
+           ]
+  end
+
+  test "can index design docs", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/include_ddocs/_view/some"
+    resp = Couch.get(url, query: %{limit: 3})
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+
+    assert ids == ["_design/include_ddocs", "_design/map", "doc-id-1"]
+  end
+
+  test "can use key in query string", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/map/_view/map_group"
+    resp = Couch.get(url, query: %{limit: 3, key: "\"one\""})
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+    assert ids == ["doc-id-3", "doc-id-6", "doc-id-9"]
+
+    resp =
+      Couch.get(url,
+        query: %{
+          limit: 3,
+          key: "\"one\"",
+          descending: true
+        }
+      )
+
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+    assert ids == ["doc-id-9", "doc-id-6", "doc-id-3"]
+  end
+
+  test "can use keys in query string", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/map/_view/some"
+    resp = Couch.post(url, body: %{keys: [6, 3, 9]})
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+    assert ids == ["doc-id-6", "doc-id-3", "doc-id-9"]
+
+    # should ignore descending = true
+    resp = Couch.post(url, body: %{keys: [6, 3, 9], descending: true})
+    assert resp.status_code == 200
+    ids = get_ids(resp)
+    assert ids == ["doc-id-6", "doc-id-3", "doc-id-9"]
+  end
+
+  test "inclusive = false", context do
+    db_name = context[:db_name]
+
+    docs = [
+      %{key: "key1"},
+      %{key: "key2"},
+      %{key: "key3"},
+      %{key: "key4"},
+      %{key: "key4"},
+      %{key: "key5"},
+      %{
+        _id: "_design/inclusive",
+        views: %{
+          by_key: %{
+            map: """
+                function (doc) {
+                    if (doc.key) {
+                        emit(doc.key, doc);
+                    }
+                }
+            """
+          }
+        }
+      }
+    ]
+
+    resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+    assert resp.status_code == 201
+    url = "/#{db_name}/_design/inclusive/_view/by_key"
+
+    query = %{
+      endkey: "\"key4\"",
+      inclusive_end: false
+    }
+
+    resp = Couch.get(url, query: query)
+    assert resp.status_code == 200
+    keys = get_keys(resp)
+    assert keys == ["key1", "key2", "key3"]
+
+    query = %{
+      startkey: "\"key3\"",
+      endkey: "\"key4\"",
+      inclusive_end: false
+    }
+
+    resp = Couch.get(url, query: query)
+    assert resp.status_code == 200
+    keys = get_keys(resp)
+    assert keys == ["key3"]
+
+    query = %{
+      startkey: "\"key4\"",
+      endkey: "\"key1\"",
+      inclusive_end: false,
+      descending: true
+    }
+
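+    # With descending=true the endkey is the low end of the range, so
+    # inclusive_end=false drops "key1" while both "key4" rows stay in.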
+    resp = Couch.get(url, query: query)
+    assert resp.status_code == 200
+    keys = get_keys(resp)
+    assert keys == ["key4", "key4", "key3", "key2"]
+  end
+
+  test "supports linked documents", context do
+    db_name = context[:db_name]
+
+    docs = [
+      %{_id: "mydoc", foo: "bar"},
+      %{_id: "join-doc", doc_id: "mydoc"},
+      %{
+        _id: "_design/join",
+        views: %{
+          by_doc_id: %{
+            map: """
+                function (doc) {
+                    if (doc.doc_id) {
+                        emit(doc._id, {_id: doc.doc_id});
+                    }
+                }
+            """
+          }
+        }
+      }
+    ]
+
+    resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+    assert resp.status_code == 201
+
+    url = "/#{db_name}/_design/join/_view/by_doc_id"
+    resp = Couch.get(url)
+    assert resp.status_code == 200
+    %{:body => %{"rows" => [row]}} = resp
+
+    assert row == %{
+             "id" => "join-doc",
+             "key" => "join-doc",
+             "value" => %{"_id" => "mydoc"}
+           }
+
+    url = "/#{db_name}/_design/join/_view/by_doc_id"
+    resp = Couch.get(url, query: %{include_docs: true})
+    assert resp.status_code == 200
+    %{:body => %{"rows" => [doc]}} = resp
+
+    assert doc["id"] == "join-doc"
+    assert doc["doc"]["_id"] == "mydoc"
+  end
+
+  test "bad range returns error", context do
+    db_name = context[:db_name]
+
+    url = "/#{db_name}/_design/map/_view/some"
+    resp = Couch.get(url, query: %{startkey: "5", endkey: "4"})
+    assert resp.status_code == 400
+    %{:body => %{"error" => error}} = resp
+    assert error == "query_parse_error"
+  end
+
+  test "multiple emits in correct value order", context do
+    db_name = context[:db_name]
+
+    docs = [
+      %{_id: "doc1", foo: "foo", bar: "bar"},
+      %{_id: "doc2", foo: "foo", bar: "bar"},
+      %{
+        _id: "_design/emit",
+        views: %{
+          multiple_emit: %{
+            map: """
+                function (doc) {
+                  if (!doc.foo) {
+                    return;
+                  }
+                  emit(doc.foo);
+                  emit(doc.bar);
+                  emit(doc.foo);
+                  emit(doc.bar, 'multiple values!');
+                  emit(doc.bar, 'crayon!');
+                }
+            """
+          }
+        }
+      }
+    ]
+
+    resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs, :w => 3})
+    assert resp.status_code == 201
+
+    url = "/#{db_name}/_design/emit/_view/multiple_emit"
+    resp = Couch.post(url, body: %{keys: ["foo", "bar"]})
+    assert resp.status_code == 200
+    %{:body => %{"rows" => rows}} = resp
+
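+    # For a given key, rows group by doc id, and duplicate emits from
+    # the same doc sort by value: null collates before strings, and
+    # "crayon!" before "multiple values!".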
+    assert Enum.at(rows, 0)["key"] == "foo"
+    assert Enum.at(rows, 0)["id"] == "doc1"
+    assert Enum.at(rows, 1)["key"] == "foo"
+    assert Enum.at(rows, 1)["id"] == "doc1"
+
+    assert Enum.at(rows, 2)["key"] == "foo"
+    assert Enum.at(rows, 2)["id"] == "doc2"
+    assert Enum.at(rows, 3)["key"] == "foo"
+    assert Enum.at(rows, 3)["id"] == "doc2"
+
+    assert Enum.at(rows, 4)["key"] == "bar"
+    assert Enum.at(rows, 4)["id"] == "doc1"
+    assert Enum.at(rows, 4)["value"] == :null
+    assert Enum.at(rows, 5)["key"] == "bar"
+    assert Enum.at(rows, 5)["id"] == "doc1"
+    assert Enum.at(rows, 5)["value"] == "crayon!"
+    assert Enum.at(rows, 6)["key"] == "bar"
+    assert Enum.at(rows, 6)["id"] == "doc1"
+    assert Enum.at(rows, 6)["value"] == "multiple values!"
+
+    assert Enum.at(rows, 7)["key"] == "bar"
+    assert Enum.at(rows, 7)["id"] == "doc2"
+    assert Enum.at(rows, 7)["value"] == :null
+    assert Enum.at(rows, 8)["key"] == "bar"
+    assert Enum.at(rows, 8)["id"] == "doc2"
+    assert Enum.at(rows, 8)["value"] == "crayon!"
+    assert Enum.at(rows, 9)["key"] == "bar"
+    assert Enum.at(rows, 9)["id"] == "doc2"
+    assert Enum.at(rows, 9)["value"] == "multiple values!"
+  end
+
+  def update_doc_value(db_name, id, value) do
+    resp = Couch.get("/#{db_name}/#{id}")
+    doc = convert(resp.body)
+    doc = Map.put(doc, "value", value)
+    resp = Couch.put("/#{db_name}/#{id}", body: doc)
+    assert resp.status_code == 201
+  end
+
+  def convert(value) do
+    :jiffy.decode(:jiffy.encode(value), [:return_maps])
+  end
+end
diff --git a/test/elixir/test/view_collation_test.exs b/test/elixir/test/view_collation_test.exs
index 7563ba4..bf30031 100644
--- a/test/elixir/test/view_collation_test.exs
+++ b/test/elixir/test/view_collation_test.exs
@@ -70,34 +70,28 @@ defmodule ViewCollationTest do
   end
 
   test "ascending collation order", context do
-    retry_until(fn ->
-      resp = Couch.get(url(context))
-      pairs = Enum.zip(resp.body["rows"], @values)
+    resp = Couch.get(url(context))
+    pairs = Enum.zip(resp.body["rows"], @values)
 
-      Enum.each(pairs, fn {row, value} ->
-        assert row["key"] == convert(value)
-      end)
+    Enum.each(pairs, fn {row, value} ->
+      assert row["key"] == convert(value)
     end)
   end
 
   test "descending collation order", context do
-    retry_until(fn ->
-      resp = Couch.get(url(context), query: %{"descending" => "true"})
-      pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
+    resp = Couch.get(url(context), query: %{"descending" => "true"})
+    pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
 
-      Enum.each(pairs, fn {row, value} ->
-        assert row["key"] == convert(value)
-      end)
+    Enum.each(pairs, fn {row, value} ->
+      assert row["key"] == convert(value)
     end)
   end
 
   test "key query option", context do
     Enum.each(@values, fn value ->
-      retry_until(fn ->
-        resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
-        assert length(resp.body["rows"]) == 1
-        assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
-      end)
+      resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
+      assert length(resp.body["rows"]) == 1
+      assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
     end)
   end
 


[couchdb] 14/34: Database config changes should bump the db version

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 5e12e06a46ff2d8c6ff0a3c39f52c527f21519e9
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Mon Jun 10 14:36:55 2019 -0500

    Database config changes should bump the db version
    
    This was a remnant from before we used a version per database.
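
    A minimal sketch of the idea (illustrative only; the ?DB_VERSION
    key and the fabric2_util:uuid/0 helper are assumptions, not the
    actual fabric2_fdb code):

        bump_db_version(#{tx := Tx, db_prefix := DbPrefix}) ->
            %% Rewrite this database's own version key so that cached
            %% handles for this db get invalidated, instead of bumping
            %% the cluster-wide metadata version for every database.
            Key = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
            erlfdb:set(Tx, Key, fabric2_util:uuid()),
            ok.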
---
 src/fabric/src/fabric2_fdb.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 4f08d97..d179387 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -338,7 +338,7 @@ set_config(#{} = Db, ConfigKey, ConfigVal) ->
 
     Key = erlfdb_tuple:pack({?DB_CONFIG, ConfigKey}, DbPrefix),
     erlfdb:set(Tx, Key, ConfigVal),
-    bump_metadata_version(Tx).
+    bump_db_version(Db).
 
 
 get_stat(#{} = Db, StatKey) ->


[couchdb] 33/34: Fix default key ranges for fold_range

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a545b49991906e8aef548c9e9c677513a0c16623
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 25 12:37:05 2019 -0500

    Fix default key ranges for fold_range
    
    If a start or end key is not specified, we still need to scope the range
    read to the given `RangePrefix`.
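
    For context: the old defaults were packed as tuple elements, which
    prepends FDB's byte-string type tag, so they only bounded keys whose
    first element was itself a byte string. A sketch of the comparison
    (illustrative values, not the actual code):

        %% erlfdb_tuple:pack({<<>>}, Prefix)  -> <<Prefix/binary, 16#01, 16#00>>
        %% erlfdb_tuple:pack({42}, Prefix)    -> <<Prefix/binary, 16#15, 42>>
        %% 16#15 > 16#01, so an integer key such as 42 sorts after the
        %% old packed {<<255>>} end bound and silently fell out of the
        %% range. Raw prefix bounds cover the whole subspace instead:
        StartKey = <<RangePrefix/binary, 16#00>>,  % before any packed tuple
        EndKey   = <<RangePrefix/binary, 16#FF>>.  % after any packed tuple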
---
 src/fabric/src/fabric2_fdb.erl | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 670ce8b..71cb68f 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -985,18 +985,19 @@ get_fold_opts(RangePrefix, Options) ->
 
     % Set the maximum bounds for the start and endkey
     StartKey2 = case StartKey1 of
-        undefined -> <<>>;
-        SK2 -> SK2
+        undefined ->
+            <<RangePrefix/binary, 16#00>>;
+        SK2 ->
+            erlfdb_tuple:pack({SK2}, RangePrefix)
     end,
 
     EndKey2 = case EndKey1 of
-        undefined -> <<255>>;
-        EK2 -> EK2
+        undefined ->
+            <<RangePrefix/binary, 16#FF>>;
+        EK2 ->
+            erlfdb_tuple:pack({EK2}, RangePrefix)
     end,
 
-    StartKey3 = erlfdb_tuple:pack({StartKey2}, RangePrefix),
-    EndKey3 = erlfdb_tuple:pack({EndKey2}, RangePrefix),
-
     % FoundationDB ranges are applied as SK <= key < EK
     % By default, CouchDB is SK <= key <= EK with the
     % optional inclusive_end=false option changing that
@@ -1006,20 +1007,20 @@ get_fold_opts(RangePrefix, Options) ->
     % Thus we have this wonderful bit of logic to account
     % for all of those combinations.
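     % For example: a forward scan with inclusive_end=true and EndKey=K
     % must hand FoundationDB first_greater_than(K) so that K itself
     % stays inside the half-open SK <= key < EK window.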
 
-    StartKey4 = case {Reverse, InclusiveEnd} of
+    StartKey3 = case {Reverse, InclusiveEnd} of
         {true, false} ->
-            erlfdb_key:first_greater_than(StartKey3);
+            erlfdb_key:first_greater_than(StartKey2);
         _ ->
-            StartKey3
+            StartKey2
     end,
 
-    EndKey4 = case {Reverse, InclusiveEnd} of
+    EndKey3 = case {Reverse, InclusiveEnd} of
         {false, true} when EndKey0 /= undefined ->
-            erlfdb_key:first_greater_than(EndKey3);
+            erlfdb_key:first_greater_than(EndKey2);
         {true, _} ->
-            erlfdb_key:first_greater_than(EndKey3);
+            erlfdb_key:first_greater_than(EndKey2);
         _ ->
-            EndKey3
+            EndKey2
     end,
 
     Skip = case fabric2_util:get_value(skip, Options) of
@@ -1053,7 +1054,7 @@ get_fold_opts(RangePrefix, Options) ->
             ++ StreamingMode
             ++ Snapshot,
 
-    {StartKey4, EndKey4, Skip, OutOpts}.
+    {StartKey3, EndKey3, Skip, OutOpts}.
 
 
 fold_range_cb(KV, {skip, 0, Callback, Acc}) ->


[couchdb] 02/34: Disable eunit test suite in fabric

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9178462f7548a5f183fb05a6db7562b66eafe216
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:09:50 2019 -0500

    Disable eunit test suite in fabric
    
    Most of these tests are for quorum and clustered response handling, which
    will no longer exist with FoundationDB. Eventually we'll want to go
    through these and pick out anything that is still applicable and ensure
    that we re-add them to the new test suite.
---
 src/fabric/src/fabric.erl                        | 100 +--
 src/fabric/src/fabric_db_create.erl              |  60 +-
 src/fabric/src/fabric_db_info.erl                |  62 +-
 src/fabric/src/fabric_doc_open.erl               | 821 ++++++++++----------
 src/fabric/src/fabric_doc_open_revs.erl          | 932 +++++++++++------------
 src/fabric/src/fabric_doc_purge.erl              | 692 ++++++++---------
 src/fabric/src/fabric_doc_update.erl             | 282 +++----
 src/fabric/src/fabric_rpc.erl                    |  38 +-
 src/fabric/src/fabric_streams.erl                | 157 ++--
 src/fabric/src/fabric_util.erl                   |  48 +-
 src/fabric/src/fabric_view.erl                   | 188 ++---
 src/fabric/src/fabric_view_changes.erl           | 362 ++++-----
 src/fabric/test/eunit/fabric_rpc_purge_tests.erl | 307 --------
 13 files changed, 1872 insertions(+), 2177 deletions(-)

diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 6d04184..a1f74a8 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -658,53 +658,53 @@ set_namespace(NS, #mrargs{extra = Extra} = Args) ->
     Args#mrargs{extra = [{namespace, NS} | Extra]}.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-update_doc_test_() ->
-    {
-        "Update doc tests", {
-            setup, fun setup/0, fun teardown/1,
-            fun(Ctx) -> [
-                should_throw_conflict(Ctx)
-            ] end
-        }
-    }.
-
-should_throw_conflict(Doc) ->
-    ?_test(begin
-        ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
-    end).
-
-
-setup() ->
-    Doc = #doc{
-        id = <<"test_doc">>,
-        revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
-            159,113>>]},
-        body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
-        atts = [], deleted = false, meta = []
-    },
-    ok = application:ensure_started(config),
-    ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
-    ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
-    ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
-    ok = meck:expect(rexi_utils, recv,
-        fun(_, _, _, _, _, _) ->
-            {ok, {error, [{Doc, conflict}]}}
-        end),
-    ok = meck:expect(couch_util, reorder_results,
-        fun(_, [{_, Res}]) ->
-            [Res]
-        end),
-    ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-    ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-    Doc.
-
-
-teardown(_) ->
-    meck:unload(),
-    ok = application:stop(config).
-
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% update_doc_test_() ->
+%%     {
+%%         "Update doc tests", {
+%%             setup, fun setup/0, fun teardown/1,
+%%             fun(Ctx) -> [
+%%                 should_throw_conflict(Ctx)
+%%             ] end
+%%         }
+%%     }.
+%%
+%% should_throw_conflict(Doc) ->
+%%     ?_test(begin
+%%         ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
+%%     end).
+%%
+%%
+%% setup() ->
+%%     Doc = #doc{
+%%         id = <<"test_doc">>,
+%%         revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
+%%             159,113>>]},
+%%         body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
+%%         atts = [], deleted = false, meta = []
+%%     },
+%%     ok = application:ensure_started(config),
+%%     ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
+%%     ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
+%%     ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
+%%     ok = meck:expect(rexi_utils, recv,
+%%         fun(_, _, _, _, _, _) ->
+%%             {ok, {error, [{Doc, conflict}]}}
+%%         end),
+%%     ok = meck:expect(couch_util, reorder_results,
+%%         fun(_, [{_, Res}]) ->
+%%             [Res]
+%%         end),
+%%     ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%%     ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%%     Doc.
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload(),
+%%     ok = application:stop(config).
+%%
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index 2edc6dc..81f1ecb 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -185,33 +185,33 @@ make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
 
 db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-db_exists_for_existing_db_test() ->
-    start_meck_(),
-    Mock = fun(DbName) when is_binary(DbName) ->
-        [#shard{dbname = DbName, range = [0,100]}]
-    end,
-    ok = meck:expect(mem3, shards, Mock),
-    ?assertEqual(true, db_exists(<<"foobar">>)),
-    ?assertEqual(true, meck:validate(mem3)),
-    stop_meck_().
-
-db_exists_for_missing_db_test() ->
-    start_meck_(),
-    Mock = fun(DbName) ->
-        erlang:error(database_does_not_exist, DbName)
-    end,
-    ok = meck:expect(mem3, shards, Mock),
-    ?assertEqual(false, db_exists(<<"foobar">>)),
-    ?assertEqual(false, meck:validate(mem3)),
-    stop_meck_().
-
-start_meck_() ->
-    ok = meck:new(mem3).
-
-stop_meck_() ->
-    ok = meck:unload(mem3).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% db_exists_for_existing_db_test() ->
+%%     start_meck_(),
+%%     Mock = fun(DbName) when is_binary(DbName) ->
+%%         [#shard{dbname = DbName, range = [0,100]}]
+%%     end,
+%%     ok = meck:expect(mem3, shards, Mock),
+%%     ?assertEqual(true, db_exists(<<"foobar">>)),
+%%     ?assertEqual(true, meck:validate(mem3)),
+%%     stop_meck_().
+%%
+%% db_exists_for_missing_db_test() ->
+%%     start_meck_(),
+%%     Mock = fun(DbName) ->
+%%         erlang:error(database_does_not_exist, DbName)
+%%     end,
+%%     ok = meck:expect(mem3, shards, Mock),
+%%     ?assertEqual(false, db_exists(<<"foobar">>)),
+%%     ?assertEqual(false, meck:validate(mem3)),
+%%     stop_meck_().
+%%
+%% start_meck_() ->
+%%     ok = meck:new(mem3).
+%%
+%% stop_meck_() ->
+%%     ok = meck:unload(mem3).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
index bb7a353..8e4cd9e 100644
--- a/src/fabric/src/fabric_db_info.erl
+++ b/src/fabric/src/fabric_db_info.erl
@@ -155,34 +155,34 @@ get_cluster_info(Shards) ->
     {ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-get_cluster_info_test_() ->
-    {
-        setup,
-        fun setup/0,
-        fun get_cluster_info_test_generator/1
-    }.
-
-
-setup() ->
-    Quorums = [1, 2, 3],
-    Shards = [1, 3, 5, 8, 12, 24],
-    [{N, Q} || N <- Quorums, Q <- Shards].
-
-get_cluster_info_test_generator([]) ->
-    [];
-get_cluster_info_test_generator([{N, Q} | Rest]) ->
-    {generator,
-    fun() ->
-        Nodes = lists:seq(1, 8),
-        Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
-        {ok, Info} = get_cluster_info(Shards),
-        [
-            ?_assertEqual(N, couch_util:get_value(n, Info)),
-            ?_assertEqual(Q, couch_util:get_value(q, Info))
-        ] ++ get_cluster_info_test_generator(Rest)
-    end}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% get_cluster_info_test_() ->
+%%     {
+%%         setup,
+%%         fun setup/0,
+%%         fun get_cluster_info_test_generator/1
+%%     }.
+%%
+%%
+%% setup() ->
+%%     Quorums = [1, 2, 3],
+%%     Shards = [1, 3, 5, 8, 12, 24],
+%%     [{N, Q} || N <- Quorums, Q <- Shards].
+%%
+%% get_cluster_info_test_generator([]) ->
+%%     [];
+%% get_cluster_info_test_generator([{N, Q} | Rest]) ->
+%%     {generator,
+%%     fun() ->
+%%         Nodes = lists:seq(1, 8),
+%%         Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
+%%         {ok, Info} = get_cluster_info(Shards),
+%%         [
+%%             ?_assertEqual(N, couch_util:get_value(n, Info)),
+%%             ?_assertEqual(Q, couch_util:get_value(q, Info))
+%%         ] ++ get_cluster_info_test_generator(Rest)
+%%     end}.
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 743ad8c7..224800c 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -182,414 +182,415 @@ format_reply(Else, _) ->
     Else.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
 
-
-setup() ->
-    meck:new([
-        couch_log,
-        couch_stats,
-        fabric,
-        fabric_util,
-        mem3,
-        rexi,
-        rexi_monitor
-    ], [passthrough]).
-
-
-teardown(_) ->
-    meck:unload().
-
-
-open_doc_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_is_r_met(),
-            t_handle_message_down(),
-            t_handle_message_exit(),
-            t_handle_message_reply(),
-            t_store_node_revs(),
-            t_read_repair(),
-            t_handle_response_quorum_met(),
-            t_get_doc_info()
-        ]
-    }.
-
-
-t_is_r_met() ->
-    ?_test(begin
-        Workers0 = [],
-        Workers1 = [nil],
-        Workers2 = [nil, nil],
-
-        SuccessCases = [
-            {{true, foo}, [fabric_util:kv(foo, 2)], 2},
-            {{true, foo}, [fabric_util:kv(foo, 3)], 2},
-            {{true, foo}, [fabric_util:kv(foo, 1)], 1},
-            {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
-            {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
-            {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
-        ],
-        lists:foreach(fun({Expect, Replies, Q}) ->
-            ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
-        end, SuccessCases),
-
-        WaitForMoreCases = [
-            {[fabric_util:kv(foo, 1)], 2},
-            {[fabric_util:kv(foo, 2)], 3},
-            {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
-        ],
-        lists:foreach(fun({Replies, Q}) ->
-            ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
-        end, WaitForMoreCases),
-
-        FailureCases = [
-            {Workers0, [fabric_util:kv(foo, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 2)], 3}
-        ],
-        lists:foreach(fun({Workers, Replies, Q}) ->
-            ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
-        end, FailureCases)
-    end).
-
-
-t_handle_message_down() ->
-    Node0 = 'foo@localhost',
-    Node1 = 'bar@localhost',
-    Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
-    Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
-    Workers0 = [#shard{node=Node0} || _ <- [a, b]],
-    Worker1 = #shard{node=Node1},
-    Workers1 = Workers0 ++ [Worker1],
-
-    ?_test(begin
-        % Stop when no more workers are left
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Down0, nil, #acc{workers=Workers0})
-        ),
-
-        % Continue when we have more workers
-        ?assertEqual(
-            {ok, #acc{workers=[Worker1]}},
-            handle_message(Down0, nil, #acc{workers=Workers1})
-        ),
-
-        % A second DOWN removes the remaining workers
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Down1, nil, #acc{workers=[Worker1]})
-        )
-    end).
-
-
-t_handle_message_exit() ->
-    Exit = {rexi_EXIT, nil},
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-
-    ?_test(begin
-        % Only removes the specified worker
-        ?assertEqual(
-            {ok, #acc{workers=[Worker1]}},
-            handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
-        ),
-
-        ?assertEqual(
-            {ok, #acc{workers=[Worker0]}},
-            handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
-        ),
-
-        % We bail if it was the last worker
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Exit, Worker0, #acc{workers=[Worker0]})
-        )
-    end).
-
-
-t_handle_message_reply() ->
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-    Worker2 = #shard{ref=erlang:make_ref()},
-    Workers = [Worker0, Worker1, Worker2],
-    Acc0 = #acc{workers=Workers, r=2, replies=[]},
-
-    ?_test(begin
-        meck:expect(rexi, kill_all, fun(_) -> ok end),
-
-        % Test that we continue when we haven't met R yet
-        ?assertMatch(
-            {ok, #acc{
-                workers=[Worker0, Worker1],
-                replies=[{foo, {foo, 1}}]
-            }},
-            handle_message(foo, Worker2, Acc0)
-        ),
-
-        ?assertMatch(
-            {ok, #acc{
-                workers=[Worker0, Worker1],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            }},
-            handle_message(bar, Worker2, Acc0#acc{
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        % Test that we don't get a quorum when R isn't met. q_reply
-        % isn't set and state remains unchanged and {stop, NewAcc}
-        % is returned. Bit subtle on the assertions here.
-
-        ?assertMatch(
-            {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
-            handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
-        ),
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            }},
-            handle_message(bar, Worker0, Acc0#acc{
-                workers=[Worker0],
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        % Check that when R is met we stop with a new state and
-        % a q_reply.
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{foo, {foo, 2}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker1, Acc0#acc{
-                workers=[Worker0, Worker1],
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        ?assertEqual(
-            {stop, #acc{
-                workers=[],
-                r=1,
-                replies=[{foo, {foo, 1}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker0, Acc0#acc{r=1})
-        ),
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker0, Acc0#acc{
-                workers=[Worker0],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            })
-        )
-    end).
-
-
-t_store_node_revs() ->
-    W1 = #shard{node = w1, ref = erlang:make_ref()},
-    W2 = #shard{node = w2, ref = erlang:make_ref()},
-    W3 = #shard{node = w3, ref = erlang:make_ref()},
-    Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    NFM = {not_found, missing},
-
-    InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
-
-    ?_test(begin
-        meck:expect(rexi, kill_all, fun(_) -> ok end),
-
-        % Simple case
-        {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
-
-        % Make sure we only hold the head rev
-        {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
-        ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
-
-        % Make sure we don't capture anything on error
-        {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
-        ?assertEqual([], NodeRevs3),
-
-        % Make sure we accumulate node revs
-        Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
-        {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
-        ?assertEqual(
-                [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-                NodeRevs4
-            ),
-
-        % Make sure rexi_DOWN doesn't modify node_revs
-        Down = {rexi_DOWN, nil, {nil, w1}, nil},
-        {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
-
-        % Make sure rexi_EXIT doesn't modify node_revs
-        Exit = {rexi_EXIT, reason},
-        {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
-
-        % Make sure an error doesn't remove any node revs
-        {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
-
-        % Make sure we have all of our node_revs when meeting
-        % quorum
-        {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
-        {ok, Acc3} = handle_message(Foo2, W2, Acc2),
-        {stop, Acc4} = handle_message(NFM, W3, Acc3),
-        ?assertEqual(
-                [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-                Acc4#acc.node_revs
-            )
-    end).
-
-
-t_read_repair() ->
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    NFM = {not_found, missing},
-
-    ?_test(begin
-        meck:expect(couch_log, notice, fun(_, _) -> ok end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
-        % Test when we have actual doc data to repair
-        meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
-        Acc0 = #acc{
-            dbname = <<"name">>,
-            replies = [fabric_util:kv(Foo1,1)]
-        },
-        ?assertEqual(Foo1, read_repair(Acc0)),
-
-        meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
-        Acc1 = #acc{
-            dbname = <<"name">>,
-            replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
-        },
-        ?assertEqual(Foo2, read_repair(Acc1)),
-
-        % Test when we have nothing but errors
-        Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
-        ?assertEqual(NFM, read_repair(Acc2)),
-
-        Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
-        ?assertEqual(NFM, read_repair(Acc3)),
-
-        Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
-        ?assertEqual(bar, read_repair(Acc4))
-    end).
-
-
-t_handle_response_quorum_met() ->
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
-
-    ?_test(begin
-        meck:expect(couch_log, notice, fun(_, _) -> ok end),
-        meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
-        BasicOkAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,2)],
-            q_reply=Foo1
-        },
-        ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-
-        WithAncestorsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
-            q_reply=Foo2
-        },
-        ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-
-        % This also checks the case where the quorum reply isn't
-        % the most recent revision.
-        DeeperWinsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
-            q_reply=Foo1
-        },
-        ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-
-        % Check that we return the proper doc based on rev id
-        % (i.e., when the pos values are equal)
-        BiggerRevWinsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
-            q_reply=Bar1
-        },
-        ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
-
-        % r_not_met is a proxy to read_repair so we rely on
-        % read_repair_test for those conditions.
-    end).
-
-
-t_get_doc_info() ->
-    ?_test(begin
-        meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-        meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
-        meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-        meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-        meck:expect(mem3, shards, fun(_, _) -> ok end),
-        meck:expect(mem3, n, fun(_) -> 3 end),
-        meck:expect(mem3, quorum, fun(_) -> 2 end),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            {ok, #acc{state = r_not_met}}
-        end),
-        Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assertEqual({error, quorum_not_met}, Rsp1),
-
-        Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({error, quorum_not_met}, Rsp2),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            {ok, #acc{state = r_met, q_reply = not_found}}
-        end),
-        MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assertEqual({not_found, missing}, MissingRsp1),
-        MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({not_found, missing}, MissingRsp2),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            A = #doc_info{},
-            {ok, #acc{state = r_met, q_reply = {ok, A}}}
-        end),
-        {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assert(is_record(Rec1, doc_info)),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            A = #full_doc_info{deleted = true},
-            {ok, #acc{state = r_met, q_reply = {ok, A}}}
-        end),
-        Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({not_found, deleted}, Rsp3),
-        {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
-        ?assert(is_record(Rec2, full_doc_info))
-    end).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup() ->
+%%     meck:new([
+%%         couch_log,
+%%         couch_stats,
+%%         fabric,
+%%         fabric_util,
+%%         mem3,
+%%         rexi,
+%%         rexi_monitor
+%%     ], [passthrough]).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%%
+%% open_doc_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             t_is_r_met(),
+%%             t_handle_message_down(),
+%%             t_handle_message_exit(),
+%%             t_handle_message_reply(),
+%%             t_store_node_revs(),
+%%             t_read_repair(),
+%%             t_handle_response_quorum_met(),
+%%             t_get_doc_info()
+%%         ]
+%%     }.
+%%
+%%
+%% t_is_r_met() ->
+%%     ?_test(begin
+%%         Workers0 = [],
+%%         Workers1 = [nil],
+%%         Workers2 = [nil, nil],
+%%
+%%         SuccessCases = [
+%%             {{true, foo}, [fabric_util:kv(foo, 2)], 2},
+%%             {{true, foo}, [fabric_util:kv(foo, 3)], 2},
+%%             {{true, foo}, [fabric_util:kv(foo, 1)], 1},
+%%             {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
+%%             {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
+%%             {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
+%%         ],
+%%         lists:foreach(fun({Expect, Replies, Q}) ->
+%%             ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
+%%         end, SuccessCases),
+%%
+%%         WaitForMoreCases = [
+%%             {[fabric_util:kv(foo, 1)], 2},
+%%             {[fabric_util:kv(foo, 2)], 3},
+%%             {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
+%%         ],
+%%         lists:foreach(fun({Replies, Q}) ->
+%%             ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
+%%         end, WaitForMoreCases),
+%%
+%%         FailureCases = [
+%%             {Workers0, [fabric_util:kv(foo, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 2)], 3}
+%%         ],
+%%         lists:foreach(fun({Workers, Replies, Q}) ->
+%%             ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
+%%         end, FailureCases)
+%%     end).
+%%
+%%
+%% t_handle_message_down() ->
+%%     Node0 = 'foo@localhost',
+%%     Node1 = 'bar@localhost',
+%%     Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
+%%     Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
+%%     Workers0 = [#shard{node=Node0} || _ <- [a, b]],
+%%     Worker1 = #shard{node=Node1},
+%%     Workers1 = Workers0 ++ [Worker1],
+%%
+%%     ?_test(begin
+%%         % Stop when no more workers are left
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Down0, nil, #acc{workers=Workers0})
+%%         ),
+%%
+%%         % Continue when we have more workers
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker1]}},
+%%             handle_message(Down0, nil, #acc{workers=Workers1})
+%%         ),
+%%
+%%         % A second DOWN removes the remaining workers
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Down1, nil, #acc{workers=[Worker1]})
+%%         )
+%%     end).
+%%
+%%
+%% t_handle_message_exit() ->
+%%     Exit = {rexi_EXIT, nil},
+%%     Worker0 = #shard{ref=erlang:make_ref()},
+%%     Worker1 = #shard{ref=erlang:make_ref()},
+%%
+%%     ?_test(begin
+%%         % Only removes the specified worker
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker1]}},
+%%             handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
+%%         ),
+%%
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker0]}},
+%%             handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
+%%         ),
+%%
+%%         % We bail if it was the last worker
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Exit, Worker0, #acc{workers=[Worker0]})
+%%         )
+%%     end).
+%%
+%%
+%% t_handle_message_reply() ->
+%%     Worker0 = #shard{ref=erlang:make_ref()},
+%%     Worker1 = #shard{ref=erlang:make_ref()},
+%%     Worker2 = #shard{ref=erlang:make_ref()},
+%%     Workers = [Worker0, Worker1, Worker2],
+%%     Acc0 = #acc{workers=Workers, r=2, replies=[]},
+%%
+%%     ?_test(begin
+%%         meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%%         % Test that we continue when we haven't met R yet
+%%         ?assertMatch(
+%%             {ok, #acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{foo, {foo, 1}}]
+%%             }},
+%%             handle_message(foo, Worker2, Acc0)
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {ok, #acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             }},
+%%             handle_message(bar, Worker2, Acc0#acc{
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         % Test that we don't reach quorum when R isn't met: q_reply
+%%         % isn't set, the state remains unchanged, and {stop, NewAcc}
+%%         % is returned. The assertions here are a bit subtle.
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
+%%             handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             }},
+%%             handle_message(bar, Worker0, Acc0#acc{
+%%                 workers=[Worker0],
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         % Check that when R is met we stop with a new state and
+%%         % a q_reply.
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{foo, {foo, 2}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker1, Acc0#acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         ?assertEqual(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 r=1,
+%%                 replies=[{foo, {foo, 1}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker0, Acc0#acc{r=1})
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker0, Acc0#acc{
+%%                 workers=[Worker0],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             })
+%%         )
+%%     end).
+%%
+%%
+%% t_store_node_revs() ->
+%%     W1 = #shard{node = w1, ref = erlang:make_ref()},
+%%     W2 = #shard{node = w2, ref = erlang:make_ref()},
+%%     W3 = #shard{node = w3, ref = erlang:make_ref()},
+%%     Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%%     NFM = {not_found, missing},
+%%
+%%     InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
+%%
+%%     ?_test(begin
+%%         meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%%         % Simple case
+%%         {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
+%%
+%%         % Make sure we only hold the head rev
+%%         {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
+%%         ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
+%%
+%%         % Make sure we don't capture anything on error
+%%         {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
+%%         ?assertEqual([], NodeRevs3),
+%%
+%%         % Make sure we accumulate node revs
+%%         Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
+%%         {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
+%%         ?assertEqual(
+%%                 [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%%                 NodeRevs4
+%%             ),
+%%
+%%         % Make sure rexi_DOWN doesn't modify node_revs
+%%         Down = {rexi_DOWN, nil, {nil, w1}, nil},
+%%         {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
+%%
+%%         % Make sure rexi_EXIT doesn't modify node_revs
+%%         Exit = {rexi_EXIT, reason},
+%%         {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
+%%
+%%         % Make sure an error doesn't remove any node revs
+%%         {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
+%%
+%%         % Make sure we have all of our node_revs when meeting
+%%         % quorum
+%%         {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
+%%         {ok, Acc3} = handle_message(Foo2, W2, Acc2),
+%%         {stop, Acc4} = handle_message(NFM, W3, Acc3),
+%%         ?assertEqual(
+%%                 [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%%                 Acc4#acc.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% t_read_repair() ->
+%%     Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%%     NFM = {not_found, missing},
+%%
+%%     ?_test(begin
+%%         meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%%         % Test when we have actual doc data to repair
+%%         meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
+%%         Acc0 = #acc{
+%%             dbname = <<"name">>,
+%%             replies = [fabric_util:kv(Foo1,1)]
+%%         },
+%%         ?assertEqual(Foo1, read_repair(Acc0)),
+%%
+%%         meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
+%%         Acc1 = #acc{
+%%             dbname = <<"name">>,
+%%             replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
+%%         },
+%%         ?assertEqual(Foo2, read_repair(Acc1)),
+%%
+%%         % Test when we have nothing but errors
+%%         Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
+%%         ?assertEqual(NFM, read_repair(Acc2)),
+%%
+%%         Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
+%%         ?assertEqual(NFM, read_repair(Acc3)),
+%%
+%%         Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
+%%         ?assertEqual(bar, read_repair(Acc4))
+%%     end).
+%%
+%%
+%% t_handle_response_quorum_met() ->
+%%     Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%%     Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
+%%
+%%     ?_test(begin
+%%         meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%         meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%%         BasicOkAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,2)],
+%%             q_reply=Foo1
+%%         },
+%%         ?assertEqual(Foo1, handle_response(BasicOkAcc)),
+%%
+%%         WithAncestorsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
+%%             q_reply=Foo2
+%%         },
+%%         ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
+%%
+%%         % This also checks the case where the quorum reply isn't
+%%         % the most recent revision.
+%%         DeeperWinsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
+%%             q_reply=Foo1
+%%         },
+%%         ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
+%%
+%%         % Check that we return the proper doc based on rev id
+%%         % (i.e., when the pos values are equal)
+%%         BiggerRevWinsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
+%%             q_reply=Bar1
+%%         },
+%%         ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
+%%
+%%         % r_not_met is a proxy to read_repair so we rely on
+%%         % read_repair_test for those conditions.
+%%     end).
+%%
+%%
+%% t_get_doc_info() ->
+%%     ?_test(begin
+%%         meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%         meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
+%%         meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%%         meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%%         meck:expect(mem3, shards, fun(_, _) -> ok end),
+%%         meck:expect(mem3, n, fun(_) -> 3 end),
+%%         meck:expect(mem3, quorum, fun(_) -> 2 end),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             {ok, #acc{state = r_not_met}}
+%%         end),
+%%         Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assertEqual({error, quorum_not_met}, Rsp1),
+%%
+%%         Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({error, quorum_not_met}, Rsp2),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             {ok, #acc{state = r_met, q_reply = not_found}}
+%%         end),
+%%         MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assertEqual({not_found, missing}, MissingRsp1),
+%%         MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({not_found, missing}, MissingRsp2),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             A = #doc_info{},
+%%             {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%%         end),
+%%         {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assert(is_record(Rec1, doc_info)),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             A = #full_doc_info{deleted = true},
+%%             {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%%         end),
+%%         Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({not_found, deleted}, Rsp3),
+%%         {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
+%%         ?assert(is_record(Rec2, full_doc_info))
+%%     end).
+%%
+%% -endif.
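
For readers skimming this hunk: the t_is_r_met cases above pin down the three
outcomes of the quorum check. Below is a minimal sketch of that logic, inferred
from the assertions only (is_r_met_sketch is a hypothetical name, not the
module's implementation); replies are the {Key, {Reply, Count}} pairs that
fabric_util:kv/2 builds:

    %% {true, Reply} once any reply has at least Q votes; wait_for_more
    %% while more than one worker is still outstanding (the FailureCases
    %% show a single straggler is not waited on); no_more_workers otherwise.
    is_r_met_sketch(Workers, Replies, Q) ->
        case [Reply || {_Key, {Reply, Count}} <- Replies, Count >= Q] of
            [QuorumReply | _] ->
                {true, QuorumReply};
            [] when length(Workers) > 1 ->
                wait_for_more;
            [] ->
                no_more_workers
        end.
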
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 8ac3f30..f5b6380 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -313,469 +313,469 @@ collapse_duplicate_revs_int([Reply | Rest]) ->
     [Reply | collapse_duplicate_revs(Rest)].
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup() ->
-    config:start_link([]),
-    meck:new([fabric, couch_stats, couch_log]),
-    meck:new(fabric_util, [passthrough]),
-    meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:expect(couch_log, notice, fun(_, _) -> ok end),
-    meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-
-
-
-teardown(_) ->
-    (catch meck:unload([fabric, couch_stats, couch_log, fabric_util])),
-    config:stop().
-
-
-state0(Revs, Latest) ->
-    #state{
-        worker_count = 3,
-        workers =
-            [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
-        r = 2,
-        revs = Revs,
-        latest = Latest
-    }.
-
-
-revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
-
-
-foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
-foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
-foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
-foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
-bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
-bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
-baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-
-
-
-open_doc_revs_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            check_empty_response_not_quorum(),
-            check_basic_response(),
-            check_finish_quorum(),
-            check_finish_quorum_newer(),
-            check_no_quorum_on_second(),
-            check_done_on_third(),
-            check_specific_revs_first_msg(),
-            check_revs_done_on_agreement(),
-            check_latest_true(),
-            check_ancestor_counted_in_quorum(),
-            check_not_found_counts_for_descendant(),
-            check_worker_error_skipped(),
-            check_quorum_only_counts_valid_responses(),
-            check_empty_list_when_no_workers_reply(),
-            check_node_rev_stored(),
-            check_node_rev_store_head_only(),
-            check_node_rev_store_multiple(),
-            check_node_rev_dont_store_errors(),
-            check_node_rev_store_non_errors(),
-            check_node_rev_store_concatenate(),
-            check_node_rev_store_concantenate_multiple(),
-            check_node_rev_unmodified_on_down_or_exit(),
-            check_not_found_replies_are_removed_when_doc_found(),
-            check_not_found_returned_when_one_of_docs_not_found(),
-            check_not_found_returned_when_doc_not_found(),
-            check_longer_rev_list_returned(),
-            check_longer_rev_list_not_combined(),
-            check_not_found_removed_and_longer_rev_list()
-        ]
-    }.
-
-
-% Tests for revs=all
-
-
-check_empty_response_not_quorum() ->
-    % Simple smoke test that we don't think we're
-    % done with a first empty response
-    W1 = #shard{node='node1'},
-    W2 = #shard{node='node2'},
-    W3 = #shard{node='node3'},
-    ?_assertMatch(
-        {ok, #state{workers = [W2, W3]}},
-        handle_message({ok, []}, W1, state0(all, false))
-    ).
-
-
-check_basic_response() ->
-    % Check that we handle a basic response
-    W1 = #shard{node='node1'},
-    W2 = #shard{node='node2'},
-    W3 = #shard{node='node3'},
-    ?_assertMatch(
-        {ok, #state{reply_count = 1, workers = [W2, W3]}},
-        handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
-    ).
-
-
-check_finish_quorum() ->
-    % Two messages with the same revisions means we're done
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        Expect = {stop, [bar1(), foo1()]},
-        ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
-    end).
-
-
-check_finish_quorum_newer() ->
-    % We count a descendant of a revision for quorum so
-    % foo1 should count for foo2, which means we're finished.
-    % We also validate that read_repair was triggered.
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        Expect = {stop, [bar1(), foo2()]},
-        ok = meck:reset(fabric),
-        ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
-        ok = meck:wait(fabric, update_docs, '_', 5000),
-        ?assertMatch(
-            [{_, {fabric, update_docs, [_, _, _]}, _}],
-            meck:history(fabric)
-        )
-    end).
-
-
-check_no_quorum_on_second() ->
-    % Quorum not yet met for the foo revision so we
-    % would wait for w3
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        ?assertMatch(
-            {ok, #state{workers = [W3]}},
-            handle_message({ok, [bar1()]}, W2, S1)
-        )
-    end).
-
-
-check_done_on_third() ->
-    % The third message of three means we're done no matter
-    % what. Every revision seen in this pattern should be
-    % included.
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
-        Expect = {stop, [bar1(), foo1()]},
-        ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
-    end).
-
-
-% Tests for a specific list of revs
-
-
-check_specific_revs_first_msg() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), false),
-        ?assertMatch(
-            {ok, #state{reply_count = 1, workers = [W2, W3]}},
-            handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
-        )
-    end).
-
-
-check_revs_done_on_agreement() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), false),
-        Msg = {ok, [foo1(), bar1(), bazNF()]},
-        {ok, S1} = handle_message(Msg, W1, S0),
-        Expect = {stop, [bar1(), foo1(), bazNF()]},
-        ?assertEqual(Expect, handle_message(Msg, W2, S1))
-    end).
-
-
-check_latest_true() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo2(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        Expect = {stop, [bar1(), foo2(), bazNF()]},
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1))
-    end).
-
-
-check_ancestor_counted_in_quorum() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-        Expect = {stop, [bar1(), foo2(), bazNF()]},
-
-        % Older first
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
-        % Newer first
-        {ok, S2} = handle_message(Msg2, W2, S0),
-        ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-    end).
-
-
-check_not_found_counts_for_descendant() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        % not_found first
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
-        % not_found second
-        {ok, S2} = handle_message(Msg2, W2, S0),
-        ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-    end).
-
-
-check_worker_error_skipped() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), baz1()]},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_quorum_only_counts_valid_responses() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {rexi_EXIT, reason},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_empty_list_when_no_workers_reply() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {rexi_EXIT, reason},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
-        Expect = {stop, all_workers_died},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_node_rev_stored() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
-        ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_head_only() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
-        ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_multiple() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
-        ?assertEqual(
-                [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
-                S1#state.node_revs
-            )
-    end).
-
-
-check_node_rev_dont_store_errors() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
-        ?assertEqual([], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_non_errors() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
-        ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_concatenate() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
-        ?assertEqual(
-                [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
-                S2#state.node_revs
-            )
-    end).
-
-
-check_node_rev_store_concantenate_multiple() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
-        ?assertEqual(
-                [
-                    {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
-                    {node1, [{1, <<"foo">>}]}
-                ],
-                S2#state.node_revs
-            )
-    end).
-
-
-check_node_rev_unmodified_on_down_or_exit() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
-        {ok, S2} = handle_message(Down, W2, S1),
-        ?assertEqual(
-                [{node1, [{1, <<"foo">>}]}],
-                S2#state.node_revs
-            ),
-
-        Exit = {rexi_EXIT, reason},
-        {ok, S3} = handle_message(Exit, W2, S1),
-        ?assertEqual(
-                [{node1, [{1, <<"foo">>}]}],
-                S3#state.node_revs
-            )
-    end).
-
-
-check_not_found_replies_are_removed_when_doc_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
-        Expect = [bar1(), foo1()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_returned_when_one_of_docs_not_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo1(), foo2(), barNF()]),
-        Expect = [foo1(), foo2(), barNF()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_returned_when_doc_not_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
-        Expect = [barNF(), bazNF(), fooNF()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_longer_rev_list_returned() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed()]),
-        Expect = [foo2()],
-        ?assertEqual(2, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_longer_rev_list_not_combined() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
-        Expect = [bar1(), foo2()],
-        ?assertEqual(3, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_removed_and_longer_rev_list() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
-        Expect = [foo2()],
-        ?assertEqual(3, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-
-replies_to_dict(Replies) ->
-    [reply_to_element(R) || R <- Replies].
-
-reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
-    {_, [Rev | _]} = Revs,
-    {{Rev, Revs}, {Reply, 1}};
-reply_to_element(Reply) ->
-    {Reply, {Reply, 1}}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup() ->
+%%     config:start_link([]),
+%%     meck:new([fabric, couch_stats, couch_log]),
+%%     meck:new(fabric_util, [passthrough]),
+%%     meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%     meck:expect(fabric_util, cleanup, fun(_) -> ok end).
+%%
+%%
+%%
+%% teardown(_) ->
+%%     (catch meck:unload([fabric, couch_stats, couch_log, fabric_util])),
+%%     config:stop().
+%%
+%%
+%% state0(Revs, Latest) ->
+%%     #state{
+%%         worker_count = 3,
+%%         workers =
+%%             [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
+%%         r = 2,
+%%         revs = Revs,
+%%         latest = Latest
+%%     }.
+%%
+%%
+%% revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
+%%
+%%
+%% foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
+%% foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
+%% foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
+%% fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
+%% foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
+%% bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
+%% barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
+%% bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
+%% baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
+%%
+%%
+%%
+%% open_doc_revs_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             check_empty_response_not_quorum(),
+%%             check_basic_response(),
+%%             check_finish_quorum(),
+%%             check_finish_quorum_newer(),
+%%             check_no_quorum_on_second(),
+%%             check_done_on_third(),
+%%             check_specific_revs_first_msg(),
+%%             check_revs_done_on_agreement(),
+%%             check_latest_true(),
+%%             check_ancestor_counted_in_quorum(),
+%%             check_not_found_counts_for_descendant(),
+%%             check_worker_error_skipped(),
+%%             check_quorum_only_counts_valid_responses(),
+%%             check_empty_list_when_no_workers_reply(),
+%%             check_node_rev_stored(),
+%%             check_node_rev_store_head_only(),
+%%             check_node_rev_store_multiple(),
+%%             check_node_rev_dont_store_errors(),
+%%             check_node_rev_store_non_errors(),
+%%             check_node_rev_store_concatenate(),
+%%             check_node_rev_store_concantenate_multiple(),
+%%             check_node_rev_unmodified_on_down_or_exit(),
+%%             check_not_found_replies_are_removed_when_doc_found(),
+%%             check_not_found_returned_when_one_of_docs_not_found(),
+%%             check_not_found_returned_when_doc_not_found(),
+%%             check_longer_rev_list_returned(),
+%%             check_longer_rev_list_not_combined(),
+%%             check_not_found_removed_and_longer_rev_list()
+%%         ]
+%%     }.
+%%
+%%
+%% % Tests for revs=all
+%%
+%%
+%% check_empty_response_not_quorum() ->
+%%     % Simple smoke test that we don't think we're
+%%     % done with a first empty response
+%%     W1 = #shard{node='node1'},
+%%     W2 = #shard{node='node2'},
+%%     W3 = #shard{node='node3'},
+%%     ?_assertMatch(
+%%         {ok, #state{workers = [W2, W3]}},
+%%         handle_message({ok, []}, W1, state0(all, false))
+%%     ).
+%%
+%%
+%% check_basic_response() ->
+%%     % Check that we handle a basic response
+%%     W1 = #shard{node='node1'},
+%%     W2 = #shard{node='node2'},
+%%     W3 = #shard{node='node3'},
+%%     ?_assertMatch(
+%%         {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%%         handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
+%%     ).
+%%
+%%
+%% check_finish_quorum() ->
+%%     % Two messages with the same revisions means we're done
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         Expect = {stop, [bar1(), foo1()]},
+%%         ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
+%%     end).
+%%
+%%
+%% check_finish_quorum_newer() ->
+%%     % We count a descendant of a revision for quorum so
+%%     % foo1 should count for foo2, which means we're finished.
+%%     % We also validate that read_repair was triggered.
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         Expect = {stop, [bar1(), foo2()]},
+%%         ok = meck:reset(fabric),
+%%         ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
+%%         ok = meck:wait(fabric, update_docs, '_', 5000),
+%%         ?assertMatch(
+%%             [{_, {fabric, update_docs, [_, _, _]}, _}],
+%%             meck:history(fabric)
+%%         )
+%%     end).
+%%
+%%
+%% check_no_quorum_on_second() ->
+%%     % Quorum not yet met for the foo revision so we
+%%     % would wait for w3
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         ?assertMatch(
+%%             {ok, #state{workers = [W3]}},
+%%             handle_message({ok, [bar1()]}, W2, S1)
+%%         )
+%%     end).
+%%
+%%
+%% check_done_on_third() ->
+%%     % The third message of three means we're done no matter
+%%     % what. Every revision seen in this pattern should be
+%%     % included.
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
+%%         Expect = {stop, [bar1(), foo1()]},
+%%         ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
+%%     end).
+%%
+%%
+%% % Tests for a specific list of revs
+%%
+%%
+%% check_specific_revs_first_msg() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), false),
+%%         ?assertMatch(
+%%             {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%%             handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
+%%         )
+%%     end).
+%%
+%%
+%% check_revs_done_on_agreement() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), false),
+%%         Msg = {ok, [foo1(), bar1(), bazNF()]},
+%%         {ok, S1} = handle_message(Msg, W1, S0),
+%%         Expect = {stop, [bar1(), foo1(), bazNF()]},
+%%         ?assertEqual(Expect, handle_message(Msg, W2, S1))
+%%     end).
+%%
+%%
+%% check_latest_true() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo2(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         Expect = {stop, [bar1(), foo2(), bazNF()]},
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1))
+%%     end).
+%%
+%%
+%% check_ancestor_counted_in_quorum() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%%         Expect = {stop, [bar1(), foo2(), bazNF()]},
+%%
+%%         % Older first
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%%         % Newer first
+%%         {ok, S2} = handle_message(Msg2, W2, S0),
+%%         ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%%     end).
+%%
+%%
+%% check_not_found_counts_for_descendant() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         % not_found first
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%%         % not_found second
+%%         {ok, S2} = handle_message(Msg2, W2, S0),
+%%         ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%%     end).
+%%
+%%
+%% check_worker_error_skipped() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), baz1()]},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_quorum_only_counts_valid_responses() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {rexi_EXIT, reason},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_empty_list_when_no_workers_reply() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {rexi_EXIT, reason},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%%         Expect = {stop, all_workers_died},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_node_rev_stored() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
+%%         ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_head_only() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
+%%         ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_multiple() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
+%%         ?assertEqual(
+%%                 [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
+%%                 S1#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_dont_store_errors() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
+%%         ?assertEqual([], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_non_errors() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
+%%         ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_concatenate() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
+%%         ?assertEqual(
+%%                 [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
+%%                 S2#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_store_concantenate_multiple() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
+%%         ?assertEqual(
+%%                 [
+%%                     {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
+%%                     {node1, [{1, <<"foo">>}]}
+%%                 ],
+%%                 S2#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_unmodified_on_down_or_exit() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%%         {ok, S2} = handle_message(Down, W2, S1),
+%%         ?assertEqual(
+%%                 [{node1, [{1, <<"foo">>}]}],
+%%                 S2#state.node_revs
+%%             ),
+%%
+%%         Exit = {rexi_EXIT, reason},
+%%         {ok, S3} = handle_message(Exit, W2, S1),
+%%         ?assertEqual(
+%%                 [{node1, [{1, <<"foo">>}]}],
+%%                 S3#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_not_found_replies_are_removed_when_doc_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
+%%         Expect = [bar1(), foo1()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_returned_when_one_of_docs_not_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo1(), foo2(), barNF()]),
+%%         Expect = [foo1(), foo2(), barNF()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_returned_when_doc_not_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
+%%         Expect = [barNF(), bazNF(), fooNF()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_longer_rev_list_returned() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed()]),
+%%         Expect = [foo2()],
+%%         ?assertEqual(2, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_longer_rev_list_not_combined() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
+%%         Expect = [bar1(), foo2()],
+%%         ?assertEqual(3, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_removed_and_longer_rev_list() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
+%%         Expect = [foo2()],
+%%         ?assertEqual(3, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%%
+%% replies_to_dict(Replies) ->
+%%     [reply_to_element(R) || R <- Replies].
+%%
+%% reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
+%%     {_, [Rev | _]} = Revs,
+%%     {{Rev, Revs}, {Reply, 1}};
+%% reply_to_element(Reply) ->
+%%     {Reply, {Reply, 1}}.
+%%
+%% -endif.
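
Taken together, the check_not_found_* and check_longer_rev_list_* cases above
pin down how dict_format_replies collapses duplicates: a not_found entry is
dropped once a found doc shares its head rev, and of two found docs with the
same head rev the longer (unstemmed) rev path wins. A self-contained sketch of
just those rules (hypothetical helper names; the module's actual dict-based
implementation may differ), assuming the #doc record from couch_db.hrl, with a
final sort matching the ordered expectations in the tests:

    %% Key every reply by its head revision {Pos, Rev}.
    head_rev({ok, #doc{revs = {Pos, [Rev | _]}}}) -> {Pos, Rev};
    head_rev({{not_found, missing}, {Pos, Rev}}) -> {Pos, Rev}.

    %% Found docs rank by rev-path length; not_found ranks below any doc.
    rank({ok, #doc{revs = {_Pos, Path}}}) -> length(Path);
    rank({{not_found, missing}, _}) -> 0.

    collapse_sketch(Replies) ->
        Best = lists:foldl(fun(Reply, Acc) ->
            Key = head_rev(Reply),
            case Acc of
                #{Key := Prev} ->
                    case rank(Reply) > rank(Prev) of
                        true -> Acc#{Key := Reply};
                        false -> Acc
                    end;
                _ ->
                    Acc#{Key => Reply}
            end
        end, #{}, Replies),
        lists:sort(maps:values(Best)).
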
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 7e447ff..6d77fc2 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -224,349 +224,349 @@ has_quorum(Resps, Count, W) ->
     end.
 
 
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-purge_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_w2_ok(),
-            t_w3_ok(),
-
-            t_w2_mixed_accepted(),
-            t_w3_mixed_accepted(),
-
-            t_w2_exit1_ok(),
-            t_w2_exit2_accepted(),
-            t_w2_exit3_error(),
-
-            t_w4_accepted(),
-
-            t_mixed_ok_accepted(),
-            t_mixed_errors()
-        ]
-    }.
-
-
-setup() ->
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_, _) -> ok end),
-    meck:expect(couch_log, notice, fun(_, _) -> ok end).
-
-
-teardown(_) ->
-    meck:unload().
-
-
-t_w2_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w3_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(3),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w2_mixed_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-        Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-            {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w3_mixed_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(3),
-        Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-        Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-            {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w2_exit1_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w2_exit2_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w2_exit3_error() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {error, internal_server_error},
-            {error, internal_server_error}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(error, resp_health(Resps))
-    end).
-
-
-t_w4_accepted() ->
-    % Make sure we return when all workers have responded
-    % rather than wait around for a timeout if a user asks
-    % for a quorum with more than the available number of
-    % shards.
-    ?_test(begin
-        Acc0 = create_init_acc(4),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_mixed_ok_accepted() ->
-    ?_test(begin
-        WorkerUUIDs = [
-            {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
-            {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-        ],
-
-        Acc0 = #acc{
-            worker_uuids = WorkerUUIDs,
-            resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-            uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-            w = 2
-        },
-
-        Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
-        Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
-        {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-        {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-        {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_mixed_errors() ->
-    ?_test(begin
-        WorkerUUIDs = [
-            {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
-            {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-        ],
-
-        Acc0 = #acc{
-            worker_uuids = WorkerUUIDs,
-            resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-            uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-            w = 2
-        },
-
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-        {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-        {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(error, resp_health(Resps))
-    end).
-
-
-create_init_acc(W) ->
-    UUID1 = <<"uuid1">>,
-    UUID2 = <<"uuid2">>,
-
-    Nodes = [node1, node2, node3],
-    Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
-
-    % Create our worker_uuids. We're relying on the fact that
-    % we're using a fake Q=1 db so we don't have to worry
-    % about any hashing here.
-    WorkerUUIDs = lists:map(fun(Shard) ->
-        {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
-    end, Shards),
-
-    #acc{
-        worker_uuids = WorkerUUIDs,
-        resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
-        uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
-        w = W
-    }.
-
-
-worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
-    {Worker, _} = lists:nth(N, WorkerUUIDs),
-    Worker.
-
-
-check_quorum(Acc, Expect) ->
-    dict:fold(fun(_Shard, Resps, _) ->
-        ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
-    end, nil, Acc#acc.resps).
-
--endif.
+%% -ifdef(TEST).
+%%
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% purge_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             t_w2_ok(),
+%%             t_w3_ok(),
+%%
+%%             t_w2_mixed_accepted(),
+%%             t_w3_mixed_accepted(),
+%%
+%%             t_w2_exit1_ok(),
+%%             t_w2_exit2_accepted(),
+%%             t_w2_exit3_error(),
+%%
+%%             t_w4_accepted(),
+%%
+%%             t_mixed_ok_accepted(),
+%%             t_mixed_errors()
+%%         ]
+%%     }.
+%%
+%%
+%% setup() ->
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_, _) -> ok end),
+%%     meck:expect(couch_log, notice, fun(_, _) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%%
+%% t_w2_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w3_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(3),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_mixed_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%%         Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%%             {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w3_mixed_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(3),
+%%         Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%%         Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%%             {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit1_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit2_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit3_error() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {error, internal_server_error},
+%%             {error, internal_server_error}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(error, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w4_accepted() ->
+%%     % Make sure we return when all workers have responded
+%%     % rather than wait around for a timeout if a user asks
+%%     % for a quorum with more than the available number of
+%%     % shards.
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(4),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_mixed_ok_accepted() ->
+%%     ?_test(begin
+%%         WorkerUUIDs = [
+%%             {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%%             {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%%         ],
+%%
+%%         Acc0 = #acc{
+%%             worker_uuids = WorkerUUIDs,
+%%             resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%%             uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%%             w = 2
+%%         },
+%%
+%%         Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
+%%         Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
+%%         {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%%         {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%%         {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_mixed_errors() ->
+%%     ?_test(begin
+%%         WorkerUUIDs = [
+%%             {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%%             {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%%         ],
+%%
+%%         Acc0 = #acc{
+%%             worker_uuids = WorkerUUIDs,
+%%             resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%%             uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%%             w = 2
+%%         },
+%%
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%%         {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%%         {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(error, resp_health(Resps))
+%%     end).
+%%
+%%
+%% create_init_acc(W) ->
+%%     UUID1 = <<"uuid1">>,
+%%     UUID2 = <<"uuid2">>,
+%%
+%%     Nodes = [node1, node2, node3],
+%%     Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
+%%
+%%     % Create our worker_uuids. We're relying on the fact that
+%%     % we're using a fake Q=1 db so we don't have to worry
+%%     % about any hashing here.
+%%     WorkerUUIDs = lists:map(fun(Shard) ->
+%%         {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
+%%     end, Shards),
+%%
+%%     #acc{
+%%         worker_uuids = WorkerUUIDs,
+%%         resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
+%%         uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
+%%         w = W
+%%     }.
+%%
+%%
+%% worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
+%%     {Worker, _} = lists:nth(N, WorkerUUIDs),
+%%     Worker.
+%%
+%%
+%% check_quorum(Acc, Expect) ->
+%%     dict:fold(fun(_Shard, Resps, _) ->
+%%         ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
+%%     end, nil, Acc#acc.resps).
+%%
+%% -endif.
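
The disabled purge tests above all drive the same state machine: each
UUID collects per-worker replies until either W identical successes
arrive (ok), the workers run out with at least one success (accepted),
or the workers run out with none (error). A reduced sketch of that
classification, with hypothetical names standing in for the module's
has_quorum/resp_health plumbing:

    -module(purge_quorum_sketch).
    -export([health/3]).

    %% health(OkReplies, W, RemainingWorkers) classifies one UUID's state.
    health(OkReplies, W, _Remaining) when OkReplies >= W ->
        ok;            % quorum met, e.g. t_w2_ok
    health(OkReplies, _W, 0) when OkReplies > 0 ->
        accepted;      % workers exhausted short of W, e.g. t_w4_accepted
    health(_OkReplies, _W, 0) ->
        error;         % nothing succeeded, e.g. t_w2_exit3_error
    health(_OkReplies, _W, _Remaining) ->
        in_progress.   % keep waiting for more workers

For example, health(1, 2, 0) yields accepted, matching
t_w2_exit2_accepted, where one reply lands before two workers exit.
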
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index c108c9a..84f4bc4 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -219,144 +219,144 @@ validate_atomic_update(_DbName, AllDocs, true) ->
     end, AllDocs),
     throw({aborted, PreCommitFailures}).
 
-% eunit tests
-doc_update1_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_,_) -> ok end),
-
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc1],
-    Docs2 = [Doc2, Doc1],
-    Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
-    Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
-
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-
-
-    % test for W = 2
-    AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
-    ?assertEqual(WaitingCountW2_1,2),
-    {stop, FinalReplyW2 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
-
-    % test for W = 3
-    AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
-    ?assertEqual(WaitingCountW3_1,2),
-
-    {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
-    ?assertEqual(WaitingCountW3_2,1),
-
-    {stop, FinalReplyW3 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
-
-    % test W quorum > # shards, which should fail immediately
-
-    Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
-    GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
-
-    AccW4 =
-        {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
-    Bool =
-    case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
-        {stop, _Reply} ->
-            true;
-        _ -> false
-    end,
-    ?assertEqual(Bool,true),
-
-    % Docs with no replies should end up as {error, internal_server_error}
-    SA1 = #shard{node=a, range=1},
-    SB1 = #shard{node=b, range=1},
-    SA2 = #shard{node=a, range=2},
-    SB2 = #shard{node=b, range=2},
-    GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
-    StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
-    {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
-    {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
-    {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
-    {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
-    ?assertEqual(
-        {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
-        ReplyW5
-    ),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
-
-
-doc_update2_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_,_) -> ok end),
-
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
-        Reply),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
-
-doc_update3_test() ->
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
-
-% needed for testing to avoid having to start the mem3 application
-group_docs_by_shard_hack(_DbName, Shards, Docs) ->
-    dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
-        lists:foldl(fun(Shard, D1) ->
-            dict:append(Shard, Doc, D1)
-        end, D0, Shards)
-    end, dict:new(), Docs)).
+%% % eunit tests
+%% doc_update1_test() ->
+%%     meck:new(couch_stats),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_,_) -> ok end),
+%%
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc1],
+%%     Docs2 = [Doc2, Doc1],
+%%     Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
+%%     Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
+%%
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%
+%%
+%%     % test for W = 2
+%%     AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         Dict},
+%%
+%%     {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
+%%         handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
+%%     ?assertEqual(WaitingCountW2_1,2),
+%%     {stop, FinalReplyW2 } =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
+%%     ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
+%%
+%%     % test for W = 3
+%%     AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
+%%         Dict},
+%%
+%%     {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
+%%         handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
+%%     ?assertEqual(WaitingCountW3_1,2),
+%%
+%%     {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
+%%     ?assertEqual(WaitingCountW3_2,1),
+%%
+%%     {stop, FinalReplyW3 } =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
+%%     ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
+%%
+%%     % test W quorum > # shards, which should fail immediately
+%%
+%%     Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
+%%     GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
+%%
+%%     AccW4 =
+%%         {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
+%%     Bool =
+%%     case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
+%%         {stop, _Reply} ->
+%%             true;
+%%         _ -> false
+%%     end,
+%%     ?assertEqual(Bool,true),
+%%
+%%     % Docs with no replies should end up as {error, internal_server_error}
+%%     SA1 = #shard{node=a, range=1},
+%%     SB1 = #shard{node=b, range=1},
+%%     SA2 = #shard{node=a, range=2},
+%%     SB2 = #shard{node=b, range=2},
+%%     GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
+%%     StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
+%%     {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
+%%     {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
+%%     {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
+%%     {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
+%%     ?assertEqual(
+%%         {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
+%%         ReplyW5
+%%     ),
+%%     meck:unload(couch_log),
+%%     meck:unload(couch_stats).
+%%
+%%
+%% doc_update2_test() ->
+%%     meck:new(couch_stats),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_,_) -> ok end),
+%%
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc2, Doc1],
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%     Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%%     {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%%     ?assertEqual(WaitingCount1,2),
+%%
+%%     {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%%     ?assertEqual(WaitingCount2,1),
+%%
+%%     {stop, Reply} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
+%%
+%%     ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
+%%         Reply),
+%%     meck:unload(couch_log),
+%%     meck:unload(couch_stats).
+%%
+%% doc_update3_test() ->
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc2, Doc1],
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%     Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%%     {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%%     ?assertEqual(WaitingCount1,2),
+%%
+%%     {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%%     ?assertEqual(WaitingCount2,1),
+%%
+%%     {stop, Reply} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+%%
+%%     ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
+%%
+%% % needed for testing to avoid having to start the mem3 application
+%% group_docs_by_shard_hack(_DbName, Shards, Docs) ->
+%%     dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
+%%         lists:foldl(fun(Shard, D1) ->
+%%             dict:append(Shard, Doc, D1)
+%%         end, D0, Shards)
+%%     end, dict:new(), Docs)).
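
In the doc_update tests above, the accumulator threaded through
handle_message/3 is the 5-tuple {WaitingCount, DocCount, W, GroupedDocs,
ReplyDict}, and the WaitingCountN assertions only ever watch the first
element. A simplified sketch of that bookkeeping (the real callback also
records each reply and can stop early once every doc reaches W; this
shows just the countdown the assertions track):

    -module(doc_update_acc_sketch).
    -export([step/1]).

    %% One worker reply decrements WaitingCount; the coordinator stops
    %% when the last expected worker has answered.
    step({1, DocCount, W, Grouped, Replies}) ->
        {stop, {0, DocCount, W, Grouped, Replies}};
    step({WaitingCount, DocCount, W, Grouped, Replies}) when WaitingCount > 1 ->
        {ok, {WaitingCount - 1, DocCount, W, Grouped, Replies}}.
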
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 97374be..212a1da 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -643,22 +643,22 @@ uuid(Db) ->
 uuid_prefix_len() ->
     list_to_integer(config:get("fabric", "uuid_prefix_len", "7")).
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-maybe_filtered_json_doc_no_filter_test() ->
-    Body = {[{<<"a">>, 1}]},
-    Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-    {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
-    ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
-    ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
-
-maybe_filtered_json_doc_with_filter_test() ->
-    Body = {[{<<"a">>, 1}]},
-    Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-    Fields = [<<"a">>, <<"nonexistent">>],
-    Filter = {selector, main_only, {some_selector, Fields}},
-    {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
-    ?assertEqual(JDocProps, [{<<"a">>, 1}]).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% maybe_filtered_json_doc_no_filter_test() ->
+%%     Body = {[{<<"a">>, 1}]},
+%%     Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%%     {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
+%%     ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
+%%     ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
+%%
+%% maybe_filtered_json_doc_with_filter_test() ->
+%%     Body = {[{<<"a">>, 1}]},
+%%     Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%%     Fields = [<<"a">>, <<"nonexistent">>],
+%%     Filter = {selector, main_only, {some_selector, Fields}},
+%%     {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
+%%     ?assertEqual(JDocProps, [{<<"a">>, 1}]).
+%%
+%% -endif.
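
The two maybe_filtered_json_doc tests above pin down a simple contract:
with no filter the full doc (plus _id/_rev) comes back, and with a
selector filter only the requested fields survive, missing fields being
dropped silently. A sketch of that projection over an EJSON body (a
hypothetical helper, not fabric_rpc's internals):

    -module(field_filter_sketch).
    -export([filter/2]).

    %% Keep only the requested fields of an EJSON {Props} body; fields
    %% absent from the doc (e.g. <<"nonexistent">>) are simply omitted.
    filter({Props}, Fields) ->
        {[{K, V} || {K, V} <- Props, lists:member(K, Fields)]}.

For instance, filter({[{<<"a">>, 1}]}, [<<"a">>, <<"nonexistent">>])
returns {[{<<"a">>, 1}]}, the shape the second test asserts.
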
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
index 59c8b8a..98e2850 100644
--- a/src/fabric/src/fabric_streams.erl
+++ b/src/fabric/src/fabric_streams.erl
@@ -192,82 +192,83 @@ add_worker_to_cleaner(CoordinatorPid, Worker) ->
 
 
 
--ifdef(TEST).
 
--include_lib("eunit/include/eunit.hrl").
-
-worker_cleaner_test_() ->
-    {
-        "Fabric spawn_worker_cleaner test", {
-            setup, fun setup/0, fun teardown/1,
-            fun(_) -> [
-                should_clean_workers(),
-                does_not_fire_if_cleanup_called(),
-                should_clean_additional_worker_too()
-            ] end
-        }
-    }.
-
-
-should_clean_workers() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()},
-            #shard{node = 'n2', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        Ref = erlang:monitor(process, Cleaner),
-        Coord ! die,
-        receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-does_not_fire_if_cleanup_called() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()},
-            #shard{node = 'n2', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        Ref = erlang:monitor(process, Cleaner),
-        cleanup(Workers),
-        Coord ! die,
-        receive {'DOWN', Ref, _, _, _} -> ok end,
-        % 2 calls would be from the cleanup/1 function. If the cleanup
-        % process had fired too, it would have been 4 calls total.
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-should_clean_additional_worker_too() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
-        Ref = erlang:monitor(process, Cleaner),
-        Coord ! die,
-        receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-setup() ->
-    ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-
-
-teardown(_) ->
-    meck:unload().
-
--endif.
+%% -ifdef(TEST).
+%%
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% worker_cleaner_test_() ->
+%%     {
+%%         "Fabric spawn_worker_cleaner test", {
+%%             setup, fun setup/0, fun teardown/1,
+%%             fun(_) -> [
+%%                 should_clean_workers(),
+%%                 does_not_fire_if_cleanup_called(),
+%%                 should_clean_additional_worker_too()
+%%             ] end
+%%         }
+%%     }.
+%%
+%%
+%% should_clean_workers() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()},
+%%             #shard{node = 'n2', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% does_not_fire_if_cleanup_called() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()},
+%%             #shard{node = 'n2', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         cleanup(Workers),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, _, _} -> ok end,
+%%         % 2 calls would be from the cleanup/1 function. If the cleanup
+%%         % process had fired too, it would have been 4 calls total.
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% should_clean_additional_worker_too() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% setup() ->
+%%     ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%% -endif.
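
The worker-cleaner tests above revolve around one idea: a helper process
monitors the coordinator and kills the remote workers exactly once if
the coordinator dies first. A stripped-down sketch, with KillFun
standing in for rexi:kill_all/1 and without the process-dictionary
dedup (?WORKER_CLEANER) or add_worker_to_cleaner/2 that the real module
layers on top:

    -module(cleaner_sketch).
    -export([spawn_cleaner/2]).

    %% Watch Coordinator; fire KillFun once on its 'DOWN', then exit.
    spawn_cleaner(Coordinator, KillFun) ->
        spawn(fun() ->
            Ref = erlang:monitor(process, Coordinator),
            receive
                {'DOWN', Ref, process, Coordinator, _Reason} ->
                    KillFun()
            end
        end).
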
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index aaf0623..16f916c 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -189,30 +189,30 @@ create_monitors(Shards) ->
     ]),
     rexi_monitor:start(MonRefs).
 
-%% verify only id and rev are used in key.
-update_counter_test() ->
-    Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
-                    body = <<"body">>, atts = <<"atts">>}},
-    ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
-        update_counter(Reply, 1, [])).
-
-remove_ancestors_test() ->
-    Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-    Bar2 = {not_found, {1,<<"bar">>}},
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo1,1)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo2,2)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,2)],
-        remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
-    ).
+%% %% verify only id and rev are used in key.
+%% update_counter_test() ->
+%%     Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
+%%                     body = <<"body">>, atts = <<"atts">>}},
+%%     ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
+%%         update_counter(Reply, 1, [])).
+%%
+%% remove_ancestors_test() ->
+%%     Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%%     Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+%%     Bar2 = {not_found, {1,<<"bar">>}},
+%%     ?assertEqual(
+%%         [kv(Bar1,1), kv(Foo1,1)],
+%%         remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
+%%     ),
+%%     ?assertEqual(
+%%         [kv(Bar1,1), kv(Foo2,2)],
+%%         remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
+%%     ),
+%%     ?assertEqual(
+%%         [kv(Bar1,2)],
+%%         remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
+%%     ).
 
 is_replicator_db(DbName) ->
     path_ends_with(DbName, <<"_replicator">>).
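
The disabled remove_ancestors test above folds a reply into its
descendant: foo is absorbed by foo2, and a not_found bar is absorbed by
the live bar reply, with counts added. The ancestry rule behind it can
be sketched as a path lookup over {Pos, [RevId | Ancestors]} revs (a
hypothetical helper; the not_found {Pos, RevId} shape in the test takes
a slightly different path in the real code):

    -module(ancestor_sketch).
    -export([is_ancestor/2]).

    %% {Pos1, [Id1|_]} is an ancestor of (or equal to) {Pos2, Ids2} when
    %% Id1 appears on the descendant's path at the matching depth.
    is_ancestor({Pos1, [Id1 | _]}, {Pos2, Ids2})
            when Pos2 >= Pos1, length(Ids2) > Pos2 - Pos1 ->
        lists:nth(Pos2 - Pos1 + 1, Ids2) =:= Id1;
    is_ancestor(_, _) ->
        false.

Here is_ancestor({1, [<<"foo">>]}, {2, [<<"foo2">>, <<"foo">>]}) is
true, which is why only foo2 survives in the second assertion.
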
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 55b44e6..adde1e6 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -416,97 +416,97 @@ fix_skip_and_limit(#mrargs{} = Args) ->
 remove_finalizer(Args) ->
     couch_mrview_util:set_extra(Args, finalizer, null).
 
-% unit test
-is_progress_possible_test() ->
-    EndPoint = 2 bsl 31,
-    T1 = [[0, EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
-    T2 = [[0,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
-    % gap
-    T3 = [[0,10],[12,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
-    % outside range
-    T4 = [[1,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
-    % outside range
-    T5 = [[0,10],[11,20],[21,EndPoint]],
-    ?assertEqual(is_progress_possible(mk_cnts(T5)),false),
-    T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, EndPoint - 1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T6)), true),
-    % not possible, overlap is not exact
-    T7 = [[0, 10], [13, 20], [21, EndPoint - 1], [9, 12]],
-    ?assertEqual(is_progress_possible(mk_cnts(T7)), false).
-
-
-remove_overlapping_shards_test() ->
-    Cb = undefined,
-
-    Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
-
-    % Simple (exact) overlap
-    Shard1 = mk_shard("node-3", [11, 20]),
-    Shards1 = fabric_dict:store(Shard1, nil, Shards),
-    R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
-    ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
-        fabric_util:worker_ranges(R1)),
-    ?assert(fabric_dict:is_key(Shard1, R1)),
-
-    % Split overlap (shard overlaps multiple workers)
-    Shard2 = mk_shard("node-3", [0, 20]),
-    Shards2 = fabric_dict:store(Shard2, nil, Shards),
-    R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
-    ?assertEqual([{0, 20}, {21, ?RING_END}],
-        fabric_util:worker_ranges(R2)),
-    ?assert(fabric_dict:is_key(Shard2, R2)).
-
-
-get_shard_replacements_test() ->
-    Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
-        {"n1", 11, 20}, {"n1", 21, ?RING_END},
-        {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
-        {"n3", 0, 21, ?RING_END}
-    ]],
-    Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
-        {"n2", 21, ?RING_END},
-        {"n3", 0, 10}, {"n3", 11, 20}
-    ]],
-    Res = lists:sort(get_shard_replacements_int(Unused, Used)),
-    % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
-    % 10] workers on n1
-    Expect = [
-        {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
-        {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
-        {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
-    ],
-    ?assertEqual(Expect, Res).
-
-
-mk_cnts(Ranges) ->
-    Shards = lists:map(fun mk_shard/1, Ranges),
-    orddict:from_list([{Shard,nil} || Shard <- Shards]).
-
-mk_cnts(Ranges, NoNodes) ->
-    orddict:from_list([{Shard,nil}
-                       || Shard <-
-                              lists:flatten(lists:map(
-                                 fun(Range) ->
-                                         mk_shards(NoNodes,Range,[])
-                                 end, Ranges))]
-                     ).
-
-mk_shards(0,_Range,Shards) ->
-    Shards;
-mk_shards(NoNodes,Range,Shards) ->
-    Name ="node-" ++ integer_to_list(NoNodes),
-    mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
-
-
-mk_shard([B, E]) when is_integer(B), is_integer(E) ->
-    #shard{range = [B, E]}.
-
-
-mk_shard(Name, Range) ->
-    Node = list_to_atom(Name),
-    BName = list_to_binary(Name),
-    #shard{name = BName, node = Node, range = Range}.
+%% % unit test
+%% is_progress_possible_test() ->
+%%     EndPoint = 2 bsl 31,
+%%     T1 = [[0, EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
+%%     T2 = [[0,10],[11,20],[21,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
+%%     % gap
+%%     T3 = [[0,10],[12,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
+%%     % outside range
+%%     T4 = [[1,10],[11,20],[21,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
+%%     % outside range
+%%     T5 = [[0,10],[11,20],[21,EndPoint]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T5)),false),
+%%     T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, EndPoint - 1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T6)), true),
+%%     % not possible, overlap is not exact
+%%     T7 = [[0, 10], [13, 20], [21, EndPoint - 1], [9, 12]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T7)), false).
+%%
+%%
+%% remove_overlapping_shards_test() ->
+%%     Cb = undefined,
+%%
+%%     Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
+%%
+%%     % Simple (exact) overlap
+%%     Shard1 = mk_shard("node-3", [11, 20]),
+%%     Shards1 = fabric_dict:store(Shard1, nil, Shards),
+%%     R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
+%%     ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
+%%         fabric_util:worker_ranges(R1)),
+%%     ?assert(fabric_dict:is_key(Shard1, R1)),
+%%
+%%     % Split overlap (shard overlaps multiple workers)
+%%     Shard2 = mk_shard("node-3", [0, 20]),
+%%     Shards2 = fabric_dict:store(Shard2, nil, Shards),
+%%     R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
+%%     ?assertEqual([{0, 20}, {21, ?RING_END}],
+%%         fabric_util:worker_ranges(R2)),
+%%     ?assert(fabric_dict:is_key(Shard2, R2)).
+%%
+%%
+%% get_shard_replacements_test() ->
+%%     Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%%         {"n1", 11, 20}, {"n1", 21, ?RING_END},
+%%         {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
+%%         {"n3", 0, 21, ?RING_END}
+%%     ]],
+%%     Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%%         {"n2", 21, ?RING_END},
+%%         {"n3", 0, 10}, {"n3", 11, 20}
+%%     ]],
+%%     Res = lists:sort(get_shard_replacements_int(Unused, Used)),
+%%     % Notice that the [0, 10] range can be replaced by spawning the [0, 4]
+%%     % and [5, 10] workers on n1
+%%     Expect = [
+%%         {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
+%%         {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
+%%         {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
+%%     ],
+%%     ?assertEqual(Expect, Res).
+%%
+%%
+%% mk_cnts(Ranges) ->
+%%     Shards = lists:map(fun mk_shard/1, Ranges),
+%%     orddict:from_list([{Shard,nil} || Shard <- Shards]).
+%%
+%% mk_cnts(Ranges, NoNodes) ->
+%%     orddict:from_list([{Shard,nil}
+%%                        || Shard <-
+%%                               lists:flatten(lists:map(
+%%                                  fun(Range) ->
+%%                                          mk_shards(NoNodes,Range,[])
+%%                                  end, Ranges))]
+%%                      ).
+%%
+%% mk_shards(0,_Range,Shards) ->
+%%     Shards;
+%% mk_shards(NoNodes,Range,Shards) ->
+%%     Name ="node-" ++ integer_to_list(NoNodes),
+%%     mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
+%%
+%%
+%% mk_shard([B, E]) when is_integer(B), is_integer(E) ->
+%%     #shard{range = [B, E]}.
+%%
+%%
+%% mk_shard(Name, Range) ->
+%%     Node = list_to_atom(Name),
+%%     BName = list_to_binary(Name),
+%%     #shard{name = BName, node = Node, range = Range}.
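
The is_progress_possible cases above check whether a set of worker
ranges still covers the whole ring [0, ?RING_END]. The gap-detection
half of that is easy to sketch with a scan over ranges sorted by start
point; note that the real fabric_ring check is stricter, since it also
rejects inexact overlaps (T7 above fails the real check but would pass
this simpler one):

    -module(ring_sketch).
    -export([covered/1]).

    %% Ring end as the tests above imply: EndPoint - 1 for EndPoint = 2 bsl 31.
    -define(RING_END, ((2 bsl 31) - 1)).

    %% True when the sorted ranges reach from 0 to ?RING_END with no gap.
    covered(Ranges) ->
        covered(lists:sort(Ranges), 0).

    covered([], Next) ->
        Next > ?RING_END;
    covered([[B, E] | Rest], Next) when B =< Next ->
        covered(Rest, max(Next, E + 1));
    covered(_, _) ->
        false.

So covered([[0, 10], [12, ?RING_END]]) is false because of the gap at
11, mirroring the T3 case.
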
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index febbd31..3f684a3 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -637,184 +637,184 @@ increment_changes_epoch() ->
     application:set_env(fabric, changes_epoch, os:timestamp()).
 
 
-unpack_seq_setup() ->
-    meck:new(mem3),
-    meck:new(fabric_view),
-    meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
-    meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
-    ok.
-
-
-unpack_seqs_test_() ->
-    {
-        setup,
-        fun unpack_seq_setup/0,
-        fun (_) -> meck:unload() end,
-        [
-            t_unpack_seqs()
-        ]
-    }.
-
-
-t_unpack_seqs() ->
-    ?_test(begin
-        % BigCouch 0.3 style.
-        assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
-
-        % BigCouch 0.4 style.
-        assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
-
-        % BigCouch 0.4 style (as string).
-        assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-
-        % with internal hyphen
-        assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-        "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-        "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
-        assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-        "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-        "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
-
-        % CouchDB 1.2 style
-        assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
-    end).
-
-
-assert_shards(Packed) ->
-    ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
-
-
-find_replacements_test() ->
-    % None of the workers are in the live list of shards but there is a
-    % replacement on n3 for the full range. It should get picked instead of
-    % the two smaller ones on n2.
-    Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards1 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n3", 0, ?RING_END)
-    ],
-    {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
-    ?assertEqual([], WorkersRes1),
-    ?assertEqual(Workers1, Dead1),
-    ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
-
-    % None of the workers are in the live list of shards and there is a
-    % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
-    Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards2 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ],
-    {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
-    ?assertEqual([], WorkersRes2),
-    ?assertEqual(Workers2, Dead2),
-    ?assertEqual([
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ], lists:sort(Reps2)),
-
-    % One worker is available and one needs to be replaced. Replacement will be
-    % from two split shards
-    Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards3 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n2", 11, ?RING_END)
-    ],
-    {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
-    ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
-    ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
-    ?assertEqual([
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ], lists:sort(Reps3)),
-
-    % All workers are available. Make sure they are not killed even if there is
-    % a longer (single) shard to replace them.
-    Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
-    AllShards4 = [
-        mk_shard("n1", 0, 10),
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n3", 0, ?RING_END)
-    ],
-    {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
-    ?assertEqual(Workers4, WorkersRes4),
-    ?assertEqual([], Dead4),
-    ?assertEqual([], Reps4).
-
-
-mk_workers(NodesRanges) ->
-    mk_workers(NodesRanges, nil).
-
-mk_workers(NodesRanges, Val) ->
-    orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-
-
-mk_shard(Name, B, E) ->
-    Node = list_to_atom(Name),
-    BName = list_to_binary(Name),
-    #shard{name = BName, node = Node, range = [B, E]}.
-
-
-find_split_shard_replacements_test() ->
-    % One worker can be replaced and one can't
-    Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards1 = [
-        mk_shard("n1", 0, 4),
-        mk_shard("n1", 5, 10),
-        mk_shard("n3", 11, ?RING_END)
-    ],
-    {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
-    ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
-    ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
-
-    % All workers can be replaced - one by 1 shard, another by 3 smaller shards
-    Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards2 = [
-        mk_shard("n1", 0, 10),
-        mk_shard("n2", 11, 12),
-        mk_shard("n2", 13, 14),
-        mk_shard("n2", 15, ?RING_END)
-    ],
-    {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
-    ?assertEqual(mk_workers([
-       {"n1", 0, 10},
-       {"n2", 11, 12},
-       {"n2", 13, 14},
-       {"n2", 15, ?RING_END}
-    ], 42), Workers2),
-    ?assertEqual([], ShardsLeft2),
-
-    % No workers can be replaced. Ranges match but they are on different nodes
-    Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards3 = [
-        mk_shard("n2", 0, 10),
-        mk_shard("n3", 11, ?RING_END)
-    ],
-    {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
-    ?assertEqual([], Workers3),
-    ?assertEqual(Shards3, ShardsLeft3).
+%% unpack_seq_setup() ->
+%%     meck:new(mem3),
+%%     meck:new(fabric_view),
+%%     meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
+%%     meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
+%%     ok.
+%%
+%%
+%% unpack_seqs_test_() ->
+%%     {
+%%         setup,
+%%         fun unpack_seq_setup/0,
+%%         fun (_) -> meck:unload() end,
+%%         [
+%%             t_unpack_seqs()
+%%         ]
+%%     }.
+%%
+%%
+%% t_unpack_seqs() ->
+%%     ?_test(begin
+%%         % BigCouch 0.3 style.
+%%         assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
+%%
+%%         % BigCouch 0.4 style.
+%%         assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
+%%
+%%         % BigCouch 0.4 style (as string).
+%%         assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%
+%%         % with internal hyphen
+%%         assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%%         "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%%         "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
+%%         assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%%         "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%%         "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
+%%
+%%         % CouchDB 1.2 style
+%%         assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
+%%     end).
+%%
+%%
+%% assert_shards(Packed) ->
+%%     ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
+%%
+%%
+%% find_replacements_test() ->
+%%     % None of the workers are in the live list of shards, but there is a
+%%     % replacement on n3 for the full range. It should get picked instead of
+%%     % the two smaller ones on n2.
+%%     Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards1 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n3", 0, ?RING_END)
+%%     ],
+%%     {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
+%%     ?assertEqual([], WorkersRes1),
+%%     ?assertEqual(Workers1, Dead1),
+%%     ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
+%%
+%%     % None of the workers are in the live list of shards and there is a
+%%     % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
+%%     Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards2 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ],
+%%     {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
+%%     ?assertEqual([], WorkersRes2),
+%%     ?assertEqual(Workers2, Dead2),
+%%     ?assertEqual([
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ], lists:sort(Reps2)),
+%%
+%%     % One worker is available and one needs to be replaced. Replacement will be
+%%     % from two split shards
+%%     Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards3 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n2", 11, ?RING_END)
+%%     ],
+%%     {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
+%%     ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
+%%     ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
+%%     ?assertEqual([
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ], lists:sort(Reps3)),
+%%
+%%     % All workers are available. Make sure they are not killed even if there is
+%%     % a longer (single) shard to replace them.
+%%     Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
+%%     AllShards4 = [
+%%         mk_shard("n1", 0, 10),
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n3", 0, ?RING_END)
+%%     ],
+%%     {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
+%%     ?assertEqual(Workers4, WorkersRes4),
+%%     ?assertEqual([], Dead4),
+%%     ?assertEqual([], Reps4).
+%%
+%%
+%% mk_workers(NodesRanges) ->
+%%     mk_workers(NodesRanges, nil).
+%%
+%% mk_workers(NodesRanges, Val) ->
+%%     orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
+%%
+%%
+%% mk_shard(Name, B, E) ->
+%%     Node = list_to_atom(Name),
+%%     BName = list_to_binary(Name),
+%%     #shard{name = BName, node = Node, range = [B, E]}.
+%%
+%%
+%% find_split_shard_replacements_test() ->
+%%     % One worker can be replaced and one can't
+%%     Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards1 = [
+%%         mk_shard("n1", 0, 4),
+%%         mk_shard("n1", 5, 10),
+%%         mk_shard("n3", 11, ?RING_END)
+%%     ],
+%%     {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
+%%     ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
+%%     ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
+%%
+%%     % All workers can be replaced - one by 1 shard, another by 3 smaller shards
+%%     Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards2 = [
+%%         mk_shard("n1", 0, 10),
+%%         mk_shard("n2", 11, 12),
+%%         mk_shard("n2", 13, 14),
+%%         mk_shard("n2", 15, ?RING_END)
+%%     ],
+%%     {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
+%%     ?assertEqual(mk_workers([
+%%        {"n1", 0, 10},
+%%        {"n2", 11, 12},
+%%        {"n2", 13, 14},
+%%        {"n2", 15, ?RING_END}
+%%     ], 42), Workers2),
+%%     ?assertEqual([], ShardsLeft2),
+%%
+%%     % No workers can be replaced. Ranges match but they are on different nodes
+%%     Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards3 = [
+%%         mk_shard("n2", 0, 10),
+%%         mk_shard("n3", 11, ?RING_END)
+%%     ],
+%%     {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
+%%     ?assertEqual([], Workers3),
+%%     ?assertEqual(Shards3, ShardsLeft3).
diff --git a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
deleted file mode 100644
index 4eafb2b..0000000
--- a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
+++ /dev/null
@@ -1,307 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_purge_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(TDEF(A), {A, fun A/1}).
-
-% TODO: Add tests:
-%         - filter some updates
-%         - allow for an update that was filtered by a node
-%         - ignore lagging nodes
-
-main_test_() ->
-    {
-        setup,
-        spawn,
-        fun setup_all/0,
-        fun teardown_all/1,
-        [
-            {
-                foreach,
-                fun setup_no_purge/0,
-                fun teardown_no_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_no_purge_no_filter)
-                ])
-            },
-            {
-                foreach,
-                fun setup_single_purge/0,
-                fun teardown_single_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_filter),
-                    ?TDEF(t_filter_unknown_node),
-                    ?TDEF(t_filter_local_node),
-                    ?TDEF(t_no_filter_old_node),
-                    ?TDEF(t_no_filter_different_node),
-                    ?TDEF(t_no_filter_after_repl)
-                ])
-            },
-            {
-                foreach,
-                fun setup_multi_purge/0,
-                fun teardown_multi_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_filter),
-                    ?TDEF(t_filter_unknown_node),
-                    ?TDEF(t_filter_local_node),
-                    ?TDEF(t_no_filter_old_node),
-                    ?TDEF(t_no_filter_different_node),
-                    ?TDEF(t_no_filter_after_repl)
-                ])
-            }
-        ]
-    }.
-
-
-setup_all() ->
-    test_util:start_couch().
-
-
-teardown_all(Ctx) ->
-    test_util:stop_couch(Ctx).
-
-
-setup_no_purge() ->
-    {ok, Db} = create_db(),
-    populate_db(Db),
-    couch_db:name(Db).
-
-
-teardown_no_purge(DbName) ->
-    ok = couch_server:delete(DbName, []).
-
-
-setup_single_purge() ->
-    DbName = setup_no_purge(),
-    DocId = <<"0003">>,
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    purge_doc(DbName, DocId),
-    {DbName, DocId, OldDoc, 1}.
-
-
-teardown_single_purge({DbName, _, _, _}) ->
-    teardown_no_purge(DbName).
-
-
-setup_multi_purge() ->
-    DbName = setup_no_purge(),
-    DocId = <<"0003">>,
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    lists:foreach(fun(I) ->
-        PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
-        purge_doc(DbName, PDocId)
-    end, lists:seq(1, 5)),
-    {DbName, DocId, OldDoc, 3}.
-
-
-teardown_multi_purge(Ctx) ->
-    teardown_single_purge(Ctx).
-
-
-t_no_purge_no_filter(DbName) ->
-    DocId = <<"0003">>,
-
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    NewDoc = create_update(OldDoc, 2),
-
-    rpc_update_doc(DbName, NewDoc),
-
-    {ok, CurrDoc} = open_doc(DbName, DocId),
-    ?assert(CurrDoc /= OldDoc),
-    ?assert(CurrDoc == NewDoc).
-
-
-t_filter({DbName, DocId, OldDoc, _PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, 0),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
-    % Unknown nodes are assumed to start at PurgeSeq = 0
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, 0),
-
-    {Pos, [Rev | _]} = OldDoc#doc.revs,
-    RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]},
-    rpc_update_doc(DbName, OldDoc, [RROpt]),
-
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % The random UUID is to generate a badarg exception when
-    % we try and convert it to an existing atom.
-    create_purge_checkpoint(DbName, 0, couch_uuids:random()),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % Create a valid purge for a different node
-    TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
-    create_purge_checkpoint(DbName, 0, TgtNode),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % Create a valid purge for a different node
-    TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
-    create_purge_checkpoint(DbName, 0, TgtNode),
-
-    % Add a local node rev to the list of node revs. It should
-    % be filtered out
-    {Pos, [Rev | _]} = OldDoc#doc.revs,
-    RROpts = [{read_repair, [
-        {tgt_node(), [{Pos, Rev}]},
-        {node(), [{1, <<"123">>}]}
-    ]}],
-    rpc_update_doc(DbName, OldDoc, RROpts),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-wrap({Name, Fun}) ->
-    fun(Arg) ->
-        {timeout, 60, {atom_to_list(Name), fun() ->
-            process_flag(trap_exit, true),
-            Fun(Arg)
-        end}}
-    end.
-
-
-create_db() ->
-    DbName = ?tempdb(),
-    couch_db:create(DbName, [?ADMIN_CTX]).
-
-
-populate_db(Db) ->
-    Docs = lists:map(fun(Idx) ->
-        DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
-        #doc{
-            id = list_to_binary(DocId),
-            body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
-        }
-    end, lists:seq(1, 100)),
-    {ok, _} = couch_db:update_docs(Db, Docs).
-
-
-open_doc(DbName, DocId) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        couch_db:open_doc(Db, DocId, [])
-    end).
-
-
-create_update(Doc, NewVsn) ->
-    #doc{
-        id = DocId,
-        revs = {Pos, [Rev | _] = Revs},
-        body = {Props}
-    } = Doc,
-    NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}),
-    NewRev = crypto:hash(md5, term_to_binary({DocId, Rev, {NewProps}})),
-    Doc#doc{
-        revs = {Pos + 1, [NewRev | Revs]},
-        body = {NewProps}
-    }.
-
-
-purge_doc(DbName, DocId) ->
-    {ok, Doc} = open_doc(DbName, DocId),
-    {Pos, [Rev | _]} = Doc#doc.revs,
-    PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]},
-    Resp = couch_util:with_db(DbName, fun(Db) ->
-        couch_db:purge_docs(Db, [PInfo], [])
-    end),
-    ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp).
-
-
-create_purge_checkpoint(DbName, PurgeSeq) ->
-    create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()).
-
-
-create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) ->
-    Resp = couch_util:with_db(DbName, fun(Db) ->
-        SrcUUID = couch_db:get_uuid(Db),
-        TgtUUID = couch_uuids:random(),
-        CPDoc = #doc{
-            id = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
-            body = {[
-                {<<"target_node">>, TgtNode},
-                {<<"purge_seq">>, PurgeSeq}
-            ]}
-        },
-        couch_db:update_docs(Db, [CPDoc], [])
-    end),
-    ?assertMatch({ok, [_]}, Resp).
-
-
-rpc_update_doc(DbName, Doc) ->
-    {Pos, [Rev | _]} = Doc#doc.revs,
-    RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]},
-    rpc_update_doc(DbName, Doc, [RROpt]).
-
-
-rpc_update_doc(DbName, Doc, Opts) ->
-    Ref = erlang:make_ref(),
-    put(rexi_from, {self(), Ref}),
-    fabric_rpc:update_docs(DbName, [Doc], Opts),
-    Reply = test_util:wait(fun() ->
-        receive
-            {Ref, Reply} ->
-                Reply
-        after 0 ->
-            wait
-        end
-    end),
-    ?assertEqual({ok, []}, Reply).
-
-
-tgt_node() ->
-    'foo@127.0.0.1'.
-
-
-tgt_node_bin() ->
-    iolist_to_binary(atom_to_list(tgt_node())).
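
For reference, the `?TDEF`/`wrap/1` idiom in the deleted suite above is a small
eunit pattern for giving fixture tests readable names and per-test timeouts. A
minimal self-contained sketch (module and test names here are illustrative,
not from the tree):

    -module(tdef_sketch).
    -include_lib("eunit/include/eunit.hrl").

    -define(TDEF(A), {A, fun A/1}).

    %% Turn a {Name, Fun/1} pair into an eunit instantiator that labels
    %% the test with its function name and caps it at 60 seconds.
    wrap({Name, Fun}) ->
        fun(Ctx) ->
            {timeout, 60, {atom_to_list(Name), fun() -> Fun(Ctx) end}}
        end.

    sketch_test_() ->
        {foreach,
            fun() -> some_ctx end,   % per-test setup
            fun(_Ctx) -> ok end,     % per-test teardown
            lists:map(fun wrap/1, [
                ?TDEF(t_example)
            ])}.

    t_example(Ctx) ->
        ?assertEqual(some_ctx, Ctx).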


[couchdb] 25/34: Implement `POST /_dbs_info`

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 769699974daba5140967ca9790715f51785be455
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jul 12 10:19:30 2019 -0500

    Implement `POST /_dbs_info`
---
 src/chttpd/src/chttpd_misc.erl | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index e5f0002..11d2c5b 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -157,7 +157,7 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
     {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
     {ok, Acc#vacc{resp=Resp1}}.
 
-handle_dbs_info_req(#httpd{method='POST'}=Req) ->
+handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
     Props = chttpd:json_body_obj(Req),
     Keys = couch_mrview_util:get_view_keys(Props),
@@ -174,13 +174,14 @@ handle_dbs_info_req(#httpd{method='POST'}=Req) ->
     {ok, Resp} = chttpd:start_json_response(Req, 200),
     send_chunk(Resp, "["),
     lists:foldl(fun(DbName, AccSeparator) ->
-        case catch fabric:get_db_info(DbName) of
-            {ok, Result} ->
-                Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
-                send_chunk(Resp, AccSeparator ++ Json);
-            _ ->
-                Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
-                send_chunk(Resp, AccSeparator ++ Json)
+        try
+            {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+            {ok, Info} = fabric2_db:get_db_info(Db),
+            Json = ?JSON_ENCODE({[{key, DbName}, {info, {Info}}]}),
+            send_chunk(Resp, AccSeparator ++ Json)
+        catch error:database_does_not_exist ->
+            ErrJson = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+            send_chunk(Resp, AccSeparator ++ ErrJson)
         end,
         "," % AccSeparator now has a comma
     end, "", Keys),


[couchdb] 32/34: Expose the is_replicator_db and is_user_db logic

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d5a5426ea10482b3c07128d443b7ad658260eb5c
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jul 16 14:26:11 2019 -0500

    Expose the is_replicator_db and is_user_db logic
    
    This exposes a single place where we can check for whether a given
    database or database name is a replicator or users database.
---
 src/fabric/src/fabric2_db.erl   | 37 +++++++++++++++++++++++++++----------
 src/fabric/src/fabric2_util.erl |  6 ++++--
 2 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 3c3b7d3..c926da9 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -55,6 +55,8 @@
     is_partitioned/1,
     is_system_db/1,
     is_system_db_name/1,
+    is_replicator_db/1,
+    is_users_db/1,
 
     set_revs_limit/2,
     %% set_purge_infos_limit/2,
@@ -379,6 +381,29 @@ is_system_db_name(DbName) when is_binary(DbName) ->
     end.
 
 
+is_replicator_db(#{name := DbName}) ->
+    is_replicator_db(DbName);
+
+is_replicator_db(DbName) when is_binary(DbName) ->
+    fabric2_util:dbname_ends_with(DbName, <<"_replicator">>).
+
+
+is_users_db(#{name := DbName}) ->
+    is_users_db(DbName);
+
+is_users_db(DbName) when is_binary(DbName) ->
+    AuthenticationDb = config:get("chttpd_auth", "authentication_db"),
+    CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
+
+    IsAuthCache = if AuthenticationDb == undefined -> false; true ->
+        DbName == ?l2b(AuthenticationDb)
+    end,
+    IsCfgUsersDb = fabric2_util:dbname_ends_with(DbName, ?l2b(CfgUsersSuffix)),
+    IsGlobalUsersDb = fabric2_util:dbname_ends_with(DbName, <<"_users">>),
+
+    IsAuthCache orelse IsCfgUsersDb orelse IsGlobalUsersDb.
+
+
 set_revs_limit(#{} = Db, RevsLimit) ->
     check_is_admin(Db),
     RevsLimBin = ?uint2bin(RevsLimit),
@@ -734,16 +759,8 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
 
 
 maybe_add_sys_db_callbacks(Db) ->
-    IsReplicatorDb = fabric2_util:dbname_ends_with(Db, <<"_replicator">>),
-
-    AuthenticationDb = config:get("chttpd_auth", "authentication_db"),
-    IsAuthCache = if AuthenticationDb == undefined -> false; true ->
-        name(Db) == ?l2b(AuthenticationDb)
-    end,
-    CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
-    IsCfgUsersDb = fabric2_util:dbname_ends_with(Db, ?l2b(CfgUsersSuffix)),
-    IsGlobalUsersDb = fabric2_util:dbname_ends_with(Db, <<"_users">>),
-    IsUsersDb = IsAuthCache orelse IsCfgUsersDb orelse IsGlobalUsersDb,
+    IsReplicatorDb = is_replicator_db(Db),
+    IsUsersDb = is_users_db(Db),
 
     {BDU, ADR} = if
         IsReplicatorDb ->
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index 48bf7d1..2b8e49e 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -124,8 +124,10 @@ validate_json_list_of_strings(Member, Props) ->
     end.
 
 
-dbname_ends_with(#{} = Db, Suffix) when is_binary(Suffix) ->
-    DbName = fabric2_db:name(Db),
+dbname_ends_with(#{} = Db, Suffix) ->
+    dbname_ends_with(fabric2_db:name(Db), Suffix);
+
+dbname_ends_with(DbName, Suffix) when is_binary(DbName), is_binary(Suffix) ->
     Suffix == filename:basename(DbName).
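
A hedged shell illustration of the new predicates (database names invented;
assumes a booted node with the config application running and no custom
`authentication_db` set). Because the suffix check uses filename:basename/1,
both plain and prefixed names match:

    1> fabric2_db:is_replicator_db(<<"_replicator">>).
    true
    2> fabric2_db:is_replicator_db(<<"mydb/_replicator">>).
    true
    3> fabric2_db:is_users_db(<<"_users">>).
    true
    4> fabric2_db:is_users_db(<<"mydb">>).
    false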
 
 


[couchdb] 22/34: Fix bulk docs error reporting

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 633d894a3cdf140847d7dc5c4fb9b9fe4baf72de
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 11 14:25:24 2019 -0500

    Fix bulk docs error reporting
    
    The existing logic around return codes and term formats is labyrinthine.
    This is the result of much trial and error to get the new logic to
    behave exactly the same as the previous implementation.
---
 src/chttpd/src/chttpd_db.erl               |   2 +
 src/fabric/src/fabric2_db.erl              | 108 +++++++++++++++++------------
 src/fabric/test/fabric2_doc_crud_tests.erl |  20 ++----
 3 files changed, 73 insertions(+), 57 deletions(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 90869c6..abdd825 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -1337,6 +1337,8 @@ update_doc_result_to_json(DocId, {ok, NewRev}) ->
     {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
 update_doc_result_to_json(DocId, {accepted, NewRev}) ->
     {[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}, {accepted, true}]};
+update_doc_result_to_json(DocId, {{DocId, _}, Error}) ->
+    update_doc_result_to_json(DocId, Error);
 update_doc_result_to_json(DocId, Error) ->
     {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
     {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index eb74a18..3ea30e7 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -584,46 +584,52 @@ update_docs(Db, Docs) ->
 
 update_docs(Db, Docs0, Options) ->
     Docs1 = apply_before_doc_update(Db, Docs0, Options),
-    Resps0 = case lists:member(replicated_changes, Options) of
-        false ->
-            fabric2_fdb:transactional(Db, fun(TxDb) ->
-                update_docs_interactive(TxDb, Docs1, Options)
-            end);
-        true ->
-            lists:map(fun(Doc) ->
+    try
+        validate_atomic_update(Docs0, lists:member(all_or_nothing, Options)),
+        Resps0 = case lists:member(replicated_changes, Options) of
+            false ->
                 fabric2_fdb:transactional(Db, fun(TxDb) ->
-                    update_doc_int(TxDb, Doc, Options)
-                end)
-            end, Docs1)
-    end,
-    % Convert errors
-    Resps1 = lists:map(fun(Resp) ->
-        case Resp of
-            {#doc{} = Doc, Error} ->
-                #doc{
-                    id = DocId,
-                    revs = Revs
-                } = Doc,
-                RevId = case Revs of
-                    {RevPos, [Rev | _]} -> {RevPos, Rev};
-                    {0, []} -> {0, <<>>}
-                end,
-                {{DocId, RevId}, Error};
-            Else ->
-                Else
+                    update_docs_interactive(TxDb, Docs1, Options)
+                end);
+            true ->
+                lists:map(fun(Doc) ->
+                    fabric2_fdb:transactional(Db, fun(TxDb) ->
+                        update_doc_int(TxDb, Doc, Options)
+                    end)
+                end, Docs1)
+        end,
+        % Convert errors
+        Resps1 = lists:map(fun(Resp) ->
+            case Resp of
+                {#doc{} = Doc, Error} ->
+                    #doc{
+                        id = DocId,
+                        revs = Revs
+                    } = Doc,
+                    RevId = case Revs of
+                        {RevPos, [Rev | _]} -> {RevPos, Rev};
+                        {0, []} -> {0, <<>>};
+                        Else -> Else
+                    end,
+                    {{DocId, RevId}, Error};
+                Else ->
+                    Else
+            end
+        end, Resps0),
+        case lists:member(replicated_changes, Options) of
+            true ->
+                {ok, lists:flatmap(fun(R) ->
+                    case R of
+                        {ok, []} -> [];
+                        {{_, _}, {ok, []}} -> [];
+                        Else -> [Else]
+                    end
+                end, Resps1)};
+            false ->
+                {ok, Resps1}
         end
-    end, Resps0),
-    case lists:member(replicated_changes, Options) of
-        true ->
-            {ok, [R || R <- Resps1, R /= {ok, []}]};
-        false ->
-            Status = lists:foldl(fun(Resp, Acc) ->
-                case Resp of
-                    {ok, _} -> Acc;
-                    _ -> error
-                end
-            end, ok, Resps1),
-            {Status, Resps1}
+    catch throw:{aborted, Errors} ->
+        {aborted, Errors}
     end.
 
 
@@ -1023,7 +1029,7 @@ update_docs_interactive(Db, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
 update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
     case lists:member(Doc#doc.id, SeenIds) of
         true ->
-            {{error, conflict}, SeenIds};
+            {conflict, SeenIds};
         false ->
             Future = maps:get(doc_tag(Doc), Futures),
             case update_doc_interactive(Db, Doc, Future, Options) of
@@ -1066,12 +1072,12 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
     % Check that a revision was specified if required
     Doc0RevId = doc_to_revid(Doc0),
     if Doc0RevId /= {0, <<>>} orelse WinnerRevId == {0, <<>>} -> ok; true ->
-        ?RETURN({error, conflict})
+        ?RETURN({Doc0, conflict})
     end,
 
     % Check that we're not trying to create a deleted doc
     if Doc0RevId /= {0, <<>>} orelse not Doc0#doc.deleted -> ok; true ->
-        ?RETURN({error, conflict})
+        ?RETURN({Doc0, conflict})
     end,
 
     % Get the target revision to update
@@ -1088,7 +1094,7 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
                     % that we get not_found for a deleted revision
                     % because we only check for the non-deleted
                     % key in fdb
-                    ?RETURN({error, conflict})
+                    ?RETURN({Doc0, conflict})
             end
     end,
 
@@ -1191,7 +1197,7 @@ update_doc_replicated(Db, Doc0, _Options) ->
     if Status /= internal_node -> ok; true ->
         % We already know this revision so nothing
         % left to do.
-        ?RETURN({ok, []})
+        ?RETURN({Doc0, {ok, []}})
     end,
 
     % It's possible to have a replication with fewer than $revs_limit
@@ -1248,7 +1254,7 @@ update_doc_replicated(Db, Doc0, _Options) ->
 update_local_doc(Db, Doc0, _Options) ->
     Doc1 = case increment_local_doc_rev(Doc0) of
         {ok, Updated} -> Updated;
-        {error, _} = Error -> ?RETURN(Error)
+        {error, Error} -> ?RETURN({Doc0, Error})
     end,
 
     ok = fabric2_fdb:write_local_doc(Db, Doc1),
@@ -1367,6 +1373,20 @@ validate_ddoc(Db, DDoc) ->
     end.
 
 
+validate_atomic_update(_, false) ->
+    ok;
+validate_atomic_update(AllDocs, true) ->
+    % TODO actually perform the validation. This requires some hackery: we need
+    % to basically extract the prep_and_validate_updates function from couch_db
+    % and only run that, without actually writing in case of a success.
+    Error = {not_implemented, <<"all_or_nothing is not supported">>},
+    PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
+        case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
+        {{Id, {Pos, RevId}}, Error}
+    end, AllDocs),
+    throw({aborted, PreCommitFailures}).
+
+
 check_duplicate_attachments(#doc{atts = Atts}) ->
     lists:foldl(fun(Att, Names) ->
         Name = couch_att:fetch(name, Att),
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
index 85b2766..c19c474 100644
--- a/src/fabric/test/fabric2_doc_crud_tests.erl
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -408,7 +408,7 @@ conflict_on_create_new_with_rev({Db, _}) ->
         revs = {1, [fabric2_util:uuid()]},
         body = {[{<<"foo">>, <<"bar">>}]}
     },
-    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+    ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc)).
 
 
 conflict_on_update_with_no_rev({Db, _}) ->
@@ -421,7 +421,7 @@ conflict_on_update_with_no_rev({Db, _}) ->
         revs = {0, []},
         body = {[{<<"state">>, 2}]}
     },
-    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc2)).
+    ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc2)).
 
 
 conflict_on_create_as_deleted({Db, _}) ->
@@ -430,7 +430,7 @@ conflict_on_create_as_deleted({Db, _}) ->
         deleted = true,
         body = {[{<<"foo">>, <<"bar">>}]}
     },
-    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+    ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc)).
 
 
 conflict_on_recreate_as_deleted({Db, _}) ->
@@ -450,7 +450,7 @@ conflict_on_recreate_as_deleted({Db, _}) ->
         deleted = true,
         body = {[{<<"state">>, 3}]}
     },
-    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+    ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc3)).
 
 
 conflict_on_extend_deleted({Db, _}) ->
@@ -470,7 +470,7 @@ conflict_on_extend_deleted({Db, _}) ->
         deleted = false,
         body = {[{<<"state">>, 3}]}
     },
-    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+    ?assertThrow(conflict, fabric2_db:update_doc(Db, Doc3)).
 
 
 open_doc_revs_basic({Db, _}) ->
@@ -725,18 +725,12 @@ create_local_doc_bad_rev({Db, _}) ->
         id = LDocId,
         revs = {0, [<<"not a number">>]}
     },
-    ?assertThrow(
-            {error, <<"Invalid rev format">>},
-            fabric2_db:update_doc(Db, Doc1)
-        ),
+    ?assertThrow(<<"Invalid rev format">>, fabric2_db:update_doc(Db, Doc1)),
 
     Doc2 = Doc1#doc{
         revs = bad_bad_rev_roy_brown
     },
-    ?assertThrow(
-            {error, <<"Invalid rev format">>},
-            fabric2_db:update_doc(Db, Doc2)
-        ).
+    ?assertThrow(<<"Invalid rev format">>, fabric2_db:update_doc(Db, Doc2)).
 
 
 create_local_doc_random_rev({Db, _}) ->
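
To make the term plumbing above concrete, a sketch of the shapes involved
(the doc id and rev below are invented; the reason string is whatever
chttpd:error_info/1 maps the error to):

    %% fabric2_db:update_docs/3 now reports a per-document failure as:
    %%
    %%     {{DocId, {RevPos, Rev}}, Error}
    %%
    %% e.g. {{<<"d1">>, {1, <<"abcdef">>}}, conflict}. The new clause in
    %% chttpd_db:update_doc_result_to_json/2 strips the {DocId, _} key
    %% before rendering, so the bulk docs response row comes out roughly as:
    %%
    %%     {"id": "d1", "error": "conflict", "reason": "..."}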


[couchdb] 13/34: Fix validate_doc_update when recreating a document

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit da85a5c574d6bd376bf02905bdf358979f3a60a5
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Mon Jun 10 14:35:11 2019 -0500

    Fix validate_doc_update when recreating a document
    
    This fixes the behavior when validating a document update that is
    recreating a previously deleted document. Before this fix we were
    sending a document body with `"_deleted":true` as the existing document.
    However, CouchDB behavior expects the previous document passed to VDU's
    to be `null` in this case.
---
 src/fabric/src/fabric2_db.erl | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index acd473f..48e50f1 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -1196,8 +1196,13 @@ prep_and_validate(Db, NewDoc, PrevRevInfo) ->
         _ -> false
     end,
 
+    WasDeleted = case PrevRevInfo of
+        not_found -> false;
+        #{deleted := D} -> D
+    end,
+
     PrevDoc = case HasStubs orelse (HasVDUs and not IsDDoc) of
-        true when PrevRevInfo /= not_found ->
+        true when PrevRevInfo /= not_found, not WasDeleted ->
             case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
                 #doc{} = PDoc -> PDoc;
                 {not_found, _} -> nil


[couchdb] 01/34: Update build system for FoundationDB

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 609a45ddbcf9fcf087287e5299c3dc5638d7fbc2
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:04:56 2019 -0500

    Update build system for FoundationDB
---
 .gitignore             | 3 +++
 Makefile               | 6 +++++-
 dev/run                | 9 ++++++++-
 rebar.config.script    | 7 ++++++-
 rel/files/eunit.config | 3 ++-
 test/elixir/run-only   | 3 +++
 6 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/.gitignore b/.gitignore
index 6b9198d..3c8bf0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 .venv
 .DS_Store
 .rebar/
+.erlfdb/
 .eunit/
 log
 apache-couchdb-*/
@@ -44,6 +45,7 @@ src/couch/priv/couch_js/util.d
 src/couch/priv/icu_driver/couch_icu_driver.d
 src/mango/src/mango_cursor_text.nocompile
 src/docs/
+src/erlfdb/
 src/ets_lru/
 src/fauxton/
 src/folsom/
@@ -51,6 +53,7 @@ src/hqueue/
 src/hyper/
 src/ibrowse/
 src/ioq/
+src/hqueue/
 src/jiffy/
 src/ken/
 src/khash/
diff --git a/Makefile b/Makefile
index ed22509..8039106 100644
--- a/Makefile
+++ b/Makefile
@@ -234,7 +234,11 @@ python-black-update: .venv/bin/black
 .PHONY: elixir
 elixir: export MIX_ENV=integration
 elixir: elixir-init elixir-check-formatted elixir-credo devclean
-	@dev/run -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+	@dev/run --erlang-config=rel/files/eunit.config -n 1 -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-only
+elixir-only: devclean
+	@dev/run --erlang-config=rel/files/eunit.config -n 1 -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
 
 .PHONY: elixir-init
 elixir-init:
diff --git a/dev/run b/dev/run
index 60e7d5c..72f5a47 100755
--- a/dev/run
+++ b/dev/run
@@ -181,6 +181,12 @@ def setup_argparse():
         help="Optional key=val config overrides. Can be repeated",
     )
     parser.add_option(
+        "--erlang-config",
+        dest="erlang_config",
+        default="rel/files/sys.config",
+        help="Specify an alternative Erlang application configuration"
+    )
+    parser.add_option(
         "--degrade-cluster",
         dest="degrade_cluster",
         type=int,
@@ -222,6 +228,7 @@ def setup_context(opts, args):
         "haproxy": opts.haproxy,
         "haproxy_port": opts.haproxy_port,
         "config_overrides": opts.config_overrides,
+        "erlang_config": opts.erlang_config,
         "no_eval": opts.no_eval,
         "reset_logs": True,
         "procs": [],
@@ -559,7 +566,7 @@ def boot_node(ctx, node):
         "-args_file",
         os.path.join(node_etcdir, "vm.args"),
         "-config",
-        os.path.join(reldir, "files", "sys"),
+        os.path.join(ctx["rootdir"], ctx["erlang_config"]),
         "-couch_ini",
         os.path.join(node_etcdir, "default.ini"),
         os.path.join(node_etcdir, "local.ini"),
diff --git a/rebar.config.script b/rebar.config.script
index 6445057..d7c0d9a 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -117,7 +117,10 @@ DepDescs = [
 {ibrowse,          "ibrowse",          {tag, "CouchDB-4.0.1-1"}},
 {jiffy,            "jiffy",            {tag, "CouchDB-0.14.11-2"}},
 {mochiweb,         "mochiweb",         {tag, "v2.19.0"}},
-{meck,             "meck",             {tag, "0.8.8"}}
+{meck,             "meck",             {tag, "0.8.8"}},
+
+%% TMP - Until this is moved to a proper Apache repo
+{erlfdb,           "erlfdb",           {branch, "master"}}
 ],
 
 WithProper = lists:keyfind(with_proper, 1, CouchConfig) == {with_proper, true},
@@ -132,6 +135,8 @@ end,
 BaseUrl = "https://github.com/apache/",
 
 MakeDep = fun
+    ({erlfdb, _, Version}) ->
+        {erlfdb, ".*", {git, "https://github.com/cloudant-labs/couchdb-erlfdb", {branch, "master"}}};
     ({AppName, {url, Url}, Version}) ->
         {AppName, ".*", {git, Url, Version}};
     ({AppName, {url, Url}, Version, Options}) ->
diff --git a/rel/files/eunit.config b/rel/files/eunit.config
index 3c7457d..5e96fae 100644
--- a/rel/files/eunit.config
+++ b/rel/files/eunit.config
@@ -12,5 +12,6 @@
 
 [
     {kernel, [{error_logger, silent}]},
-    {sasl, [{sasl_error_logger, false}]}
+    {sasl, [{sasl_error_logger, false}]},
+    {fabric, [{eunit_run, true}]}
 ].
diff --git a/test/elixir/run-only b/test/elixir/run-only
new file mode 100755
index 0000000..7c2a4ae
--- /dev/null
+++ b/test/elixir/run-only
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+cd "$(dirname "$0")"
+mix test --trace "$@"


[couchdb] 26/34: Fix formatting of all_docs_test.exs

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 79ea59e68a45cb41cb21ebcedb82f2880b40075a
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jul 12 10:25:35 2019 -0500

    Fix formatting of all_docs_test.exs
---
 test/elixir/test/all_docs_test.exs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
index dab153a..7e154eb 100644
--- a/test/elixir/test/all_docs_test.exs
+++ b/test/elixir/test/all_docs_test.exs
@@ -44,7 +44,7 @@ defmodule AllDocsTest do
     retry_until(fn ->
       resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
       assert resp["offset"] == :null
-			assert Enum.at(resp["rows"], 0)["key"] == "2"
+      assert Enum.at(resp["rows"], 0)["key"] == "2"
     end)
 
     # Confirm that queries may assume raw collation


[couchdb] 08/34: Implement attachment compression

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit c4f11827afa179bfe4ecf1f2395054cc7f8e5edf
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jun 6 13:30:01 2019 -0500

    Implement attachment compression
    
    This still holds all attachment data in RAM, which we'll have to revisit
    at some point.
---
 src/couch/src/couch_att.erl           | 109 +++++++++++++++++++++++-----------
 test/elixir/test/replication_test.exs |   7 ++-
 2 files changed, 77 insertions(+), 39 deletions(-)

diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index 0dc5fa5..90d3644 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -383,8 +383,8 @@ flush(Db, DocId, Att1) ->
 
     % If we were sent a gzip'ed attachment with no
     % length data, we have to set it here.
-    Att3 = case AttLen of
-        undefined -> store(att_len, DiskLen, Att2);
+    Att3 = case DiskLen of
+        undefined -> store(disk_len, AttLen, Att2);
         _ -> Att2
     end,
 
@@ -400,12 +400,13 @@ flush(Db, DocId, Att1) ->
             % Already flushed
             Att1;
         _ when is_binary(Data) ->
-            IdentMd5 = get_identity_md5(Data, fetch(encoding, Att4)),
+            DataMd5 = couch_hash:md5_hash(Data),
             if ReqMd5 == undefined -> ok; true ->
-                couch_util:check_md5(IdentMd5, ReqMd5)
+                couch_util:check_md5(DataMd5, ReqMd5)
             end,
-            Att5 = store(md5, IdentMd5, Att4),
-            fabric2_db:write_attachment(Db, DocId, Att5)
+            Att5 = store(md5, DataMd5, Att4),
+            Att6 = maybe_compress(Att5),
+            fabric2_db:write_attachment(Db, DocId, Att6)
     end.
 
 
@@ -451,7 +452,7 @@ read_data(Fun, Att) when is_function(Fun) ->
                     end,
                     Props0 = [
                         {data, iolist_to_binary(lists:reverse(Acc))},
-                        {disk_len, Len}
+                        {att_len, Len}
                     ],
                     Props1 = if InMd5 /= md5_in_footer -> Props0; true ->
                         [{md5, Md5} | Props0]
@@ -473,7 +474,7 @@ read_streamed_attachment(Att, _F, 0, Acc) ->
     Bin = iolist_to_binary(lists:reverse(Acc)),
     store([
         {data, Bin},
-        {disk_len, size(Bin)}
+        {att_len, size(Bin)}
     ], Att);
 
 read_streamed_attachment(_Att, _F, LenLeft, _Acc) when LenLeft < 0 ->
@@ -550,8 +551,23 @@ range_foldl(Att, From, To, Fun, Acc) ->
     range_foldl(Bin, From, To, Fun, Acc).
 
 
-foldl_decode(_Att, _Fun, _Acc) ->
-    erlang:error(not_supported).
+foldl_decode(Att, Fun, Acc) ->
+    [Encoding, Data] = fetch([encoding, data], Att),
+    case {Encoding, Data} of
+        {gzip, {loc, Db, DocId, AttId}} ->
+            NoTxDb = Db#{tx := undefined},
+            Bin = fabric2_db:read_attachment(NoTxDb, DocId, AttId),
+            foldl_decode(store(data, Bin, Att), Fun, Acc);
+        {gzip, _} when is_binary(Data) ->
+            Z = zlib:open(),
+            ok = zlib:inflateInit(Z, 16 + 15),
+            Inflated = iolist_to_binary(zlib:inflate(Z, Data)),
+            ok = zlib:inflateEnd(Z),
+            ok = zlib:close(Z),
+            foldl(Inflated, Att, Fun, Acc);
+        _ ->
+            foldl(Att, Fun, Acc)
+    end.
 
 
 to_binary(Att) ->
@@ -563,7 +579,8 @@ to_binary(Bin, _Att) when is_binary(Bin) ->
 to_binary(Iolist, _Att) when is_list(Iolist) ->
     iolist_to_binary(Iolist);
 to_binary({loc, Db, DocId, AttId}, _Att) ->
-    fabric2_db:read_attachmet(Db, DocId, AttId);
+    NoTxDb = Db#{tx := undefined},
+    fabric2_db:read_attachment(NoTxDb, DocId, AttId);
 to_binary(DataFun, Att) when is_function(DataFun)->
     Len = fetch(att_len, Att),
     iolist_to_binary(
@@ -585,15 +602,53 @@ fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
     fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
 
 
-get_identity_md5(Bin, gzip) ->
+maybe_compress(Att) ->
+    [Encoding, Type] = fetch([encoding, type], Att),
+    IsCompressible = is_compressible(Type),
+    CompLevel = config:get_integer("attachments", "compression_level", 0),
+    case Encoding of
+        identity when IsCompressible, CompLevel >= 1, CompLevel =< 9 ->
+            compress(Att, CompLevel);
+        _ ->
+            Att
+    end.
+
+
+compress(Att, Level) ->
+    Data = fetch(data, Att),
+
     Z = zlib:open(),
-    ok = zlib:inflateInit(Z, 16 + 15),
-    Inflated = zlib:inflate(Z, Bin),
-    ok = zlib:inflateEnd(Z),
+    % 15 = ?MAX_WBITS (defined in the zlib module)
+    % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
+    ok = zlib:deflateInit(Z, Level, deflated, 16 + 15, 8, default),
+    CompData = iolist_to_binary(zlib:deflate(Z, Data, finish)),
+    ok = zlib:deflateEnd(Z),
     ok = zlib:close(Z),
-    couch_hash:md5_hash(Inflated);
-get_identity_md5(Bin, _) ->
-    couch_hash:md5_hash(Bin).
+
+    store([
+        {att_len, size(CompData)},
+        {md5, couch_hash:md5_hash(CompData)},
+        {data, CompData},
+        {encoding, gzip}
+    ], Att).
+
+
+is_compressible(Type) when is_binary(Type) ->
+    is_compressible(binary_to_list(Type));
+is_compressible(Type) ->
+    TypeExpList = re:split(
+        config:get("attachments", "compressible_types", ""),
+        "\\s*,\\s*",
+        [{return, list}]
+    ),
+    lists:any(
+        fun(TypeExp) ->
+            Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+                "(?:\\s*;.*?)?\\s*", $$],
+            re:run(Type, Regexp, [caseless]) =/= nomatch
+        end,
+        [T || T <- TypeExpList, T /= []]
+    ).
 
 
 max_attachment_size() ->
@@ -612,24 +667,6 @@ validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
     ok.
 
 
-%% is_compressible(Type) when is_binary(Type) ->
-%%     is_compressible(binary_to_list(Type));
-%% is_compressible(Type) ->
-%%     TypeExpList = re:split(
-%%         config:get("attachments", "compressible_types", ""),
-%%         "\\s*,\\s*",
-%%         [{return, list}]
-%%     ),
-%%     lists:any(
-%%         fun(TypeExp) ->
-%%             Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
-%%                 "(?:\\s*;.*?)?\\s*", $$],
-%%             re:run(Type, Regexp, [caseless]) =/= nomatch
-%%         end,
-%%         [T || T <- TypeExpList, T /= []]
-%%     ).
-
-
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
index 6d4360d..3ea525a 100644
--- a/test/elixir/test/replication_test.exs
+++ b/test/elixir/test/replication_test.exs
@@ -713,9 +713,10 @@ defmodule ReplicationTest do
 
       assert tgt_info["doc_count"] == src_info["doc_count"]
 
-      src_shards = seq_to_shards(src_info["update_seq"])
-      tgt_shards = seq_to_shards(tgt_info["update_seq"])
-      assert tgt_shards == src_shards
+      # This assertion is no longer valid
+      # src_shards = seq_to_shards(src_info["update_seq"])
+      # tgt_shards = seq_to_shards(tgt_info["update_seq"])
+      # assert tgt_shards == src_shards
     end)
   end
 


[couchdb] 21/34: Implement _all_dbs/_all_docs API parameters

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit a8e306d5dca1cb647d5dc51b73aa10d611ae291d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 19 11:58:47 2019 -0500

    Implement _all_dbs/_all_docs API parameters
    
    This adds the mapping of CouchDB start/end keys and so on to the similar
    yet slightly different concepts in FoundationDB. The handlers for
    `_all_dbs` and `_all_docs` have been updated to use this new logic.
---
 src/chttpd/src/chttpd_changes.erl          |  10 +-
 src/chttpd/src/chttpd_db.erl               | 220 +++++++++++++++++----------
 src/chttpd/src/chttpd_misc.erl             |  67 ++++----
 src/fabric/src/fabric2_db.erl              | 143 +++++++++++++++--
 src/fabric/src/fabric2_fdb.erl             | 236 ++++++++++++++---------------
 src/fabric/test/fabric2_doc_fold_tests.erl |  84 +++++++++-
 test/elixir/test/all_docs_test.exs         |   3 +-
 7 files changed, 512 insertions(+), 251 deletions(-)

diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 0e03482..c9107d1 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -871,15 +871,19 @@ changes_row(Results, Change, Acc) ->
 maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
     #changes_acc{
         db = Db,
-        doc_options = DocOpts,
+        doc_options = DocOpts0,
         conflicts = Conflicts,
         filter = Filter
     } = Acc,
-    Opts = case Conflicts of
+    OpenOpts = case Conflicts of
         true -> [deleted, conflicts];
         false -> [deleted]
     end,
-    load_doc(Db, Value, Opts, DocOpts, Filter);
+    DocOpts1 = case Conflicts of
+        true -> [conflicts | DocOpts0];
+        false -> DocOpts0
+    end,
+    load_doc(Db, Value, OpenOpts, DocOpts1, Filter);
 
 maybe_get_changes_doc(_Value, _Acc) ->
     [].
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index c0ac1ca..90869c6 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -16,6 +16,7 @@
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("fabric/include/fabric.hrl").
 -include_lib("mem3/include/mem3.hrl").
 
 -export([handle_request/1, handle_compact_req/2, handle_design_req/2,
@@ -825,21 +826,151 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
     {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
     chttpd:end_delayed_json_response(Resp1).
 
-all_docs_view(Req, Db, _Keys, _OP) ->
-    % Args0 = couch_mrview_http:parse_params(Req, Keys),
-    % Args1 = Args0#mrargs{view_type=map},
-    % Args2 = fabric_util:validate_all_docs_args(Db, Args1),
-    % Args3 = set_namespace(OP, Args2),
-    Options = [{user_ctx, Req#httpd.user_ctx}],
+all_docs_view(Req, Db, Keys, OP) ->
+    Args0 = couch_mrview_http:parse_params(Req, Keys),
+    Args1 = set_namespace(OP, Args0),
     Max = chttpd:chunked_response_buffer_size(),
-    VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    {ok, Resp} = fabric2_db:fold_docs(Db, fun view_cb/2, VAcc, Options),
-    {ok, Resp#vacc.resp}.
+    VAcc0 = #vacc{
+        db = Db,
+        req = Req,
+        threshold = Max
+    },
+    case Args1#mrargs.keys of
+        undefined ->
+            Options = [
+                {user_ctx, Req#httpd.user_ctx},
+                {dir, Args1#mrargs.direction},
+                {start_key, Args1#mrargs.start_key},
+                {end_key, Args1#mrargs.end_key},
+                {limit, Args1#mrargs.limit},
+                {skip, Args1#mrargs.skip},
+                {update_seq, Args1#mrargs.update_seq}
+            ],
+            Acc = {iter, Db, Args1, VAcc0},
+            {ok, {iter, _, _, Resp}} =
+                    fabric2_db:fold_docs(Db, fun view_cb/2, Acc, Options),
+            {ok, Resp#vacc.resp};
+        Keys0 when is_list(Keys0) ->
+            Keys1 = apply_args_to_keylist(Args1, Keys0),
+            %% namespace can be _set_ to `undefined`, so we
+            %% want simulate enum here
+            NS = case couch_util:get_value(namespace, Args1#mrargs.extra) of
+                <<"_all_docs">> -> <<"_all_docs">>;
+                <<"_design">> -> <<"_design">>;
+                <<"_local">> -> <<"_local">>;
+                _ -> <<"_all_docs">>
+            end,
+            TotalRows = fabric2_db:get_doc_count(Db, NS),
+            Meta = case Args1#mrargs.update_seq of
+                true ->
+                    UpdateSeq = fabric2_db:get_update_seq(Db),
+                    [{update_seq, UpdateSeq}];
+                false ->
+                    []
+            end ++ [{total, TotalRows}, {offset, null}],
+            {ok, VAcc1} = view_cb({meta, Meta}, VAcc0),
+            DocOpts = case Args1#mrargs.conflicts of
+                true -> [conflicts | Args1#mrargs.doc_options];
+                _ -> Args1#mrargs.doc_options
+            end ++ [{user_ctx, Req#httpd.user_ctx}],
+            IncludeDocs = Args1#mrargs.include_docs,
+            VAcc2 = lists:foldl(fun(DocId, Acc) ->
+                OpenOpts = [deleted | DocOpts],
+                Row0 = case fabric2_db:open_doc(Db, DocId, OpenOpts) of
+                    {not_found, missing} ->
+                        #view_row{key = DocId};
+                    {ok, #doc{deleted = true, revs = Revs}} ->
+                        {RevPos, [RevId | _]} = Revs,
+                        Value = {[
+                            {rev, couch_doc:rev_to_str({RevPos, RevId})},
+                            {deleted, true}
+                        ]},
+                        DocValue = if not IncludeDocs -> undefined; true ->
+                            null
+                        end,
+                        #view_row{
+                            key = DocId,
+                            id = DocId,
+                            value = Value,
+                            doc = DocValue
+                        };
+                    {ok, #doc{revs = Revs} = Doc0} ->
+                        {RevPos, [RevId | _]} = Revs,
+                        Value = {[
+                            {rev, couch_doc:rev_to_str({RevPos, RevId})}
+                        ]},
+                        DocValue = if not IncludeDocs -> undefined; true ->
+                            couch_doc:to_json_obj(Doc0, DocOpts)
+                        end,
+                        #view_row{
+                            key = DocId,
+                            id = DocId,
+                            value = Value,
+                            doc = DocValue
+                        }
+                end,
+                Row1 = fabric_view:transform_row(Row0),
+                {ok, NewAcc} = view_cb(Row1, Acc),
+                NewAcc
+            end, VAcc1, Keys1),
+            {ok, VAcc3} = view_cb(complete, VAcc2),
+            {ok, VAcc3#vacc.resp}
+    end.
+
+
+apply_args_to_keylist(Args, Keys0) ->
+    Keys1 = case Args#mrargs.direction of
+        fwd -> Keys0;
+        _ -> lists:reverse(Keys0)
+    end,
+    Keys2 = case Args#mrargs.skip < length(Keys1) of
+        true -> lists:nthtail(Args#mrargs.skip, Keys1);
+        false -> []
+    end,
+    case Args#mrargs.limit < length(Keys2) of
+        true -> lists:sublist(Keys2, Args#mrargs.limit);
+        false -> Keys2
+    end.
+
+
+view_cb({row, Row}, {iter, Db, Args, VAcc}) ->
+    NewRow = case lists:keymember(doc, 1, Row) of
+        true ->
+            chttpd_stats:incr_reads(),
+            Row;
+        false when Args#mrargs.include_docs ->
+            {id, DocId} = lists:keyfind(id, 1, Row),
+            chttpd_stats:incr_reads(),
+            DocOpts = case Args#mrargs.conflicts of
+                true -> [conflicts | Args#mrargs.doc_options];
+                _ -> Args#mrargs.doc_options
+            end ++ [{user_ctx, (VAcc#vacc.req)#httpd.user_ctx}],
+            OpenOpts = [deleted | DocOpts],
+            DocMember = case fabric2_db:open_doc(Db, DocId, OpenOpts) of
+                {not_found, missing} ->
+                    [];
+                {ok, #doc{deleted = true}} ->
+                    [{doc, null}];
+                {ok, #doc{} = Doc} ->
+                    [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
+            end,
+            Row ++ DocMember;
+        _ ->
+            Row
+    end,
+    chttpd_stats:incr_rows(),
+    {Go, NewVAcc} = couch_mrview_http:view_cb({row, NewRow}, VAcc),
+    {Go, {iter, Db, Args, NewVAcc}};
+
+view_cb(Msg, {iter, Db, Args, VAcc}) ->
+    {Go, NewVAcc} = couch_mrview_http:view_cb(Msg, VAcc),
+    {Go, {iter, Db, Args, NewVAcc}};
 
 view_cb({row, Row} = Msg, Acc) ->
     case lists:keymember(doc, 1, Row) of
-        true -> chttpd_stats:incr_reads();
-        false -> ok
+        true ->
+            chttpd_stats:incr_reads();
+        false ->
+            ok
     end,
     chttpd_stats:incr_rows(),
     couch_mrview_http:view_cb(Msg, Acc);
@@ -2005,70 +2136,3 @@ bulk_get_json_error(DocId, Rev, Error, Reason) ->
                              {<<"rev">>, Rev},
                              {<<"error">>, Error},
                              {<<"reason">>, Reason}]}}]}).
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-monitor_attachments_test_() ->
-    {"ignore stubs",
-        fun () ->
-            Atts = [couch_att:new([{data, stub}])],
-            ?_assertEqual([], monitor_attachments(Atts))
-        end
-    }.
-
-parse_partitioned_opt_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_should_allow_partitioned_db(),
-            t_should_throw_on_not_allowed_partitioned_db(),
-            t_returns_empty_array_for_partitioned_false(),
-            t_returns_empty_array_for_no_partitioned_qs()
-        ]
-    }.
-
-
-setup() ->
-    ok.
-
-teardown(_) ->
-    meck:unload().
-
-mock_request(Url) ->
-    Headers = mochiweb_headers:make([{"Host", "examples.com"}]),
-    MochiReq = mochiweb_request:new(nil, 'PUT', Url, {1, 1}, Headers),
-    #httpd{mochi_req = MochiReq}.
-
-t_should_allow_partitioned_db() ->
-    ?_test(begin
-        meck:expect(couch_flags, is_enabled, 2, true),
-        Req = mock_request("/all-test21?partitioned=true"),
-        [Partitioned, _] = parse_partitioned_opt(Req),
-        ?assertEqual(Partitioned, {partitioned, true})
-    end).
-
-t_should_throw_on_not_allowed_partitioned_db() ->
-    ?_test(begin
-        meck:expect(couch_flags, is_enabled, 2, false),
-        Req = mock_request("/all-test21?partitioned=true"),
-        Throw = {bad_request, <<"Partitioned feature is not enabled.">>},
-        ?assertThrow(Throw, parse_partitioned_opt(Req))
-    end).
-
-t_returns_empty_array_for_partitioned_false() ->
-    ?_test(begin
-        Req = mock_request("/all-test21?partitioned=false"),
-        ?assertEqual(parse_partitioned_opt(Req), [])
-    end).
-
-t_returns_empty_array_for_no_partitioned_qs() ->
-    ?_test(begin
-        Req = mock_request("/all-test21"),
-        ?assertEqual(parse_partitioned_opt(Req), [])
-    end).
-
--endif.
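
A note on apply_args_to_keylist/2 above: the request arguments are
applied in a fixed order -- direction first, then skip, then limit. A
minimal standalone sketch of the same logic (plain arguments in place
of the #mrargs record fields; not part of the chttpd_db API):

    %% Direction is applied before skip and limit, so skip always
    %% counts rows from the end the client asked to start from.
    apply_args(Dir, Skip, Limit, Keys0) ->
        Keys1 = case Dir of
            fwd -> Keys0;
            rev -> lists:reverse(Keys0)
        end,
        Keys2 = case Skip < length(Keys1) of
            true -> lists:nthtail(Skip, Keys1);
            false -> []
        end,
        case Limit < length(Keys2) of
            true -> lists:sublist(Keys2, Limit);
            false -> Keys2
        end.

    %% apply_args(rev, 1, 2, [a, b, c, d]) -> [c, b]
    %% (reverse to [d, c, b, a], drop 1, keep 2).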
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index b244e84..e5f0002 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -108,39 +108,54 @@ maybe_add_csp_headers(Headers, _) ->
     Headers.
 
 handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    % TODO: Support args and options properly, transform
-    % this back into a fold call similar to the old
-    % version.
-    %% Args = couch_mrview_http:parse_params(Req, undefined),
+    #mrargs{
+        start_key = StartKey,
+        end_key = EndKey,
+        direction = Dir,
+        limit = Limit,
+        skip = Skip
+    } = couch_mrview_http:parse_params(Req, undefined),
+
+    Options = [
+        {start_key, StartKey},
+        {end_key, EndKey},
+        {dir, Dir},
+        {limit, Limit},
+        {skip, Skip}
+    ],
+
     % Eventually the Etag for this request will be derived
     % from the \xFFmetadataVersion key in fdb
     Etag = <<"foo">>,
-    %% Options = [{user_ctx, Req#httpd.user_ctx}],
+
     {ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
-        AllDbs = fabric2_db:list_dbs(),
-        chttpd:send_json(Req, AllDbs)
-    end);
+        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag",Etag}]),
+        Callback = fun all_dbs_callback/2,
+        Acc = #vacc{req=Req,resp=Resp},
+        fabric2_db:list_dbs(Callback, Acc, Options)
+    end),
+    case is_record(Resp, vacc) of
+        true -> {ok, Resp#vacc.resp};
+        _ -> {ok, Resp}
+    end;
 handle_all_dbs_req(Req) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
-%% all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
-%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
-%%     {ok, Acc#vacc{resp=Resp1}};
-%% all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
-%%     Prepend = couch_mrview_http:prepend_val(Acc),
-%%     case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
-%%         {ok, Acc};
-%%     DbName ->
-%%         {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
-%%         {ok, Acc#vacc{prepend=",", resp=Resp1}}
-%%     end;
-%% all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
-%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
-%%     {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
-%%     {ok, Acc#vacc{resp=Resp2}};
-%% all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
-%%     {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
-%%     {ok, Acc#vacc{resp=Resp1}}.
+all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
+    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
+    {ok, Acc#vacc{resp=Resp1}};
+all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
+    Prepend = couch_mrview_http:prepend_val(Acc),
+    DbName = couch_util:get_value(id, Row),
+    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
+    {ok, Acc#vacc{prepend=",", resp=Resp1}};
+all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
+    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
+    {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+    {ok, Acc#vacc{resp=Resp2}};
+all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
+    {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
+    {ok, Acc#vacc{resp=Resp1}}.
 
 handle_dbs_info_req(#httpd{method='POST'}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
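
A note on the streaming above: the list_dbs/3 callback protocol opens
the JSON array on {meta, _}, appends one name per {row, _} with a comma
prepend, and closes the array on complete. A hedged sketch of the same
protocol with a plain accumulator instead of a delayed HTTP response
(hypothetical helper, not part of chttpd):

    %% Collect database names into a list via the callback protocol
    %% implemented by fabric2_db:list_dbs/3 below.
    collect_cb({meta, _Meta}, Acc) -> {ok, Acc};
    collect_cb({row, Row}, Acc) ->
        {ok, [couch_util:get_value(id, Row) | Acc]};
    collect_cb(complete, Acc) -> {ok, lists:reverse(Acc)}.

    %% Usage: {ok, Names} = fabric2_db:list_dbs(fun collect_cb/2, [], []).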
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 80028a6..eb74a18 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -20,6 +20,7 @@
 
     list_dbs/0,
     list_dbs/1,
+    list_dbs/3,
 
     is_admin/1,
     check_is_admin/1,
@@ -194,8 +195,30 @@ list_dbs() ->
 
 
 list_dbs(Options) ->
+    Callback = fun(DbName, Acc) -> [DbName | Acc] end,
+    DbNames = fabric2_fdb:transactional(fun(Tx) ->
+        fabric2_fdb:list_dbs(Tx, Callback, [], Options)
+    end),
+    lists:reverse(DbNames).
+
+
+list_dbs(UserFun, UserAcc0, Options) ->
+    FoldFun = fun
+        (DbName, Acc) -> maybe_stop(UserFun({row, [{id, DbName}]}, Acc))
+    end,
     fabric2_fdb:transactional(fun(Tx) ->
-        fabric2_fdb:list_dbs(Tx, Options)
+        try
+            UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+            UserAcc2 = fabric2_fdb:list_dbs(
+                    Tx,
+                    FoldFun,
+                    UserAcc1,
+                    Options
+                ),
+            {ok, maybe_stop(UserFun(complete, UserAcc2))}
+        catch throw:{stop, FinalUserAcc} ->
+            {ok, FinalUserAcc}
+        end
     end).
 
 
@@ -406,6 +429,7 @@ open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
 open_doc(#{} = Db, DocId, Options) ->
     NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
     NeedsTree = (Options -- NeedsTreeOpts /= Options),
+    OpenDeleted = lists:member(deleted, Options),
     fabric2_fdb:transactional(Db, fun(TxDb) ->
         Revs = case NeedsTree of
             true -> fabric2_fdb:get_all_revs(TxDb, DocId);
@@ -414,6 +438,8 @@ open_doc(#{} = Db, DocId, Options) ->
         if Revs == [] -> {not_found, missing}; true ->
             #{winner := true} = RI = lists:last(Revs),
             case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
+                #doc{deleted = true} when not OpenDeleted ->
+                    {not_found, deleted};
                 #doc{} = Doc ->
                     apply_open_doc_opts(Doc, Revs, Options);
                 Else ->
@@ -451,8 +477,10 @@ open_doc_revs(Db, DocId, Revs, Options) ->
                         rev_path => RevPath
                     },
                     case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
-                        #doc{} = Doc -> {ok, Doc};
-                        Else -> {Else, {Pos, Rev}}
+                        #doc{} = Doc ->
+                            apply_open_doc_opts(Doc, AllRevInfos, Options);
+                        Else ->
+                            {Else, {Pos, Rev}}
                     end
             end
         end, Found),
@@ -615,9 +643,40 @@ fold_docs(Db, UserFun, UserAcc) ->
     fold_docs(Db, UserFun, UserAcc, []).
 
 
-fold_docs(Db, UserFun, UserAcc, Options) ->
+fold_docs(Db, UserFun, UserAcc0, Options) ->
     fabric2_fdb:transactional(Db, fun(TxDb) ->
-        fabric2_fdb:fold_docs(TxDb, UserFun, UserAcc, Options)
+        try
+            #{
+                db_prefix := DbPrefix
+            } = TxDb,
+
+            Prefix = erlfdb_tuple:pack({?DB_ALL_DOCS}, DbPrefix),
+            DocCount = get_doc_count(TxDb),
+
+            Meta = case lists:keyfind(update_seq, 1, Options) of
+                {_, true} ->
+                    UpdateSeq = fabric2_db:get_update_seq(TxDb),
+                    [{update_seq, UpdateSeq}];
+                _ ->
+                    []
+            end ++ [{total, DocCount}, {offset, null}],
+
+            UserAcc1 = maybe_stop(UserFun({meta, Meta}, UserAcc0)),
+
+            UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
+                {DocId} = erlfdb_tuple:unpack(K, Prefix),
+                RevId = erlfdb_tuple:unpack(V),
+                maybe_stop(UserFun({row, [
+                    {id, DocId},
+                    {key, DocId},
+                    {value, {[{rev, couch_doc:rev_to_str(RevId)}]}}
+                ]}, Acc))
+            end, UserAcc1, Options),
+
+            {ok, maybe_stop(UserFun(complete, UserAcc2))}
+        catch throw:{stop, FinalUserAcc} ->
+            {ok, FinalUserAcc}
+        end
     end).
 
 
@@ -627,7 +686,44 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc) ->
 
 fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
     fabric2_fdb:transactional(Db, fun(TxDb) ->
-        fabric2_fdb:fold_changes(TxDb, SinceSeq, UserFun, UserAcc, Options)
+        try
+            #{
+                db_prefix := DbPrefix
+            } = TxDb,
+
+            Prefix = erlfdb_tuple:pack({?DB_CHANGES}, DbPrefix),
+
+            Dir = case fabric2_util:get_value(dir, Options, fwd) of
+                rev -> rev;
+                _ -> fwd
+            end,
+
+            StartKey = get_since_seq(TxDb, Dir, SinceSeq),
+            EndKey = case Dir of
+                rev -> fabric2_util:seq_zero_vs();
+                _ -> fabric2_util:seq_max_vs()
+            end,
+            FoldOpts = [
+                {start_key, StartKey},
+                {end_key, EndKey}
+            ] ++ Options,
+
+            {ok, fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
+                {SeqVS} = erlfdb_tuple:unpack(K, Prefix),
+                {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
+
+                Change = #{
+                    id => DocId,
+                    sequence => fabric2_fdb:vs_to_seq(SeqVS),
+                    rev_id => RevId,
+                    deleted => Deleted
+                },
+
+                maybe_stop(UserFun(Change, Acc))
+            end, UserAcc, FoldOpts)}
+        catch throw:{stop, FinalUserAcc} ->
+            {ok, FinalUserAcc}
+        end
     end).
 
 
@@ -796,7 +892,6 @@ apply_open_doc_opts(Doc, Revs, Options) ->
     IncludeConflicts = lists:member(conflicts, Options),
     IncludeDelConflicts = lists:member(deleted_conflicts, Options),
     IncludeLocalSeq = lists:member(local_seq, Options),
-    ReturnDeleted = lists:member(deleted, Options),
 
     % This revs_info becomes fairly useless now that we're
     % not keeping old document bodies around...
@@ -827,14 +922,7 @@ apply_open_doc_opts(Doc, Revs, Options) ->
         [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
     end,
 
-    case Doc#doc.deleted and not ReturnDeleted of
-        true ->
-            {not_found, deleted};
-        false ->
-            {ok, Doc#doc{
-                meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4
-            }}
-    end.
+    {ok, Doc#doc{meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4}}.
 
 
 filter_found_revs(RevInfo, Revs) ->
@@ -1289,6 +1377,26 @@ check_duplicate_attachments(#doc{atts = Atts}) ->
     end, ordsets:new(), Atts).
 
 
+get_since_seq(Db, rev, <<>>) ->
+    get_since_seq(Db, rev, now);
+
+get_since_seq(_Db, _Dir, Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0 ->
+    fabric2_util:seq_zero_vs();
+
+get_since_seq(Db, Dir, Seq) when Seq == now; Seq == <<"now">> ->
+    CurrSeq = fabric2_fdb:get_last_change(Db),
+    get_since_seq(Db, Dir, CurrSeq);
+
+get_since_seq(_Db, _Dir, Seq) when is_binary(Seq), size(Seq) == 24 ->
+    fabric2_fdb:next_vs(fabric2_fdb:seq_to_vs(Seq));
+
+get_since_seq(Db, Dir, List) when is_list(List) ->
+    get_since_seq(Db, Dir, list_to_binary(List));
+
+get_since_seq(_Db, _Dir, Seq) ->
+    erlang:error({invalid_since_seq, Seq}).
+
+
 get_leaf_path(Pos, Rev, [{Pos, [{Rev, _RevInfo} | LeafPath]} | _]) ->
     LeafPath;
 get_leaf_path(Pos, Rev, [_WrongLeaf | RestLeafs]) ->
@@ -1353,3 +1461,8 @@ rev(Rev) when is_list(Rev); is_binary(Rev) ->
 rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
     Rev.
 
+
+maybe_stop({ok, Acc}) ->
+    Acc;
+maybe_stop({stop, Acc}) ->
+    throw({stop, Acc}).
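
A note on maybe_stop/1 above: user callbacks return {ok, Acc} to keep
folding or {stop, Acc} to end early; maybe_stop/1 turns the latter into
a throw that unwinds the FDB fold, and the enclosing try returns
{ok, Acc} as the final result. A minimal sketch under that protocol
(hypothetical function, assuming an open fabric2 Db handle):

    %% Collect at most N doc ids, then stop the underlying fold.
    first_n_doc_ids(Db, N) ->
        UserFun = fun
            ({row, Row}, {Count, Ids}) when Count < N ->
                Id = couch_util:get_value(id, Row),
                {ok, {Count + 1, [Id | Ids]}};
            ({row, _}, Acc) ->
                {stop, Acc};
            (_MetaOrComplete, Acc) ->
                {ok, Acc}
        end,
        {ok, {_, Ids}} = fabric2_db:fold_docs(Db, UserFun, {0, []}, []),
        lists:reverse(Ids).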
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 4b01826..670ce8b 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -24,7 +24,7 @@
     delete/1,
     exists/1,
 
-    list_dbs/2,
+    list_dbs/4,
 
     get_info/1,
     get_config/1,
@@ -50,11 +50,13 @@
     read_attachment/3,
     write_attachment/3,
 
-    fold_docs/4,
-    fold_changes/5,
     get_last_change/1,
 
+    fold_range/5,
+
     vs_to_seq/1,
+    seq_to_vs/1,
+    next_vs/1,
 
     debug_cluster/0,
     debug_cluster/2
@@ -254,16 +256,15 @@ exists(#{name := DbName} = Db) when is_binary(DbName) ->
     end.
 
 
-list_dbs(Tx, _Options) ->
+list_dbs(Tx, Callback, AccIn, Options) ->
     Root = erlfdb_directory:root(),
     CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
     LayerPrefix = erlfdb_directory:get_name(CouchDB),
-    {Start, End} = erlfdb_tuple:range({?ALL_DBS}, LayerPrefix),
-    Future = erlfdb:get_range(Tx, Start, End),
-    lists:map(fun({K, _V}) ->
-        {?ALL_DBS, DbName} = erlfdb_tuple:unpack(K, LayerPrefix),
-        DbName
-    end, erlfdb:wait(Future)).
+    Prefix = erlfdb_tuple:pack({?ALL_DBS}, LayerPrefix),
+    fold_range({tx, Tx}, Prefix, fun({K, _V}, Acc) ->
+        {DbName} = erlfdb_tuple:unpack(K, Prefix),
+        Callback(DbName, Acc)
+    end, AccIn, Options).
 
 
 get_info(#{} = Db) ->
@@ -508,24 +509,26 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
     UpdateStatus = case {OldWinner, NewWinner} of
         {not_found, #{deleted := false}} ->
             created;
+        {not_found, #{deleted := true}} ->
+            deleted;
         {#{deleted := true}, #{deleted := false}} ->
             recreated;
         {#{deleted := false}, #{deleted := false}} ->
             updated;
         {#{deleted := false}, #{deleted := true}} ->
+            deleted;
+        {#{deleted := true}, #{deleted := true}} ->
             deleted
     end,
 
     case UpdateStatus of
-        Status when Status == created orelse Status == recreated ->
-            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
-            ADVal = erlfdb_tuple:pack(NewRevId),
-            ok = erlfdb:set(Tx, ADKey, ADVal);
         deleted ->
             ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
             ok = erlfdb:clear(Tx, ADKey);
-        updated ->
-            ok
+        _ ->
+            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+            ADVal = erlfdb_tuple:pack(NewRevId),
+            ok = erlfdb:set(Tx, ADKey, ADVal)
     end,
 
     % _changes
@@ -640,84 +643,6 @@ write_attachment(#{} = Db, DocId, Data) when is_binary(Data) ->
     {ok, AttId}.
 
 
-fold_docs(#{} = Db, UserFun, UserAcc0, Options) ->
-    #{
-        tx := Tx,
-        db_prefix := DbPrefix
-    } = ensure_current(Db),
-
-    {Reverse, Start, End} = get_dir_and_bounds(DbPrefix, Options),
-
-    DocCountKey = erlfdb_tuple:pack({?DB_STATS, <<"doc_count">>}, DbPrefix),
-    DocCountBin = erlfdb:wait(erlfdb:get(Tx, DocCountKey)),
-
-    try
-        UserAcc1 = maybe_stop(UserFun({meta, [
-            {total, ?bin2uint(DocCountBin)},
-            {offset, null}
-        ]}, UserAcc0)),
-
-        UserAcc2 = erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
-            {?DB_ALL_DOCS, DocId} = erlfdb_tuple:unpack(K, DbPrefix),
-            RevId = erlfdb_tuple:unpack(V),
-            maybe_stop(UserFun({row, [
-                {id, DocId},
-                {key, DocId},
-                {value, couch_doc:rev_to_str(RevId)}
-            ]}, UserAccIn))
-        end, UserAcc1, [{reverse, Reverse}] ++ Options),
-
-        {ok, maybe_stop(UserFun(complete, UserAcc2))}
-    catch throw:{stop, FinalUserAcc} ->
-        {ok, FinalUserAcc}
-    end.
-
-
-fold_changes(#{} = Db, SinceSeq0, UserFun, UserAcc0, Options) ->
-    #{
-        tx := Tx,
-        db_prefix := DbPrefix
-    } = ensure_current(Db),
-
-    SinceSeq1 = get_since_seq(SinceSeq0),
-
-    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
-        fwd -> false;
-        rev -> true
-    end,
-
-    {Start0, End0} = case Reverse of
-        false -> {SinceSeq1, fabric2_util:seq_max_vs()};
-        true -> {fabric2_util:seq_zero_vs(), SinceSeq1}
-    end,
-
-    Start1 = erlfdb_tuple:pack({?DB_CHANGES, Start0}, DbPrefix),
-    End1 = erlfdb_tuple:pack({?DB_CHANGES, End0}, DbPrefix),
-
-    {Start, End} = case Reverse of
-        false -> {erlfdb_key:first_greater_than(Start1), End1};
-        true -> {Start1, erlfdb_key:first_greater_than(End1)}
-    end,
-
-    try
-        {ok, erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
-            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
-            {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
-
-            Change = #{
-                id => DocId,
-                sequence => vs_to_seq(SeqVS),
-                rev_id => RevId,
-                deleted => Deleted
-            },
-
-            maybe_stop(UserFun(Change, UserAccIn))
-        end, UserAcc0, [{reverse, Reverse}] ++ Options)}
-    catch throw:{stop, FinalUserAcc} ->
-        {ok, FinalUserAcc}
-    end.
-
-
 get_last_change(#{} = Db) ->
     #{
         tx := Tx,
@@ -735,17 +660,57 @@ get_last_change(#{} = Db) ->
     end.
 
 
-maybe_stop({ok, Acc}) ->
-    Acc;
-maybe_stop({stop, Acc}) ->
-    throw({stop, Acc}).
+fold_range(#{} = Db, RangePrefix, Callback, Acc, Options) ->
+    #{
+        tx := Tx
+    } = ensure_current(Db),
+    fold_range({tx, Tx}, RangePrefix, Callback, Acc, Options);
+
+fold_range({tx, Tx}, RangePrefix, UserCallback, UserAcc, Options) ->
+    case fabric2_util:get_value(limit, Options) of
+        0 ->
+            % FoundationDB treats a limit of 0 as unlimited
+            % so we have to guard for that here.
+            UserAcc;
+        _ ->
+            {Start, End, Skip, FoldOpts} = get_fold_opts(RangePrefix, Options),
+            Callback = fun fold_range_cb/2,
+            Acc = {skip, Skip, UserCallback, UserAcc},
+            {skip, _, UserCallback, OutAcc} =
+                    erlfdb:fold_range(Tx, Start, End, Callback, Acc, FoldOpts),
+            OutAcc
+    end.
 
 
-vs_to_seq(VS) ->
+vs_to_seq(VS) when is_tuple(VS) ->
+    % 51 is the versionstamp type tag
     <<51:8, SeqBin:12/binary>> = erlfdb_tuple:pack({VS}),
     fabric2_util:to_hex(SeqBin).
 
 
+seq_to_vs(Seq) when is_binary(Seq) ->
+    Seq1 = fabric2_util:from_hex(Seq),
+    % 51 is the versionstamp type tag
+    Seq2 = <<51:8, Seq1/binary>>,
+    {VS} = erlfdb_tuple:unpack(Seq2),
+    VS.
+
+
+next_vs({versionstamp, VS, Batch, TxId}) ->
+    {V, B, T} = case TxId < 65535 of
+        true ->
+            {VS, Batch, TxId + 1};
+        false ->
+            case Batch < 65535 of
+                true ->
+                    {VS, Batch + 1, 0};
+                false ->
+                    {VS + 1, 0, 0}
+            end
+    end,
+    {versionstamp, V, B, T}.
+
+
 debug_cluster() ->
     debug_cluster(<<>>, <<16#FE, 16#FF, 16#FF>>).
 
@@ -753,7 +718,7 @@ debug_cluster() ->
 debug_cluster(Start, End) ->
     transactional(fun(Tx) ->
         lists:foreach(fun({Key, Val}) ->
-            io:format("~s => ~s~n", [
+            io:format(standard_error, "~s => ~s~n", [
                     string:pad(erlfdb_util:repr(Key), 60),
                     erlfdb_util:repr(Val)
                 ])
@@ -790,7 +755,7 @@ load_validate_doc_funs(#{} = Db) ->
         {end_key, <<"_design0">>}
     ],
 
-    {ok, Infos1} = fold_docs(Db, FoldFun, [], Options),
+    {ok, Infos1} = fabric2_db:fold_docs(Db, FoldFun, [], Options),
 
     Infos2 = lists:map(fun(Info) ->
         #{
@@ -999,11 +964,12 @@ chunkify_attachment(Data) ->
     end.
 
 
-get_dir_and_bounds(DbPrefix, Options) ->
-    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
-        fwd -> false;
-        rev -> true
+get_fold_opts(RangePrefix, Options) ->
+    Reverse = case fabric2_util:get_value(dir, Options) of
+        rev -> true;
+        _ -> false
     end,
+
     StartKey0 = fabric2_util:get_value(start_key, Options),
     EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
     EndKey0 = fabric2_util:get_value(end_key, Options, EndKeyGt),
@@ -1019,17 +985,17 @@ get_dir_and_bounds(DbPrefix, Options) ->
 
     % Set the maximum bounds for the start and endkey
     StartKey2 = case StartKey1 of
-        undefined -> {?DB_ALL_DOCS};
-        SK2 when is_binary(SK2) -> {?DB_ALL_DOCS, SK2}
+        undefined -> <<>>;
+        SK2 -> SK2
     end,
 
     EndKey2 = case EndKey1 of
-        undefined -> {?DB_ALL_DOCS, <<16#FF>>};
-        EK2 when is_binary(EK2) -> {?DB_ALL_DOCS, EK2}
+        undefined -> <<255>>;
+        EK2 -> EK2
     end,
 
-    StartKey3 = erlfdb_tuple:pack(StartKey2, DbPrefix),
-    EndKey3 = erlfdb_tuple:pack(EndKey2, DbPrefix),
+    StartKey3 = erlfdb_tuple:pack({StartKey2}, RangePrefix),
+    EndKey3 = erlfdb_tuple:pack({EndKey2}, RangePrefix),
 
     % FoundationDB ranges are applied as SK <= key < EK
     % By default, CouchDB is SK <= key <= EK with the
@@ -1056,26 +1022,46 @@ get_dir_and_bounds(DbPrefix, Options) ->
             EndKey3
     end,
 
-    {Reverse, StartKey4, EndKey4}.
+    Skip = case fabric2_util:get_value(skip, Options) of
+        S when is_integer(S), S >= 0 -> S;
+        _ -> 0
+    end,
 
+    Limit = case fabric2_util:get_value(limit, Options) of
+        L when is_integer(L), L >= 0 -> [{limit, L + Skip}];
+        undefined -> []
+    end,
 
-get_since_seq(Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0->
-    fabric2_util:seq_zero_vs();
+    TargetBytes = case fabric2_util:get_value(target_bytes, Options) of
+        T when is_integer(T), T >= 0 -> [{target_bytes, T}];
+        undefined -> []
+    end,
 
-get_since_seq(Seq) when Seq == now; Seq == <<"now">> ->
-    fabric2_util:seq_max_vs();
+    StreamingMode = case fabric2_util:get_value(streaming_mode, Options) of
+        undefined -> [];
+        Name when is_atom(Name) -> [{streaming_mode, Name}]
+    end,
+
+    Snapshot = case fabric2_util:get_value(snapshot, Options) of
+        undefined -> [];
+        B when is_boolean(B) -> [{snapshot, B}]
+    end,
+
+    OutOpts = [{reverse, Reverse}]
+            ++ Limit
+            ++ TargetBytes
+            ++ StreamingMode
+            ++ Snapshot,
+
+    {StartKey4, EndKey4, Skip, OutOpts}.
 
-get_since_seq(Seq) when is_binary(Seq), size(Seq) == 24 ->
-    Seq1 = fabric2_util:from_hex(Seq),
-    Seq2 = <<51:8, Seq1/binary>>,
-    {SeqVS} = erlfdb_tuple:unpack(Seq2),
-    SeqVS;
 
-get_since_seq(List) when is_list(List) ->
-    get_since_seq(list_to_binary(List));
+fold_range_cb(KV, {skip, 0, Callback, Acc}) ->
+    NewAcc = Callback(KV, Acc),
+    {skip, 0, Callback, NewAcc};
 
-get_since_seq(Seq) ->
-    erlang:error({invalid_since_seq, Seq}).
+fold_range_cb(_KV, {skip, N, Callback, Acc}) when is_integer(N), N > 0 ->
+    {skip, N - 1, Callback, Acc}.
 
 
 get_db_handle() ->
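
Two details of fold_range/5 above deserve a note: FoundationDB treats a
limit of 0 as unlimited, hence the early-return guard, and since FDB
has no skip option the requested limit is widened to Limit + Skip while
the first Skip rows are dropped client-side by fold_range_cb/2. A
sketch reproducing that private callback so the skip handling can be
checked with a plain list fold (names here are illustrative only):

    %% Same shape as fold_range_cb/2: count down the skip budget,
    %% then start feeding rows to the user callback.
    skip_cb(KV, {skip, 0, Callback, Acc}) ->
        {skip, 0, Callback, Callback(KV, Acc)};
    skip_cb(_KV, {skip, N, Callback, Acc}) when N > 0 ->
        {skip, N - 1, Callback, Acc}.

    %% lists:foldl(fun skip_cb/2,
    %%             {skip, 2, fun(K, A) -> [K | A] end, []},
    %%             [k1, k2, k3, k4])
    %% returns {skip, 0, _, [k4, k3]}: k1 and k2 were skipped.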
diff --git a/src/fabric/test/fabric2_doc_fold_tests.erl b/src/fabric/test/fabric2_doc_fold_tests.erl
index caa5f92..ee0180f 100644
--- a/src/fabric/test/fabric2_doc_fold_tests.erl
+++ b/src/fabric/test/fabric2_doc_fold_tests.erl
@@ -34,7 +34,10 @@ doc_fold_test_() ->
                 fun fold_docs_with_start_key/1,
                 fun fold_docs_with_end_key/1,
                 fun fold_docs_with_both_keys_the_same/1,
-                fun fold_docs_with_different_keys/1
+                fun fold_docs_with_different_keys/1,
+                fun fold_docs_with_limit/1,
+                fun fold_docs_with_skip/1,
+                fun fold_docs_with_skip_and_limit/1
             ]}
         }
     }.
@@ -50,7 +53,7 @@ setup() ->
             body = {[{<<"value">>, Val}]}
         },
         {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
-        {DocId, couch_doc:rev_to_str(Rev)}
+        {DocId, {[{rev, couch_doc:rev_to_str(Rev)}]}}
     end, lists:seq(1, ?DOC_COUNT)),
     {Db, lists:sort(DocIdRevs), Ctx}.
 
@@ -108,11 +111,58 @@ fold_docs_with_different_keys({Db, DocIdRevs, _}) ->
     end, lists:seq(1, 500)).
 
 
+fold_docs_with_limit({Db, DocIdRevs, _}) ->
+    lists:foreach(fun(Limit) ->
+        Opts1 = [{limit, Limit}],
+        {ok, {?DOC_COUNT, Rows1}} =
+                fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+        ?assertEqual(lists:sublist(DocIdRevs, Limit), lists:reverse(Rows1)),
+
+        Opts2 = [{dir, rev} | Opts1],
+        {ok, {?DOC_COUNT, Rows2}} =
+                fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+        ?assertEqual(
+                lists:sublist(lists:reverse(DocIdRevs), Limit),
+                lists:reverse(Rows2)
+            )
+    end, lists:seq(0, 51)).
+
+
+fold_docs_with_skip({Db, DocIdRevs, _}) ->
+    lists:foreach(fun(Skip) ->
+        Opts1 = [{skip, Skip}],
+        {ok, {?DOC_COUNT, Rows1}} =
+                fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+        Expect1 = case Skip > length(DocIdRevs) of
+            true -> [];
+            false -> lists:nthtail(Skip, DocIdRevs)
+        end,
+        ?assertEqual(Expect1, lists:reverse(Rows1)),
+
+        Opts2 = [{dir, rev} | Opts1],
+        {ok, {?DOC_COUNT, Rows2}} =
+                fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+        Expect2 = case Skip > length(DocIdRevs) of
+            true -> [];
+            false -> lists:nthtail(Skip, lists:reverse(DocIdRevs))
+        end,
+        ?assertEqual(Expect2, lists:reverse(Rows2))
+    end, lists:seq(0, 51)).
+
+
+fold_docs_with_skip_and_limit({Db, DocIdRevs, _}) ->
+    lists:foreach(fun(_) ->
+        check_skip_and_limit(Db, [], DocIdRevs),
+        check_skip_and_limit(Db, [{dir, rev}], lists:reverse(DocIdRevs))
+    end, lists:seq(1, 100)).
+
+
 check_all_combos(Db, StartKey, EndKey, Rows) ->
     Opts1 = make_opts(fwd, StartKey, EndKey, true),
     {ok, {?DOC_COUNT, Rows1}} =
             fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
     ?assertEqual(lists:reverse(Rows), Rows1),
+    check_skip_and_limit(Db, Opts1, Rows),
 
     Opts2 = make_opts(fwd, StartKey, EndKey, false),
     {ok, {?DOC_COUNT, Rows2}} =
@@ -121,11 +171,13 @@ check_all_combos(Db, StartKey, EndKey, Rows) ->
         lists:reverse(all_but_last(Rows))
     end,
     ?assertEqual(Expect2, Rows2),
+    check_skip_and_limit(Db, Opts2, lists:reverse(Expect2)),
 
     Opts3 = make_opts(rev, StartKey, EndKey, true),
     {ok, {?DOC_COUNT, Rows3}} =
             fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts3),
     ?assertEqual(Rows, Rows3),
+    check_skip_and_limit(Db, Opts3, lists:reverse(Rows)),
 
     Opts4 = make_opts(rev, StartKey, EndKey, false),
     {ok, {?DOC_COUNT, Rows4}} =
@@ -133,8 +185,34 @@ check_all_combos(Db, StartKey, EndKey, Rows) ->
     Expect4 = if StartKey == undefined -> Rows; true ->
         tl(Rows)
     end,
-    ?assertEqual(Expect4, Rows4).
+    ?assertEqual(Expect4, Rows4),
+    check_skip_and_limit(Db, Opts4, lists:reverse(Expect4)).
+
+
+check_skip_and_limit(Db, Opts, []) ->
+    Skip = rand:uniform(?DOC_COUNT + 1) - 1,
+    Limit = rand:uniform(?DOC_COUNT + 1) - 1,
+    NewOpts = [{skip, Skip}, {limit, Limit} | Opts],
+    {ok, {?DOC_COUNT, OutRows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], NewOpts),
+    ?assertEqual([], OutRows);
+
+check_skip_and_limit(Db, Opts, Rows) ->
+    Skip = rand:uniform(length(Rows) + 1) - 1,
+    Limit = rand:uniform(?DOC_COUNT + 1 - Skip) - 1,
+
+    ExpectRows = case Skip >= length(Rows) of
+        true ->
+            [];
+        false ->
+            lists:sublist(lists:nthtail(Skip, Rows), Limit)
+    end,
 
+    SkipLimitOpts = [{skip, Skip}, {limit, Limit} | Opts],
+    {ok, {?DOC_COUNT, RevRows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], SkipLimitOpts),
+    OutRows = lists:reverse(RevRows),
+    ?assertEqual(ExpectRows, OutRows).
 
 
 make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
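
A note on the random bounds in check_skip_and_limit/3 above:
rand:uniform(N) returns an integer in 1..N, so the expressions cover
the full inclusive ranges the assertions rely on:

    Skip  = rand:uniform(?DOC_COUNT + 1) - 1,         %% 0 .. ?DOC_COUNT
    Limit = rand:uniform(?DOC_COUNT + 1 - Skip) - 1.  %% 0 .. ?DOC_COUNT - Skip

Skip can therefore equal the row count (forcing an empty result) while
Skip + Limit never exceeds ?DOC_COUNT.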
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
index 9f6aeb6..dab153a 100644
--- a/test/elixir/test/all_docs_test.exs
+++ b/test/elixir/test/all_docs_test.exs
@@ -43,7 +43,8 @@ defmodule AllDocsTest do
     # Check _all_docs offset
     retry_until(fn ->
       resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
-      assert resp["offset"] == 2
+      assert resp["offset"] == :null
+      assert Enum.at(resp["rows"], 0)["key"] == "2"
     end)
 
     # Confirm that queries may assume raw collation


[couchdb] 31/34: Make fabric2.hrl public

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 24c864d2692afd844d96123180cbf4e728403449
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jul 23 15:12:18 2019 -0500

    Make fabric2.hrl public
---
 src/fabric/{src => include}/fabric2.hrl | 0
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/src/fabric/src/fabric2.hrl b/src/fabric/include/fabric2.hrl
similarity index 100%
rename from src/fabric/src/fabric2.hrl
rename to src/fabric/include/fabric2.hrl


[couchdb] 29/34: Expose ICU ucol_getSortKey

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d42d9b75e6a8f46edad13d47aef71d0f08175ddc
Author: Russell Branca <ch...@apache.org>
AuthorDate: Tue Jul 2 13:31:33 2019 -0700

    Expose ICU ucol_getSortKey
---
 src/couch/priv/icu_driver/couch_icu_driver.c |  74 ++++++++++++--
 src/couch/src/couch_util.erl                 |  13 ++-
 src/couch/test/eunit/couch_util_tests.erl    | 140 +++++++++++++++++++++++++++
 3 files changed, 219 insertions(+), 8 deletions(-)

diff --git a/src/couch/priv/icu_driver/couch_icu_driver.c b/src/couch/priv/icu_driver/couch_icu_driver.c
index 4d9bb98..ffccf2e 100644
--- a/src/couch/priv/icu_driver/couch_icu_driver.c
+++ b/src/couch/priv/icu_driver/couch_icu_driver.c
@@ -30,6 +30,8 @@ specific language governing permissions and limitations under the License.
 #include <string.h> /* for memcpy */
 #endif
 
+#define BUFFER_SIZE 1024
+
 
 typedef struct {
     ErlDrvPort port;
@@ -54,6 +56,8 @@ static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
     UErrorCode status = U_ZERO_ERROR;
     couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
 
+    set_port_control_flags(port, PORT_CONTROL_FLAG_BINARY);
+
     if (pData == NULL)
         return ERL_DRV_ERROR_GENERAL;
 
@@ -84,14 +88,17 @@ ErlDrvSSizeT
 return_control_result(void* pLocalResult, int localLen,
             char **ppRetBuf, ErlDrvSizeT returnLen)
 {
+    ErlDrvBinary* buf = NULL;
+
     if (*ppRetBuf == NULL || localLen > returnLen) {
-        *ppRetBuf = (char*)driver_alloc_binary(localLen);
-        if(*ppRetBuf == NULL) {
-            return -1;
-        }
+        buf = driver_alloc_binary(localLen);
+        memcpy(buf->orig_bytes, pLocalResult, localLen);
+        *ppRetBuf = (char*) buf;
+        return localLen;
+    } else {
+        memcpy(*ppRetBuf, pLocalResult, localLen);
+        return localLen;
     }
-    memcpy(*ppRetBuf, pLocalResult, localLen);
-    return localLen;
 }
 
 static ErlDrvSSizeT
@@ -147,6 +154,61 @@ couch_drv_control(ErlDrvData drv_data, unsigned int command,
 
         return return_control_result(&response, sizeof(response), rbuf, rlen);
         }
+    case 2: /* GET_SORT_KEY: */
+        {
+
+        UChar source[BUFFER_SIZE];
+        UChar* sourcePtr = source;
+        int32_t sourceLen = BUFFER_SIZE;
+
+        uint8_t sortKey[BUFFER_SIZE];
+        uint8_t* sortKeyPtr = sortKey;
+        int32_t sortKeyLen = BUFFER_SIZE;
+
+        int32_t inputLen;
+
+        UErrorCode status = U_ZERO_ERROR;
+        ErlDrvSSizeT res;
+
+        /* first 32bits are the length */
+        memcpy(&inputLen, pBuf, sizeof(inputLen));
+        pBuf += sizeof(inputLen);
+
+        u_strFromUTF8(sourcePtr, BUFFER_SIZE, &sourceLen, pBuf, inputLen, &status);
+
+        if (sourceLen >= BUFFER_SIZE) {
+            /* reset status or next u_strFromUTF8 call will auto-fail */
+            status = U_ZERO_ERROR;
+            sourcePtr = (UChar*) malloc(sourceLen * sizeof(UChar));
+            u_strFromUTF8(sourcePtr, sourceLen, NULL, pBuf, inputLen, &status);
+            if (U_FAILURE(status)) {
+                rbuf = NULL;
+                return 0;
+            }
+        } else if (U_FAILURE(status)) {
+            rbuf = NULL;
+            return 0;
+        }
+
+        sortKeyLen = ucol_getSortKey(pData->coll, sourcePtr, sourceLen, sortKeyPtr, BUFFER_SIZE);
+
+        if (sortKeyLen > BUFFER_SIZE) {
+            sortKeyPtr = (uint8_t*) malloc(sortKeyLen);
+            ucol_getSortKey(pData->coll, sourcePtr, sourceLen, sortKeyPtr, sortKeyLen);
+        }
+
+        res = return_control_result(sortKeyPtr, sortKeyLen, rbuf, rlen);
+
+        if (sourcePtr != source) {
+            free(sourcePtr);
+        }
+
+        if (sortKeyPtr != sortKey) {
+            free(sortKeyPtr);
+        }
+
+        return res;
+    }
 
     default:
         return -1;
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 62e17ce..b3553a5 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -14,7 +14,7 @@
 
 -export([priv_dir/0, normpath/1, fold_files/5]).
 -export([should_flush/0, should_flush/1, to_existing_atom/1]).
--export([rand32/0, implode/2, collate/2, collate/3]).
+-export([rand32/0, implode/2, collate/2, collate/3, get_sort_key/1]).
 -export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
 -export([encodeBase64Url/1, decodeBase64Url/1]).
 -export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
@@ -406,11 +406,20 @@ collate(A, B, Options) when is_binary(A), is_binary(B) ->
     SizeA = byte_size(A),
     SizeB = byte_size(B),
     Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
-    [Result] = erlang:port_control(drv_port(), Operation, Bin),
+    <<Result>> = erlang:port_control(drv_port(), Operation, Bin),
     % Result is 0 for lt, 1 for eq and 2 for gt. Subtract 1 to return the
     % expected typical -1, 0, 1
     Result - 1.
 
+get_sort_key(Str) when is_binary(Str) ->
+    Operation = 2, % get_sort_key
+    Size = byte_size(Str),
+    Bin = <<Size:32/native, Str/binary>>,
+    case erlang:port_control(drv_port(), Operation, Bin) of
+        <<>> -> error;
+        Res -> Res
+    end.
+
 should_flush() ->
     should_flush(?FLUSH_MAX_MEM).
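
get_sort_key/1 above hands the driver a length-prefixed buffer
(<<Size:32/native, Str/binary>>) and receives the raw ICU sort key
back, or <<>> on failure. ICU sort keys are constructed so that plain
byte comparison reproduces collator order, which lets callers compute
one key per string and sort with the regular term order instead of
calling collate/2 per comparison. A hedged usage sketch (assumes the
ICU driver is loaded, as in the eunit setup below):

    KA = couch_util:get_sort_key(<<"apple">>),
    KB = couch_util:get_sort_key(<<"Banana">>),
    true = is_binary(KA) andalso is_binary(KB),
    %% (KA < KB) =:= (couch_util:collate(<<"apple">>, <<"Banana">>) < 0)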
 
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
index 3e145c4..b518028 100644
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ b/src/couch/test/eunit/couch_util_tests.erl
@@ -14,6 +14,12 @@
 
 -include_lib("couch/include/couch_eunit.hrl").
 
+% For generating poisson distributed string lengths
+% in the random unicode generation. This shoots
+% for lengths centered around 24 characters. To
+% change, replace this value with math:exp(-Length).
+-define(POISSON_LIMIT, 3.775134544279098e-11).
+-define(RANDOM_TEST_SIZE, 10000).
 
 setup() ->
     %% We cannot start driver from here since it becomes bounded to eunit
@@ -168,3 +174,137 @@ to_hex_test_() ->
         ?_assertEqual("", couch_util:to_hex(<<>>)),
         ?_assertEqual("010203faff", couch_util:to_hex(<<1, 2, 3, 250, 255>>))
     ].
+
+sort_key_test_() ->
+    {
+        "Sort Key tests",
+        [
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun test_get_sort_key/1,
+                    fun test_get_sort_key_jiffy_string/1,
+                    fun test_get_sort_key_fails_on_bad_input/1,
+                    fun test_get_sort_key_longer_than_buffer/1,
+                    fun test_sort_key_collation/1,
+                    fun test_sort_key_list_sort/1
+                ]
+            }
+        ]
+    }.
+
+test_get_sort_key(_) ->
+    Strs = [
+        <<"">>,
+        <<"foo">>,
+        <<"bar">>,
+        <<"Bar">>,
+        <<"baz">>,
+        <<"BAZ">>,
+        <<"quaz">>,
+        <<"1234fdsa">>,
+        <<"1234">>,
+        <<"pizza">>
+    ],
+    Pairs = [{S1, S2} || S1 <- Strs, S2 <- Strs],
+    lists:map(fun({S1, S2}) ->
+        S1K = couch_util:get_sort_key(S1),
+        S2K = couch_util:get_sort_key(S2),
+        SortRes = sort_keys(S1K, S2K),
+        Comment = list_to_binary(io_lib:format("strcmp(~p, ~p)", [S1, S2])),
+        CollRes = couch_util:collate(S1, S2),
+        {Comment, ?_assertEqual(SortRes, CollRes)}
+    end, Pairs).
+
+test_get_sort_key_jiffy_string(_) ->
+    %% jiffy:decode does not null terminate strings
+    %% so we use it here to test unterminated strings
+    {[{S1,S2}]} = jiffy:decode(<<"{\"foo\": \"bar\"}">>),
+    S1K = couch_util:get_sort_key(S1),
+    S2K = couch_util:get_sort_key(S2),
+    SortRes = sort_keys(S1K, S2K),
+    CollRes = couch_util:collate(S1, S2),
+    ?_assertEqual(SortRes, CollRes).
+
+test_get_sort_key_fails_on_bad_input(_) ->
+    %% generated with crypto:strong_rand_bytes
+    %% contains invalid character, should error
+    S = <<209,98,222,144,60,163,72,134,206,157>>,
+    Res = couch_util:get_sort_key(S),
+    ?_assertEqual(error, Res).
+
+test_get_sort_key_longer_than_buffer(_) ->
+    %% stack allocated buffer is 1024 units
+    %% test resize logic with strings > 1024 chars
+    Extra = list_to_binary(["a" || _ <- lists:seq(1, 1200)]),
+    Res = couch_util:get_sort_key(Extra),
+    ?_assert(is_binary(Res)).
+
+test_sort_key_collation(_) ->
+    ?_test(begin
+        lists:foreach(fun(_) ->
+            K1 = random_unicode_binary(),
+            SK1 = couch_util:get_sort_key(K1),
+
+            K2 = random_unicode_binary(),
+            SK2 = couch_util:get_sort_key(K2),
+
+            % Probably kinda silly but whatevs
+            ?assertEqual(couch_util:collate(K1, K1), sort_keys(SK1, SK1)),
+            ?assertEqual(couch_util:collate(K2, K2), sort_keys(SK2, SK2)),
+
+            ?assertEqual(couch_util:collate(K1, K2), sort_keys(SK1, SK2)),
+            ?assertEqual(couch_util:collate(K2, K1), sort_keys(SK2, SK1))
+        end, lists:seq(1, ?RANDOM_TEST_SIZE))
+    end).
+
+test_sort_key_list_sort(_) ->
+    ?_test(begin
+        RandomKeys = lists:map(fun(_) ->
+            random_unicode_binary()
+        end, lists:seq(1, ?RANDOM_TEST_SIZE)),
+
+        CollationSorted = lists:sort(fun(A, B) ->
+            couch_util:collate(A, B) =< 0
+        end, RandomKeys),
+
+        SortKeys = lists:map(fun(K) ->
+            {couch_util:get_sort_key(K), K}
+        end, RandomKeys),
+        {_, SortKeySorted} = lists:unzip(lists:sort(SortKeys)),
+
+        ?assertEqual(CollationSorted, SortKeySorted)
+    end).
+
+sort_keys(S1, S2) ->
+    case S1 < S2 of
+        true ->
+            -1;
+        false -> case S1 =:= S2 of
+            true ->
+                0;
+            false ->
+                1
+        end
+    end.
+
+random_unicode_binary() ->
+    Size = poisson_length(0, rand:uniform()),
+    Chars = [random_unicode_char() || _ <- lists:seq(1, Size)],
+    <<_/binary>> = unicode:characters_to_binary(Chars).
+
+poisson_length(N, Acc) when Acc > ?POISSON_LIMIT ->
+    poisson_length(N + 1, Acc * rand:uniform());
+poisson_length(N, _) ->
+    N.
+
+random_unicode_char() ->
+    BaseChar = rand:uniform(16#FFFD + 1) - 1,
+    case BaseChar of
+        BC when BC >= 16#D800, BC =< 16#DFFF ->
+            % This range is reserved for surrogate pair
+            % encodings.
+            random_unicode_char();
+        BC ->
+            BC
+    end.
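
The Poisson machinery above is Knuth's sampler: multiply uniform (0,1)
draws together until the product falls below e^-Lambda, and the number
of draws is Poisson(Lambda)-distributed. The constant checks out for
Lambda = 24:

    1> math:exp(-24).
    3.775134544279098e-11

which is exactly ?POISSON_LIMIT, so random_unicode_binary/0 yields
string lengths centered around 24 characters, as the comment says.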


[couchdb] 06/34: Start switching chttpd HTTP endpoints to fabric2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 0cf5f463d0b73427656cea4465582f5102508627
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:43:20 2019 -0500

    Start switching chttpd HTTP endpoints to fabric2
    
    This is not an exhaustive port of the entire chttpd API. However, this
    is enough to support basic CRUD operations far enough that replication
    works.
---
 src/chttpd/src/chttpd.erl              |  11 +-
 src/chttpd/src/chttpd_auth_request.erl |   7 +-
 src/chttpd/src/chttpd_changes.erl      | 973 +++++++++++++++++++++++++++++++++
 src/chttpd/src/chttpd_db.erl           | 328 +++++------
 src/chttpd/src/chttpd_external.erl     |  35 +-
 src/chttpd/src/chttpd_misc.erl         |  62 +--
 src/chttpd/src/chttpd_show.erl         |   5 +-
 src/couch_mrview/src/couch_mrview.erl  |  16 +-
 test/elixir/test/basics_test.exs       |   2 +-
 9 files changed, 1183 insertions(+), 256 deletions(-)

diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 1e1d638..4d32c03 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -25,7 +25,7 @@
     error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
     doc_etag/1, make_etag/1, etag_respond/3, etag_match/2,
     partition/1, serve_file/3, serve_file/4,
-    server_header/0, start_chunked_response/3,send_chunk/2,
+    server_header/0, start_chunked_response/3,send_chunk/2,last_chunk/1,
     start_response_length/4, send/2, start_json_response/2,
     start_json_response/3, end_json_response/1, send_response/4,
     send_response_no_cors/4,
@@ -743,7 +743,14 @@ start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
     {ok, Resp}.
 
 send_chunk(Resp, Data) ->
-    Resp:write_chunk(Data),
+    case iolist_size(Data) of
+        0 -> ok; % do nothing
+        _ -> Resp:write_chunk(Data)
+    end,
+    {ok, Resp}.
+
+last_chunk(Resp) ->
+    Resp:write_chunk([]),
     {ok, Resp}.
 
 send_response(Req, Code, Headers0, Body) ->
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 96dbf98..7210905 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -103,7 +103,8 @@ server_authorization_check(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
     require_admin(Req).
 
 db_authorization_check(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
-    {_} = fabric:get_security(DbName, [{user_ctx, Ctx}]),
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
+    fabric2_db:check_is_member(Db),
     Req.
 
 require_admin(Req) ->
@@ -111,8 +112,8 @@ require_admin(Req) ->
     Req.
 
 require_db_admin(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
-    Sec = fabric:get_security(DbName, [{user_ctx, Ctx}]),
-
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
+    Sec = fabric2_db:get_security(Db),
     case is_db_admin(Ctx,Sec) of
         true -> Req;
         false ->  throw({unauthorized, <<"You are not a server or db admin.">>})
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
new file mode 100644
index 0000000..30caab2
--- /dev/null
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -0,0 +1,973 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_changes).
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-export([
+    handle_db_changes/3,
+    handle_changes/4,
+    get_changes_timeout/2,
+    wait_updated/3,
+    get_rest_updated/1,
+    configure_filter/4,
+    filter/3,
+    handle_db_event/3,
+    handle_view_event/3,
+    view_filter/3,
+    send_changes_doc_ids/6,
+    send_changes_design_docs/6
+]).
+
+-export([changes_enumerator/2]).
+
+%% export so we can use fully qualified call to facilitate hot-code upgrade
+-export([
+    keep_sending_changes/3
+]).
+
+-record(changes_acc, {
+    db,
+    view_name,
+    ddoc_name,
+    view,
+    seq,
+    prepend,
+    filter,
+    callback,
+    user_acc,
+    resp_type,
+    limit,
+    include_docs,
+    doc_options,
+    conflicts,
+    timeout,
+    timeout_fun,
+    aggregation_kvs,
+    aggregation_results
+}).
+
+handle_db_changes(Args, Req, Db) ->
+    handle_changes(Args, Req, Db, db).
+
+handle_changes(Args1, Req, Db, Type) ->
+    ReqPid = chttpd:header_value(Req, "XKCD", "<unknown>"),
+    #changes_args{
+        style = Style,
+        filter = FilterName,
+        feed = Feed,
+        dir = Dir,
+        since = Since
+    } = Args1,
+    couch_log:error("XKCD: STARTING CHANGES FEED ~p for ~s : ~p", [self(), ReqPid, Since]),
+    Filter = configure_filter(FilterName, Style, Req, Db),
+    Args = Args1#changes_args{filter_fun = Filter},
+    % The type of changes feed depends on the supplied filter. If the query is
+    % for an optimized view-filtered db changes, we need to use the view
+    % sequence tree.
+    {UseViewChanges, DDocName, ViewName} = case {Type, Filter} of
+        {{view, DDocName0, ViewName0}, _} ->
+            {true, DDocName0, ViewName0};
+        {_, {fast_view, _, DDoc, ViewName0}} ->
+            {true, DDoc#doc.id, ViewName0};
+        _ ->
+            {false, undefined, undefined}
+    end,
+    DbName = fabric2_db:name(Db),
+    {StartListenerFun, View} = if UseViewChanges ->
+        {ok, {_, View0, _}, _, _} = couch_mrview_util:get_view(
+                DbName, DDocName, ViewName, #mrargs{}),
+        case View0#mrview.seq_btree of
+            #btree{} ->
+                ok;
+            _ ->
+                throw({bad_request, "view changes not enabled"})
+        end,
+        SNFun = fun() ->
+            couch_event:link_listener(
+                 ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, DbName}]
+            )
+        end,
+        {SNFun, View0};
+    true ->
+        SNFun = fun() ->
+            fabric2_events:link_listener(
+                    ?MODULE, handle_db_event, self(), [{dbname, DbName}]
+                )
+        end,
+        {SNFun, undefined}
+    end,
+    Start = fun() ->
+        StartSeq = case Dir of
+        rev ->
+            fabric2_fdb:get_update_seq(Db);
+        fwd ->
+            Since
+        end,
+        View2 = if UseViewChanges ->
+            {ok, {_, View1, _}, _, _} = couch_mrview_util:get_view(
+                    DbName, DDocName, ViewName, #mrargs{}),
+            View1;
+        true ->
+            undefined
+        end,
+        {Db, View2, StartSeq}
+    end,
+    % begin timer to deal with heartbeat when filter function fails
+    case Args#changes_args.heartbeat of
+    undefined ->
+        erlang:erase(last_changes_heartbeat);
+    Val when is_integer(Val); Val =:= true ->
+        put(last_changes_heartbeat, os:timestamp())
+    end,
+
+    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
+    true ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            {ok, Listener} = StartListenerFun(),
+
+            {Db, View, StartSeq} = Start(),
+            UserAcc2 = start_sending_changes(Callback, UserAcc),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
+                             <<"">>, Timeout, TimeoutFun, DDocName, ViewName,
+                             View),
+            try
+                keep_sending_changes(
+                    Args#changes_args{dir=fwd},
+                    Acc0,
+                    true)
+            after
+                fabric2_events:stop_listener(Listener),
+                get_rest_updated(ok) % clean out any remaining update messages
+            end
+        end;
+    false ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            UserAcc2 = start_sending_changes(Callback, UserAcc),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            {Db, View, StartSeq} = Start(),
+            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
+                             UserAcc2, Db, StartSeq, <<>>, Timeout, TimeoutFun,
+                             DDocName, ViewName, View),
+            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+                send_changes(
+                    Acc0,
+                    Dir,
+                    true),
+            end_sending_changes(Callback, UserAcc3, LastSeq)
+        end
+    end.
+
+
+handle_db_event(_DbName, updated, Parent) ->
+    Parent ! updated,
+    {ok, Parent};
+handle_db_event(_DbName, deleted, Parent) ->
+    Parent ! deleted,
+    {ok, Parent};
+handle_db_event(_DbName, _Event, Parent) ->
+    {ok, Parent}.
+
+
+handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
+    case Msg of
+        {index_commit, DDocId} ->
+            Parent ! updated;
+        {index_delete, DDocId} ->
+            Parent ! deleted;
+        _ ->
+            ok
+    end,
+    {ok, {Parent, DDocId}}.
+
+get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 2) ->
+    Pair;
+get_callback_acc(Callback) when is_function(Callback, 1) ->
+    {fun(Ev, _) -> Callback(Ev) end, ok}.
+
+
+configure_filter("_doc_ids", Style, Req, _Db) ->
+    {doc_ids, Style, get_doc_ids(Req)};
+configure_filter("_selector", Style, Req, _Db) ->
+    {selector, Style,  get_selector_and_fields(Req)};
+configure_filter("_design", Style, _Req, _Db) ->
+    {design_docs, Style};
+configure_filter("_view", Style, Req, Db) ->
+    ViewName = get_view_qs(Req),
+    if ViewName /= "" -> ok; true ->
+        throw({bad_request, "`view` filter parameter is not provided."})
+    end,
+    ViewNameParts = string:tokens(ViewName, "/"),
+    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
+        [DName, VName] ->
+            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+            check_member_exists(DDoc, [<<"views">>, VName]),
+            FilterType = try
+                true = couch_util:get_nested_json_value(
+                        DDoc#doc.body,
+                        [<<"options">>, <<"seq_indexed">>]
+                ),
+                fast_view
+            catch _:_ ->
+                view
+            end,
+            case fabric2_db:is_clustered(Db) of
+                true ->
+                    DIR = fabric_util:doc_id_and_rev(DDoc),
+                    {fetch, FilterType, Style, DIR, VName};
+                false ->
+                    {FilterType, Style, DDoc, VName}
+            end;
+        [] ->
+            Msg = "`view` must be of the form `designname/viewname`",
+            throw({bad_request, Msg})
+    end;
+configure_filter([$_ | _], _Style, _Req, _Db) ->
+    throw({bad_request, "unknown builtin filter name"});
+configure_filter("", main_only, _Req, _Db) ->
+    {default, main_only};
+configure_filter("", all_docs, _Req, _Db) ->
+    {default, all_docs};
+configure_filter(FilterName, Style, Req, Db) ->
+    FilterNameParts = string:tokens(FilterName, "/"),
+    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
+        [DName, FName] ->
+            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+            check_member_exists(DDoc, [<<"filters">>, FName]),
+            {custom, Style, Req, DDoc, FName};
+        [] ->
+            {default, Style};
+        _Else ->
+            Msg = "`filter` must be of the form `designname/filtername`",
+            throw({bad_request, Msg})
+    end.
+
+
+filter(Db, Change, {default, Style}) ->
+    apply_style(Db, Change, Style);
+filter(Db, Change, {doc_ids, Style, DocIds}) ->
+    case lists:member(maps:get(id, Change), DocIds) of
+        true ->
+            apply_style(Db, Change, Style);
+        false ->
+            []
+    end;
+filter(Db, Change, {selector, Style, {Selector, _Fields}}) ->
+    Docs = open_revs(Db, Change, Style),
+    Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
+        || Doc <- Docs],
+    filter_revs(Passes, Docs);
+filter(Db, Change, {design_docs, Style}) ->
+    case maps:get(id, Change) of
+        <<"_design", _/binary>> ->
+            apply_style(Db, Change, Style);
+        _ ->
+            []
+    end;
+filter(Db, Change, {FilterType, Style, DDoc, VName})
+        when FilterType == view; FilterType == fast_view ->
+    Docs = open_revs(Db, Change, Style),
+    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
+    filter_revs(Passes, Docs);
+filter(Db, Change, {custom, Style, Req0, DDoc, FName}) ->
+    Req = case Req0 of
+        {json_req, _} -> Req0;
+        #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
+    end,
+    Docs = open_revs(Db, Change, Style),
+    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
+    filter_revs(Passes, Docs);
+filter(A, B, C) ->
+    erlang:error({filter_error, A, B, C}).
+
+fast_view_filter(Db, {{Seq, _}, {ID, _, _}}, {fast_view, Style, _, _}) ->
+    case fabric2_db:get_doc_info(Db, ID) of
+        {ok, #doc_info{high_seq=Seq}=DocInfo} ->
+            Docs = open_revs(Db, DocInfo, Style),
+            Changes = lists:map(fun(#doc{revs={RevPos, [RevId | _]}}) ->
+                RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+                {[{<<"rev">>, RevStr}]}
+            end, Docs),
+            {DocInfo, Changes};
+        {ok, #doc_info{high_seq=HighSeq}} when Seq > HighSeq ->
+            % If the view seq tree is out of date (or if the view seq tree
+            % was opened before the db), the seq tree may emit seqs that
+            % correspond to a revision of a document that is no longer
+            % current. The proper thing to do is to not send this old
+            % revision, but wait until we reopen the up-to-date view seq
+            % tree and continue the fold.
+            % The Seq > HighSeq guard is left in so that if (for some
+            % reason) the seq in the view is more current than the
+            % database, we'll throw an error.
+            {undefined, []};
+        {error, not_found} ->
+            {undefined, []}
+    end.
+
+
+view_filter(Db, KV, {default, Style}) ->
+    apply_view_style(Db, KV, Style).
+
+
+get_view_qs({json_req, {Props}}) ->
+    {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
+    binary_to_list(couch_util:get_value(<<"view">>, Query, <<"">>));
+get_view_qs(Req) ->
+    couch_httpd:qs_value(Req, "view", "").
+
+get_doc_ids({json_req, {Props}}) ->
+    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='POST'}=Req) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {Props} = couch_httpd:json_body_obj(Req),
+    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='GET'}=Req) ->
+    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
+    check_docids(DocIds);
+get_doc_ids(_) ->
+    throw({bad_request, no_doc_ids_provided}).
+
+
+get_selector_and_fields({json_req, {Props}}) ->
+    Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
+    Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
+    {Selector, Fields};
+get_selector_and_fields(#httpd{method='POST'}=Req) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
+get_selector_and_fields(_) ->
+    throw({bad_request, "Selector must be specified in POST payload"}).
+
+
+check_docids(DocIds) when is_list(DocIds) ->
+    lists:foreach(fun
+        (DocId) when not is_binary(DocId) ->
+            Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+            throw({bad_request, Msg});
+        (_) -> ok
+    end, DocIds),
+    DocIds;
+check_docids(_) ->
+    Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+    throw({bad_request, Msg}).
+
+
+check_selector(Selector={_}) ->
+    try
+        mango_selector:normalize(Selector)
+    catch
+        {mango_error, Mod, Reason0} ->
+            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+            throw({bad_request, Reason})
+    end;
+check_selector(_Selector) ->
+    throw({bad_request, "Selector error: expected a JSON object"}).
+
+
+check_fields(nil) ->
+    nil;
+check_fields(Fields) when is_list(Fields) ->
+    try
+        {ok, Fields1} = mango_fields:new(Fields),
+        Fields1
+    catch
+        {mango_error, Mod, Reason0} ->
+            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+            throw({bad_request, Reason})
+    end;
+check_fields(_Fields) ->
+    throw({bad_request, "Selector error: fields must be JSON array"}).
+
+
+open_ddoc(Db, DDocId) ->
+    case ddoc_cache:open_doc(Db, DDocId) of
+        {ok, _} = Resp -> Resp;
+        Else -> throw(Else)
+    end.
+
+
+check_member_exists(#doc{body={Props}}, Path) ->
+    couch_util:get_nested_json_value({Props}, Path).
+
+
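+% The style controls which revisions a change row reports: main_only
+% emits only the winning revision, all_docs emits one entry per leaf
+% revision, deleted leaves included.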
+apply_style(_Db, Change, main_only) ->
+    #{rev_id := RevId} = Change,
+    [{[{<<"rev">>, couch_doc:rev_to_str(RevId)}]}];
+apply_style(Db, Change, all_docs) ->
+    % We have to fetch all revs for this row
+    #{id := DocId} = Change,
+    {ok, Resps} = fabric2_db:open_doc_revs(Db, DocId, all, [deleted]),
+    lists:flatmap(fun(Resp) ->
+        case Resp of
+            {ok, #doc{revs = {Pos, [Rev | _]}}} ->
+                [{[{<<"rev">>, couch_doc:rev_to_str({Pos, Rev})}]}];
+            _ ->
+                []
+        end
+    end, Resps);
+apply_style(A, B, C) ->
+    erlang:error({changes_apply_style, A, B, C}).
+
+apply_view_style(_Db, {{_Seq, _Key}, {_ID, _Value, Rev}}, main_only) ->
+    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
+apply_view_style(Db, {{_Seq, _Key}, {ID, _Value, _Rev}}, all_docs) ->
+    case fabric2_db:get_doc_info(Db, ID) of
+        {ok, DocInfo} ->
+            apply_style(Db, DocInfo, all_docs);
+        {error, not_found} ->
+            []
+    end.
+
+
+open_revs(Db, Change, Style) ->
+    #{id := DocId} = Change,
+    Options = [deleted, conflicts],
+    try
+        case Style of
+            main_only ->
+                {ok, Doc} = fabric2_db:open_doc(Db, DocId, Options),
+                [Doc];
+            all_docs ->
+                {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, Options),
+                [Doc || {ok, Doc} <- Docs]
+        end
+    catch _:_ ->
+        % We didn't log this before, should we now?
+        []
+    end.
+
+
+filter_revs(Passes, Docs) ->
+    lists:flatmap(fun
+        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
+            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+            Change = {[{<<"rev">>, RevStr}]},
+            [Change];
+        (_) ->
+            []
+    end, lists:zip(Passes, Docs)).
+
+
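+% Compute the effective wait timeout and the fun to run when it fires:
+% with a heartbeat the fun emits a timeout event through the callback and
+% keeps the feed alive, otherwise the first timeout stops the feed.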
+get_changes_timeout(Args, Callback) ->
+    #changes_args{
+        heartbeat = Heartbeat,
+        timeout = Timeout,
+        feed = ResponseType
+    } = Args,
+    DefaultTimeout = list_to_integer(
+        config:get("httpd", "changes_timeout", "60000")
+    ),
+    case Heartbeat of
+    undefined ->
+        case Timeout of
+        undefined ->
+            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+        infinity ->
+            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+        _ ->
+            {lists:min([DefaultTimeout, Timeout]),
+                fun(UserAcc) -> {stop, UserAcc} end}
+        end;
+    true ->
+        {DefaultTimeout,
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
+    _ ->
+        {lists:min([DefaultTimeout, Heartbeat]),
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+    end.
+
+start_sending_changes(Callback, UserAcc) ->
+    {_, NewUserAcc} = Callback(start, UserAcc),
+    NewUserAcc.
+
+build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout,
+        TimeoutFun, DDocName, ViewName, View) ->
+    #changes_args{
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        limit = Limit,
+        feed = ResponseType,
+        filter_fun = Filter
+    } = Args,
+    #changes_acc{
+        db = Db,
+        seq = StartSeq,
+        prepend = Prepend,
+        filter = Filter,
+        callback = Callback,
+        user_acc = UserAcc,
+        resp_type = ResponseType,
+        limit = Limit,
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        timeout = Timeout,
+        timeout_fun = TimeoutFun,
+        ddoc_name = DDocName,
+        view_name = ViewName,
+        view = View,
+        aggregation_results=[],
+        aggregation_kvs=[]
+    }.
+
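+% Run one round of the feed: use the direct-lookup optimization when the
+% filter allows it, fold the view seq index for view filters, or fall
+% back to the regular db changes fold.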
+send_changes(Acc, Dir, FirstRound) ->
+    #changes_acc{
+        db = Db,
+        seq = StartSeq,
+        filter = Filter,
+        view = View
+    } = Acc,
+    DbEnumFun = fun changes_enumerator/2,
+    case can_optimize(FirstRound, Filter) of
+        {true, Fun} ->
+            Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
+        _ ->
+            case {View, Filter}  of
+                {#mrview{}, {fast_view, _, _, _}} ->
+                    couch_mrview:view_changes_since(
+                        View, StartSeq, DbEnumFun, [{dir, Dir}], Acc);
+                {undefined, _} ->
+                    Opts = [{dir, Dir}],
+                    fabric2_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts);
+                {#mrview{}, _} ->
+                    ViewEnumFun = fun view_changes_enumerator/2,
+                    {Go, Acc0} = couch_mrview:view_changes_since(
+                        View, StartSeq, ViewEnumFun, [{dir, Dir}], Acc),
+                    case Acc0 of
+                        #changes_acc{aggregation_results=[]} ->
+                            {Go, Acc0};
+                        _ ->
+                            #changes_acc{
+                                aggregation_results = AggResults,
+                                aggregation_kvs = AggKVs,
+                                user_acc = UserAcc,
+                                callback = Callback,
+                                resp_type = ResponseType,
+                                prepend = Prepend
+                            } = Acc0,
+                            ChangesRow = view_changes_row(AggResults, AggKVs, Acc0),
+                            UserAcc0 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+                            reset_heartbeat(),
+                            {Go, Acc0#changes_acc{user_acc=UserAcc0}}
+                    end
+            end
+    end.
+
+
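+% On the first round, a doc_ids filter below the configured threshold or
+% a design_docs filter can be answered by direct lookups instead of
+% folding the whole changes feed.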
+can_optimize(true, {doc_ids, _Style, DocIds}) ->
+    MaxDocIds = config:get_integer("couchdb",
+        "changes_doc_ids_optimization_threshold", 100),
+    if length(DocIds) =< MaxDocIds ->
+        {true, fun send_changes_doc_ids/6};
+    true ->
+        false
+    end;
+can_optimize(true, {design_docs, _Style}) ->
+    {true, fun send_changes_design_docs/6};
+can_optimize(_, _) ->
+    false.
+
+
+send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
+    Results = fabric2_db:get_full_doc_infos(Db, DocIds),
+    FullInfos = lists:foldl(fun
+        (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
+        (not_found, Acc) -> Acc
+    end, [], Results),
+    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
+    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+    Opts = [
+        include_deleted,
+        {start_key, <<"_design/">>},
+        {end_key_gt, <<"_design0">>}
+    ],
+    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
+    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
+    FoldFun = case Dir of
+        fwd -> fun lists:foldl/3;
+        rev -> fun lists:foldr/3
+    end,
+    GreaterFun = case Dir of
+        fwd -> fun(A, B) -> A > B end;
+        rev -> fun(A, B) -> A =< B end
+    end,
+    DocInfos = lists:foldl(fun(FDI, Acc) ->
+        DI = couch_doc:to_doc_info(FDI),
+        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+            true -> [DI | Acc];
+            false -> Acc
+        end
+    end, [], FullDocInfos),
+    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+    FinalAcc = try
+        FoldFun(fun(DocInfo, Acc) ->
+            % Kinda gross that we're munging this back to a map
+            % that will then have to re-read and rebuild the FDI
+            % for all_docs style. But c'est la vie.
+            #doc_info{
+                id = DocId,
+                high_seq = Seq,
+                revs = [#rev_info{rev = Rev, deleted = Deleted} | _]
+            } = DocInfo,
+            Change = #{
+                id => DocId,
+                sequence => Seq,
+                rev_id => Rev,
+                deleted => Deleted
+            },
+            case Fun(Change, Acc) of
+                {ok, NewAcc} ->
+                    NewAcc;
+                {stop, NewAcc} ->
+                    throw({stop, NewAcc})
+            end
+        end, Acc0, SortedDocInfos)
+    catch
+        {stop, Acc} -> Acc
+    end,
+    case Dir of
+        fwd ->
+            FinalAcc0 = case element(1, FinalAcc) of
+                changes_acc -> % we came here via couch_httpd or internal call
+                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
+                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
+                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+            end,
+            {ok, FinalAcc0};
+        rev -> {ok, FinalAcc}
+    end.
+
+
+keep_sending_changes(Args, Acc0, FirstRound) ->
+    #changes_args{
+        feed = ResponseType,
+        limit = Limit,
+        db_open_options = DbOptions
+    } = Args,
+
+    {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
+
+    #changes_acc{
+        db = Db, callback = Callback,
+        timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
+        prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit,
+        ddoc_name = DDocName, view_name = ViewName
+    } = ChangesAcc,
+
+    if Limit > NewLimit, ResponseType == "longpoll" ->
+        end_sending_changes(Callback, UserAcc2, EndSeq);
+    true ->
+        {Go, UserAcc3} = notify_waiting_for_updates(Callback, UserAcc2),
+        if Go /= ok -> end_sending_changes(Callback, UserAcc3, EndSeq); true ->
+            case wait_updated(Timeout, TimeoutFun, UserAcc3) of
+            {updated, UserAcc4} ->
+                UserCtx = fabric2_db:get_user_ctx(Db),
+                DbOptions1 = [{user_ctx, UserCtx} | DbOptions],
+                case fabric2_db:open(fabric2_db:name(Db), DbOptions1) of
+                {ok, Db2} ->
+                    ?MODULE:keep_sending_changes(
+                      Args#changes_args{limit=NewLimit},
+                      ChangesAcc#changes_acc{
+                        db = Db2,
+                        view = maybe_refresh_view(Db2, DDocName, ViewName),
+                        user_acc = UserAcc4,
+                        seq = EndSeq,
+                        prepend = Prepend2,
+                        timeout = Timeout,
+                        timeout_fun = TimeoutFun},
+                      false);
+                _Else ->
+                    end_sending_changes(Callback, UserAcc3, EndSeq)
+                end;
+            {stop, UserAcc4} ->
+                end_sending_changes(Callback, UserAcc4, EndSeq)
+            end
+        end
+    end.
+
+maybe_refresh_view(_, undefined, undefined) ->
+    undefined;
+maybe_refresh_view(Db, DDocName, ViewName) ->
+    DbName = fabric2_db:name(Db),
+    {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(
+        DbName, DDocName, ViewName, #mrargs{}),
+    View.
+
+notify_waiting_for_updates(Callback, UserAcc) ->
+    Callback(waiting_for_updates, UserAcc).
+
+end_sending_changes(Callback, UserAcc, EndSeq) ->
+    Callback({stop, EndSeq, null}, UserAcc).
+
+view_changes_enumerator(Value, Acc) ->
+    #changes_acc{
+        filter = Filter, callback = Callback, prepend = Prepend,
+        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
+        timeout = Timeout, timeout_fun = TimeoutFun, seq = CurrentSeq,
+        aggregation_kvs=AggKVs, aggregation_results=AggResults
+    } = Acc,
+
+    Results0 = view_filter(Db, Value, Filter),
+    Results = [Result || Result <- Results0, Result /= null],
+    {{Seq, _}, _} = Value,
+
+    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+
+    if CurrentSeq =:= Seq ->
+        NewAggKVs = case Results of
+            [] -> AggKVs;
+            _ -> [Value|AggKVs]
+        end,
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        Acc0 = Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            aggregation_kvs=NewAggKVs
+        },
+        case Done of
+            stop -> {stop, Acc0};
+            ok -> {Go, Acc0}
+        end;
+    AggResults =/= [] ->
+        {NewAggKVs, NewAggResults} = case Results of
+            [] -> {[], []};
+            _ -> {[Value], Results}
+        end,
+        if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
+            ChangesRow = view_changes_row(AggResults, AggKVs, Acc),
+            UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
+            reset_heartbeat(),
+            {Go, Acc#changes_acc{
+                seq = Seq, user_acc = UserAcc2, limit = Limit - 1,
+                aggregation_kvs=NewAggKVs, aggregation_results=NewAggResults}};
+        true ->
+            ChangesRow = view_changes_row(AggResults, AggKVs, Acc),
+            UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+            reset_heartbeat(),
+            {Go, Acc#changes_acc{
+                seq = Seq, prepend = <<",\n">>, user_acc = UserAcc2,
+                limit = Limit - 1, aggregation_kvs=[Value],
+                aggregation_results=Results}}
+        end;
+    true ->
+        {NewAggKVs, NewAggResults} = case Results of
+            [] -> {[], []};
+            _ -> {[Value], Results}
+        end,
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        Acc0 = Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            aggregation_kvs=NewAggKVs,
+            aggregation_results=NewAggResults
+        },
+        case Done of
+            stop -> {stop, Acc0};
+            ok -> {Go, Acc0}
+        end
+    end.
+
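+% Apply the configured filter to each change; fast_view filters also
+% rewrite the change from the view seq index before results are emitted.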
+changes_enumerator(Change0, Acc) ->
+    #changes_acc{
+        filter = Filter,
+        callback = Callback,
+        user_acc = UserAcc,
+        limit = Limit,
+        db = Db,
+        timeout = Timeout,
+        timeout_fun = TimeoutFun
+    } = Acc,
+    {Change1, Results0} = case Filter of
+        {fast_view, _, _, _} ->
+            fast_view_filter(Db, Change0, Filter);
+        _ ->
+            {Change0, filter(Db, Change0, Filter)}
+    end,
+    Results = [Result || Result <- Results0, Result /= null],
+    Seq = maps:get(sequence, Change1),
+    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+    case Results of
+    [] ->
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        case Done of
+        stop ->
+            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+        ok ->
+            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+        end;
+    _ ->
+        ChangesRow = changes_row(Results, Change1, Acc),
+        {UserGo, UserAcc2} = Callback({change, ChangesRow}, UserAcc),
+        RealGo = case UserGo of
+            ok -> Go;
+            stop -> stop
+        end,
+        reset_heartbeat(),
+        couch_log:error("XKCD: CHANGE SEQ: ~p", [Seq]),
+        {RealGo, Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            limit = Limit - 1
+        }}
+    end.
+
+
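+% Build one change row for the view KVs aggregated at a single seq:
+% added and removed keys are split out and dup values are flattened.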
+view_changes_row(Results, KVs, Acc) ->
+    {Add, Remove} = lists:foldl(fun(Row, {AddAcc, RemAcc}) ->
+        {{_Seq, Key}, {_Id, Value, _Rev}} = Row,
+        case Value of
+            removed ->
+                {AddAcc, [Key|RemAcc]};
+            {dups, DupValues} ->
+                AddAcc1 = lists:foldl(fun(DupValue, AddAcc0) ->
+                    [[Key, DupValue]|AddAcc0]
+                end, AddAcc, DupValues),
+                {AddAcc1, RemAcc};
+            _ ->
+                {[[Key, Value]|AddAcc], RemAcc}
+        end
+    end, {[], []}, KVs),
+
+    % Seq, Id, and Rev should be the same for all KVs, since we're aggregating
+    % by seq.
+    [{{Seq, _Key}, {Id, _Value, Rev}}|_] = KVs,
+
+    {[
+        {<<"seq">>, Seq}, {<<"id">>, Id}, {<<"add">>, Add},
+        {<<"remove">>, Remove}, {<<"changes">>, Results}
+    ] ++ maybe_get_changes_doc({Id, Rev}, Acc)}.
+
+
+changes_row(Results, Change, Acc) ->
+    #{
+        id := Id,
+        sequence := Seq,
+        deleted := Del
+    } = Change,
+    {[
+        {<<"seq">>, Seq},
+        {<<"id">>, Id},
+        {<<"changes">>, Results}
+    ] ++ deleted_item(Del) ++ maybe_get_changes_doc(Change, Acc)}.
+
+maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
+    #changes_acc{
+        db = Db,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        filter = Filter
+    } = Acc,
+    Opts = case Conflicts of
+        true -> [deleted, conflicts];
+        false -> [deleted]
+    end,
+    load_doc(Db, Value, Opts, DocOpts, Filter);
+
+maybe_get_changes_doc(_Value, _Acc) ->
+    [].
+
+
+load_doc(Db, Value, Opts, DocOpts, Filter) ->
+    case load_doc(Db, Value, Opts) of
+        null ->
+            [{doc, null}];
+        Doc ->
+            [{doc, doc_to_json(Doc, DocOpts, Filter)}]
+    end.
+
+
+load_doc(Db, Change, Opts) ->
+    #{
+        id := Id,
+        rev_id := RevId
+    } = Change,
+    case fabric2_db:open_doc_revs(Db, Id, [RevId], Opts) of
+        {ok, [{ok, Doc}]} ->
+            Doc;
+        _ ->
+            null
+    end.
+
+
+doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
+    when Fields =/= nil ->
+    mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
+doc_to_json(Doc, DocOpts, _Filter) ->
+    couch_doc:to_json_obj(Doc, DocOpts).
+
+
+deleted_item(true) -> [{<<"deleted">>, true}];
+deleted_item(_) -> [].
+
+% Waits for an updated msg; if there are multiple msgs, collects them all.
+wait_updated(Timeout, TimeoutFun, UserAcc) ->
+    receive
+    updated ->
+        get_rest_updated(UserAcc);
+    deleted ->
+        {stop, UserAcc}
+    after Timeout ->
+        {Go, UserAcc2} = TimeoutFun(UserAcc),
+        case Go of
+        ok ->
+            ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
+        stop ->
+            {stop, UserAcc2}
+        end
+    end.
+
+get_rest_updated(UserAcc) ->
+    receive
+    updated ->
+        get_rest_updated(UserAcc)
+    after 0 ->
+        {updated, UserAcc}
+    end.
+
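+% The last heartbeat time lives in the process dictionary; until it has
+% been initialized, both of these functions leave it untouched.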
+reset_heartbeat() ->
+    case get(last_changes_heartbeat) of
+    undefined ->
+        ok;
+    _ ->
+        put(last_changes_heartbeat, os:timestamp())
+    end.
+
+maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
+    Before = get(last_changes_heartbeat),
+    case Before of
+    undefined ->
+        {ok, Acc};
+    _ ->
+        Now = os:timestamp(),
+        case timer:now_diff(Now, Before) div 1000 >= Timeout of
+        true ->
+            Acc2 = TimeoutFun(Acc),
+            put(last_changes_heartbeat, Now),
+            Acc2;
+        false ->
+            {ok, Acc}
+        end
+    end.
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index c6404b0..40c1a1e 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -93,18 +93,13 @@ handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
 handle_changes_req1(#httpd{}=Req, Db) ->
     #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
     ChangesArgs = Args0#changes_args{
-        filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
-        db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}]
+        db_open_options = [{user_ctx, fabric2_db:get_user_ctx(Db)}]
     },
+    ChangesFun = chttpd_changes:handle_db_changes(ChangesArgs, Req, Db),
     Max = chttpd:chunked_response_buffer_size(),
     case ChangesArgs#changes_args.feed of
     "normal" ->
-        T0 = os:timestamp(),
-        {ok, Info} = fabric:get_db_info(Db),
-        Suffix = mem3:shard_suffix(Db),
-        Etag = chttpd:make_etag({Info, Suffix}),
-        DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
-        couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
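+        % Placeholder Etag for the prototype; presumably this will later
+        % be derived from fdb's \xFFmetadataVersion key, as noted for
+        % _all_dbs in chttpd_misc.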
+        Etag = <<"foo">>,
         chttpd:etag_respond(Req, Etag, fun() ->
             Acc0 = #cacc{
                 feed = normal,
@@ -112,7 +107,7 @@ handle_changes_req1(#httpd{}=Req, Db) ->
                 mochi = Req,
                 threshold = Max
             },
-            fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+            ChangesFun({fun changes_callback/2, Acc0})
         end);
     Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource"  ->
         couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
@@ -122,7 +117,7 @@ handle_changes_req1(#httpd{}=Req, Db) ->
             threshold = Max
         },
         try
-            fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+            ChangesFun({fun changes_callback/2, Acc0})
         after
             couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
         end;
@@ -337,7 +332,7 @@ update_partition_stats(PathParts) ->
 handle_design_req(#httpd{
         path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
     }=Req, Db) ->
-    DbName = mem3:dbname(couch_db:name(Db)),
+    DbName = fabric2_db:name(Db),
     case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
     {ok, DDoc} ->
         Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
@@ -365,56 +360,33 @@ handle_design_info_req(Req, _Db, _DDoc) ->
 
 create_db_req(#httpd{}=Req, DbName) ->
     couch_httpd:verify_is_server_admin(Req),
-    N = chttpd:qs_value(Req, "n", config:get("cluster", "n", "3")),
-    Q = chttpd:qs_value(Req, "q", config:get("cluster", "q", "8")),
-    P = chttpd:qs_value(Req, "placement", config:get("cluster", "placement")),
-    EngineOpt = parse_engine_opt(Req),
-    DbProps = parse_partitioned_opt(Req),
-    Options = [
-        {n, N},
-        {q, Q},
-        {placement, P},
-        {props, DbProps}
-    ] ++ EngineOpt,
     DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-    case fabric:create_db(DbName, Options) of
-    ok ->
-        send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
-    {error, file_exists} ->
-        chttpd:send_error(Req, file_exists);
-    Error ->
-        throw(Error)
+    case fabric2_db:create(DbName, []) of
+        {ok, _} ->
+            send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
+        {error, file_exists} ->
+            chttpd:send_error(Req, file_exists);
+        Error ->
+            throw(Error)
     end.
 
 delete_db_req(#httpd{}=Req, DbName) ->
     couch_httpd:verify_is_server_admin(Req),
-    case fabric:delete_db(DbName, []) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, {[{ok, true}]});
-    Error ->
-        throw(Error)
+    case fabric2_db:delete(DbName, []) of
+        ok ->
+            send_json(Req, 200, {[{ok, true}]});
+        Error ->
+            throw(Error)
     end.
 
 do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
-    Shard = hd(mem3:shards(DbName)),
-    Props = couch_util:get_value(props, Shard#shard.opts, []),
-    Opts = case Ctx of
-        undefined ->
-            [{props, Props}];
-        #user_ctx{} ->
-            [{user_ctx, Ctx}, {props, Props}]
-    end,
-    {ok, Db} = couch_db:clustered_db(DbName, Opts),
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
     Fun(Req, Db).
 
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
+db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
     % measure the time required to generate the etag, see if it's worth it
     T0 = os:timestamp(),
-    {ok, DbInfo} = fabric:get_db_info(DbName),
+    {ok, DbInfo} = fabric2_db:get_db_info(Db),
     DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
     couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
     send_json(Req, {DbInfo});
@@ -422,22 +394,22 @@ db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
 db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx,Ctx}],
 
-    Doc = couch_db:doc_from_json_obj_validate(Db, chttpd:json_body(Req)),
-    Doc2 = case Doc#doc.id of
+    Doc0 = chttpd:json_body(Req),
+    Doc1 = couch_doc:from_json_obj_validate(Doc0, fabric2_db:name(Db)),
+    Doc2 = case Doc1#doc.id of
         <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
+            Doc1#doc{id=couch_uuids:new(), revs={0, []}};
         _ ->
-            Doc
+            Doc1
     end,
     DocId = Doc2#doc.id,
     case chttpd:qs_value(Req, "batch") of
     "ok" ->
         % async_batching
         spawn(fun() ->
-                case catch(fabric:update_doc(Db, Doc2, Options)) of
+                case catch(fabric2_db:update_doc(Db, Doc2, Options)) of
                 {ok, _} ->
                     chttpd_stats:incr_writes(),
                     ok;
@@ -457,7 +429,7 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
         % normal
         DocUrl = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
             $/, couch_util:url_encode(DocId)]),
-        case fabric:update_doc(Db, Doc2, Options) of
+        case fabric2_db:update_doc(Db, Doc2, Options) of
         {ok, NewRev} ->
             chttpd_stats:incr_writes(),
             HttpCode = 201;
@@ -475,13 +447,10 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
 db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
     send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
 
-db_req(#httpd{method='POST', path_parts=[DbName, <<"_ensure_full_commit">>],
-        user_ctx=Ctx}=Req, _Db) ->
+db_req(#httpd{method='POST', path_parts=[_DbName, <<"_ensure_full_commit">>],
+        user_ctx=Ctx}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
-    %% use fabric call to trigger a database_does_not_exist exception
-    %% for missing databases that'd return error 404 from chttpd
-    %% get_security used to prefer shards on the same node over other nodes
-    fabric:get_security(DbName, [{user_ctx, Ctx}]),
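+    %% The pattern match asserts that Db is an opened fabric2 db handle;
+    %% it fails (and the request errors) for anything else.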
+    #{db_prefix := <<_/binary>>} = Db,
     send_json(Req, 201, {[
         {ok, true},
         {instance_start_time, <<"0">>}
@@ -503,22 +472,17 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
         DocsArray0
     end,
     couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
-    W = case couch_util:get_value(<<"w">>, JsonProps) of
-    Value when is_integer(Value) ->
-        integer_to_list(Value);
-    _ ->
-        chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
-    end,
     case chttpd:header_value(Req, "X-Couch-Full-Commit") of
     "true" ->
-        Options = [full_commit, {user_ctx,Ctx}, {w,W}];
+        Options = [full_commit, {user_ctx,Ctx}];
     "false" ->
-        Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
+        Options = [delay_commit, {user_ctx,Ctx}];
     _ ->
-        Options = [{user_ctx,Ctx}, {w,W}]
+        Options = [{user_ctx,Ctx}]
     end,
+    DbName = fabric2_db:name(Db),
     Docs = lists:map(fun(JsonObj) ->
-        Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+        Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
         validate_attachment_names(Doc),
         case Doc#doc.id of
             <<>> -> Doc#doc{id = couch_uuids:new()};
@@ -532,7 +496,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
         true  -> [all_or_nothing|Options];
         _ -> Options
         end,
-        case fabric:update_docs(Db, Docs, Options2) of
+        case fabric2_db:update_docs(Db, Docs, Options2) of
         {ok, Results} ->
             % output the results
             chttpd_stats:incr_writes(length(Results)),
@@ -551,7 +515,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
             send_json(Req, 417, ErrorsJson)
         end;
     false ->
-        case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
+        case fabric2_db:update_docs(Db, Docs, [replicated_changes|Options]) of
         {ok, Errors} ->
             chttpd_stats:incr_writes(length(Docs)),
             ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
@@ -647,8 +611,7 @@ db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
     couch_stats:increment_counter([couchdb, httpd, purge_requests]),
     chttpd:validate_ctype(Req, "application/json"),
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}],
+    Options = [{user_ctx, Req#httpd.user_ctx}],
     {IdsRevs} = chttpd:json_body_obj(Req),
     IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
     MaxIds = config:get_integer("purge", "max_document_id_number", 100),
@@ -723,7 +686,7 @@ db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
     {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+    case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
         {error, Reason} ->
             chttpd:send_error(Req, Reason);
         {ok, Results} ->
@@ -740,7 +703,7 @@ db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
     {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+    case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
         {error, Reason} ->
             chttpd:send_error(Req, Reason);
         {ok, Results} ->
@@ -856,22 +819,22 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
         200, [], FirstChunk),
     VAcc1 = VAcc0#vacc{resp=Resp0},
     VAcc2 = lists:foldl(fun(Args, Acc0) ->
-        {ok, Acc1} = fabric:all_docs(Db, Options,
+        {ok, Acc1} = fabric2_db:fold_docs(Db, Options,
             fun view_cb/2, Acc0, Args),
         Acc1
     end, VAcc1, ArgQueries),
     {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
     chttpd:end_delayed_json_response(Resp1).
 
-all_docs_view(Req, Db, Keys, OP) ->
-    Args0 = couch_mrview_http:parse_params(Req, Keys),
-    Args1 = Args0#mrargs{view_type=map},
-    Args2 = fabric_util:validate_all_docs_args(Db, Args1),
-    Args3 = set_namespace(OP, Args2),
+all_docs_view(Req, Db, _Keys, _OP) ->
+    % Args0 = couch_mrview_http:parse_params(Req, Keys),
+    % Args1 = Args0#mrargs{view_type=map},
+    % Args2 = fabric_util:validate_all_docs_args(Db, Args1),
+    % Args3 = set_namespace(OP, Args2),
     Options = [{user_ctx, Req#httpd.user_ctx}],
     Max = chttpd:chunked_response_buffer_size(),
     VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    {ok, Resp} = fabric:all_docs(Db, Options, fun view_cb/2, VAcc, Args3),
+    {ok, Resp} = fabric2_db:fold_docs(Db, fun view_cb/2, VAcc, Options),
     {ok, Resp#vacc.resp}.
 
 view_cb({row, Row} = Msg, Acc) ->
@@ -915,7 +878,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
         Doc = couch_doc_open(Db, DocId, Rev, Options2),
         send_doc(Req, Doc, Options2);
     _ ->
-        case fabric:open_revs(Db, DocId, Revs, Options) of
+        case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
             {ok, []} when Revs == all ->
                 chttpd:send_error(Req, {not_found, missing});
             {ok, Results} ->
@@ -956,8 +919,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     couch_db:validate_docid(Db, DocId),
     chttpd:validate_ctype(Req, "multipart/form-data"),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx,Ctx}],
 
     Form = couch_httpd:parse_form(Req),
     case proplists:is_defined("_doc", Form) of
@@ -966,7 +928,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
         Doc = couch_doc_from_req(Req, Db, DocId, Json);
     false ->
         Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
-        Doc = case fabric:open_revs(Db, DocId, [Rev], []) of
+        Doc = case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
             {ok, [{ok, Doc0}]} ->
                 chttpd_stats:incr_reads(),
                 Doc0;
@@ -995,7 +957,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     NewDoc = Doc#doc{
         atts = UpdatedAtts ++ OldAtts2
     },
-    case fabric:update_doc(Db, NewDoc, Options) of
+    case fabric2_db:update_doc(Db, NewDoc, Options) of
     {ok, NewRev} ->
         chttpd_stats:incr_writes(),
         HttpCode = 201;
@@ -1013,11 +975,10 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
     #doc_query_args{
         update_type = UpdateType
     } = parse_doc_query(Req),
-    DbName = couch_db:name(Db),
-    couch_db:validate_docid(Db, DocId),
+    DbName = fabric2_db:name(Db),
+    couch_doc:validate_docid(DocId),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx, Ctx}],
 
     Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
         $/, couch_util:url_encode(DocId)]),
@@ -1025,7 +986,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
     case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
     ("multipart/related;" ++ _) = ContentType ->
         couch_httpd:check_max_request_length(Req),
-        couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
+        couch_httpd_multipart:num_mp_writers(1),
         {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
                 fun() -> receive_request_data(Req) end),
         Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
@@ -1045,7 +1006,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
             Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
 
             spawn(fun() ->
-                    case catch(fabric:update_doc(Db, Doc, Options)) of
+                    case catch(fabric2_db:update_doc(Db, Doc, Options)) of
                     {ok, _} ->
                         chttpd_stats:incr_writes(),
                         ok;
@@ -1079,7 +1040,7 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
     % open old doc
     Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
     % save new doc
-    case fabric:update_doc(Db,
+    case fabric2_db:update_doc(Db,
         Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
     {ok, NewTargetRev} ->
         chttpd_stats:incr_writes(),
@@ -1180,7 +1141,7 @@ send_docs_multipart(Req, Results, Options1) ->
     CType = {"Content-Type",
         "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
     {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+    chttpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
     lists:foreach(
         fun({ok, #doc{atts=Atts}=Doc}) ->
             Refs = monitor_attachments(Doc#doc.atts),
@@ -1188,25 +1149,25 @@ send_docs_multipart(Req, Results, Options1) ->
             JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
             {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
                     InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+            chttpd:send_chunk(Resp, <<"\r\nContent-Type: ",
                     ContentType/binary, "\r\n\r\n">>),
             couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+                    fun(Data) -> chttpd:send_chunk(Resp, Data)
                     end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
+             chttpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
             after
                 demonitor_refs(Refs)
             end;
         ({{not_found, missing}, RevId}) ->
              RevStr = couch_doc:rev_to_str(RevId),
              Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
-             couch_httpd:send_chunk(Resp,
+             chttpd:send_chunk(Resp,
                 [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
                 Json,
                 <<"\r\n--", OuterBoundary/binary>>])
          end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
+    chttpd:send_chunk(Resp, <<"--">>),
+    chttpd:last_chunk(Resp).
 
 bulk_get_multipart_headers({0, []}, Id, Boundary) ->
     [
@@ -1276,15 +1237,14 @@ send_updated_doc(Req, Db, DocId, Doc, Headers) ->
 
 send_updated_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
         Headers, UpdateType) ->
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
     Options =
         case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
         "true" ->
-            [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+            [full_commit, UpdateType, {user_ctx,Ctx}];
         "false" ->
-            [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+            [delay_commit, UpdateType, {user_ctx,Ctx}];
         _ ->
-            [UpdateType, {user_ctx,Ctx}, {w,W}]
+            [UpdateType, {user_ctx,Ctx}]
         end,
     {Status, {etag, Etag}, Body} = update_doc(Db, DocId,
         #doc{deleted=Deleted}=Doc, Options),
@@ -1303,31 +1263,7 @@ http_code_from_status(Status) ->
     end.
 
 update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        try fabric:update_doc(Db, Doc, Options) of
-            Resp ->
-                exit({exit_ok, Resp})
-        catch
-            throw:Reason ->
-                exit({exit_throw, Reason});
-            error:Reason ->
-                exit({exit_error, Reason});
-            exit:Reason ->
-                exit({exit_exit, Reason})
-        end
-    end),
-    Result = receive
-        {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
-            Ret;
-        {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
-            throw(Reason);
-        {'DOWN', Ref, _, _, {exit_error, Reason}} ->
-            erlang:error(Reason);
-        {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
-            erlang:exit(Reason)
-    end,
-
-    case Result of
+    case fabric2_db:update_doc(Db, Doc, Options) of
     {ok, NewRev} ->
         Accepted = false;
     {accepted, NewRev} ->
@@ -1374,7 +1310,7 @@ couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs} = Doc) ->
     end,
     Doc#doc{id=DocId, revs=Revs2};
 couch_doc_from_req(Req, Db, DocId, Json) ->
-    Doc = couch_db:doc_from_json_obj_validate(Db, Json),
+    Doc = couch_doc:from_json_obj_validate(Json, fabric2_db:name(Db)),
     couch_doc_from_req(Req, Db, DocId, Doc).
 
 
@@ -1382,11 +1318,10 @@ couch_doc_from_req(Req, Db, DocId, Json) ->
 % couch_doc_open(Db, DocId) ->
 %   couch_doc_open(Db, DocId, nil, []).
 
-couch_doc_open(Db, DocId, Rev, Options0) ->
-    Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0],
+couch_doc_open(Db, DocId, Rev, Options) ->
     case Rev of
     nil -> % open most recent rev
-        case fabric:open_doc(Db, DocId, Options) of
+        case fabric2_db:open_doc(Db, DocId, Options) of
         {ok, Doc} ->
             chttpd_stats:incr_reads(),
             Doc;
@@ -1394,7 +1329,7 @@ couch_doc_open(Db, DocId, Rev, Options0) ->
              throw(Error)
          end;
     _ -> % open a specific rev (deletions come back as stubs)
-        case fabric:open_revs(Db, DocId, [Rev], Options) of
+        case fabric2_db:open_doc_revs(Db, DocId, [Rev], Options) of
         {ok, [{ok, Doc}]} ->
             chttpd_stats:incr_reads(),
             Doc;
@@ -1515,8 +1450,12 @@ db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNa
     end;
 
 
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
+db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
         when (Method == 'PUT') or (Method == 'DELETE') ->
+    #httpd{
+        user_ctx = Ctx,
+        mochi_req = MochiReq
+    } = Req,
     FileName = validate_attachment_name(
                     mochiweb_util:join(
                         lists:map(fun binary_to_list/1,
@@ -1526,16 +1465,45 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
         'DELETE' ->
             [];
         _ ->
-            MimeType = case couch_httpd:header_value(Req,"Content-Type") of
+            MimeType = case chttpd:header_value(Req,"Content-Type") of
                 % We could throw an error here or guess by the FileName.
                 % Currently, just giving it a default.
                 undefined -> <<"application/octet-stream">>;
                 CType -> list_to_binary(CType)
             end,
-            Data = fabric:att_receiver(Req, chttpd:body_length(Req)),
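+            % Inline replacement for fabric:att_receiver/2: depending on
+            % the transfer encoding, the attachment data is a binary, a
+            % chunked-reader fun, or a raw reader fun.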
+            Data = case chttpd:body_length(Req) of
+                undefined ->
+                    <<"">>;
+                {unknown_transfer_encoding, Unknown} ->
+                    exit({unknown_transfer_encoding, Unknown});
+                chunked ->
+                    fun(MaxChunkSize, ChunkFun, InitState) ->
+                        chttpd:recv_chunked(
+                            Req, MaxChunkSize, ChunkFun, InitState
+                        )
+                    end;
+                0 ->
+                    <<"">>;
+                Length when is_integer(Length) ->
+                    Expect = case chttpd:header_value(Req, "expect") of
+                        undefined ->
+                            undefined;
+                        Value when is_list(Value) ->
+                            string:to_lower(Value)
+                    end,
+                    case Expect of
+                        "100-continue" ->
+                            MochiReq:start_raw_response({100, gb_trees:empty()});
+                        _Else ->
+                            ok
+                    end,
+                    fun() -> chttpd:recv(Req, 0) end;
+                Length ->
+                    exit({length_not_integer, Length})
+            end,
             ContentLen = case couch_httpd:header_value(Req,"Content-Length") of
                 undefined -> undefined;
-                Length -> list_to_integer(Length)
+                CL -> list_to_integer(CL)
             end,
             ContentEnc = string:to_lower(string:strip(
                 couch_httpd:header_value(Req, "Content-Encoding", "identity")
@@ -1570,7 +1538,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
             couch_db:validate_docid(Db, DocId),
             #doc{id=DocId};
         Rev ->
-            case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
+            case fabric2_db:open_doc_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
             {ok, [{ok, Doc0}]} ->
                 chttpd_stats:incr_reads(),
                 Doc0;
@@ -1585,8 +1553,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
     DocEdited = Doc#doc{
         atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
     },
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}, {w,W}]) of
+    case fabric2_db:update_doc(Db, DocEdited, [{user_ctx,Ctx}]) of
     {ok, UpdatedRev} ->
         chttpd_stats:incr_writes(),
         HttpCode = 201;
@@ -1595,7 +1562,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
         HttpCode = 202
     end,
     erlang:put(mochiweb_request_recv, true),
-    DbName = couch_db:name(Db),
+    DbName = fabric2_db:name(Db),
 
     {Status, Headers} = case Method of
         'DELETE' ->
@@ -1682,46 +1649,6 @@ get_md5_header(Req) ->
 parse_doc_query(Req) ->
     lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)).
 
-parse_engine_opt(Req) ->
-    case chttpd:qs_value(Req, "engine") of
-        undefined ->
-            [];
-        Extension ->
-            Available = couch_server:get_engine_extensions(),
-            case lists:member(Extension, Available) of
-                true ->
-                    [{engine, iolist_to_binary(Extension)}];
-                false ->
-                    throw({bad_request, invalid_engine_extension})
-            end
-    end.
-
-
-parse_partitioned_opt(Req) ->
-    case chttpd:qs_value(Req, "partitioned") of
-        undefined ->
-            [];
-        "false" ->
-            [];
-        "true" ->
-            ok = validate_partitioned_db_enabled(Req),
-            [
-                {partitioned, true},
-                {hash, [couch_partition, hash, []]}
-            ];
-        _ ->
-            throw({bad_request, <<"Invalid `partitioned` parameter">>})
-    end.
-
-
-validate_partitioned_db_enabled(Req) ->
-    case couch_flags:is_enabled(partitioned, Req) of
-        true -> 
-            ok;
-        false ->
-            throw({bad_request, <<"Partitioned feature is not enabled.">>})
-    end.
-
 
 parse_doc_query({Key, Value}, Args) ->
     case {Key, Value} of
@@ -1791,7 +1718,7 @@ parse_changes_query(Req) ->
         {"descending", "true"} ->
             Args#changes_args{dir=rev};
         {"since", _} ->
-            Args#changes_args{since=Value};
+            Args#changes_args{since=parse_since_seq(Value)};
         {"last-event-id", _} ->
             Args#changes_args{since=Value};
         {"limit", _} ->
@@ -1845,6 +1772,27 @@ parse_changes_query(Req) ->
             ChangesArgs
     end.
 
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 30 ->
+    throw({bad_request, url_encoded_since_seq});
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 2 ->
+    % We have implicitly allowed the since seq to either be
+    % JSON encoded or a "raw" string. Here we just remove the
+    % surrounding quotes if they exist and are paired.
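+    % e.g. <<"\"42-abcdef\"">> becomes <<"42-abcdef">>, while an
+    % unquoted seq passes through unchanged.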
+    SeqSize = size(Seq) - 2,
+    case Seq of
+        <<"\"", S:SeqSize/binary, "\"">> -> S;
+        S -> S
+    end;
+
+parse_since_seq(Seq) when is_binary(Seq) ->
+    Seq;
+
+parse_since_seq(Seq) when is_list(Seq) ->
+    parse_since_seq(iolist_to_binary(Seq)).
+
+
 extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
     extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
 extract_header_rev(Req, ExplicitRev) ->
@@ -1885,6 +1833,8 @@ monitor_attachments(Atts) when is_list(Atts) ->
         case couch_att:fetch(data, Att) of
             {Fd, _} ->
                 [monitor(process, Fd) | Monitors];
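+            % {loc, _, _, _} attachment data is a location reference
+            % with no fd process to monitor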
+            {loc, _, _, _} ->
+                Monitors;
             stub ->
                 Monitors;
             Else ->
@@ -1982,7 +1932,7 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
             bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
     end;
 bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
-    case fabric:open_revs(Db, DocId, Revs, Options) of
+    case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
         {ok, []} ->
             RevStr = couch_util:get_value(<<"rev">>, Props),
             Error = {RevStr, <<"not_found">>, <<"missing">>},
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
index fa35c6b..3e59ffe 100644
--- a/src/chttpd/src/chttpd_external.erl
+++ b/src/chttpd/src/chttpd_external.erl
@@ -74,7 +74,7 @@ json_req_obj_fields() ->
      <<"peer">>, <<"form">>, <<"cookie">>, <<"userCtx">>, <<"secObj">>].
 
 json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
-    {ok, Info} = get_db_info(Db),
+    {ok, Info} = fabric2_db:get_db_info(Db),
     {Info};
 json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
     couch_uuids:new();
@@ -117,27 +117,18 @@ json_req_obj_field(<<"form">>, #httpd{mochi_req=Req, method=Method}=HttpReq, Db,
 json_req_obj_field(<<"cookie">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
     to_json_terms(Req:parse_cookie());
 json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
-    couch_util:json_user_ctx(Db);
-json_req_obj_field(<<"secObj">>, #httpd{user_ctx=UserCtx}, Db, _DocId) ->
-    get_db_security(Db, UserCtx).
-
-
-get_db_info(Db) ->
-    case couch_db:is_clustered(Db) of
-        true ->
-            fabric:get_db_info(Db);
-        false ->
-            couch_db:get_db_info(Db)
-    end.
-
-
-get_db_security(Db, #user_ctx{}) ->
-    case couch_db:is_clustered(Db) of
-        true ->
-            fabric:get_security(Db);
-        false ->
-            couch_db:get_security(Db)
-    end.
+    json_user_ctx(Db);
+json_req_obj_field(<<"secObj">>, #httpd{user_ctx = #user_ctx{}}, Db, _DocId) ->
+    fabric2_db:get_security(Db).
+
+
+json_user_ctx(Db) ->
+    Ctx = fabric2_db:get_user_ctx(Db),
+    {[
+        {<<"db">>, fabric2_db:name(Db)},
+        {<<"name">>, Ctx#user_ctx.name},
+        {<<"roles">>, Ctx#user_ctx.roles}
+    ]}.
 
 
 to_json_terms(Data) ->
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 819d782..b244e84 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -108,43 +108,39 @@ maybe_add_csp_headers(Headers, _) ->
     Headers.
 
 handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    Args = couch_mrview_http:parse_params(Req, undefined),
-    ShardDbName = config:get("mem3", "shards_db", "_dbs"),
-    %% shard_db is not sharded but mem3:shards treats it as an edge case
-    %% so it can be pushed thru fabric
-    {ok, Info} = fabric:get_db_info(ShardDbName),
-    Etag = couch_httpd:make_etag({Info}),
-    Options = [{user_ctx, Req#httpd.user_ctx}],
+    % TODO: Support args and options properly, transform
+    % this back into a fold call similar to the old
+    % version.
+    %% Args = couch_mrview_http:parse_params(Req, undefined),
+    % Eventually the Etag for this request will be derived
+    % from the \xFFmetadataVersion key in fdb
+    Etag = <<"foo">>,
+    %% Options = [{user_ctx, Req#httpd.user_ctx}],
     {ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag",Etag}]),
-        VAcc = #vacc{req=Req,resp=Resp},
-        fabric:all_docs(ShardDbName, Options, fun all_dbs_callback/2, VAcc, Args)
-    end),
-    case is_record(Resp, vacc) of
-        true -> {ok, Resp#vacc.resp};
-        _ -> {ok, Resp}
-    end;
+        AllDbs = fabric2_db:list_dbs(),
+        chttpd:send_json(Req, AllDbs)
+    end);
 handle_all_dbs_req(Req) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
-all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
-    {ok, Acc#vacc{resp=Resp1}};
-all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
-    case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
-        {ok, Acc};
-    DbName ->
-        {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
-        {ok, Acc#vacc{prepend=",", resp=Resp1}}
-    end;
-all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
-    {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
-    {ok, Acc#vacc{resp=Resp2}};
-all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
-    {ok, Acc#vacc{resp=Resp1}}.
+%% all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
+%%     {ok, Acc#vacc{resp=Resp1}};
+%% all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
+%%     Prepend = couch_mrview_http:prepend_val(Acc),
+%%     case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
+%%         {ok, Acc};
+%%     DbName ->
+%%         {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
+%%         {ok, Acc#vacc{prepend=",", resp=Resp1}}
+%%     end;
+%% all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
+%%     {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+%%     {ok, Acc#vacc{resp=Resp2}};
+%% all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
+%%     {ok, Acc#vacc{resp=Resp1}}.
 
 handle_dbs_info_req(#httpd{method='POST'}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index c3bf119..2eb6dc3 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -123,15 +123,14 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
     JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
     JsonDoc = couch_query_servers:json_doc(Doc),
     Cmd = [<<"updates">>, UpdateName],
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
     UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
     JsonResp = case UpdateResp of
         [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
             case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
             "true" ->
-                Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
+                Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
             _ ->
-                Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
+                Options = [{user_ctx, Req#httpd.user_ctx}]
             end,
             NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
             couch_doc:validate_docid(NewDoc#doc.id),
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index ae1d8d6..cf6f27f 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -173,8 +173,18 @@ join([H|[]], _, Acc) ->
 join([H|T], Sep, Acc) ->
     join(T, Sep, [Sep, H | Acc]).
 
+validate(#{} = Db, DDoc) ->
+    DbName = fabric2_db:name(Db),
+    IsPartitioned = fabric2_db:is_partitioned(Db),
+    validate(DbName, IsPartitioned, DDoc);
 
-validate(Db,  DDoc) ->
+validate(Db, DDoc) ->
+    DbName = couch_db:name(Db),
+    IsPartitioned = couch_db:is_partitioned(Db),
+    validate(DbName, IsPartitioned, DDoc).
+
+
+validate(DbName, IsDbPartitioned,  DDoc) ->
     ok = validate_ddoc_fields(DDoc#doc.body),
     GetName = fun
         (#mrview{map_names = [Name | _]}) -> Name;
@@ -203,9 +213,9 @@ validate(Db,  DDoc) ->
         language = Lang,
         views = Views,
         partitioned = Partitioned
-    }} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
+    }} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
 
-    case {couch_db:is_partitioned(Db), Partitioned} of
+    case {IsDbPartitioned, Partitioned} of
         {false, true} ->
             throw({invalid_design_doc,
                 <<"partitioned option cannot be true in a "
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index 3491ef5..c28c78c 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -100,7 +100,7 @@ defmodule BasicsTest do
     db_name = context[:db_name]
     {:ok, _} = create_doc(db_name, sample_doc_foo())
     resp = Couch.get("/#{db_name}/foo", query: %{:local_seq => true})
-    assert resp.body["_local_seq"] == 1, "Local seq value == 1"
+    assert is_binary(resp.body["_local_seq"]), "Local seq value is a binary"
   end
 
   @tag :with_db


[couchdb] 15/34: Implement `_users` db authentication

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 393168509281b5b0c558833c8e5b194053fa002c
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 7 12:46:06 2019 -0500

    Implement `_users` db authentication
    
    This changes `chttpd_auth_cache` to use FoundationDB to back the
    `_users` database, including the `before_doc_update` and
    `after_doc_read` features.
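
    For illustration, a minimal sketch of the resulting flow. The function
    names come from the hunks below; the exact call sites shown here are
    an assumption, not the patch itself:

        % Opening the _users db installs the fabric2_users_db callbacks.
        {ok, Db} = fabric2_db:open(<<"_users">>, [?ADMIN_CTX]),
        % before_doc_update/3 runs on every write; after_doc_read/2 runs
        % when a doc is read back out of FoundationDB.
        Doc1 = fabric2_users_db:before_doc_update(Doc0, Db, interactive_edit),
        Doc2 = fabric2_users_db:after_doc_read(Doc1, Db),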
---
 src/chttpd/src/chttpd_auth_cache.erl |  40 ++++++----
 src/chttpd/src/chttpd_db.erl         |   5 +-
 src/fabric/src/fabric2_db.erl        |  34 ++++++---
 src/fabric/src/fabric2_fdb.erl       |   8 +-
 src/fabric/src/fabric2_users_db.erl  | 144 +++++++++++++++++++++++++++++++++++
 src/fabric/src/fabric2_util.erl      |   7 ++
 6 files changed, 212 insertions(+), 26 deletions(-)

diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index 638d8c7..d947fe6 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -52,7 +52,8 @@ get_user_creds(_Req, UserName) when is_binary(UserName) ->
 
 update_user_creds(_Req, UserDoc, _Ctx) ->
     {_, Ref} = spawn_monitor(fun() ->
-        case fabric:update_doc(dbname(), UserDoc, []) of
+        {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+        case fabric2_db:update_doc(Db, UserDoc) of
             {ok, _} ->
                 exit(ok);
             Else ->
@@ -100,6 +101,14 @@ maybe_increment_auth_cache_miss(UserName) ->
 %% gen_server callbacks
 
 init([]) ->
+    try
+        fabric2_db:open(dbname(), [?ADMIN_CTX])
+    catch error:database_does_not_exist ->
+        case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
+            {ok, _} -> ok;
+            {error, file_exists} -> ok
+        end
+    end,
     self() ! {start_listener, 0},
     {ok, #state{}}.
 
@@ -139,7 +148,8 @@ spawn_changes(Since) ->
     Pid.
 
 listen_for_changes(Since) ->
-    ensure_auth_ddoc_exists(dbname(), <<"_design/_auth">>),
+    {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+    ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
     CBFun = fun ?MODULE:changes_callback/2,
     Args = #changes_args{
         feed = "continuous",
@@ -147,7 +157,8 @@ listen_for_changes(Since) ->
         heartbeat = true,
         filter = {default, main_only}
     },
-    fabric:changes(dbname(), CBFun, Since, Args).
+    ChangesFun = chttpd_changes:handle_db_changes(Args, nil, Db),
+    ChangesFun({CBFun, Since}).
 
 changes_callback(waiting_for_updates, Acc) ->
     {ok, Acc};
@@ -156,7 +167,7 @@ changes_callback(start, Since) ->
 changes_callback({stop, EndSeq, _Pending}, _) ->
     exit({seq, EndSeq});
 changes_callback({change, {Change}}, _) ->
-    case couch_util:get_value(id, Change) of
+    case couch_util:get_value(<<"id">>, Change) of
         <<"_design/", _/binary>> ->
             ok;
         DocId ->
@@ -171,7 +182,8 @@ changes_callback({error, _}, EndSeq) ->
     exit({seq, EndSeq}).
 
 load_user_from_db(UserName) ->
-    try fabric:open_doc(dbname(), docid(UserName), [?ADMIN_CTX, ejson_body, conflicts]) of
+    {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
+    try fabric2_db:open_doc(Db, docid(UserName), [conflicts]) of
     {ok, Doc} ->
         {Props} = couch_doc:to_json_obj(Doc, []),
         Props;
@@ -183,7 +195,8 @@ load_user_from_db(UserName) ->
     end.
 
 dbname() ->
-    config:get("chttpd_auth", "authentication_db", "_users").
+    DbNameStr = config:get("chttpd_auth", "authentication_db", "_users"),
+    iolist_to_binary(DbNameStr).
 
 docid(UserName) ->
     <<"org.couchdb.user:", UserName/binary>>.
@@ -191,11 +204,11 @@ docid(UserName) ->
 username(<<"org.couchdb.user:", UserName/binary>>) ->
     UserName.
 
-ensure_auth_ddoc_exists(DbName, DDocId) ->
-    case fabric:open_doc(DbName, DDocId, [?ADMIN_CTX, ejson_body]) of
+ensure_auth_ddoc_exists(Db, DDocId) ->
+    case fabric2_db:open_doc(Db, DDocId) of
     {not_found, _Reason} ->
         {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
-        update_doc_ignoring_conflict(DbName, AuthDesign, [?ADMIN_CTX]);
+        update_doc_ignoring_conflict(Db, AuthDesign);
     {ok, Doc} ->
         {Props} = couch_doc:to_json_obj(Doc, []),
         case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
@@ -205,17 +218,18 @@ ensure_auth_ddoc_exists(DbName, DDocId) ->
                 Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
                     {<<"validate_doc_update">>,
                     ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
-                update_doc_ignoring_conflict(DbName, couch_doc:from_json_obj({Props1}), [?ADMIN_CTX])
+                NewDoc = couch_doc:from_json_obj({Props1}),
+                update_doc_ignoring_conflict(Db, NewDoc)
         end;
     {error, Reason} ->
-        couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [DbName, DDocId, Reason]),
+        couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [dbname(), DDocId, Reason]),
         ok
     end,
     ok.
 
-update_doc_ignoring_conflict(DbName, Doc, Options) ->
+update_doc_ignoring_conflict(DbName, Doc) ->
     try
-        fabric:update_doc(DbName, Doc, Options)
+        fabric2_db:update_doc(DbName, Doc)
     catch
         throw:conflict ->
             ok
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 40c1a1e..4337041 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -724,10 +724,9 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
 db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
     send_method_not_allowed(Req, "POST");
 
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
-        Db) ->
+db_req(#httpd{method = 'PUT',path_parts = [_, <<"_security">>]} = Req, Db) ->
     SecObj = chttpd:json_body(Req),
-    case fabric:set_security(Db, SecObj, [{user_ctx, Ctx}]) of
+    case fabric2_db:set_security(Db, SecObj) of
         ok ->
             send_json(Req, {[{<<"ok">>, true}]});
         Else ->
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 48e50f1..80028a6 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -149,9 +149,10 @@ create(DbName, Options) ->
     % We cache outside of the transaction so that we're sure
     % that the transaction was committed.
     case Result of
-        #{} = Db ->
-            ok = fabric2_server:store(Db),
-            {ok, Db#{tx := undefined}};
+        #{} = Db0 ->
+            Db1 = maybe_add_sys_db_callbacks(Db0),
+            ok = fabric2_server:store(Db1),
+            {ok, Db1#{tx := undefined}};
         Error ->
             Error
     end.
@@ -167,9 +168,10 @@ open(DbName, Options) ->
             end),
             % Cache outside the transaction retry loop
             case Result of
-                #{} = Db ->
-                    ok = fabric2_server:store(Db),
-                    {ok, Db#{tx := undefined}};
+                #{} = Db0 ->
+                    Db1 = maybe_add_sys_db_callbacks(Db0),
+                    ok = fabric2_server:store(Db1),
+                    {ok, Db1#{tx := undefined}};
                 Error ->
                     Error
             end
@@ -552,18 +554,19 @@ update_docs(Db, Docs) ->
     update_docs(Db, Docs, []).
 
 
-update_docs(Db, Docs, Options) ->
+update_docs(Db, Docs0, Options) ->
+    Docs1 = apply_before_doc_update(Db, Docs0, Options),
     Resps0 = case lists:member(replicated_changes, Options) of
         false ->
             fabric2_fdb:transactional(Db, fun(TxDb) ->
-                update_docs_interactive(TxDb, Docs, Options)
+                update_docs_interactive(TxDb, Docs1, Options)
             end);
         true ->
             lists:map(fun(Doc) ->
                 fabric2_fdb:transactional(Db, fun(TxDb) ->
                     update_doc_int(TxDb, Doc, Options)
                 end)
-            end, Docs)
+            end, Docs1)
     end,
     % Convert errors
     Resps1 = lists:map(fun(Resp) ->
@@ -882,6 +885,19 @@ find_possible_ancestors(RevInfos, MissingRevs) ->
     end, RevInfos).
 
 
+apply_before_doc_update(Db, Docs, Options) ->
+    #{before_doc_update := BDU} = Db,
+    UpdateType = case lists:member(replicated_changes, Options) of
+        true -> replicated_changes;
+        false -> interactive_edit
+    end,
+    if BDU == undefined -> Docs; true ->
+        lists:map(fun(Doc) ->
+            BDU(Doc, Db, UpdateType)
+        end, Docs)
+    end.
+
+
 update_doc_int(#{} = Db, #doc{} = Doc, Options) ->
     IsLocal = case Doc#doc.id of
         <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index d179387..4b01826 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -944,7 +944,13 @@ fdb_to_doc(Db, DocId, Pos, Path, Bin) when is_binary(Bin) ->
         body = Body,
         atts = Atts,
         deleted = Deleted
-    };
+    },
+
+    case Db of
+        #{after_doc_read := undefined} -> Doc0;
+        #{after_doc_read := ADR} -> ADR(Doc0, Db)
+    end;
+
 fdb_to_doc(_Db, _DocId, _Pos, _Path, not_found) ->
     {not_found, missing}.
 
diff --git a/src/fabric/src/fabric2_users_db.erl b/src/fabric/src/fabric2_users_db.erl
new file mode 100644
index 0000000..9a8a462
--- /dev/null
+++ b/src/fabric/src/fabric2_users_db.erl
@@ -0,0 +1,144 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_users_db).
+
+-export([
+    before_doc_update/3,
+    after_doc_read/2,
+    strip_non_public_fields/1
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-define(NAME, <<"name">>).
+-define(PASSWORD, <<"password">>).
+-define(DERIVED_KEY, <<"derived_key">>).
+-define(PASSWORD_SCHEME, <<"password_scheme">>).
+-define(SIMPLE, <<"simple">>).
+-define(PASSWORD_SHA, <<"password_sha">>).
+-define(PBKDF2, <<"pbkdf2">>).
+-define(ITERATIONS, <<"iterations">>).
+-define(SALT, <<"salt">>).
+-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
+
+-define(
+    DDOCS_ADMIN_ONLY,
+    <<"Only administrators can view design docs in the users database.">>
+).
+
+% If the request's userCtx identifies an admin
+%   -> save_doc (see below)
+%
+% If the request's userCtx.name is null:
+%   -> save_doc
+%   // this is an anonymous user registering a new document
+%   // in case a user doc with the same id already exists, the anonymous
+%   // user will get a regular doc update conflict.
+% If the request's userCtx.name doesn't match the doc's name
+%   -> 404 // Not Found
+% Else
+%   -> save_doc
+before_doc_update(Doc, Db, _UpdateType) ->
+    #user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
+    DocName = get_doc_name(Doc),
+    case (catch fabric2_db:check_is_admin(Db)) of
+    ok ->
+        save_doc(Doc);
+    _ when Name =:= DocName orelse Name =:= null ->
+        save_doc(Doc);
+    _ ->
+        throw(not_found)
+    end.
+
+% If newDoc.password == null || newDoc.password == undefined:
+%   ->
+%   noop
+% Else -> // calculate password hash server side
+%    newDoc.password_sha = hash_pw(newDoc.password + salt)
+%    newDoc.salt = salt
+%    newDoc.password = null
+save_doc(#doc{body={Body}} = Doc) ->
+    %% Support both schemes to smooth migration from legacy scheme
+    Scheme = config:get("couch_httpd_auth", "password_scheme", "pbkdf2"),
+    case {fabric2_util:get_value(?PASSWORD, Body), Scheme} of
+    {null, _} -> % server admins don't have a user-db password entry
+        Doc;
+    {undefined, _} ->
+        Doc;
+    {ClearPassword, "simple"} -> % deprecated
+        Salt = couch_uuids:random(),
+        PasswordSha = couch_passwords:simple(ClearPassword, Salt),
+        Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
+        Body1 = ?replace(Body0, ?SALT, Salt),
+        Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
+        Body3 = proplists:delete(?PASSWORD, Body2),
+        Doc#doc{body={Body3}};
+    {ClearPassword, "pbkdf2"} ->
+        Iterations = list_to_integer(config:get("couch_httpd_auth", "iterations", "1000")),
+        Salt = couch_uuids:random(),
+        DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
+        Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
+        Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
+        Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
+        Body3 = ?replace(Body2, ?SALT, Salt),
+        Body4 = proplists:delete(?PASSWORD, Body3),
+        Doc#doc{body={Body4}};
+    {_ClearPassword, Scheme} ->
+        couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
+        throw({forbidden, "Server cannot hash passwords at this time."})
+    end.
+
+
+% If the doc is a design doc
+%   If the request's userCtx identifies an admin
+%     -> return doc
+%   Else
+%     -> 403 // Forbidden
+% If the request's userCtx identifies an admin
+%   -> return doc
+% If the request's userCtx.name doesn't match the doc's name
+%   -> 404 // Not Found
+% Else
+%   -> return doc
+after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
+    case (catch fabric2_db:check_is_admin(Db)) of
+        ok -> Doc;
+        _ -> throw({forbidden, ?DDOCS_ADMIN_ONLY})
+    end;
+after_doc_read(Doc, Db) ->
+    #user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
+    DocName = get_doc_name(Doc),
+    case (catch fabric2_db:check_is_admin(Db)) of
+        ok ->
+            Doc;
+        _ when Name =:= DocName ->
+            Doc;
+        _ ->
+            Doc1 = strip_non_public_fields(Doc),
+            case Doc1 of
+                #doc{body={[]}} -> throw(not_found);
+                _ -> Doc1
+            end
+    end.
+
+
+get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
+    Name;
+get_doc_name(_) ->
+    undefined.
+
+
+strip_non_public_fields(#doc{body={Props}}=Doc) ->
+    PublicFields = config:get("couch_httpd_auth", "public_fields", ""),
+    Public = re:split(PublicFields, "\\s*,\\s*", [{return, binary}]),
+    Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index 6e2df67..fb59d59 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -24,6 +24,8 @@
 
     validate_security_object/1,
 
+    dbname_ends_with/2,
+
     get_value/2,
     get_value/3,
     to_hex/1,
@@ -113,6 +115,11 @@ validate_json_list_of_strings(Member, Props) ->
     end.
 
 
+dbname_ends_with(#{} = Db, Suffix) when is_binary(Suffix) ->
+    DbName = fabric2_db:name(Db),
+    Suffix == filename:basename(DbName).
+
+
 get_value(Key, List) ->
     get_value(Key, List, undefined).
 


[couchdb] 28/34: Disable broken couch_att tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 8e574e987c967cb8aa319719d5f5d1f8f4a78fd7
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jul 12 13:33:35 2019 -0500

    Disable broken couch_att tests
---
 src/couch/src/couch_att.erl | 374 ++++++++++++++++++++++----------------------
 1 file changed, 187 insertions(+), 187 deletions(-)

diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index 90d3644..d3c8966 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -667,190 +667,190 @@ validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
     ok.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-% Eww...
--include("couch_bt_engine.hrl").
-
-%% Test utilities
-
-
-empty_att() -> new().
-
-
-upgraded_empty_att() ->
-    new([{headers, undefined}]).
-
-
-%% Test groups
-
-
-attachment_upgrade_test_() ->
-    {"Lazy record upgrade tests", [
-        {"Existing record fields don't upgrade",
-            {with, empty_att(), [fun test_non_upgrading_fields/1]}
-        },
-        {"New fields upgrade",
-            {with, empty_att(), [fun test_upgrading_fields/1]}
-        }
-    ]}.
-
-
-attachment_defaults_test_() ->
-    {"Attachment defaults tests", [
-        {"Records retain old default values", [
-            {with, empty_att(), [fun test_legacy_defaults/1]}
-        ]},
-        {"Upgraded records inherit defaults", [
-            {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
-        ]},
-        {"Undefined entries are elided on upgrade", [
-            {with, upgraded_empty_att(), [fun test_elided_entries/1]}
-        ]}
-    ]}.
-
-attachment_field_api_test_() ->
-    {"Basic attachment field api", [
-        fun test_construction/0,
-        fun test_store_and_fetch/0,
-        fun test_transform/0
-    ]}.
-
-
-attachment_disk_term_test_() ->
-    BaseAttachment = new([
-        {name, <<"empty">>},
-        {type, <<"application/octet-stream">>},
-        {att_len, 0},
-        {disk_len, 0},
-        {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
-        {revpos, 4},
-        {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
-        {encoding, identity}
-    ]),
-    BaseDiskTerm = {
-        <<"empty">>,
-        <<"application/octet-stream">>,
-        fake_sp,
-        0, 0, 4,
-        <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
-        identity
-    },
-    Headers = [{<<"X-Foo">>, <<"bar">>}],
-    ExtendedAttachment = store(headers, Headers, BaseAttachment),
-    ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
-    FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
-    {"Disk term tests", [
-        ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
-        ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
-        ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
-        ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
-    ]}.
-
-
-attachment_json_term_test_() ->
-    Props = [
-        {<<"content_type">>, <<"application/json">>},
-        {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
-        {<<"length">>, 14},
-        {<<"revpos">>, 1}
-    ],
-    PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
-    InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
-    Att = couch_att:new([
-        {name, <<"attachment.json">>},
-        {type, <<"application/json">>}
-    ]),
-    ResultStub = couch_att:new([
-        {name, <<"attachment.json">>},
-        {type, <<"application/json">>},
-        {att_len, 14},
-        {disk_len, 14},
-        {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
-        {revpos, 1},
-        {data, stub},
-        {encoding, identity}
-    ]),
-    ResultFollows = ResultStub#att{data = follows},
-    ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
-    {"JSON term tests", [
-        ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
-        ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
-        ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
-        ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
-        ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
-    ]}.
-
-
-attachment_stub_merge_test_() ->
-    %% Stub merging needs to demonstrate revpos matching, skipping, and missing
-    %% attachment errors.
-    {"Attachment stub merging tests", []}.
-
-
-%% Test generators
-
-
-test_non_upgrading_fields(Attachment) ->
-    Pairs = [
-        {name, "cat.gif"},
-        {type, "text/very-very-plain"},
-        {att_len, 1024},
-        {disk_len, 42},
-        {md5, <<"md5-hashhashhash">>},
-        {revpos, 4},
-        {data, stub},
-        {encoding, gzip}
-    ],
-    lists:foreach(
-        fun({Field, Value}) ->
-            ?assertMatch(#att{}, Attachment),
-            Updated = store(Field, Value, Attachment),
-            ?assertMatch(#att{}, Updated)
-        end,
-    Pairs).
-
-
-test_upgrading_fields(Attachment) ->
-    ?assertMatch(#att{}, Attachment),
-    UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
-    ?assertMatch(X when is_list(X), UpdatedHeaders),
-    UpdatedHeadersUndefined = store(headers, undefined, Attachment),
-    ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-
-
-test_legacy_defaults(Attachment) ->
-    ?assertEqual(<<>>, fetch(md5, Attachment)),
-    ?assertEqual(0, fetch(revpos, Attachment)),
-    ?assertEqual(identity, fetch(encoding, Attachment)).
-
-
-test_elided_entries(Attachment) ->
-    ?assertNot(lists:keymember(name, 1, Attachment)),
-    ?assertNot(lists:keymember(type, 1, Attachment)),
-    ?assertNot(lists:keymember(att_len, 1, Attachment)),
-    ?assertNot(lists:keymember(disk_len, 1, Attachment)),
-    ?assertNot(lists:keymember(data, 1, Attachment)).
-
-
-test_construction() ->
-    ?assert(new() == new()),
-    Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
-    ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
-    ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-
-
-test_store_and_fetch() ->
-    Attachment = empty_att(),
-    ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
-    ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-
-
-test_transform() ->
-    Attachment = new([{counter, 0}]),
-    Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
-    ?assertEqual(1, fetch(counter, Transformed)).
-
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% % Eww...
+%% -include("couch_bt_engine.hrl").
+%%
+%% %% Test utilities
+%%
+%%
+%% empty_att() -> new().
+%%
+%%
+%% upgraded_empty_att() ->
+%%     new([{headers, undefined}]).
+%%
+%%
+%% %% Test groups
+%%
+%%
+%% attachment_upgrade_test_() ->
+%%     {"Lazy record upgrade tests", [
+%%         {"Existing record fields don't upgrade",
+%%             {with, empty_att(), [fun test_non_upgrading_fields/1]}
+%%         },
+%%         {"New fields upgrade",
+%%             {with, empty_att(), [fun test_upgrading_fields/1]}
+%%         }
+%%     ]}.
+%%
+%%
+%% attachment_defaults_test_() ->
+%%     {"Attachment defaults tests", [
+%%         {"Records retain old default values", [
+%%             {with, empty_att(), [fun test_legacy_defaults/1]}
+%%         ]},
+%%         {"Upgraded records inherit defaults", [
+%%             {with, upgraded_empty_att(), [fun test_legacy_defaults/1]}
+%%         ]},
+%%         {"Undefined entries are elided on upgrade", [
+%%             {with, upgraded_empty_att(), [fun test_elided_entries/1]}
+%%         ]}
+%%     ]}.
+%%
+%% attachment_field_api_test_() ->
+%%     {"Basic attachment field api", [
+%%         fun test_construction/0,
+%%         fun test_store_and_fetch/0,
+%%         fun test_transform/0
+%%     ]}.
+%%
+%%
+%% attachment_disk_term_test_() ->
+%%     BaseAttachment = new([
+%%         {name, <<"empty">>},
+%%         {type, <<"application/octet-stream">>},
+%%         {att_len, 0},
+%%         {disk_len, 0},
+%%         {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
+%%         {revpos, 4},
+%%         {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
+%%         {encoding, identity}
+%%     ]),
+%%     BaseDiskTerm = {
+%%         <<"empty">>,
+%%         <<"application/octet-stream">>,
+%%         fake_sp,
+%%         0, 0, 4,
+%%         <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
+%%         identity
+%%     },
+%%     Headers = [{<<"X-Foo">>, <<"bar">>}],
+%%     ExtendedAttachment = store(headers, Headers, BaseAttachment),
+%%     ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
+%%     FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
+%%     {"Disk term tests", [
+%%         ?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
+%%         ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
+%%         ?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
+%%         ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
+%%     ]}.
+%%
+%%
+%% attachment_json_term_test_() ->
+%%     Props = [
+%%         {<<"content_type">>, <<"application/json">>},
+%%         {<<"digest">>, <<"md5-QCNtWUNXV0UzJnEjMk92YUk1JA==">>},
+%%         {<<"length">>, 14},
+%%         {<<"revpos">>, 1}
+%%     ],
+%%     PropsInline = [{<<"data">>, <<"eyJhbnN3ZXIiOiA0Mn0=">>}] ++ Props,
+%%     InvalidProps = [{<<"data">>, <<"!Base64Encoded$">>}] ++ Props,
+%%     Att = couch_att:new([
+%%         {name, <<"attachment.json">>},
+%%         {type, <<"application/json">>}
+%%     ]),
+%%     ResultStub = couch_att:new([
+%%         {name, <<"attachment.json">>},
+%%         {type, <<"application/json">>},
+%%         {att_len, 14},
+%%         {disk_len, 14},
+%%         {md5, <<"@#mYCWWE3&q#2OvaI5$">>},
+%%         {revpos, 1},
+%%         {data, stub},
+%%         {encoding, identity}
+%%     ]),
+%%     ResultFollows = ResultStub#att{data = follows},
+%%     ResultInline = ResultStub#att{md5 = <<>>, data = <<"{\"answer\": 42}">>},
+%%     {"JSON term tests", [
+%%         ?_assertEqual(ResultStub, stub_from_json(Att, Props)),
+%%         ?_assertEqual(ResultFollows, follow_from_json(Att, Props)),
+%%         ?_assertEqual(ResultInline, inline_from_json(Att, PropsInline)),
+%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, Props)),
+%%         ?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
+%%     ]}.
+%%
+%%
+%% attachment_stub_merge_test_() ->
+%%     %% Stub merging needs to demonstrate revpos matching, skipping, and missing
+%%     %% attachment errors.
+%%     {"Attachment stub merging tests", []}.
+%%
+%%
+%% %% Test generators
+%%
+%%
+%% test_non_upgrading_fields(Attachment) ->
+%%     Pairs = [
+%%         {name, "cat.gif"},
+%%         {type, "text/very-very-plain"},
+%%         {att_len, 1024},
+%%         {disk_len, 42},
+%%         {md5, <<"md5-hashhashhash">>},
+%%         {revpos, 4},
+%%         {data, stub},
+%%         {encoding, gzip}
+%%     ],
+%%     lists:foreach(
+%%         fun({Field, Value}) ->
+%%             ?assertMatch(#att{}, Attachment),
+%%             Updated = store(Field, Value, Attachment),
+%%             ?assertMatch(#att{}, Updated)
+%%         end,
+%%     Pairs).
+%%
+%%
+%% test_upgrading_fields(Attachment) ->
+%%     ?assertMatch(#att{}, Attachment),
+%%     UpdatedHeaders = store(headers, [{<<"Ans">>, <<"42">>}], Attachment),
+%%     ?assertMatch(X when is_list(X), UpdatedHeaders),
+%%     UpdatedHeadersUndefined = store(headers, undefined, Attachment),
+%%     ?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
+%%
+%%
+%% test_legacy_defaults(Attachment) ->
+%%     ?assertEqual(<<>>, fetch(md5, Attachment)),
+%%     ?assertEqual(0, fetch(revpos, Attachment)),
+%%     ?assertEqual(identity, fetch(encoding, Attachment)).
+%%
+%%
+%% test_elided_entries(Attachment) ->
+%%     ?assertNot(lists:keymember(name, 1, Attachment)),
+%%     ?assertNot(lists:keymember(type, 1, Attachment)),
+%%     ?assertNot(lists:keymember(att_len, 1, Attachment)),
+%%     ?assertNot(lists:keymember(disk_len, 1, Attachment)),
+%%     ?assertNot(lists:keymember(data, 1, Attachment)).
+%%
+%%
+%% test_construction() ->
+%%     ?assert(new() == new()),
+%%     Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
+%%     ?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
+%%     ?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
+%%
+%%
+%% test_store_and_fetch() ->
+%%     Attachment = empty_att(),
+%%     ?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
+%%     ?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
+%%
+%%
+%% test_transform() ->
+%%     Attachment = new([{counter, 0}]),
+%%     Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
+%%     ?assertEqual(1, fetch(counter, Transformed)).
+%%
+%%
+%% -endif.


[couchdb] 24/34: Fix revision tree extensions

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit e5fefbe46738dcc8e6a5d2c8adb08a86f2b8f066
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jul 12 09:37:10 2019 -0500

    Fix revision tree extensions
    
    Previously I was forgetting to keep the previous history around, which
    ended up limiting the revision depth to two.
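
    A minimal sketch of the fix using the new fabric2_util helper (the rev
    values here are hypothetical):

        RevInfo = #{rev_id => {2, <<"b">>}, rev_path => [<<"a">>]},
        % Extending a branch now conses the new rev onto the full prior
        % path instead of dropping the history:
        {2, [<<"b">>, <<"a">>]} = fabric2_util:revinfo_to_revs(RevInfo),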
---
 src/fabric/src/fabric2_db.erl   | 19 +++++++++++--------
 src/fabric/src/fabric2_util.erl |  9 +++++++++
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 3ea30e7..43d555c 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -1098,16 +1098,19 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
             end
     end,
 
-    % When recreating a deleted document we want to extend
-    % the winning revision branch rather than create a
-    % new branch. If we did not do this we could be
-    % recreating into a state that previously existed.
     Doc1 = case Winner of
         #{deleted := true} when not Doc0#doc.deleted ->
-            {WinnerRevPos, WinnerRev} = maps:get(rev_id, Winner),
-            WinnerRevPath = maps:get(rev_path, Winner),
-            Doc0#doc{revs = {WinnerRevPos, [WinnerRev | WinnerRevPath]}};
-        _ ->
+            % When recreating a deleted document we want to extend
+            % the winning revision branch rather than create a
+            % new branch. If we did not do this we could be
+            % recreating into a state that previously existed.
+            Doc0#doc{revs = fabric2_util:revinfo_to_revs(Winner)};
+        #{} ->
+            % Otherwise we're extending the target's revision
+            % history with this update
+            Doc0#doc{revs = fabric2_util:revinfo_to_revs(Target)};
+        not_found ->
+            % Creating a new doc means our revs start empty
             Doc0
     end,
 
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index fb59d59..48bf7d1 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -14,6 +14,7 @@
 
 
 -export([
+    revinfo_to_revs/1,
     revinfo_to_path/1,
     sort_revinfos/1,
 
@@ -37,6 +38,14 @@
 -include_lib("couch/include/couch_db.hrl").
 
 
+revinfo_to_revs(RevInfo) ->
+    #{
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    {RevPos, [Rev | RevPath]}.
+
+
 revinfo_to_path(RevInfo) ->
     #{
         rev_id := {RevPos, Rev},


[couchdb] 12/34: Allow for previously configured filters

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit f7a790ea72dbe5c89658de93816e66a5ed3ec10d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Mon Jun 10 14:33:12 2019 -0500

    Allow for previously configured filters
    
    The older chttpd/fabric split configured filters as one step in the
    coordinator instead of within each RPC worker.
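
    A hedged sketch of the new clause's effect. configure_filter/4 is
    internal to chttpd_changes, and the tuple shape follows the existing
    _doc_ids clause:

        Filter = {doc_ids, main_only, [<<"a">>, <<"b">>]},
        % An already-built filter tuple now passes straight through
        % instead of being re-parsed from the request:
        Filter = configure_filter(Filter, main_only, Req, Db),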
---
 src/chttpd/src/chttpd_changes.erl | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 620f68d..d27bbad 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -197,6 +197,9 @@ get_callback_acc(Callback) when is_function(Callback, 1) ->
     {fun(Ev, _) -> Callback(Ev) end, ok}.
 
 
+configure_filter(Filter, _Style, _Req, _Db) when is_tuple(Filter) ->
+    % Filter has already been configured
+    Filter;
 configure_filter("_doc_ids", Style, Req, _Db) ->
     {doc_ids, Style, get_doc_ids(Req)};
 configure_filter("_selector", Style, Req, _Db) ->


[couchdb] 11/34: Convert attachment info to disk terms correctly

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit bc8007b7018054733644805cdc430983bc2f4e73
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 7 16:12:28 2019 -0500

    Convert attachment info to disk terms correctly
    
    I was accidentally skipping the step of properly
    serializing/deserializing attachments to and from their disk terms.
    
    Note to self: if someone specifies attachment headers this will likely
    break when we attempt to pack the value tuple here.
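
    A minimal sketch of the round trip this restores (arities as in the
    hunks below):

        % Serialize before packing into the fdb value tuple...
        DiskAtts = lists:map(fun couch_att:to_disk_term/1, Atts),
        % ...and deserialize when decoding a doc back out.
        Atts1 = lists:map(fun(DiskAtt) ->
            couch_att:from_disk_term(Db, DocId, DiskAtt)
        end, DiskAtts),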
---
 src/fabric/src/fabric2_fdb.erl | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 788bbc6..4f08d97 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -926,14 +926,19 @@ doc_to_fdb(Db, #doc{} = Doc) ->
         deleted = Deleted
     } = Doc,
 
+    DiskAtts = lists:map(fun couch_att:to_disk_term/1, Atts),
+
     Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev}, DbPrefix),
-    Val = {Body, Atts, Deleted},
+    Val = {Body, DiskAtts, Deleted},
     {Key, term_to_binary(Val, [{minor_version, 1}])}.
 
 
-fdb_to_doc(_Db, DocId, Pos, Path, Bin) when is_binary(Bin) ->
-    {Body, Atts, Deleted} = binary_to_term(Bin, [safe]),
-    #doc{
+fdb_to_doc(Db, DocId, Pos, Path, Bin) when is_binary(Bin) ->
+    {Body, DiskAtts, Deleted} = binary_to_term(Bin, [safe]),
+    Atts = lists:map(fun(Att) ->
+        couch_att:from_disk_term(Db, DocId, Att)
+    end, DiskAtts),
+    Doc0 = #doc{
         id = DocId,
         revs = {Pos, Path},
         body = Body,


[couchdb] 27/34: Reinitialize chttpd_auth_cache on config change

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 858c947394623006db17ac28015458e7d0920c6d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jul 12 12:42:17 2019 -0500

    Reinitialize chttpd_auth_cache on config change
    
    The old test got around this by using the couch_httpd_auth cache in
    its tests, which is fairly odd given that we run chttpd_auth_cache in
    production. This fixes that mistake and upgrades chttpd_auth_cache so
    that it works in the test scenario of changing the authentication_db
    configuration.
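
    A hedged sketch of the behavior this enables (config:set/3 is from
    the config application; the db name below is hypothetical):

        % Changing the authentication db now reinitializes the cache:
        ok = config:set("chttpd_auth", "authentication_db", "custom_users"),
        % handle_config_change/5 responds by calling
        % gen_server:call(chttpd_auth_cache, reinit_cache, infinity),
        % which clears the LRU and restarts the changes listener.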
---
 src/chttpd/src/chttpd_auth_cache.erl          | 58 +++++++++++++++++++++++----
 src/fabric/src/fabric2_db.erl                 |  6 ++-
 test/elixir/test/security_validation_test.exs |  4 +-
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index e986af6..9eee196 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -12,16 +12,19 @@
 
 -module(chttpd_auth_cache).
 -behaviour(gen_server).
+-behaviour(config_listener).
 
 -export([start_link/0, get_user_creds/2, update_user_creds/3]).
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
     code_change/3]).
 -export([listen_for_changes/1, changes_callback/2]).
+-export([handle_config_change/5, handle_config_terminate/3]).
 
 -include_lib("couch/include/couch_db.hrl").
 -include_lib("couch/include/couch_js_functions.hrl").
 
 -define(CACHE, chttpd_auth_cache_lru).
+-define(RELISTEN_DELAY, 5000).
 
 -record(state, {
     changes_pid,
@@ -101,17 +104,28 @@ maybe_increment_auth_cache_miss(UserName) ->
 %% gen_server callbacks
 
 init([]) ->
-    try
-        fabric2_db:open(dbname(), [?ADMIN_CTX])
-    catch error:database_does_not_exist ->
-        case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
-            {ok, _} -> ok;
-            {error, file_exists} -> ok
-        end
-    end,
+    ensure_auth_db(),
+    ok = config:listen_for_changes(?MODULE, nil),
     self() ! {start_listener, 0},
     {ok, #state{}}.
 
+handle_call(reinit_cache, _From, State) ->
+    #state{
+        changes_pid = Pid
+    } = State,
+
+    % The database may currently be cached. This
+    % ensures that we've removed it so that the
+    % system db callbacks are installed.
+    fabric2_server:remove(dbname()),
+
+    ensure_auth_db(),
+    ets_lru:clear(?CACHE),
+    exit(Pid, shutdown),
+    self() ! {start_listener, 0},
+
+    {reply, ok, State#state{changes_pid = undefined}};
+
 handle_call(_Call, _From, State) ->
     {noreply, State}.
 
@@ -130,6 +144,9 @@ handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
     {noreply, State#state{last_seq=Seq}};
 handle_info({start_listener, Seq}, State) ->
     {noreply, State#state{changes_pid = spawn_changes(Seq)}};
+handle_info(restart_config_listener, State) ->
+    ok = config:listen_for_changes(?MODULE, nil),
+    {noreply, State};
 handle_info(_Msg, State) ->
     {noreply, State}.
 
@@ -181,6 +198,19 @@ changes_callback({timeout, _ResponseType}, Acc) ->
 changes_callback({error, _}, EndSeq) ->
     exit({seq, EndSeq}).
 
+
+handle_config_change("chttpd_auth", "authentication_db", _DbName, _, _) ->
+    {ok, gen_server:call(?MODULE, reinit_cache, infinity)};
+handle_config_change(_, _, _, _, _) ->
+    {ok, nil}.
+
+handle_config_terminate(_, stop, _) ->
+    ok;
+handle_config_terminate(_Server, _Reason, _State) ->
+    Dst = whereis(?MODULE),
+    erlang:send_after(?RELISTEN_DELAY, Dst, restart_config_listener).
+
+
 load_user_from_db(UserName) ->
     {ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
     try fabric2_db:open_doc(Db, docid(UserName), [conflicts]) of
@@ -194,6 +224,18 @@ load_user_from_db(UserName) ->
         nil
     end.
 
+
+ensure_auth_db() ->
+    try
+        fabric2_db:open(dbname(), [?ADMIN_CTX])
+    catch error:database_does_not_exist ->
+        case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
+            {ok, _} -> ok;
+            {error, file_exists} -> ok
+        end
+    end.
+
+
 dbname() ->
     DbNameStr = config:get("chttpd_auth", "authentication_db", "_users"),
     iolist_to_binary(DbNameStr).
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 43d555c..7114903 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -736,10 +736,14 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
 maybe_add_sys_db_callbacks(Db) ->
     IsReplicatorDb = fabric2_util:dbname_ends_with(Db, <<"_replicator">>),
 
+    AuthenticationDb = config:get("chttpd_auth", "authentication_db"),
+    IsAuthCache = if AuthenticationDb == undefined -> false; true ->
+        name(Db) == ?l2b(AuthenticationDb)
+    end,
     CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
     IsCfgUsersDb = fabric2_util:dbname_ends_with(Db, ?l2b(CfgUsersSuffix)),
     IsGlobalUsersDb = fabric2_util:dbname_ends_with(Db, <<"_users">>),
-    IsUsersDb = IsCfgUsersDb orelse IsGlobalUsersDb,
+    IsUsersDb = IsAuthCache orelse IsCfgUsersDb orelse IsGlobalUsersDb,
 
     {BDU, ADR} = if
         IsReplicatorDb ->
diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs
index 0df3a78..e103314 100644
--- a/test/elixir/test/security_validation_test.exs
+++ b/test/elixir/test/security_validation_test.exs
@@ -53,9 +53,6 @@ defmodule SecurityValidationTest do
     on_exit(fn -> delete_db(auth_db_name) end)
 
     configs = [
-      {"httpd", "authentication_handlers",
-       "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
-      {"couch_httpd_auth", "authentication_db", auth_db_name},
       {"chttpd_auth", "authentication_db", auth_db_name}
     ]
 
@@ -72,6 +69,7 @@ defmodule SecurityValidationTest do
     Enum.each(users, fn {name, pass} ->
       doc = %{
         :_id => "org.couchdb.user:#{name}",
+        :type => "user",
         :name => name,
         :roles => [],
         :password => pass


[couchdb] 30/34: Fix more elixir tests

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 7a3bfe669663294ab58c015611236a00a9a041b0
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jul 16 12:05:35 2019 -0500

    Fix more elixir tests
---
 src/chttpd/src/chttpd_db.erl  |  8 ++++----
 src/couch/src/couch_att.erl   | 10 +++++++---
 src/fabric/src/fabric2_db.erl | 35 ++++++++++++++++++++++++++++++++---
 3 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index a252041..0c7e4d5 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -1046,7 +1046,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
 
 db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     couch_httpd:validate_referer(Req),
-    couch_db:validate_docid(Db, DocId),
+    couch_doc:validate_docid(DocId, fabric2_db:name(Db)),
     chttpd:validate_ctype(Req, "multipart/form-data"),
 
     Options = [{user_ctx,Ctx}],
@@ -1106,7 +1106,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
         update_type = UpdateType
     } = parse_doc_query(Req),
     DbName = fabric2_db:name(Db),
-    couch_doc:validate_docid(DocId),
+    couch_doc:validate_docid(DocId, fabric2_db:name(Db)),
 
     Options = [{user_ctx, Ctx}],
 
@@ -1667,7 +1667,7 @@ db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
                 % check for the existence of the doc to handle the 404 case.
                 couch_doc_open(Db, DocId, nil, [])
             end,
-            couch_db:validate_docid(Db, DocId),
+            couch_doc:validate_docid(DocId, fabric2_db:name(Db)),
             #doc{id=DocId};
         Rev ->
             case fabric2_db:open_doc_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
@@ -2030,7 +2030,7 @@ bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
             {null, {error, Error}, Options};
         DocId ->
             try
-                couch_db:validate_docid(Db, DocId),
+                couch_doc:validate_docid(DocId, fabric2_db:name(Db)),
                 bulk_get_open_doc_revs1(Db, Props, Options, {DocId})
             catch throw:{Error, Reason} ->
                 {DocId, {error, {null, Error, Reason}}, Options}
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index d3c8966..2c33362 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -384,8 +384,12 @@ flush(Db, DocId, Att1) ->
     % If we were sent a gzip'ed attachment with no
     % length data, we have to set it here.
     Att3 = case DiskLen of
-        undefined -> store(disk_len, AttLen, Att2);
-        _ -> Att2
+        undefined when AttLen /= undefined ->
+            store(disk_len, AttLen, Att2);
+        undefined when is_binary(Data) ->
+            store(disk_len, size(Data), Att2);
+        _ ->
+            Att2
     end,
 
     % If no encoding has been set, default to
@@ -537,7 +541,7 @@ range_foldl(Bin1, From, To, Fun, Acc) when is_binary(Bin1) ->
     ReadLen = To - From,
     Bin2 = case Bin1 of
         _ when size(Bin1) < From -> <<>>;
-        <<_:From/binary, B2>> -> B2
+        <<_:From/binary, B2/binary>> -> B2
     end,
     Bin3 = case Bin2 of
         _ when size(Bin2) < ReadLen -> Bin2;
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 7114903..3c3b7d3 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -897,7 +897,7 @@ get_members(SecProps) ->
     end.
 
 
-apply_open_doc_opts(Doc, Revs, Options) ->
+apply_open_doc_opts(Doc0, Revs, Options) ->
     IncludeRevsInfo = lists:member(revs_info, Options),
     IncludeConflicts = lists:member(conflicts, Options),
     IncludeDelConflicts = lists:member(deleted_conflicts, Options),
@@ -906,7 +906,7 @@ apply_open_doc_opts(Doc, Revs, Options) ->
     % This revs_info becomes fairly useless now that we're
     % not keeping old document bodies around...
     Meta1 = if not IncludeRevsInfo -> []; true ->
-        {Pos, [Rev | RevPath]} = Doc#doc.revs,
+        {Pos, [Rev | RevPath]} = Doc0#doc.revs,
         RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
         [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
     end,
@@ -932,7 +932,36 @@ apply_open_doc_opts(Doc, Revs, Options) ->
         [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
     end,
 
-    {ok, Doc#doc{meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4}}.
+    Doc1 = case lists:keyfind(atts_since, 1, Options) of
+        {_, PossibleAncestors} ->
+            #doc{
+                revs = DocRevs,
+                atts = Atts0
+            } = Doc0,
+            RevPos = find_ancestor_rev_pos(DocRevs, PossibleAncestors),
+            Atts1 = lists:map(fun(Att) ->
+                [AttPos, Data] = couch_att:fetch([revpos, data], Att),
+                if  AttPos > RevPos -> couch_att:store(data, Data, Att);
+                    true -> couch_att:store(data, stub, Att)
+                end
+            end, Atts0),
+            Doc0#doc{atts = Atts1};
+        false ->
+            Doc0
+    end,
+
+    {ok, Doc1#doc{meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4}}.
+
+
+find_ancestor_rev_pos({_, []}, _PossibleAncestors) ->
+    0;
+find_ancestor_rev_pos(_DocRevs, []) ->
+    0;
+find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
+    case lists:member({RevPos, RevId}, AttsSinceRevs) of
+        true -> RevPos;
+        false -> find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
+    end.
 
 
 filter_found_revs(RevInfo, Revs) ->


[couchdb] 04/34: Initial test suite for the fabric2 implementation

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 29df909b788f7543cbf340102ce39b1a32109516
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:33:55 2019 -0500

    Initial test suite for the fabric2 implementation
    
    This provides a good bit of code coverage for the new implementation.
    We'll want to expand this to include relevant tests from the previous
    fabric test suite, along with reading through the various other tests
    and ensuring that we cover the API as deeply as is appropriate for
    this layer.
---
 src/fabric/test/fabric2_changes_fold_tests.erl     | 114 +++
 src/fabric/test/fabric2_db_crud_tests.erl          |  88 +++
 src/fabric/test/fabric2_db_misc_tests.erl          | 113 +++
 src/fabric/test/fabric2_db_security_tests.erl      | 162 +++++
 src/fabric/test/fabric2_doc_count_tests.erl        | 251 +++++++
 src/fabric/test/fabric2_doc_crud_tests.erl         | 770 +++++++++++++++++++++
 src/fabric/test/fabric2_doc_fold_tests.erl         | 209 ++++++
 src/fabric/test/fabric2_fdb_tx_retry_tests.erl     | 178 +++++
 src/fabric/test/fabric2_trace_db_create_tests.erl  |  46 ++
 src/fabric/test/fabric2_trace_db_delete_tests.erl  |  49 ++
 src/fabric/test/fabric2_trace_db_open_tests.erl    |  50 ++
 src/fabric/test/fabric2_trace_doc_create_tests.erl |  86 +++
 12 files changed, 2116 insertions(+)

diff --git a/src/fabric/test/fabric2_changes_fold_tests.erl b/src/fabric/test/fabric2_changes_fold_tests.erl
new file mode 100644
index 0000000..892b448
--- /dev/null
+++ b/src/fabric/test/fabric2_changes_fold_tests.erl
@@ -0,0 +1,114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_changes_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 25).
+
+
+changes_fold_test_() ->
+    {
+        "Test changes fold operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun fold_changes_basic/1,
+                fun fold_changes_since_now/1,
+                fun fold_changes_since_seq/1,
+                fun fold_changes_basic_rev/1,
+                fun fold_changes_since_now_rev/1,
+                fun fold_changes_since_seq_rev/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    Rows = lists:map(fun(Val) ->
+        DocId = fabric2_util:uuid(),
+        Doc = #doc{
+            id = DocId,
+            body = {[{<<"value">>, Val}]}
+        },
+        {ok, RevId} = fabric2_db:update_doc(Db, Doc, []),
+        UpdateSeq = fabric2_db:get_update_seq(Db),
+        #{
+            id => DocId,
+            sequence => UpdateSeq,
+            deleted => false,
+            rev_id => RevId
+        }
+    end, lists:seq(1, ?DOC_COUNT)),
+    {Db, Rows, Ctx}.
+
+
+cleanup({Db, _DocIdRevs, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+fold_changes_basic({Db, DocRows, _}) ->
+    {ok, Rows} = fabric2_db:fold_changes(Db, 0, fun fold_fun/2, []),
+    ?assertEqual(lists:reverse(DocRows), Rows).
+
+
+fold_changes_since_now({Db, _, _}) ->
+    {ok, Rows} = fabric2_db:fold_changes(Db, now, fun fold_fun/2, []),
+    ?assertEqual([], Rows).
+
+
+fold_changes_since_seq({_, [], _}) ->
+    ok;
+
+fold_changes_since_seq({Db, [Row | RestRows], _}) ->
+    #{sequence := Since} = Row,
+    {ok, Rows} = fabric2_db:fold_changes(Db, Since, fun fold_fun/2, []),
+    ?assertEqual(lists:reverse(RestRows), Rows),
+    fold_changes_since_seq({Db, RestRows, nil}).
+
+
+fold_changes_basic_rev({Db, _, _}) ->
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, 0, fun fold_fun/2, [], Opts),
+    ?assertEqual([], Rows).
+
+
+fold_changes_since_now_rev({Db, DocRows, _}) ->
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, now, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocRows, Rows).
+
+
+fold_changes_since_seq_rev({_, [], _}) ->
+    ok;
+
+fold_changes_since_seq_rev({Db, DocRows, _}) ->
+    #{sequence := Since} = lists:last(DocRows),
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, Since, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocRows, Rows),
+    RestRows = lists:sublist(DocRows, length(DocRows) - 1),
+    fold_changes_since_seq_rev({Db, RestRows, nil}).
+
+
+fold_fun(#{} = Change, Acc) ->
+    {ok, [Change | Acc]}.
diff --git a/src/fabric/test/fabric2_db_crud_tests.erl b/src/fabric/test/fabric2_db_crud_tests.erl
new file mode 100644
index 0000000..24deeb2
--- /dev/null
+++ b/src/fabric/test/fabric2_db_crud_tests.erl
@@ -0,0 +1,88 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_crud_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+crud_test_() ->
+    {
+        "Test database CRUD operations",
+        {
+            setup,
+            fun() -> test_util:start_couch([fabric]) end,
+            fun test_util:stop_couch/1,
+            [
+                ?TDEF(create_db),
+                ?TDEF(open_db),
+                ?TDEF(delete_db),
+                ?TDEF(list_dbs)
+            ]
+        }
+    }.
+
+
+create_db() ->
+    DbName = ?tempdb(),
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+    ?assertEqual({error, file_exists}, fabric2_db:create(DbName, [])).
+
+
+open_db() ->
+    DbName = ?tempdb(),
+    ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+    % Opening the cached version
+    ?assertMatch({ok, _}, fabric2_db:open(DbName, [])),
+
+    % Remove from cache and re-open
+    true = ets:delete(fabric2_server, DbName),
+    ?assertMatch({ok, _}, fabric2_db:open(DbName, [])).
+
+
+delete_db() ->
+    DbName = ?tempdb(),
+    ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+    ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+    ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+    ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])).
+
+
+list_dbs() ->
+    DbName = ?tempdb(),
+    AllDbs1 = fabric2_db:list_dbs(),
+
+    ?assert(is_list(AllDbs1)),
+    ?assert(not lists:member(DbName, AllDbs1)),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    AllDbs2 = fabric2_db:list_dbs(),
+    ?assert(lists:member(DbName, AllDbs2)),
+
+    ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+    AllDbs3 = fabric2_db:list_dbs(),
+    ?assert(not lists:member(DbName, AllDbs3)).
diff --git a/src/fabric/test/fabric2_db_misc_tests.erl b/src/fabric/test/fabric2_db_misc_tests.erl
new file mode 100644
index 0000000..8e64056
--- /dev/null
+++ b/src/fabric/test/fabric2_db_misc_tests.erl
@@ -0,0 +1,113 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_misc_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/1}).
+
+
+misc_test_() ->
+    {
+        "Test database miscellaney",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun empty_db_info/1,
+                fun accessors/1,
+                fun set_revs_limit/1,
+                fun set_security/1,
+                fun is_system_db/1,
+                fun ensure_full_commit/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    DbName = ?tempdb(),
+    {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+    {DbName, Db, Ctx}.
+
+
+cleanup({_DbName, Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+empty_db_info({DbName, Db, _}) ->
+    {ok, Info} = fabric2_db:get_db_info(Db),
+    ?assertEqual(DbName, fabric2_util:get_value(db_name, Info)),
+    ?assertEqual(0, fabric2_util:get_value(doc_count, Info)),
+    ?assertEqual(0, fabric2_util:get_value(doc_del_count, Info)),
+    ?assert(is_binary(fabric2_util:get_value(update_seq, Info))).
+
+
+accessors({DbName, Db, _}) ->
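+    % A fresh database reports the zero versionstamp for all three sequences.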
+    SeqZero = fabric2_fdb:vs_to_seq(fabric2_util:seq_zero_vs()),
+    ?assertEqual(DbName, fabric2_db:name(Db)),
+    ?assertEqual(0, fabric2_db:get_instance_start_time(Db)),
+    ?assertEqual(nil, fabric2_db:get_pid(Db)),
+    ?assertEqual(undefined, fabric2_db:get_before_doc_update_fun(Db)),
+    ?assertEqual(undefined, fabric2_db:get_after_doc_read_fun(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_committed_update_seq(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_compacted_seq(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_update_seq(Db)),
+    ?assertEqual(nil, fabric2_db:get_compactor_pid(Db)),
+    ?assertEqual(1000, fabric2_db:get_revs_limit(Db)),
+    ?assertMatch(<<_:32/binary>>, fabric2_db:get_uuid(Db)),
+    ?assertEqual(true, fabric2_db:is_db(Db)),
+    ?assertEqual(false, fabric2_db:is_db(#{})),
+    ?assertEqual(false, fabric2_db:is_partitioned(Db)),
+    ?assertEqual(false, fabric2_db:is_clustered(Db)).
+
+
+set_revs_limit({DbName, Db, _}) ->
+    ?assertEqual(ok, fabric2_db:set_revs_limit(Db, 500)),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    ?assertEqual(500, fabric2_db:get_revs_limit(Db2)).
+
+
+set_security({DbName, Db, _}) ->
+    SecObj = {[
+        {<<"admins">>, {[
+            {<<"names">>, []},
+            {<<"roles">>, []}
+        ]}}
+    ]},
+    ?assertEqual(ok, fabric2_db:set_security(Db, SecObj)),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    ?assertEqual(SecObj, fabric2_db:get_security(Db2)).
+
+
+is_system_db({DbName, Db, _}) ->
+    ?assertEqual(false, fabric2_db:is_system_db(Db)),
+    ?assertEqual(false, fabric2_db:is_system_db_name("foo")),
+    ?assertEqual(false, fabric2_db:is_system_db_name(DbName)),
+    ?assertEqual(true, fabric2_db:is_system_db_name(<<"_replicator">>)),
+    ?assertEqual(true, fabric2_db:is_system_db_name("_replicator")),
+    ?assertEqual(true, fabric2_db:is_system_db_name(<<"foo/_replicator">>)),
+    ?assertEqual(false, fabric2_db:is_system_db_name(<<"f.o/_replicator">>)),
+    ?assertEqual(false, fabric2_db:is_system_db_name(<<"foo/bar">>)).
+
+
+ensure_full_commit({_, Db, _}) ->
+    ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db)),
+    ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db, 5)).
diff --git a/src/fabric/test/fabric2_db_security_tests.erl b/src/fabric/test/fabric2_db_security_tests.erl
new file mode 100644
index 0000000..9796011
--- /dev/null
+++ b/src/fabric/test/fabric2_db_security_tests.erl
@@ -0,0 +1,162 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_security_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+security_test_() ->
+    {
+        "Test database security operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun is_admin_name/1,
+                fun is_not_admin_name/1,
+                fun is_admin_role/1,
+                fun is_not_admin_role/1,
+                fun check_is_admin/1,
+                fun check_is_not_admin/1,
+                fun check_is_member_name/1,
+                fun check_is_not_member_name/1,
+                fun check_is_member_role/1,
+                fun check_is_not_member_role/1,
+                fun check_admin_is_member/1,
+                fun check_is_member_of_public_db/1,
+                fun check_set_user_ctx/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    DbName = ?tempdb(),
+    {ok, Db1} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+    SecProps = {[
+        {<<"admins">>, {[
+            {<<"names">>, [<<"admin_name1">>, <<"admin_name2">>]},
+            {<<"roles">>, [<<"admin_role1">>, <<"admin_role2">>]}
+        ]}},
+        {<<"members">>, {[
+            {<<"names">>, [<<"member_name1">>, <<"member_name2">>]},
+            {<<"roles">>, [<<"member_role1">>, <<"member_role2">>]}
+        ]}}
+    ]},
+    ok = fabric2_db:set_security(Db1, SecProps),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    {Db2, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+is_admin_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(true, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_not_admin_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member1">>},
+    ?assertEqual(false, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_admin_role({Db, _}) ->
+    UserCtx = #user_ctx{roles = [<<"admin_role1">>]},
+    ?assertEqual(true, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_not_admin_role({Db, _}) ->
+    UserCtx = #user_ctx{roles = [<<"member_role1">>]},
+    ?assertEqual(false, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+check_is_admin({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_admin(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_admin({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member_name1">>},
+    ?assertThrow(
+        {unauthorized, <<"You are not a db or server admin.">>},
+        fabric2_db:check_is_admin(Db#{user_ctx := #user_ctx{}})
+    ),
+    ?assertThrow(
+        {forbidden, <<"You are not a db or server admin.">>},
+        fabric2_db:check_is_admin(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_is_member_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_member_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>},
+    ?assertThrow(
+        {unauthorized, <<"You are not authorized", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := #user_ctx{}})
+    ),
+    ?assertThrow(
+        {forbidden, <<"You are not allowed to access", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_is_member_role({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"member_role1">>]},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_member_role({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    ?assertThrow(
+        {forbidden, <<"You are not allowed to access", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_admin_is_member({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_member_of_public_db({Db, _}) ->
+    PublicDb = Db#{security_doc := {[]}},
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    ?assertEqual(
+        ok,
+        fabric2_db:check_is_member(PublicDb#{user_ctx := #user_ctx{}})
+    ),
+    ?assertEqual(
+        ok,
+        fabric2_db:check_is_member(PublicDb#{user_ctx := UserCtx})
+    ).
+
+
+check_set_user_ctx({Db0, _}) ->
+    DbName = fabric2_db:name(Db0),
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    {ok, Db1} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+    ?assertEqual(UserCtx, fabric2_db:get_user_ctx(Db1)).
diff --git a/src/fabric/test/fabric2_doc_count_tests.erl b/src/fabric/test/fabric2_doc_count_tests.erl
new file mode 100644
index 0000000..37d0840
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_count_tests.erl
@@ -0,0 +1,251 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_count_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 10).
+
+
+doc_count_test_() ->
+    {
+        "Test document counting operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun normal_docs/1,
+                fun design_docs/1,
+                fun local_docs/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+normal_docs({Db, _}) ->
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        Doc = #doc{
+            id = integer_to_binary(Id),
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount,
+            LDocCount
+        ),
+
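+    % Delete every even-valued doc so exactly half of them flip to deleted.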
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT div 2,
+            DelDocCount + ?DOC_COUNT div 2,
+            DDocCount,
+            LDocCount
+        ),
+
+    lists:foreach(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount,
+            LDocCount
+        ).
+
+
+design_docs({Db, _}) ->
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        BinId = integer_to_binary(Id),
+        DDocId = <<?DESIGN_DOC_PREFIX, BinId/binary>>,
+        Doc = #doc{
+            id = DDocId,
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount + ?DOC_COUNT,
+            LDocCount
+        ),
+
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT div 2,
+            DelDocCount + ?DOC_COUNT div 2,
+            DDocCount + ?DOC_COUNT div 2,
+            LDocCount
+        ),
+
+    lists:foreach(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount + ?DOC_COUNT,
+            LDocCount
+        ).
+
+
+local_docs({Db, _}) ->
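+    % Local docs are tracked separately; doc_count and del_doc_count stay put.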
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        BinId = integer_to_binary(Id),
+        LDocId = <<?LOCAL_DOC_PREFIX, BinId/binary>>,
+        Doc = #doc{
+            id = LDocId,
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT
+        ),
+
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT div 2
+        ),
+
+    lists:foreach(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT
+        ).
+
+
+get_doc_counts(Db) ->
+    DocCount = fabric2_db:get_doc_count(Db),
+    DelDocCount = fabric2_db:get_del_doc_count(Db),
+    DDocCount = fabric2_db:get_doc_count(Db, <<"_design">>),
+    LDocCount = fabric2_db:get_doc_count(Db, <<"_local">>),
+    {DocCount, DelDocCount, DDocCount, LDocCount}.
+
+
+check_doc_counts(Db, DocCount, DelDocCount, DDocCount, LDocCount) ->
+    ?assertEqual(DocCount, fabric2_db:get_doc_count(Db)),
+    ?assertEqual(DelDocCount, fabric2_db:get_del_doc_count(Db)),
+    ?assertEqual(DocCount, fabric2_db:get_doc_count(Db, <<"_all_docs">>)),
+    ?assertEqual(DDocCount, fabric2_db:get_doc_count(Db, <<"_design">>)),
+    ?assertEqual(LDocCount, fabric2_db:get_doc_count(Db, <<"_local">>)).
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
new file mode 100644
index 0000000..85b2766
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -0,0 +1,770 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_crud_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+doc_crud_test_() ->
+    {
+        "Test document CRUD operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun open_missing_doc/1,
+                fun create_new_doc/1,
+                fun create_ddoc_basic/1,
+                fun create_ddoc_requires_admin/1,
+                fun create_ddoc_requires_validation/1,
+                fun create_ddoc_requires_compilation/1,
+                fun update_doc_basic/1,
+                fun update_ddoc_basic/1,
+                fun update_doc_replicated/1,
+                fun update_doc_replicated_add_conflict/1,
+                fun update_doc_replicated_changes_winner/1,
+                fun update_doc_replicated_extension/1,
+                fun update_doc_replicate_existing_rev/1,
+                fun update_winning_conflict_branch/1,
+                fun update_non_winning_conflict_branch/1,
+                fun delete_doc_basic/1,
+                fun delete_changes_winner/1,
+                fun recreate_doc_basic/1,
+                fun conflict_on_create_new_with_rev/1,
+                fun conflict_on_update_with_no_rev/1,
+                fun conflict_on_create_as_deleted/1,
+                fun conflict_on_recreate_as_deleted/1,
+                fun conflict_on_extend_deleted/1,
+                fun open_doc_revs_basic/1,
+                fun open_doc_revs_all/1,
+                fun open_doc_revs_latest/1,
+                fun get_missing_revs_basic/1,
+                fun get_missing_revs_on_missing_doc/1,
+                fun open_missing_local_doc/1,
+                fun create_local_doc_basic/1,
+                fun update_local_doc_basic/1,
+                fun delete_local_doc_basic/1,
+                fun recreate_local_doc/1,
+                fun create_local_doc_bad_rev/1,
+                fun create_local_doc_random_rev/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+open_missing_doc({Db, _}) ->
+    ?assertEqual({not_found, missing}, fabric2_db:open_doc(Db, <<"foo">>)).
+
+
+create_new_doc({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+    NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+    ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+create_ddoc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+    NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+    ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+create_ddoc_requires_admin({Db, _}) ->
+    Db2 = fabric2_db:set_user_ctx(Db, #user_ctx{}),
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({unauthorized, _}, fabric2_db:update_doc(Db2, Doc)).
+
+
+create_ddoc_requires_validation({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[
+            {<<"views">>, {[
+                {<<"foo">>, {[
+                    {<<"map">>, <<"function(doc) {}">>},
+                    {<<"reduce">>, <<"_not_a_builtin_reduce">>}
+                ]}}
+            ]}}
+        ]}
+    },
+    ?assertThrow(
+            {bad_request, invalid_design_doc, _},
+            fabric2_db:update_doc(Db, Doc)
+        ).
+
+
+create_ddoc_requires_compilation({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>, {[
+                {<<"foo">>, {[
+                    {<<"map">>, <<"Hopefully this is invalid JavaScript">>}
+                ]}}
+            ]}}
+        ]}
+    },
+    ?assertThrow(
+            {bad_request, compilation_error, _},
+            fabric2_db:update_doc(Db, Doc)
+        ).
+
+
+update_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{
+        revs = {Pos2, [Rev2, Rev1]}
+    },
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_ddoc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc1 = #doc{
+        id = DDocId,
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{
+        revs = {Pos2, [Rev2, Rev1]}
+    },
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
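+% replicated_changes updates carry the full rev path and skip interactive
+% revision generation, mirroring what the replicator sends.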
+update_doc_replicated({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [fabric2_util:uuid(), fabric2_util:uuid()]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc, [replicated_changes]),
+    ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+update_doc_replicated_add_conflict({Db, _}) ->
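+    % Sort the generated rev ids so Rev1 < Rev2 < Rev3; with equal-depth
+    % branches the highest rev wins, making the expected winner deterministic.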
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_doc_replicated_changes_winner({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+update_doc_replicated_extension({Db, _}) ->
+    % The revs are deliberately left unsorted to demonstrate that
+    % extending a revision path does not depend on rev ordering.
+    Rev1 = fabric2_util:uuid(),
+    Rev2 = fabric2_util:uuid(),
+    Rev3 = fabric2_util:uuid(),
+    Rev4 = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {4, [Rev4, Rev3, Rev2]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {4, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    {ok, Doc3} = fabric2_db:open_doc(Db, Doc2#doc.id),
+    ?assertEqual({4, [Rev4, Rev3, Rev2, Rev1]}, Doc3#doc.revs),
+    ?assertEqual(Doc2#doc{revs = undefined}, Doc3#doc{revs = undefined}).
+
+
+update_doc_replicate_existing_rev({Db, _}) ->
+    Rev1 = fabric2_util:uuid(),
+    Rev2 = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    {ok, []} = fabric2_db:update_docs(Db, [Doc1], [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)).
+
+
+update_winning_conflict_branch({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Update the winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"baz">>, 2}]}
+    },
+    {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    % Assert we've got the correct winner
+    ?assertEqual({3, [Rev4, Rev3, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+update_non_winning_conflict_branch({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Update the non winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"baz">>, 2}]}
+    },
+    {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    % Assert we've got the correct winner
+    ?assertEqual({3, [Rev4, Rev2, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+delete_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{revs = {Pos2, [Rev2, Rev1]}},
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id, [deleted])).
+
+
+delete_changes_winner({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Delete the winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        deleted = true,
+        body = {[]}
+    },
+    {ok, {3, _}} = fabric2_db:update_doc(Db, Doc3),
+    ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc3#doc.id)).
+
+
+recreate_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"state">>, 3}]}
+    },
+    {ok, {3, Rev3}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    ?assertEqual({3, [Rev3, Rev2, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+conflict_on_create_new_with_rev({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        revs = {1, [fabric2_util:uuid()]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+
+
+conflict_on_update_with_no_rev({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {0, []},
+        body = {[{<<"state">>, 2}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc2)).
+
+
+conflict_on_create_as_deleted({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        deleted = true,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+
+
+conflict_on_recreate_as_deleted({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {0, []},
+        deleted = true,
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+
+
+conflict_on_extend_deleted({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {Pos2, [Rev2]},
+        deleted = false,
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+
+
+open_doc_revs_basic({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], []),
+    ?assertEqual(Doc1, Doc3),
+
+    {ok, [{ok, Doc4}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev2}], []),
+    ?assertEqual(Doc2, Doc4),
+
+    Revs = [{2, Rev3}, {2, Rev2}, {1, Rev1}],
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, Revs, []),
+    ?assert(length(Docs) == 3),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)),
+    ?assert(lists:member({{not_found, missing}, {1, Rev1}}, Docs)),
+
+    % Make sure arbitrary made-up revisions are accepted
+    % and reported as missing
+    MissingRevs = [{5, fabric2_util:uuid()}, {1, fabric2_util:uuid()}],
+    {ok, NFMissing} = fabric2_db:open_doc_revs(Db, DocId, MissingRevs, []),
+    ?assertEqual(2, length(NFMissing)),
+    lists:foreach(fun(MR) ->
+        ?assert(lists:member({{not_found, missing}, MR}, NFMissing))
+    end, MissingRevs).
+
+
+open_doc_revs_all({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, []),
+    ?assert(length(Docs) == 2),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+open_doc_revs_latest({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
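+    % The latest option upgrades a requested rev to the leaf
+    % revisions that descend from it.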
+    Opts = [latest],
+    {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], Opts),
+    ?assertEqual(Doc1, Doc3),
+
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, [{1, Rev1}], Opts),
+    ?assert(length(Docs) == 2),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+get_missing_revs_basic({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    % Check that we can find all revisions
+    AllRevs = [{1, Rev1}, {2, Rev2}, {2, Rev3}],
+    ?assertEqual(
+            {ok, []},
+            fabric2_db:get_missing_revs(Db, [{DocId, AllRevs}])
+        ),
+
+    % Check that a missing revision is found with no possible ancestors
+    MissingRev = {2, fabric2_util:uuid()},
+    ?assertEqual(
+            {ok, [{DocId, [MissingRev], []}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev]}])
+        ),
+
+    % Check that only a missing rev is returned
+    ?assertEqual(
+            {ok, [{DocId, [MissingRev], []}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev | AllRevs]}])
+        ),
+
+    % Check that we can find possible ancestors
+    MissingWithAncestors = {4, fabric2_util:uuid()},
+    PossibleAncestors = [{2, Rev2}, {2, Rev3}],
+    ?assertEqual(
+            {ok, [{DocId, [MissingWithAncestors], PossibleAncestors}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingWithAncestors]}])
+        ).
+
+
+get_missing_revs_on_missing_doc({Db, _}) ->
+    Revs = lists:sort([
+            couch_doc:rev_to_str({1, fabric2_util:uuid()}),
+            couch_doc:rev_to_str({2, fabric2_util:uuid()}),
+            couch_doc:rev_to_str({800, fabric2_util:uuid()})
+        ]),
+    DocId = fabric2_util:uuid(),
+    {ok, Resp} = fabric2_db:get_missing_revs(Db, [{DocId, Revs}]),
+    ?assertMatch([{DocId, [_ | _], []}], Resp),
+    [{DocId, Missing, _}] = Resp,
+    MissingStrs = [couch_doc:rev_to_str(Rev) || Rev <- Missing],
+    ?assertEqual(Revs, lists:sort(MissingStrs)).
+
+
+open_missing_local_doc({Db, _}) ->
+    ?assertEqual(
+            {not_found, missing},
+            fabric2_db:open_doc(Db, <<"_local/foo">>, [])
+        ).
+
+
+create_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc2).
+
+
+update_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        body = {[{<<"whiz">>, <<"bang">>}]}
+    },
+    ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc2)),
+    {ok, Doc3} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+    ?assertEqual(Doc2#doc{revs = {0, [<<"2">>]}}, Doc3).
+
+
+delete_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        deleted = true,
+        body = {[]}
+    },
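+    % Deleting a local doc removes it entirely; the returned
+    % rev is always "0".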
+    ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+    ?assertEqual(
+            {not_found, missing},
+            fabric2_db:open_doc(Db, LDocId)
+        ).
+
+
+recreate_local_doc({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        deleted = true,
+        body = {[]}
+    },
+    ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+    ?assertEqual(
+            {not_found, missing},
+            fabric2_db:open_doc(Db, LDocId)
+        ),
+
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc3} = fabric2_db:open_doc(Db, LDocId),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc3).
+
+
+create_local_doc_bad_rev({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, [<<"not a number">>]}
+    },
+    ?assertThrow(
+            {error, <<"Invalid rev format">>},
+            fabric2_db:update_doc(Db, Doc1)
+        ),
+
+    Doc2 = Doc1#doc{
+        revs = bad_bad_rev_roy_brown
+    },
+    ?assertThrow(
+            {error, <<"Invalid rev format">>},
+            fabric2_db:update_doc(Db, Doc2)
+        ).
+
+
+create_local_doc_random_rev({Db, _}) ->
+    % Local docs don't care what rev is passed in
+    % as long as it's a number.
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, [<<"42">>]},
+        body = {[{<<"state">>, 1}]}
+    },
+    ?assertEqual({ok, {0, <<"43">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc2} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"43">>]}}, Doc2),
+
+    Doc3 = Doc1#doc{
+        revs = {0, [<<"1234567890">>]},
+        body = {[{<<"state">>, 2}]}
+    },
+    ?assertEqual({ok, {0, <<"1234567891">>}}, fabric2_db:update_doc(Db, Doc3)),
+    {ok, Doc4} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc3#doc{revs = {0, [<<"1234567891">>]}}, Doc4),
+
+    Doc5 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc5)),
+    {ok, Doc6} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc5#doc{revs = {0, [<<"2">>]}}, Doc6).
diff --git a/src/fabric/test/fabric2_doc_fold_tests.erl b/src/fabric/test/fabric2_doc_fold_tests.erl
new file mode 100644
index 0000000..caa5f92
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_fold_tests.erl
@@ -0,0 +1,209 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 50).
+
+
+doc_fold_test_() ->
+    {
+        "Test document fold operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun fold_docs_basic/1,
+                fun fold_docs_rev/1,
+                fun fold_docs_with_start_key/1,
+                fun fold_docs_with_end_key/1,
+                fun fold_docs_with_both_keys_the_same/1,
+                fun fold_docs_with_different_keys/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    DocIdRevs = lists:map(fun(Val) ->
+        DocId = fabric2_util:uuid(),
+        Doc = #doc{
+            id = DocId,
+            body = {[{<<"value">>, Val}]}
+        },
+        {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
+        {DocId, couch_doc:rev_to_str(Rev)}
+    end, lists:seq(1, ?DOC_COUNT)),
+    {Db, lists:sort(DocIdRevs), Ctx}.
+
+
+cleanup({Db, _DocIdRevs, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+fold_docs_basic({Db, DocIdRevs, _}) ->
+    {ok, {?DOC_COUNT, Rows}} = fabric2_db:fold_docs(Db, fun fold_fun/2, []),
+    ?assertEqual(DocIdRevs, lists:reverse(Rows)).
+
+
+fold_docs_rev({Db, DocIdRevs, _}) ->
+    Opts = [{dir, rev}],
+    {ok, {?DOC_COUNT, Rows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocIdRevs, Rows).
+
+
+fold_docs_with_start_key({Db, DocIdRevs, _}) ->
+    {StartKey, _} = hd(DocIdRevs),
+    Opts = [{start_key, StartKey}],
+    {ok, {?DOC_COUNT, Rows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocIdRevs, lists:reverse(Rows)),
+    if length(DocIdRevs) == 1 -> ok; true ->
+        fold_docs_with_start_key({Db, tl(DocIdRevs), nil})
+    end.
+
+
+fold_docs_with_end_key({Db, DocIdRevs, _}) ->
+    RevDocIdRevs = lists:reverse(DocIdRevs),
+    {EndKey, _} = hd(RevDocIdRevs),
+    Opts = [{end_key, EndKey}],
+    {ok, {?DOC_COUNT, Rows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(RevDocIdRevs, Rows),
+    if length(DocIdRevs) == 1 -> ok; true ->
+        fold_docs_with_end_key({Db, lists:reverse(tl(RevDocIdRevs)), nil})
+    end.
+
+
+fold_docs_with_both_keys_the_same({Db, DocIdRevs, _}) ->
+    lists:foreach(fun({DocId, _} = Row) ->
+        check_all_combos(Db, DocId, DocId, [Row])
+    end, DocIdRevs).
+
+
+fold_docs_with_different_keys({Db, DocIdRevs, _}) ->
+    lists:foreach(fun(_) ->
+        {StartKey, EndKey, Rows} = pick_range(DocIdRevs),
+        check_all_combos(Db, StartKey, EndKey, Rows)
+    end, lists:seq(1, 500)).
+
+
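+% Run the same key range through all four combinations of direction and
+% end-key inclusivity; make_opts swaps start and end keys for dir=rev.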
+check_all_combos(Db, StartKey, EndKey, Rows) ->
+    Opts1 = make_opts(fwd, StartKey, EndKey, true),
+    {ok, {?DOC_COUNT, Rows1}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+    ?assertEqual(lists:reverse(Rows), Rows1),
+
+    Opts2 = make_opts(fwd, StartKey, EndKey, false),
+    {ok, {?DOC_COUNT, Rows2}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+    Expect2 = if EndKey == undefined -> lists:reverse(Rows); true ->
+        lists:reverse(all_but_last(Rows))
+    end,
+    ?assertEqual(Expect2, Rows2),
+
+    Opts3 = make_opts(rev, StartKey, EndKey, true),
+    {ok, {?DOC_COUNT, Rows3}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts3),
+    ?assertEqual(Rows, Rows3),
+
+    Opts4 = make_opts(rev, StartKey, EndKey, false),
+    {ok, {?DOC_COUNT, Rows4}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts4),
+    Expect4 = if StartKey == undefined -> Rows; true ->
+        tl(Rows)
+    end,
+    ?assertEqual(Expect4, Rows4).
+
+
+make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
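+    % fwd is the default direction, so pass it explicitly only half
+    % the time to exercise both the explicit and implicit cases.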
+    DirOpts = case rand:uniform() =< 0.50 of
+        true -> [{dir, fwd}];
+        false -> []
+    end,
+    StartOpts = case StartKey of
+        undefined -> [];
+        <<_/binary>> -> [{start_key, StartKey}]
+    end,
+    EndOpts = case EndKey of
+        undefined -> [];
+        <<_/binary>> when InclusiveEnd -> [{end_key, EndKey}];
+        <<_/binary>> -> [{end_key_gt, EndKey}]
+    end,
+    DirOpts ++ StartOpts ++ EndOpts;
+make_opts(rev, StartKey, EndKey, InclusiveEnd) ->
+    BaseOpts = make_opts(fwd, EndKey, StartKey, InclusiveEnd),
+    [{dir, rev}] ++ (BaseOpts -- [{dir, fwd}]).
+
+
+all_but_last([]) ->
+    [];
+all_but_last([_]) ->
+    [];
+all_but_last(Rows) ->
+    lists:sublist(Rows, length(Rows) - 1).
+
+
+pick_range(DocIdRevs) ->
+    {StartKey, StartRow, RestRows} = pick_start_key(DocIdRevs),
+    {EndKey, EndRow, RowsBetween} = pick_end_key(RestRows),
+    {StartKey, EndKey, StartRow ++ RowsBetween ++ EndRow}.
+
+
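+% Both pickers return undefined 10% of the time to exercise open-ended ranges.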
+pick_start_key(Rows) ->
+    case rand:uniform() =< 0.1 of
+        true ->
+            {undefined, [], Rows};
+        false ->
+            Idx = rand:uniform(length(Rows)),
+            {DocId, _} = Row = lists:nth(Idx, Rows),
+            {DocId, [Row], lists:nthtail(Idx, Rows)}
+    end.
+
+
+pick_end_key([]) ->
+    {undefined, [], []};
+
+pick_end_key(Rows) ->
+    case rand:uniform() =< 0.1 of
+        true ->
+            {undefined, [], Rows};
+        false ->
+            Idx = rand:uniform(length(Rows)),
+            {DocId, _} = Row = lists:nth(Idx, Rows),
+            Tail = lists:nthtail(Idx, Rows),
+            {DocId, [Row], Rows -- [Row | Tail]}
+    end.
+
+
+fold_fun({meta, Meta}, _Acc) ->
+    Total = fabric2_util:get_value(total, Meta),
+    {ok, {Total, []}};
+fold_fun({row, Row}, {Total, Rows}) ->
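+    % For _all_docs the row key equals the doc id; matching
+    % RowId twice asserts exactly that.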
+    RowId = fabric2_util:get_value(id, Row),
+    RowId = fabric2_util:get_value(key, Row),
+    RowRev = fabric2_util:get_value(value, Row),
+    {ok, {Total, [{RowId, RowRev} | Rows]}};
+fold_fun(complete, Acc) ->
+    {ok, Acc}.
diff --git a/src/fabric/test/fabric2_fdb_tx_retry_tests.erl b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
new file mode 100644
index 0000000..c924ce5
--- /dev/null
+++ b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
@@ -0,0 +1,178 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb_tx_retry_tests).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+meck_setup() ->
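+    % Mock erlfdb and the txid tracker, then swap fabric's db handle for a
+    % sentinel so no test can accidentally reach a real FoundationDB.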
+    meck:new(erlfdb),
+    meck:new(fabric2_txids),
+    EnvSt = case application:get_env(fabric, db) of
+        {ok, Db} -> {ok, Db};
+        undefined -> undefined
+    end,
+    application:set_env(fabric, db, not_a_real_db),
+    EnvSt.
+
+
+meck_cleanup(EnvSt) ->
+    case EnvSt of
+        {ok, Db} -> application:set_env(fabric, db, Db);
+        undefined -> application:unset_env(fabric, db)
+    end,
+    meck:unload().
+
+
+retry_test_() ->
+    {
+        foreach,
+        fun meck_setup/0,
+        fun meck_cleanup/1,
+        [
+            ?TDEF(read_only_no_retry),
+            ?TDEF(read_only_commit_unknown_result),
+            ?TDEF(run_on_first_try),
+            ?TDEF(retry_when_commit_conflict),
+            ?TDEF(retry_when_txid_not_found),
+            ?TDEF(no_retry_when_txid_found)
+        ]
+    }.
+
+
+read_only_no_retry() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 0 end),
+    meck:expect(erlfdb, get, fun(_, _) -> foo end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+    meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(foo, erlfdb:get(Tx, bar)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+read_only_commit_unknown_result() ->
+    % Not 100% certain that this would ever actually
+    % happen in the wild but might as well test that
+    % we don't blow up if it does.
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
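+    % 1021 is FoundationDB's commit_unknown_result error code.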
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, _) -> foo end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+    meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(foo, erlfdb:get(Tx, bar)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+run_on_first_try() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> undefined end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, bang)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_commit_conflict() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
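+    % 1020 is FoundationDB's not_committed (transaction conflict) error code.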
+    meck:expect(erlfdb, get_last_error, fun() -> 1020 end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_txid_not_found() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+    meck:expect(erlfdb, wait, fun(future) -> not_found end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
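+    % Simulate a prior attempt whose txid never made it into fdb: the
+    % transaction body must run again rather than reuse a stashed result.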
+    put('$fabric_tx_id', <<"a txid">>),
+    put('$fabric_tx_result', not_the_correct_result),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+        yay_not_skipped
+    end),
+
+    ?assertEqual(yay_not_skipped, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+no_retry_when_txid_found() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+    meck:expect(erlfdb, wait, fun(future) -> <<>> end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    put('$fabric_tx_id', <<"a txid">>),
+    put('$fabric_tx_result', did_not_run),
+
+    Result = fabric2_fdb:transactional(fun(_Tx) ->
+        ?assert(false),
+        did_run
+    end),
+
+    ?assertEqual(did_not_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
\ No newline at end of file
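
The tests above exercise the commit_unknown_result recovery path (FDB
error 1021) in fabric2_fdb:transactional/1. A minimal sketch of the
decision they encode, using simplified, hypothetical names
(maybe_rerun/2 is illustrative, not the shipped implementation):

    % On commit_unknown_result we cannot tell whether the previous attempt
    % committed, so a transaction id written alongside the commit acts as
    % a marker: if the key still exists the commit went through and the
    % cached result is returned; if it was never written or is gone,
    % re-running the user fun is safe.
    maybe_rerun(Tx, UserFun) ->
        case get('$fabric_tx_id') of
            undefined ->
                UserFun(Tx);
            TxId ->
                case erlfdb:wait(erlfdb:get(Tx, TxId)) of
                    not_found -> UserFun(Tx);          % retry is safe
                    <<>> -> get('$fabric_tx_result')   % already committed
                end
        end.
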
diff --git a/src/fabric/test/fabric2_trace_db_create_tests.erl b/src/fabric/test/fabric2_trace_db_create_tests.erl
new file mode 100644
index 0000000..09cc863
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_create_tests.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            [
+                fun create_db/0
+            ]
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    test_util:start_couch([fabric]).
+
+
+cleanup(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+
+create_db() ->
+    put(erlfdb_trace, <<"create db">>),
+    {ok, _Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_db_delete_tests.erl b/src/fabric/test/fabric2_trace_db_delete_tests.erl
new file mode 100644
index 0000000..ddbb2c8
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_delete_tests.erl
@@ -0,0 +1,49 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_delete_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun delete_db/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({_Db, Ctx}) ->
+    test_util:stop_couch(Ctx).
+
+
+delete_db({Db, _}) ->
+    put(erlfdb_trace, <<"delete db">>),
+    fabric2_server:remove(fabric2_db:name(Db)),
+    ok = fabric2_db:delete(fabric2_db:name(Db), []).
diff --git a/src/fabric/test/fabric2_trace_db_open_tests.erl b/src/fabric/test/fabric2_trace_db_open_tests.erl
new file mode 100644
index 0000000..71e3301
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_open_tests.erl
@@ -0,0 +1,50 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_open_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun open_db/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+open_db({Db, _}) ->
+    put(erlfdb_trace, <<"open db">>),
+    fabric2_server:remove(fabric2_db:name(Db)),
+    {ok, _Db} = fabric2_db:open(fabric2_db:name(Db), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_doc_create_tests.erl b/src/fabric/test/fabric2_trace_doc_create_tests.erl
new file mode 100644
index 0000000..1e0b47c
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_doc_create_tests.erl
@@ -0,0 +1,86 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_doc_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+doc_crud_test_() ->
+    {
+        "Test document CRUD operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun create_new_doc/1,
+                fun create_two_docs/1,
+                fun create_50_docs/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+create_new_doc({Db, _}) ->
+    put(erlfdb_trace, <<"one doc">>),
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc).
+
+
+create_two_docs({Db, _}) ->
+    put(erlfdb_trace, <<"two docs">>),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"bam">>, <<"baz">>}]}
+    },
+    Doc2 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"bang">>, <<"bargle">>}]}
+    },
+    {ok, _} = fabric2_db:update_docs(Db, [Doc1, Doc2]).
+
+
+create_50_docs({Db, _}) ->
+    lists:foreach(fun(_) ->
+        spawn_monitor(fun() ->
+            Name = io_lib:format("50 docs : ~w", [self()]),
+            put(erlfdb_trace, iolist_to_binary(Name)),
+            Docs = lists:map(fun(Val) ->
+                #doc{
+                    id = fabric2_util:uuid(),
+                    body = {[{<<"value">>, Val}]}
+                }
+            end, lists:seq(1, 50)),
+            {ok, _} = fabric2_db:update_docs(Db, Docs)
+        end)
+    end, lists:seq(1, 5)),
+    lists:foreach(fun(_) ->
+        receive {'DOWN', _, _, _, _} -> ok end
+    end, lists:seq(1, 5)).
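
These trace tests label FoundationDB transaction traces per process by
setting the erlfdb_trace key in the process dictionary before issuing
operations. A minimal sketch of that pattern, assuming only the
convention shown in the tests above (the label is arbitrary):

    % Label subsequent FDB transactions issued from this process,
    % then perform the operation to be traced.
    put(erlfdb_trace, <<"my operation">>),
    {ok, Db} = fabric2_db:create(<<"my-db">>, [{user_ctx, ?ADMIN_USER}]).
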


[couchdb] 05/34: Update ddoc_cache to use fabric2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 9083da6c46dedc0c63d597cd12c8ea13bd9eb304
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:36:02 2019 -0500

    Update ddoc_cache to use fabric2
---
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     | 2 +-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index 5248469..7c3dc67 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -33,7 +33,7 @@ ddocid({_, DDocId}) ->
 
 
 recover({DbName, DDocId}) ->
-    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
+    fabric2_db:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
 
 
 insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 868fa77..38445af 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -34,7 +34,7 @@ ddocid({_, DDocId, _}) ->
 
 recover({DbName, DDocId, Rev}) ->
     Opts = [ejson_body, ?ADMIN_CTX],
-    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
+    {ok, [Resp]} = fabric2_db:open_doc_revs(DbName, DDocId, [Rev], Opts),
     Resp.
 
 


[couchdb] 09/34: Fix fabric2_txids:terminate/2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ad31f51f641af05241dd833a9cf9b93d726505c0
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 7 12:49:49 2019 -0500

    Fix fabric2_txids:terminate/2
---
 src/fabric/src/fabric2_txids.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/fabric/src/fabric2_txids.erl b/src/fabric/src/fabric2_txids.erl
index bbb8bdf..ba42741 100644
--- a/src/fabric/src/fabric2_txids.erl
+++ b/src/fabric/src/fabric2_txids.erl
@@ -75,7 +75,7 @@ terminate(_, #{txids := TxIds}) ->
         fabric2_fdb:transactional(fun(Tx) ->
             lists:foreach(fun(TxId) ->
                 erlfdb:clear(Tx, TxId)
-            end)
+            end, TxIds)
         end)
     end,
     ok.


[couchdb] 23/34: Fix `COPY` method

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit bf9fa0acb4a9a1c9921c0453bff025abe38b5b24
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Thu Jul 11 15:03:12 2019 -0500

    Fix `COPY` method
    
    A simple function change: use `fabric2_db:name/1` in place of `couch_db:name/1`
---
 src/chttpd/src/chttpd_db.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index abdd825..a252041 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -1180,7 +1180,7 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
         HttpCode = 202
     end,
     % respond
-    DbName = couch_db:name(Db),
+    DbName = fabric2_db:name(Db),
     {PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
     Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)),
     send_json(Req, HttpCode,


[couchdb] 10/34: Fix revision generation on attachment upload

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 1876962db1e7c1c61696dda241c35c071215244d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Fri Jun 7 15:02:19 2019 -0500

    Fix revision generation on attachment upload
    
    When uploading an attachment we hadn't yet flushed its data to
    FoundationDB, which left the md5 empty. The `new_revid` algorithm then
    concluded it was dealing with an old-style attachment and generated a
    random number for the new revision.
    
    This fix flushes attachments earlier in the process of updating a
    document.
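    
    A rough sketch of the deterministic path this restores, mirroring the
    new code in the diff below (once every attachment is flushed, each md5
    is non-empty and the revision is a hash of the document state rather
    than a random number):
    
        SigTerm = [Deleted, OldStart, OldRev, Body, AttSigInfo],
        Rev = couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}])).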
---
 src/fabric/src/fabric2_db.erl  | 103 +++++++++++++++++++++++++++--------------
 src/fabric/src/fabric2_fdb.erl |   9 +---
 2 files changed, 70 insertions(+), 42 deletions(-)

diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 02a18fa..acd473f 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -120,7 +120,7 @@
     %% validate_dbname/1,
 
     %% make_doc/5,
-    new_revid/1
+    new_revid/2
 ]).
 
 
@@ -604,9 +604,7 @@ read_attachment(Db, DocId, AttId) ->
 
 write_attachment(Db, DocId, Att) ->
     Data = couch_att:fetch(data, Att),
-    {ok, AttId} = fabric2_fdb:transactional(Db, fun(TxDb) ->
-        fabric2_fdb:write_attachment(TxDb, DocId, Data)
-    end),
+    {ok, AttId} = fabric2_fdb:write_attachment(Db, DocId, Data),
     couch_att:store(data, {loc, Db, DocId, AttId}, Att).
 
 
@@ -630,33 +628,69 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
     end).
 
 
-new_revid(Doc) ->
+maybe_add_sys_db_callbacks(Db) ->
+    IsReplicatorDb = fabric2_util:dbname_ends_with(Db, <<"_replicator">>),
+
+    CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
+    IsCfgUsersDb = fabric2_util:dbname_ends_with(Db, ?l2b(CfgUsersSuffix)),
+    IsGlobalUsersDb = fabric2_util:dbname_ends_with(Db, <<"_users">>),
+    IsUsersDb = IsCfgUsersDb orelse IsGlobalUsersDb,
+
+    {BDU, ADR} = if
+        IsReplicatorDb ->
+            {
+                fun couch_replicator_docs:before_doc_update/3,
+                fun couch_replicator_docs:after_doc_read/2
+            };
+        IsUsersDb ->
+            {
+                fun fabric2_users_db:before_doc_update/3,
+                fun fabric2_users_db:after_doc_read/2
+            };
+        true ->
+            {undefined, undefined}
+    end,
+
+    Db#{
+        before_doc_update := BDU,
+        after_doc_read := ADR
+    }.
+
+
+new_revid(Db, Doc) ->
     #doc{
+        id = DocId,
         body = Body,
         revs = {OldStart, OldRevs},
         atts = Atts,
         deleted = Deleted
     } = Doc,
 
-    DigestedAtts = lists:foldl(fun(Att, Acc) ->
-        [N, T, M] = couch_att:fetch([name, type, md5], Att),
-        case M == <<>> of
-            true -> Acc;
-            false -> [{N, T, M} | Acc]
+    {NewAtts, AttSigInfo} = lists:mapfoldl(fun(Att, Acc) ->
+        [Name, Type, Data, Md5] = couch_att:fetch([name, type, data, md5], Att),
+        case Data of
+            {loc, _, _, _} ->
+                {Att, [{Name, Type, Md5} | Acc]};
+            _ ->
+                Att1 = couch_att:flush(Db, DocId, Att),
+                Att2 = couch_att:store(revpos, OldStart + 1, Att1),
+                {Att2, [{Name, Type, couch_att:fetch(md5, Att2)} | Acc]}
         end
     end, [], Atts),
 
-    Rev = case DigestedAtts of
-        Atts2 when length(Atts) =/= length(Atts2) ->
-            % We must have old style non-md5 attachments
-            list_to_binary(integer_to_list(couch_util:rand32()));
-        Atts2 ->
+    Rev = case length(Atts) == length(AttSigInfo) of
+        true ->
             OldRev = case OldRevs of [] -> 0; [OldRev0 | _] -> OldRev0 end,
-            SigTerm = [Deleted, OldStart, OldRev, Body, Atts2],
-            couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]))
+            SigTerm = [Deleted, OldStart, OldRev, Body, AttSigInfo],
+            couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]));
+        false ->
+            erlang:error(missing_att_info)
     end,
 
-    Doc#doc{revs = {OldStart + 1, [Rev | OldRevs]}}.
+    Doc#doc{
+        revs = {OldStart + 1, [Rev | OldRevs]},
+        atts = NewAtts
+    }.
 
 
 maybe_set_user_ctx(Db, Options) ->
@@ -970,12 +1004,11 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
     % Validate the doc update and create the
     % new revinfo map
     Doc2 = prep_and_validate(Db, Doc1, Target),
+
     #doc{
         deleted = NewDeleted,
         revs = {NewRevPos, [NewRev | NewRevPath]}
-    } = Doc3 = new_revid(Doc2),
-
-    Doc4 = update_attachment_revpos(Doc3),
+    } = Doc3 = new_revid(Db, Doc2),
 
     NewRevInfo = #{
         winner => undefined,
@@ -988,9 +1021,9 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
 
     % Gather the list of possible winning revisions
     Possible = case Target == Winner of
-        true when not Doc4#doc.deleted ->
+        true when not Doc3#doc.deleted ->
             [NewRevInfo];
-        true when Doc4#doc.deleted ->
+        true when Doc3#doc.deleted ->
             case SecondPlace of
                 #{} -> [NewRevInfo, SecondPlace];
                 not_found -> [NewRevInfo]
@@ -1015,7 +1048,7 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
 
     ok = fabric2_fdb:write_doc(
             Db,
-            Doc4,
+            Doc3,
             NewWinner,
             Winner,
             ToUpdate,
@@ -1076,6 +1109,7 @@ update_doc_replicated(Db, Doc0, _Options) ->
     LeafPath = get_leaf_path(RevPos, Rev, AllLeafsFull),
     PrevRevInfo = find_prev_revinfo(RevPos, LeafPath),
     Doc2 = prep_and_validate(Db, Doc1, PrevRevInfo),
+    Doc3 = flush_doc_atts(Db, Doc2),
 
     % Possible winners are the previous winner and
     % the new DocRevInfo
@@ -1097,7 +1131,7 @@ update_doc_replicated(Db, Doc0, _Options) ->
 
     ok = fabric2_fdb:write_doc(
             Db,
-            Doc2,
+            Doc3,
             NewWinner,
             Winner,
             ToUpdate,
@@ -1119,19 +1153,20 @@ update_local_doc(Db, Doc0, _Options) ->
     {ok, {0, integer_to_binary(Rev)}}.
 
 
-update_attachment_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) ->
-    Atts = lists:map(fun(Att) ->
+flush_doc_atts(Db, Doc) ->
+    #doc{
+        id = DocId,
+        atts = Atts
+    } = Doc,
+    NewAtts = lists:map(fun(Att) ->
         case couch_att:fetch(data, Att) of
-            {loc, _Db, _DocId, _AttId} ->
-                % Attachment was already on disk
+            {loc, _, _, _} ->
                 Att;
             _ ->
-                % We will write this attachment with this update
-                % so mark it with the RevPos that will be written
-                couch_att:store(revpos, RevPos, Att)
+                couch_att:flush(Db, DocId, Att)
         end
-    end, Atts0),
-    Doc#doc{atts = Atts}.
+    end, Atts),
+    Doc#doc{atts = NewAtts}.
 
 
 get_winning_rev_futures(Db, Docs) ->
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 0a4f298..788bbc6 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -924,7 +924,7 @@ doc_to_fdb(Db, #doc{} = Doc) ->
         body = Body,
         atts = Atts,
         deleted = Deleted
-    } = doc_flush_atts(Db, Doc),
+    } = Doc,
 
     Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev}, DbPrefix),
     Val = {Body, Atts, Deleted},
@@ -977,13 +977,6 @@ fdb_to_local_doc(_Db, _DocId, not_found) ->
     {not_found, missing}.
 
 
-doc_flush_atts(Db, Doc) ->
-    Atts = lists:map(fun(Att) ->
-        couch_att:flush(Db, Doc#doc.id, Att)
-    end, Doc#doc.atts),
-    Doc#doc{atts = Atts}.
-
-
 chunkify_attachment(Data) ->
     case Data of
         <<>> ->


[couchdb] 20/34: Remove tests for deprecated features.

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 40561bc83ac171f24cd9adace464112512ec08da
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Tue Jun 18 15:32:13 2019 -0500

    Remove tests for deprecated features.
    
    Neither partitioned databases nor shard splitting will exist in a
    FoundationDB layer.
---
 test/elixir/test/partition_all_docs_test.exs    | 204 --------
 test/elixir/test/partition_crud_test.exs        | 360 -------------
 test/elixir/test/partition_ddoc_test.exs        | 179 -------
 test/elixir/test/partition_design_docs_test.exs |  16 -
 test/elixir/test/partition_helpers.exs          |  76 ---
 test/elixir/test/partition_mango_test.exs       | 663 ------------------------
 test/elixir/test/partition_size_limit_test.exs  | 305 -----------
 test/elixir/test/partition_size_test.exs        | 361 -------------
 test/elixir/test/partition_view_test.exs        | 374 -------------
 test/elixir/test/partition_view_update_test.exs | 160 ------
 test/elixir/test/reshard_all_docs_test.exs      |  79 ---
 test/elixir/test/reshard_basic_test.exs         | 174 -------
 test/elixir/test/reshard_changes_feed.exs       |  81 ---
 test/elixir/test/reshard_helpers.exs            | 114 ----
 test/elixir/test/test_helper.exs                |   2 -
 15 files changed, 3148 deletions(-)

diff --git a/test/elixir/test/partition_all_docs_test.exs b/test/elixir/test/partition_all_docs_test.exs
deleted file mode 100644
index 816a8d6..0000000
--- a/test/elixir/test/partition_all_docs_test.exs
+++ /dev/null
@@ -1,204 +0,0 @@
-defmodule PartitionAllDocsTest do
-  use CouchTestCase
-  import PartitionHelpers
-
-  @moduledoc """
-  Test Partition functionality for all_docs
-  """
-
-  setup_all do
-    db_name = random_db_name()
-    {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
-    on_exit(fn -> delete_db(db_name) end)
-
-    create_partition_docs(db_name)
-
-    {:ok, [db_name: db_name]}
-  end
-
-  test "all_docs with partitioned:true returns partitioned fields", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["foo"]
-
-    url = "/#{db_name}/_partition/bar/_all_docs"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["bar"]
-  end
-
-  test "partition all_docs errors with incorrect partition supplied", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/_bar/_all_docs"
-    resp = Couch.get(url)
-    assert resp.status_code == 400
-
-    url = "/#{db_name}/_partition//_all_docs"
-    resp = Couch.get(url)
-    assert resp.status_code == 400
-  end
-
-  test "partitioned _all_docs works with startkey, endkey range", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url, query: %{start_key: "\"foo:12\"", end_key: "\"foo:2\""})
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert Enum.dedup(partitions) == ["foo"]
-  end
-
-  test "partitioned _all_docs works with keys", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.post(url, body: %{keys: ["foo:2", "foo:4", "foo:6"]})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 3
-    assert ids == ["foo:2", "foo:4", "foo:6"]
-  end
-
-  test "partition _all_docs works with limit", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url, query: %{limit: 5})
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert Enum.dedup(partitions) == ["foo"]
-  end
-
-  test "partition _all_docs with descending", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url, query: %{descending: true, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:98", "foo:96", "foo:94", "foo:92", "foo:90"]
-
-    resp = Couch.get(url, query: %{descending: false, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:10", "foo:100", "foo:12", "foo:14", "foo:16"]
-  end
-
-  test "partition _all_docs with skip", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url, query: %{skip: 5, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:18", "foo:2", "foo:20", "foo:22", "foo:24"]
-  end
-
-  test "partition _all_docs with key", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-    resp = Couch.get(url, query: %{key: "\"foo:22\""})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 1
-    assert ids == ["foo:22"]
-  end
-
-  test "partition all docs can set query limits", context do
-    set_config({"query_server_config", "partition_query_limit", "2000"})
-
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_partition_ddoc(db_name)
-
-    url = "/#{db_name}/_partition/foo/_all_docs"
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 20
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 50
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2000
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 50
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2001
-        }
-      )
-
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/Limit is too large/, reason)
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2000,
-          skip: 25
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 25
-  end
-
-  # This test is timing based so it could be a little flaky.
-  # If that turns out to be the case we should probably just skip it
-  @tag :pending
-  test "partition _all_docs with timeout", context do
-    set_config({"fabric", "partition_view_timeout", "1"})
-
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-
-    retry_until(fn ->
-      url = "/#{db_name}/_partition/foo/_all_docs"
-
-      case Couch.get(url) do
-        %{:body => %{"reason" => reason}} ->
-          Regex.match?(~r/not be processed in a reasonable amount of time./, reason)
-
-        _ ->
-          false
-      end
-    end)
-  end
-end
diff --git a/test/elixir/test/partition_crud_test.exs b/test/elixir/test/partition_crud_test.exs
deleted file mode 100644
index aea8069..0000000
--- a/test/elixir/test/partition_crud_test.exs
+++ /dev/null
@@ -1,360 +0,0 @@
-defmodule PartitionCrudTest do
-  use CouchTestCase
-
-  @tag :with_partitioned_db
-  test "Sets partition in db info", context do
-    db_name = context[:db_name]
-    resp = Couch.get("/#{db_name}")
-    %{body: body} = resp
-    assert body["props"] == %{"partitioned" => true}
-  end
-
-  @tag :with_partitioned_db
-  test "PUT and GET document", context do
-    db_name = context[:db_name]
-    id = "my-partition:doc"
-    url = "/#{db_name}/#{id}"
-
-    resp = Couch.put(url, body: %{partitioned_doc: true})
-    %{body: doc} = resp
-    assert resp.status_code == 201
-    assert doc["id"] == id
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-
-    %{body: doc} = resp
-    assert doc["_id"] == id
-  end
-
-  @tag :with_partitioned_db
-  test "PUT fails if a partition key is not supplied", context do
-    db_name = context[:db_name]
-    id = "not-partitioned"
-    url = "/#{db_name}/#{id}"
-
-    resp = Couch.put(url, body: %{partitioned_doc: false})
-    assert resp.status_code == 400
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Doc id must be of form partition:id"
-    }
-
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "PUT fails for partitions with _", context do
-    db_name = context[:db_name]
-    id = "_bad:partitioned"
-    url = "/#{db_name}/#{id}"
-
-    resp = Couch.put(url, body: %{partitioned_doc: false})
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Only reserved document ids may start with underscore."
-    }
-
-    assert resp.status_code == 400
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "PUT fails for bad partitions", context do
-    db_name = context[:db_name]
-    id = "bad:"
-    url = "/#{db_name}/#{id}"
-
-    resp = Couch.put(url, body: %{partitioned_doc: false})
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Document id must not be empty"
-    }
-
-    assert resp.status_code == 400
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "POST and GET document", context do
-    db_name = context[:db_name]
-    id = "my-partition-post:doc"
-    url = "/#{db_name}"
-
-    resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
-    assert resp.status_code == 201
-
-    resp = Couch.get("#{url}/#{id}")
-    assert resp.status_code == 200
-
-    %{body: doc} = resp
-    assert doc["_id"] == id
-  end
-
-  @tag :with_partitioned_db
-  test "POST and _bulk_get document", context do
-    db_name = context[:db_name]
-    id = "my-partition-post:doc"
-    url = "/#{db_name}"
-
-    resp = Couch.post(url, body: %{_id: id, partitioned_doc: true})
-    assert resp.status_code == 201
-
-    resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
-    assert resp.status_code == 200
-
-    %{body: body} = resp
-
-    assert %{
-             "results" => [
-               %{
-                 "docs" => [
-                   %{
-                     "ok" => %{
-                       "_id" => "my-partition-post:doc",
-                       "_rev" => "1-43d86359741cb629c0953a2beb6e9d7a",
-                       "partitioned_doc" => true
-                     }
-                   }
-                 ],
-                 "id" => "my-partition-post:doc"
-               }
-             ]
-           } == body
-  end
-
-  @tag :with_partitioned_db
-  test "_bulk_get bad partitioned document", context do
-    db_name = context[:db_name]
-    id = "my-partition-post"
-    url = "/#{db_name}"
-
-    resp = Couch.post("#{url}/_bulk_get", body: %{docs: [%{id: id}]})
-    assert resp.status_code == 200
-    %{:body => body} = resp
-
-    assert %{
-             "results" => [
-               %{
-                 "docs" => [
-                   %{
-                     "error" => %{
-                       "error" => "illegal_docid",
-                       "id" => "my-partition-post",
-                       "reason" => "Doc id must be of form partition:id",
-                       "rev" => :null
-                     }
-                   }
-                 ],
-                 "id" => "my-partition-post"
-               }
-             ]
-           } == body
-  end
-
-  @tag :with_partitioned_db
-  test "POST fails if a partition key is not supplied", context do
-    db_name = context[:db_name]
-    id = "not-partitioned-post"
-    url = "/#{db_name}"
-
-    resp = Couch.post(url, body: %{_id: id, partitited_doc: false})
-    assert resp.status_code == 400
-  end
-
-  @tag :with_partitioned_db
-  test "_bulk_docs saves docs with partition key", context do
-    db_name = context[:db_name]
-
-    docs = [
-      %{_id: "foo:1"},
-      %{_id: "bar:1"}
-    ]
-
-    url = "/#{db_name}"
-    resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
-    assert resp.status_code == 201
-
-    resp = Couch.get("#{url}/foo:1")
-    assert resp.status_code == 200
-
-    resp = Couch.get("#{url}/bar:1")
-    assert resp.status_code == 200
-  end
-
-  @tag :with_partitioned_db
-  test "_bulk_docs errors with missing partition key", context do
-    db_name = context[:db_name]
-
-    docs = [
-      %{_id: "foo1"}
-    ]
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Doc id must be of form partition:id"
-    }
-
-    url = "/#{db_name}"
-    resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
-    assert resp.status_code == 400
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "_bulk_docs errors with bad partition key", context do
-    db_name = context[:db_name]
-
-    docs = [
-      %{_id: "_foo:1"}
-    ]
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Only reserved document ids may start with underscore."
-    }
-
-    url = "/#{db_name}"
-    resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
-    assert resp.status_code == 400
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "_bulk_docs errors with bad doc key", context do
-    db_name = context[:db_name]
-
-    docs = [
-      %{_id: "foo:"}
-    ]
-
-    error = %{
-      "error" => "illegal_docid",
-      "reason" => "Document id must not be empty"
-    }
-
-    url = "/#{db_name}"
-    resp = Couch.post("#{url}/_bulk_docs", body: %{:docs => docs})
-    assert resp.status_code == 400
-    assert Map.get(resp, :body) == error
-  end
-
-  @tag :with_partitioned_db
-  test "saves attachment with partitioned doc", context do
-    db_name = context[:db_name]
-    id = "foo:doc-with-attachment"
-
-    doc = %{
-      _id: id,
-      _attachments: %{
-        "foo.txt": %{
-          content_type: "text/plain",
-          data: Base.encode64("This is a text document to save")
-        }
-      }
-    }
-
-    resp = Couch.put("/#{db_name}/#{id}", body: doc)
-
-    assert resp.status_code == 201
-
-    resp = Couch.get("/#{db_name}/#{id}")
-    assert resp.status_code == 200
-    body = Map.get(resp, :body)
-    rev = Map.get(body, "_rev")
-
-    assert body["_attachments"] == %{
-             "foo.txt" => %{
-               "content_type" => "text/plain",
-               #  "digest" => "md5-OW2BoZAtMqs1E+fAnLpNBw==",
-               # Temp remove the digest part since the digest value 
-               # seems to be different on travis
-               "digest" => body["_attachments"]["foo.txt"]["digest"],
-               "length" => 31,
-               "revpos" => 1,
-               "stub" => true
-             }
-           }
-
-    resp = Couch.get("/#{db_name}/#{id}/foo.txt")
-    assert Map.get(resp, :body) == "This is a text document to save"
-
-    resp =
-      Couch.put(
-        "/#{db_name}/#{id}/bar.txt?rev=#{rev}",
-        headers: ["Content-Type": "text/plain"],
-        body: "This is another document"
-      )
-
-    assert resp.status_code == 201
-    %{:body => body} = resp
-    assert body["ok"] == true
-    assert body["id"] == id
-  end
-
-  @tag :with_partitioned_db
-  test "can purge partitioned db docs", context do
-    db_name = context[:db_name]
-
-    doc = %{
-      _id: "foo:bar",
-      value: "some value"
-    }
-
-    resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-    assert resp.status_code == 201
-    %{body: body} = resp
-    rev = body["rev"]
-
-    resp = Couch.get("/#{db_name}/foo:bar")
-    assert resp.status_code == 200
-
-    body = %{"foo:bar" => [rev]}
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
-    assert resp.status_code == 201
-
-    resp = Couch.get("/#{db_name}/foo:bar")
-    assert resp.status_code == 404
-    assert resp.body == %{"error" => "not_found", "reason" => "missing"}
-  end
-
-  @tag :with_partitioned_db
-  test "purge rejects unpartitioned docid", context do
-    db_name = context[:db_name]
-    body = %{"no_partition" => ["1-967a00dff5e02add41819138abb3284d"]}
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
-    assert resp.status_code == 400
-    %{body: body} = resp
-    assert body["error"] == "illegal_docid"
-  end
-
-  test "create database with bad `partitioned` value", _context do
-    resp = Couch.put("/bad-db?partitioned=tru")
-    assert resp.status_code == 400
-
-    assert Map.get(resp, :body) == %{
-             "error" => "bad_request",
-             "reason" => "Invalid `partitioned` parameter"
-           }
-  end
-
-  test "can create unpartitioned system db", _context do
-    Couch.delete("/_replicator")
-    resp = Couch.put("/_replicator")
-    assert resp.status_code == 201
-    assert resp.body == %{"ok" => true}
-  end
-
-  test "cannot create partitioned system db", _context do
-    Couch.delete("/_replicator")
-
-    resp = Couch.put("/_replicator?partitioned=true")
-    assert resp.status_code == 400
-
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/Cannot partition a system database/, reason)
-  end
-end
diff --git a/test/elixir/test/partition_ddoc_test.exs b/test/elixir/test/partition_ddoc_test.exs
deleted file mode 100644
index 92ecae2..0000000
--- a/test/elixir/test/partition_ddoc_test.exs
+++ /dev/null
@@ -1,179 +0,0 @@
-defmodule PartitionDDocTest do
-  use CouchTestCase
-
-  @moduledoc """
-  Test partition design doc interactions
-  """
-
-  setup do
-    db_name = random_db_name()
-    {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
-    on_exit(fn -> delete_db(db_name) end)
-
-    {:ok, [db_name: db_name]}
-  end
-
-  test "PUT /dbname/_design/foo", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-    assert resp.status_code == 201
-  end
-
-  test "PUT /dbname/_design/foo to update", context do
-    db_name = context[:db_name]
-    ddoc_id = "_design/foo"
-
-    ddoc = %{
-      _id: ddoc_id,
-      stuff: "here"
-    }
-
-    resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    ddoc = Map.put(ddoc, :_rev, body["rev"])
-    ddoc = Map.put(ddoc, :other, "attribute")
-    resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
-    assert resp.status_code == 201
-  end
-
-  test "PUT /dbname/_design/foo/readme.txt", context do
-    db_name = context[:db_name]
-    ddoc_id = "_design/foo"
-
-    ddoc = %{
-      _id: ddoc_id,
-      stuff: "here"
-    }
-
-    resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    att = "This is a readme.txt"
-
-    opts = [
-      headers: [{:"Content-Type", "text/plain"}],
-      query: [rev: body["rev"]],
-      body: att
-    ]
-
-    resp = Couch.put("/#{db_name}/#{ddoc_id}/readme.txt", opts)
-    assert resp.status_code == 201
-  end
-
-  test "DELETE /dbname/_design/foo", context do
-    db_name = context[:db_name]
-    ddoc_id = "_design/foo"
-
-    ddoc = %{
-      _id: ddoc_id,
-      stuff: "here"
-    }
-
-    resp = Couch.put("/#{db_name}/#{ddoc_id}", body: ddoc)
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    resp = Couch.delete("/#{db_name}/#{ddoc_id}", query: [rev: body["rev"]])
-    assert resp.status_code == 200
-  end
-
-  test "POST /dbname with design doc", context do
-    db_name = context[:db_name]
-    body = %{_id: "_design/foo", stuff: "here"}
-    resp = Couch.post("/#{db_name}", body: body)
-    assert resp.status_code == 201
-  end
-
-  test "POST /dbname/_bulk_docs with design doc", context do
-    db_name = context[:db_name]
-    body = %{:docs => [%{_id: "_design/foo", stuff: "here"}]}
-    resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
-    assert resp.status_code == 201
-  end
-
-  test "GET /dbname/_design/foo", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-    assert resp.status_code == 201
-
-    resp = Couch.get("/#{db_name}/_design/foo")
-    assert resp.status_code == 200
-  end
-
-  test "GET /dbname/_design/foo?rev=$rev", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    resp = Couch.get("/#{db_name}/_design/foo", query: [rev: body["rev"]])
-    assert resp.status_code == 200
-  end
-
-  test "GET /dbname/_bulk_get", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-    assert resp.status_code == 201
-
-    body = %{docs: [%{id: "_design/foo"}]}
-    resp = Couch.post("/#{db_name}/_bulk_get", body: body)
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    assert length(body["results"]) == 1
-
-    %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
-  end
-
-  test "GET /dbname/_bulk_get with rev", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    body = %{docs: [%{id: "_design/foo", rev: body["rev"]}]}
-    resp = Couch.post("/#{db_name}/_bulk_get", body: body)
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    assert length(body["results"]) == 1
-    %{"results" => [%{"id" => "_design/foo", "docs" => [%{"ok" => _}]}]} = body
-  end
-
-  test "GET /dbname/_all_docs?key=$ddoc_id", context do
-    db_name = context[:db_name]
-    resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"}, query: [w: 3])
-    assert resp.status_code == 201
-
-    resp = Couch.get("/#{db_name}/_all_docs", query: [key: "\"_design/foo\""])
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    assert length(body["rows"]) == 1
-    assert %{"rows" => [%{"id" => "_design/foo"}]} = body
-  end
-
-  @tag :skip_on_jenkins
-  test "GET /dbname/_design_docs", context do
-    db_name = context[:db_name]
-
-    retry_until(
-      fn ->
-        resp = Couch.put("/#{db_name}/_design/foo", body: %{stuff: "here"})
-        assert resp.status_code == 201
-
-        resp = Couch.get("/#{db_name}/_design_docs")
-        assert resp.status_code == 200
-        %{body: body} = resp
-
-        assert length(body["rows"]) == 1
-        %{"rows" => [%{"id" => "_design/foo"}]} = body
-      end,
-      500,
-      10_000
-    )
-  end
-end
diff --git a/test/elixir/test/partition_design_docs_test.exs b/test/elixir/test/partition_design_docs_test.exs
deleted file mode 100644
index 4ccd63f..0000000
--- a/test/elixir/test/partition_design_docs_test.exs
+++ /dev/null
@@ -1,16 +0,0 @@
-defmodule PartitionDesignDocsTest do
-  use CouchTestCase
-
-  @moduledoc """
-  Test Partition functionality for partition design docs
-  """
-
-  @tag :with_partitioned_db
-  test "/_partition/:pk/_design/doc 404", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/fakekey/_design/mrtest/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 404
-  end
-end
diff --git a/test/elixir/test/partition_helpers.exs b/test/elixir/test/partition_helpers.exs
deleted file mode 100644
index 6eac2b1..0000000
--- a/test/elixir/test/partition_helpers.exs
+++ /dev/null
@@ -1,76 +0,0 @@
-defmodule PartitionHelpers do
-  use ExUnit.Case
-
-  def create_partition_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do
-    docs =
-      for i <- 1..100 do
-        id =
-          if rem(i, 2) == 0 do
-            "#{pk1}:#{i}"
-          else
-            "#{pk2}:#{i}"
-          end
-
-        group =
-          if rem(i, 3) == 0 do
-            "one"
-          else
-            "two"
-          end
-
-        %{
-          :_id => id,
-          :value => i,
-          :some => "field",
-          :group => group
-        }
-      end
-
-    resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:w => 3, :docs => docs})
-    assert resp.status_code == 201
-  end
-
-  def create_partition_ddoc(db_name, opts \\ %{}) do
-    map_fn = """
-      function(doc) {
-        if (doc.some) {
-          emit(doc.value, doc.some);
-        }
-      }
-    """
-
-    default_ddoc = %{
-      views: %{
-        some: %{
-          map: map_fn
-        }
-      }
-    }
-
-    ddoc = Enum.into(opts, default_ddoc)
-
-    resp = Couch.put("/#{db_name}/_design/mrtest", body: ddoc)
-    assert resp.status_code == 201
-    assert Map.has_key?(resp.body, "ok") == true
-  end
-
-  def get_ids(resp) do
-    %{:body => %{"rows" => rows}} = resp
-    Enum.map(rows, fn row -> row["id"] end)
-  end
-
-  def get_partitions(resp) do
-    %{:body => %{"rows" => rows}} = resp
-
-    Enum.map(rows, fn row ->
-      [partition, _] = String.split(row["id"], ":")
-      partition
-    end)
-  end
-
-  def assert_correct_partition(partitions, correct_partition) do
-    assert Enum.all?(partitions, fn partition ->
-             partition == correct_partition
-           end)
-  end
-end
diff --git a/test/elixir/test/partition_mango_test.exs b/test/elixir/test/partition_mango_test.exs
deleted file mode 100644
index 3fd38d5..0000000
--- a/test/elixir/test/partition_mango_test.exs
+++ /dev/null
@@ -1,663 +0,0 @@
-defmodule PartitionMangoTest do
-  use CouchTestCase
-  import PartitionHelpers, except: [get_partitions: 1]
-
-  @moduledoc """
-  Test Partition functionality for mango
-  """
-  def create_index(db_name, fields \\ ["some"], opts \\ %{}) do
-    default_index = %{
-      index: %{
-        fields: fields
-      }
-    }
-
-    index = Enum.into(opts, default_index)
-    resp = Couch.post("/#{db_name}/_index", body: index)
-
-    assert resp.status_code == 200
-    assert resp.body["result"] == "created"
-  end
-
-  def get_partitions(resp) do
-    %{:body => %{"docs" => docs}} = resp
-
-    Enum.map(docs, fn doc ->
-      [partition, _] = String.split(doc["_id"], ":")
-      partition
-    end)
-  end
-
-  @tag :with_partitioned_db
-  test "query using _id and partition works", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            _id: %{
-              "$gt": "foo:"
-            }
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            _id: %{
-              "$lt": "foo:"
-            }
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "bar")
-  end
-
-  @tag :with_partitioned_db
-  test "query using _id works for global and local query", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            _id: %{
-              "$gt": 0
-            }
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            _id: %{
-              "$gt": 0
-            }
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "bar")
-  end
-
-  @tag :with_partitioned_db
-  test "query with partitioned:true using index and $eq", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_partition/bar/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "bar")
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query using _all_docs with $eq", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_partition/bar/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 20
-    assert_correct_partition(partitions, "bar")
-  end
-
-  @tag :with_db
-  test "non-partitioned query using _all_docs and $eq", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-
-    url = "/#{db_name}/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          skip: 40,
-          limit: 5
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert partitions == ["bar", "bar", "bar", "bar", "bar"]
-
-    url = "/#{db_name}/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          },
-          skip: 50,
-          limit: 5
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert partitions == ["foo", "foo", "foo", "foo", "foo"]
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query using index and range scan", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name, "foo", "bar42")
-    create_index(db_name, ["value"])
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_partition/bar42/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "bar42")
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query using _all_docs and range scan", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_partition/bar/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "bar")
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query using _all_docs", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name, "foo", "bar42")
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "foo")
-
-    url = "/#{db_name}/_partition/bar42/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert_correct_partition(partitions, "bar42")
-  end
-
-  @tag :with_partitioned_db
-  test "explain works with partitions", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["some"])
-
-    url = "/#{db_name}/_partition/foo/_explain"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    %{:body => body} = resp
-
-    assert body["index"]["name"] == "_all_docs"
-    assert body["mrargs"]["partition"] == "foo"
-
-    url = "/#{db_name}/_partition/bar/_explain"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          }
-        }
-      )
-
-    %{:body => body} = resp
-
-    assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
-    assert body["mrargs"]["partition"] == "bar"
-  end
-
-  @tag :with_db
-  test "explain works with non partitioned db", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["some"])
-
-    url = "/#{db_name}/_explain"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          }
-        }
-      )
-
-    %{:body => body} = resp
-
-    assert body["index"]["name"] == "_all_docs"
-    assert body["mrargs"]["partition"] == :null
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            some: "field"
-          }
-        }
-      )
-
-    %{:body => body} = resp
-
-    assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
-    assert body["mrargs"]["partition"] == :null
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query using bookmarks", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["value"])
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          },
-          limit: 3
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 3
-    assert_correct_partition(partitions, "foo")
-
-    %{:body => %{"bookmark" => bookmark}} = resp
-
-    resp =
-      Couch.post(
-        url,
-        body: %{
-          selector: %{
-            value: %{
-              "$gte": 6,
-              "$lt": 16
-            }
-          },
-          limit: 3,
-          bookmark: bookmark
-        }
-      )
-
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 2
-    assert_correct_partition(partitions, "foo")
-  end
-
-  @tag :with_partitioned_db
-  test "global query uses global index", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["some"], %{partitioned: false})
-
-    url = "/#{db_name}/_explain"
-
-    selector = %{
-      selector: %{
-        some: "field"
-      },
-      limit: 100
-    }
-
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    assert body["index"]["def"] == %{"fields" => [%{"some" => "asc"}]}
-
-    url = "/#{db_name}/_find"
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 200
-
-    partitions = get_partitions(resp)
-    assert length(partitions) == 100
-  end
-
-  @tag :with_partitioned_db
-  test "global query does not use partition index", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["some"])
-
-    url = "/#{db_name}/_explain"
-
-    selector = %{
-      selector: %{
-        some: "field"
-      },
-      limit: 100
-    }
-
-    resp = Couch.post(url, body: selector)
-    %{:body => body} = resp
-    assert body["index"]["name"] == "_all_docs"
-
-    url = "/#{db_name}/_find"
-    resp = Couch.post(url, body: selector)
-
-    assert resp.status_code == 200
-
-    partitions = get_partitions(resp)
-    assert length(partitions) == 100
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query does not use global index", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_index(db_name, ["some"], %{partitioned: false})
-
-    url = "/#{db_name}/_partition/foo/_explain"
-
-    selector = %{
-      selector: %{
-        some: "field"
-      },
-      limit: 50
-    }
-
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    assert body["index"]["name"] == "_all_docs"
-
-    url = "/#{db_name}/_partition/foo/_find"
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 200
-
-    partitions = get_partitions(resp)
-    assert length(partitions) == 50
-    assert_correct_partition(partitions, "foo")
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned _find and _explain with missing partition returns 400", context do
-    db_name = context[:db_name]
-
-    selector = %{
-      selector: %{
-        some: "field"
-      }
-    }
-
-    resp = Couch.get("/#{db_name}/_partition/_find", body: selector)
-    validate_missing_partition(resp)
-
-    resp = Couch.get("/#{db_name}/_partition/_explain", body: selector)
-    validate_missing_partition(resp)
-  end
-
-  defp validate_missing_partition(resp) do
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/Partition must not start/, reason)
-  end
-
-  @tag :with_partitioned_db
-  test "partitioned query sends correct errors for sort errors", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-
-    url = "/#{db_name}/_partition/foo/_find"
-
-    selector = %{
-      selector: %{
-        some: "field"
-      },
-      sort: ["some"],
-      limit: 50
-    }
-
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/No partitioned index exists for this sort/, reason)
-
-    url = "/#{db_name}/_find"
-    resp = Couch.post(url, body: selector)
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/No global index exists for this sort/, reason)
-  end
-end
diff --git a/test/elixir/test/partition_size_limit_test.exs b/test/elixir/test/partition_size_limit_test.exs
deleted file mode 100644
index b4be648..0000000
--- a/test/elixir/test/partition_size_limit_test.exs
+++ /dev/null
@@ -1,305 +0,0 @@
-defmodule PartitionSizeLimitTest do
-  use CouchTestCase
-
-  @moduledoc """
-  Test Partition size limit functionality
-  """
-
-  @max_size 10_240
-
-  setup do
-    db_name = random_db_name()
-    {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
-    on_exit(fn -> delete_db(db_name) end)
-
-    set_config({"couchdb", "max_partition_size", Integer.to_string(@max_size)})
-
-    {:ok, [db_name: db_name]}
-  end
-
-  defp get_db_info(dbname) do
-    resp = Couch.get("/#{dbname}")
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    body
-  end
-
-  defp get_partition_info(dbname, partition) do
-    resp = Couch.get("/#{dbname}/_partition/#{partition}")
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    body
-  end
-
-  defp open_doc(db_name, docid, status_assert \\ 200) do
-    resp = Couch.get("/#{db_name}/#{docid}")
-    assert resp.status_code == status_assert
-    %{:body => body} = resp
-    body
-  end
-
-  defp save_doc(db_name, doc, status_assert \\ 201) do
-    resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-    assert resp.status_code == status_assert
-    %{:body => body} = resp
-    body["rev"]
-  end
-
-  defp delete_doc(db_name, doc, status_assert \\ 200) do
-    url = "/#{db_name}/#{doc["_id"]}"
-    rev = doc["_rev"]
-    resp = Couch.delete(url, query: [w: 3, rev: rev])
-    assert resp.status_code == status_assert
-    %{:body => body} = resp
-    body["rev"]
-  end
-
-  defp fill_partition(db_name, partition \\ "foo") do
-    docs =
-      1..15
-      |> Enum.map(fn i ->
-        id = i |> Integer.to_string() |> String.pad_leading(4, "0")
-        docid = "#{partition}:#{id}"
-        %{_id: docid, value: "0" |> String.pad_leading(1024)}
-      end)
-
-    body = %{:w => 3, :docs => docs}
-    resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
-    assert resp.status_code == 201
-  end
-
-  defp compact(db) do
-    assert Couch.post("/#{db}/_compact").status_code == 202
-
-    retry_until(
-      fn ->
-        Couch.get("/#{db}").body["compact_running"] == false
-      end,
-      200,
-      20_000
-    )
-  end
-
-  test "fill partition manually", context do
-    db_name = context[:db_name]
-    partition = "foo"
-
-    resp =
-      1..1000
-      |> Enum.find_value(0, fn i ->
-        id = i |> Integer.to_string() |> String.pad_leading(4, "0")
-        docid = "#{partition}:#{id}"
-        doc = %{_id: docid, value: "0" |> String.pad_leading(1024)}
-        resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-
-        if resp.status_code == 201 do
-          false
-        else
-          resp
-        end
-      end)
-
-    assert resp.status_code == 403
-    %{body: body} = resp
-    assert body["error"] == "partition_overflow"
-
-    info = get_partition_info(db_name, partition)
-    assert info["sizes"]["external"] >= @max_size
-  end
-
-  test "full partitions reject POST /dbname", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    doc = %{_id: "foo:bar", value: "stuff"}
-    resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-    assert resp.status_code == 403
-    %{body: body} = resp
-    assert body["error"] == "partition_overflow"
-  end
-
-  test "full partitions reject PUT /dbname/docid", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    doc = %{value: "stuff"}
-    resp = Couch.put("/#{db_name}/foo:bar", query: [w: 3], body: doc)
-    assert resp.status_code == 403
-    %{body: body} = resp
-    assert body["error"] == "partition_overflow"
-  end
-
-  test "full partitions reject POST /dbname/_bulk_docs", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    body = %{w: 3, docs: [%{_id: "foo:bar"}]}
-    resp = Couch.post("/#{db_name}/_bulk_docs", query: [w: 3], body: body)
-    assert resp.status_code == 201
-    %{body: body} = resp
-    doc_resp = Enum.at(body, 0)
-    assert doc_resp["error"] == "partition_overflow"
-  end
-
-  test "full partitions with mixed POST /dbname/_bulk_docs", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    body = %{w: 3, docs: [%{_id: "foo:bar"}, %{_id: "baz:bang"}]}
-    resp = Couch.post("/#{db_name}/_bulk_docs", query: [w: 3], body: body)
-    assert resp.status_code == 201
-    %{body: body} = resp
-
-    doc_resp1 = Enum.at(body, 0)
-    assert doc_resp1["error"] == "partition_overflow"
-
-    doc_resp2 = Enum.at(body, 1)
-    assert doc_resp2["ok"]
-  end
-
-  test "full partitions are still readable", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-    open_doc(db_name, "foo:0001")
-  end
-
-  test "full partitions can accept deletes", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    doc = open_doc(db_name, "foo:0001")
-    delete_doc(db_name, doc)
-  end
-
-  test "full partitions can accept updates that reduce size", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    doc = open_doc(db_name, "foo:0001")
-    save_doc(db_name, %{doc | "value" => ""})
-  end
-
-  test "full partition does not affect other partitions", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-    save_doc(db_name, %{_id: "bar:foo", value: "stuff"})
-  end
-
-  test "full partition does not affect design documents", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-    rev1 = save_doc(db_name, %{_id: "_design/foo", value: "stuff"})
-    save_doc(db_name, %{_id: "_design/foo", _rev: rev1, value: "hi"})
-    doc = open_doc(db_name, "_design/foo")
-    delete_doc(db_name, doc)
-  end
-
-  test "replication into a full partition works", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-    save_doc(db_name, %{_id: "foo:bar", value: "stuff"}, 403)
-
-    doc = %{
-      _id: "foo:bar",
-      _rev: <<"1-23202479633c2b380f79507a776743d5">>,
-      value: "stuff"
-    }
-
-    url = "/#{db_name}/#{doc[:_id]}"
-    query = [new_edits: false, w: 3]
-    resp = Couch.put(url, query: query, body: doc)
-    assert resp.status_code == 201
-  end
-
-  test "compacting a full partition works", context do
-    db_name = context[:db_name]
-    db_info1 = get_db_info(db_name)
-    fill_partition(db_name)
-    compact(db_name)
-    db_info2 = get_db_info(db_name)
-    assert db_info2["sizes"]["file"] != db_info1["sizes"]["file"]
-  end
-
-  test "indexing a full partition works", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    ddoc = %{
-      _id: "_design/foo",
-      views: %{
-        bar: %{
-          map: "function(doc) {emit(doc.group, 1);}"
-        }
-      }
-    }
-
-    save_doc(db_name, ddoc)
-
-    url = "/#{db_name}/_partition/foo/_design/foo/_view/bar"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    assert length(body["rows"]) > 0
-  end
-
-  test "purging docs allows writes", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    info = get_partition_info(db_name, "foo")
-    limit = info["doc_count"] - 1
-
-    query = [
-      start_key: "\"foo:0000\"",
-      end_key: "\"foo:9999\"",
-      limit: limit
-    ]
-
-    resp = Couch.get("/#{db_name}/_all_docs", query: query)
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    pbody =
-      body["rows"]
-      |> Enum.reduce(%{}, fn row, acc ->
-        Map.put(acc, row["id"], [row["value"]["rev"]])
-      end)
-
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: pbody)
-    assert resp.status_code == 201
-
-    save_doc(db_name, %{_id: "foo:bar", value: "some value"})
-  end
-
-  test "increasing partition size allows more writes", context do
-    db_name = context[:db_name]
-    fill_partition(db_name)
-
-    # We use set_config_raw so that we're not setting
-    # on_exit handlers that might interfere with the original
-    # config change done in setup of this test
-    new_size = Integer.to_string(@max_size * 1000)
-    set_config_raw("couchdb", "max_partition_size", new_size)
-
-    save_doc(db_name, %{_id: "foo:bar", value: "stuff"})
-  end
-
-  test "decreasing partition size disables more writes", context do
-    db_name = context[:db_name]
-
-    # We use set_config_raw so that we're not setting
-    # on_exit handlers that might interfere with the original
-    # config change done in setup of this test
-    new_size = Integer.to_string(@max_size * 1000)
-    set_config_raw("couchdb", "max_partition_size", new_size)
-
-    fill_partition(db_name)
-    save_doc(db_name, %{_id: "foo:bar", value: "stuff"})
-
-    old_size = Integer.to_string(@max_size)
-    set_config_raw("couchdb", "max_partition_size", old_size)
-
-    save_doc(db_name, %{_id: "foo:baz", value: "stuff"}, 403)
-  end
-end
diff --git a/test/elixir/test/partition_size_test.exs b/test/elixir/test/partition_size_test.exs
deleted file mode 100644
index b292dc4..0000000
--- a/test/elixir/test/partition_size_test.exs
+++ /dev/null
@@ -1,361 +0,0 @@
-defmodule PartitionSizeTest do
-  use CouchTestCase
-
-  @moduledoc """
-  Test Partition size functionality
-  """
-
-  setup do
-    db_name = random_db_name()
-    {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
-    on_exit(fn -> delete_db(db_name) end)
-
-    {:ok, [db_name: db_name]}
-  end
-
-  def get_db_info(dbname) do
-    resp = Couch.get("/#{dbname}")
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    body
-  end
-
-  def get_partition_info(dbname, partition) do
-    resp = Couch.get("/#{dbname}/_partition/#{partition}")
-    assert resp.status_code == 200
-    %{:body => body} = resp
-    body
-  end
-
-  def mk_partition(i) do
-    i |> rem(10) |> Integer.to_string() |> String.pad_leading(3, "0")
-  end
-
-  def mk_docid(i) do
-    id = i |> Integer.to_string() |> String.pad_leading(4, "0")
-    "#{mk_partition(i)}:#{id}"
-  end
-
-  def mk_docs(db_name) do
-    docs =
-      for i <- 1..1000 do
-        group = Integer.to_string(rem(i, 3))
-
-        %{
-          :_id => mk_docid(i),
-          :value => i,
-          :some => "field",
-          :group => group
-        }
-      end
-
-    body = %{:w => 3, :docs => docs}
-
-    retry_until(fn ->
-      resp = Couch.post("/#{db_name}/_bulk_docs", body: body)
-      assert resp.status_code == 201
-    end)
-  end
-
-  def save_doc(db_name, doc) do
-    resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-    assert resp.status_code == 201
-    %{:body => body} = resp
-    body["rev"]
-  end
-
-  test "get empty partition", context do
-    db_name = context[:db_name]
-    partition = "non_existent_partition"
-
-    info = get_partition_info(db_name, partition)
-
-    assert info["doc_count"] == 0
-    assert info["doc_del_count"] == 0
-    assert info["partition"] == partition
-    assert info["sizes"]["external"] == 0
-    assert info["sizes"]["active"] == 0
-  end
-
-  test "unknown partition return's zero", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    info = get_partition_info(db_name, "unknown")
-    assert info["doc_count"] == 0
-    assert info["doc_del_count"] == 0
-    assert info["sizes"]["external"] == 0
-    assert info["sizes"]["active"] == 0
-  end
-
-  test "simple partition size", context do
-    db_name = context[:db_name]
-    save_doc(db_name, %{_id: "foo:bar", val: 42})
-
-    info = get_partition_info(db_name, "foo")
-    assert info["doc_count"] == 1
-    assert info["doc_del_count"] == 0
-    assert info["sizes"]["external"] > 0
-    assert info["sizes"]["active"] > 0
-  end
-
-  test "adding docs increases partition sizes", context do
-    db_name = context[:db_name]
-    save_doc(db_name, %{_id: "foo:bar", val: 42})
-    pre_info = get_partition_info(db_name, "foo")
-
-    save_doc(db_name, %{_id: "foo:baz", val: 24})
-    post_info = get_partition_info(db_name, "foo")
-
-    assert post_info["doc_count"] == 2
-    assert post_info["doc_del_count"] == 0
-    assert post_info["sizes"]["external"] > pre_info["sizes"]["external"]
-    assert post_info["sizes"]["active"] > pre_info["sizes"]["active"]
-  end
-
-  test "updating docs affects partition sizes", context do
-    db_name = context[:db_name]
-    rev1 = save_doc(db_name, %{_id: "foo:bar", val: ""})
-    info1 = get_partition_info(db_name, "foo")
-
-    rev2 =
-      save_doc(db_name, %{
-        _id: "foo:bar",
-        _rev: rev1,
-        val: "this is a very long string that is so super long its beyond long"
-      })
-
-    info2 = get_partition_info(db_name, "foo")
-
-    save_doc(db_name, %{
-      _id: "foo:bar",
-      _rev: rev2,
-      val: "this string is shorter"
-    })
-
-    info3 = get_partition_info(db_name, "foo")
-
-    assert info3["doc_count"] == 1
-    assert info3["doc_del_count"] == 0
-
-    assert info3["sizes"]["external"] > info1["sizes"]["external"]
-    assert info2["sizes"]["external"] > info3["sizes"]["external"]
-  end
-
-  test "deleting a doc affects partition sizes", context do
-    db_name = context[:db_name]
-    rev1 = save_doc(db_name, %{_id: "foo:bar", val: "some stuff here"})
-    info1 = get_partition_info(db_name, "foo")
-
-    save_doc(db_name, %{_id: "foo:bar", _rev: rev1, _deleted: true})
-    info2 = get_partition_info(db_name, "foo")
-
-    assert info1["doc_count"] == 1
-    assert info1["doc_del_count"] == 0
-
-    assert info2["doc_count"] == 0
-    assert info2["doc_del_count"] == 1
-
-    assert info2["sizes"]["external"] < info1["sizes"]["external"]
-  end
-
-  test "design docs do not affect partition sizes", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    pre_infos =
-      0..9
-      |> Enum.map(fn i ->
-        get_partition_info(db_name, mk_partition(i))
-      end)
-
-    0..5
-    |> Enum.map(fn i ->
-      base = i |> Integer.to_string() |> String.pad_leading(5, "0")
-      docid = "_design/#{base}"
-      save_doc(db_name, %{_id: docid, value: "some stuff here"})
-    end)
-
-    post_infos =
-      0..9
-      |> Enum.map(fn i ->
-        get_partition_info(db_name, mk_partition(i))
-      end)
-
-    assert post_infos == pre_infos
-  end
-
-  @tag :skip_on_jenkins
-  test "get all partition sizes", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    {esum, asum} =
-      0..9
-      |> Enum.reduce({0, 0}, fn i, {esize, asize} ->
-        partition = mk_partition(i)
-        info = get_partition_info(db_name, partition)
-        assert info["doc_count"] == 100
-        assert info["doc_del_count"] == 0
-        assert info["sizes"]["external"] > 0
-        assert info["sizes"]["active"] > 0
-        {esize + info["sizes"]["external"], asize + info["sizes"]["active"]}
-      end)
-
-    db_info = get_db_info(db_name)
-    assert db_info["sizes"]["external"] >= esum
-    assert db_info["sizes"]["active"] >= asum
-  end
-
-  test "get partition size with attachment", context do
-    db_name = context[:db_name]
-
-    doc = %{
-      _id: "foo:doc-with-attachment",
-      _attachments: %{
-        "foo.txt": %{
-          content_type: "text/plain",
-          data: Base.encode64("This is a text document to save")
-        }
-      }
-    }
-
-    save_doc(db_name, doc)
-
-    db_info = get_db_info(db_name)
-    foo_info = get_partition_info(db_name, "foo")
-
-    assert foo_info["doc_count"] == 1
-    assert foo_info["doc_del_count"] == 0
-    assert foo_info["sizes"]["active"] > 0
-    assert foo_info["sizes"]["external"] > 0
-
-    assert foo_info["sizes"]["active"] <= db_info["sizes"]["active"]
-    assert foo_info["sizes"]["external"] <= db_info["sizes"]["external"]
-  end
-
-  test "attachments don't affect other partitions", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    pre_infos =
-      0..9
-      |> Enum.map(fn i ->
-        get_partition_info(db_name, mk_partition(i))
-      end)
-
-    doc = %{
-      _id: "foo:doc-with-attachment",
-      _attachments: %{
-        "foo.txt": %{
-          content_type: "text/plain",
-          data: Base.encode64("This is a text document to save")
-        }
-      }
-    }
-
-    save_doc(db_name, doc)
-
-    att_info = get_partition_info(db_name, "foo")
-    assert att_info["doc_count"] == 1
-    assert att_info["sizes"]["external"] > 0
-
-    post_infos =
-      0..9
-      |> Enum.map(fn i ->
-        get_partition_info(db_name, mk_partition(i))
-      end)
-
-    assert post_infos == pre_infos
-
-    esize =
-      ([att_info] ++ post_infos)
-      |> Enum.reduce(0, fn info, acc ->
-        info["sizes"]["external"] + acc
-      end)
-
-    db_info = get_db_info(db_name)
-    assert esize == db_info["sizes"]["external"]
-  end
-
-  test "partition activity not affect other partition sizes", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    partition1 = "000"
-    partition2 = "001"
-
-    info2 = get_partition_info(db_name, partition2)
-
-    doc_id = "#{partition1}:doc-with-attachment"
-
-    doc = %{
-      _id: doc_id,
-      _attachments: %{
-        "foo.txt": %{
-          content_type: "text/plain",
-          data: Base.encode64("This is a text document to save")
-        }
-      }
-    }
-
-    doc_rev = save_doc(db_name, doc)
-
-    info2_attach = get_partition_info(db_name, partition2)
-    assert info2_attach == info2
-
-    doc =
-      Enum.into(
-        %{
-          another: "add another field",
-          _rev: doc_rev
-        },
-        doc
-      )
-
-    doc_rev = save_doc(db_name, doc)
-
-    info2_update = get_partition_info(db_name, partition2)
-    assert info2_update == info2
-
-    resp = Couch.delete("/#{db_name}/#{doc_id}", query: %{rev: doc_rev})
-    assert resp.status_code == 200
-
-    info2_delete = get_partition_info(db_name, partition2)
-    assert info2_delete == info2
-  end
-
-  test "purging docs decreases partition size", context do
-    db_name = context[:db_name]
-    mk_docs(db_name)
-
-    partition = "000"
-
-    query = [
-      start_key: "\"#{partition}:0000\"",
-      end_key: "\"#{partition}:9999\"",
-      limit: 50
-    ]
-
-    resp = Couch.get("/#{db_name}/_all_docs", query: query)
-    assert resp.status_code == 200
-    %{body: body} = resp
-
-    pre_info = get_partition_info(db_name, partition)
-
-    pbody =
-      body["rows"]
-      |> Enum.reduce(%{}, fn row, acc ->
-        Map.put(acc, row["id"], [row["value"]["rev"]])
-      end)
-
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: pbody)
-    assert resp.status_code == 201
-
-    post_info = get_partition_info(db_name, partition)
-    assert post_info["doc_count"] == pre_info["doc_count"] - 50
-    assert post_info["doc_del_count"] == 0
-    assert post_info["sizes"]["active"] < pre_info["sizes"]["active"]
-    assert post_info["sizes"]["external"] < pre_info["sizes"]["external"]
-  end
-end
diff --git a/test/elixir/test/partition_view_test.exs b/test/elixir/test/partition_view_test.exs
deleted file mode 100644
index 0a55c24..0000000
--- a/test/elixir/test/partition_view_test.exs
+++ /dev/null
@@ -1,374 +0,0 @@
-defmodule ViewPartitionTest do
-  use CouchTestCase
-  import PartitionHelpers
-
-  @moduledoc """
-  Test Partition functionality for views
-  """
-
-  setup_all do
-    db_name = random_db_name()
-    {:ok, _} = create_db(db_name, query: %{partitioned: true, q: 1})
-    on_exit(fn -> delete_db(db_name) end)
-
-    create_partition_docs(db_name)
-
-    map_fun1 = """
-      function(doc) {
-        if (doc.some) {
-          emit(doc.value, doc.some);
-        }
-      }
-    """
-
-    map_fun2 = """
-      function(doc) {
-        if (doc.group) {
-          emit([doc.some, doc.group], 1);
-        }
-      }
-    """
-
-    query = %{:w => 3}
-
-    body = %{
-      :docs => [
-        %{
-          _id: "_design/map",
-          views: %{some: %{map: map_fun1}}
-        },
-        %{
-          _id: "_design/map_some",
-          views: %{some: %{map: map_fun2}}
-        },
-        %{
-          _id: "_design/partitioned_true",
-          views: %{some: %{map: map_fun1}},
-          options: %{partitioned: true}
-        },
-        %{
-          _id: "_design/partitioned_false",
-          views: %{some: %{map: map_fun1}},
-          options: %{partitioned: false}
-        },
-        %{
-          _id: "_design/reduce",
-          views: %{some: %{map: map_fun2, reduce: "_count"}}
-        },
-        %{
-          _id: "_design/include_ddocs",
-          views: %{some: %{map: map_fun1}},
-          options: %{include_design: true}
-        }
-      ]
-    }
-
-    resp = Couch.post("/#{db_name}/_bulk_docs", query: query, body: body)
-    Enum.each(resp.body, &assert(&1["ok"]))
-
-    {:ok, [db_name: db_name]}
-  end
-
-  def get_reduce_result(resp) do
-    %{:body => %{"rows" => rows}} = resp
-    rows
-  end
-
-  test "query with partitioned:true returns partitioned fields", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/partitioned_true/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["foo"]
-
-    url = "/#{db_name}/_partition/bar/_design/partitioned_true/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["bar"]
-  end
-
-  test "default view query returns partitioned fields", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["foo"]
-
-    url = "/#{db_name}/_partition/bar/_design/map/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert Enum.dedup(partitions) == ["bar"]
-  end
-
-  test "conflicting partitions in path and query string rejected", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{partition: "bar"})
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/Conflicting value/, reason)
-  end
-
-  test "query will return zero results for wrong inputs", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{start_key: "\"foo:12\""})
-    assert resp.status_code == 200
-    assert Map.get(resp, :body)["rows"] == []
-  end
-
-  test "partitioned ddoc cannot be used in global query", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_design/map/_view/some"
-    resp = Couch.get(url)
-    %{:body => %{"reason" => reason}} = resp
-    assert resp.status_code == 400
-    assert Regex.match?(~r/mandatory for queries to this view./, reason)
-  end
-
-  test "partitioned query cannot be used with global ddoc", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/partitioned_false/_view/some"
-    resp = Couch.get(url)
-    %{:body => %{"reason" => reason}} = resp
-    assert resp.status_code == 400
-    assert Regex.match?(~r/is not supported in this design doc/, reason)
-  end
-
-  test "view query returns all docs for global query", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_design/partitioned_false/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 100
-  end
-
-  test "partition query errors with incorrect partition supplied", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/_bar/_design/map/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 400
-
-    url = "/#{db_name}/_partition//_design/map/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 400
-  end
-
-  test "partitioned query works with startkey, endkey range", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{start_key: 12, end_key: 20})
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert Enum.dedup(partitions) == ["foo"]
-  end
-
-  test "partitioned query works with keys", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.post(url, body: %{keys: [2, 4, 6]})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 3
-    assert ids == ["foo:2", "foo:4", "foo:6"]
-  end
-
-  test "global query works with keys", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_design/partitioned_false/_view/some"
-    resp = Couch.post(url, body: %{keys: [2, 4, 6]})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 3
-    assert ids == ["foo:2", "foo:4", "foo:6"]
-  end
-
-  test "partition query works with limit", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{limit: 5})
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 5
-    assert Enum.dedup(partitions) == ["foo"]
-  end
-
-  test "partition query with descending", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{descending: true, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:100", "foo:98", "foo:96", "foo:94", "foo:92"]
-
-    resp = Couch.get(url, query: %{descending: false, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:2", "foo:4", "foo:6", "foo:8", "foo:10"]
-  end
-
-  test "partition query with skip", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{skip: 5, limit: 5})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 5
-    assert ids == ["foo:12", "foo:14", "foo:16", "foo:18", "foo:20"]
-  end
-
-  test "partition query with key", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map/_view/some"
-    resp = Couch.get(url, query: %{key: 22})
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 1
-    assert ids == ["foo:22"]
-  end
-
-  test "partition query with startkey_docid and endkey_docid", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/map_some/_view/some"
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          startkey: "[\"field\",\"one\"]",
-          endkey: "[\"field\",\"one\"]",
-          startkey_docid: "foo:12",
-          endkey_docid: "foo:30"
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert ids == ["foo:12", "foo:18", "foo:24", "foo:30"]
-  end
-
-  test "query with reduce works", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/reduce/_view/some"
-    resp = Couch.get(url, query: %{reduce: true, group_level: 1})
-    assert resp.status_code == 200
-    results = get_reduce_result(resp)
-    assert results == [%{"key" => ["field"], "value" => 50}]
-
-    resp = Couch.get(url, query: %{reduce: true, group_level: 2})
-    results = get_reduce_result(resp)
-
-    assert results == [
-             %{"key" => ["field", "one"], "value" => 16},
-             %{"key" => ["field", "two"], "value" => 34}
-           ]
-
-    resp = Couch.get(url, query: %{reduce: true, group: true})
-    results = get_reduce_result(resp)
-
-    assert results == [
-             %{"key" => ["field", "one"], "value" => 16},
-             %{"key" => ["field", "two"], "value" => 34}
-           ]
-  end
-
-  test "partition query can set query limits", context do
-    set_config({"query_server_config", "partition_query_limit", "2000"})
-
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_partition_ddoc(db_name)
-
-    url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 20
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 20
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 50
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2000
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 50
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2001
-        }
-      )
-
-    assert resp.status_code == 400
-    %{:body => %{"reason" => reason}} = resp
-    assert Regex.match?(~r/Limit is too large/, reason)
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          limit: 2000,
-          skip: 25
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert length(ids) == 25
-  end
-
-  test "include_design works correctly", context do
-    db_name = context[:db_name]
-
-    url = "/#{db_name}/_partition/foo/_design/include_ddocs/_view/some"
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    partitions = get_partitions(resp)
-    assert length(partitions) == 50
-    assert Enum.dedup(partitions) == ["foo"]
-  end
-end
diff --git a/test/elixir/test/partition_view_update_test.exs b/test/elixir/test/partition_view_update_test.exs
deleted file mode 100644
index 63c6268..0000000
--- a/test/elixir/test/partition_view_update_test.exs
+++ /dev/null
@@ -1,160 +0,0 @@
-defmodule PartitionViewUpdateTest do
-  use CouchTestCase
-  import PartitionHelpers
-
-  @moduledoc """
-  Test Partition view update functionality
-  """
-  @tag :with_partitioned_db
-  test "view updates properly remove old keys", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name, "foo", "bar")
-    create_partition_ddoc(db_name)
-
-    check_key = fn key, num_rows ->
-      url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-      resp = Couch.get(url, query: [key: key])
-      assert resp.status_code == 200
-      assert length(resp.body["rows"]) == num_rows
-    end
-
-    check_key.(2, 1)
-
-    resp = Couch.get("/#{db_name}/foo:2")
-    doc = Map.put(resp.body, "value", 4)
-    resp = Couch.put("/#{db_name}/foo:2", query: [w: 3], body: doc)
-    assert resp.status_code >= 201 and resp.status_code <= 202
-
-    check_key.(4, 2)
-    check_key.(2, 0)
-  end
-
-  @tag :skip_on_jenkins
-  @tag :with_partitioned_db
-  test "query with update=false works", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_partition_ddoc(db_name)
-
-    url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
-    resp =
-      Couch.get(
-        url,
-        query: %{
-          update: "true",
-          limit: 3
-        }
-      )
-
-    assert resp.status_code == 200
-    ids = get_ids(resp)
-    assert ids == ["foo:2", "foo:4", "foo:6"]
-
-    # Avoid race conditions by attempting to get a full response
-    # from every shard before we do our update:false test
-    for _ <- 1..12 do
-      resp = Couch.get(url)
-      assert resp.status_code == 200
-    end
-
-    Couch.put("/#{db_name}/foo:1", body: %{some: "field"})
-
-    retry_until(fn ->
-      resp =
-        Couch.get(
-          url,
-          query: %{
-            update: "false",
-            limit: 3
-          }
-        )
-
-      assert resp.status_code == 200
-      ids = get_ids(resp)
-      assert ids == ["foo:2", "foo:4", "foo:6"]
-    end)
-  end
-
-  @tag :with_partitioned_db
-  test "purge removes view rows", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_partition_ddoc(db_name)
-
-    url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    %{body: body} = resp
-    assert length(body["rows"]) == 50
-
-    resp = Couch.get("/#{db_name}/foo:2")
-    assert resp.status_code == 200
-    %{body: body} = resp
-    rev = body["_rev"]
-
-    body = %{"foo:2" => [rev]}
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
-    assert resp.status_code == 201
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    %{body: body} = resp
-    assert length(body["rows"]) == 49
-  end
-
-  @tag :with_partitioned_db
-  test "purged conflict changes view rows", context do
-    db_name = context[:db_name]
-    create_partition_docs(db_name)
-    create_partition_ddoc(db_name)
-
-    url = "/#{db_name}/_partition/foo/_design/mrtest/_view/some"
-
-    resp = Couch.get(url)
-    assert resp.status_code == 200
-    %{body: body} = resp
-    assert length(body["rows"]) == 50
-
-    # Create a conflict on foo:2. Since the 4096
-    # value is deeper than the conflict, we can assert
-    # that it's in the view before the purge and that
-    # 8192 is in the view after the purge.
-    resp = Couch.get("/#{db_name}/foo:2")
-    assert resp.status_code == 200
-    %{body: body} = resp
-    rev1 = body["_rev"]
-
-    doc = %{_id: "foo:2", _rev: rev1, value: 4096, some: "field"}
-    resp = Couch.post("/#{db_name}", query: [w: 3], body: doc)
-    assert resp.status_code == 201
-    %{body: body} = resp
-    rev2 = body["rev"]
-
-    query = [w: 3, new_edits: false]
-    conflict_rev = "1-4a75b4efa0804859b3dfd327cbc1c2f9"
-    doc = %{_id: "foo:2", _rev: conflict_rev, value: 8192, some: "field"}
-    resp = Couch.put("/#{db_name}/foo:2", query: query, body: doc)
-    assert resp.status_code == 201
-
-    # Check that our expected row exists
-    resp = Couch.get(url, query: [key: 4096])
-    assert resp.status_code == 200
-    %{body: body} = resp
-    [row] = body["rows"]
-    assert row["id"] == "foo:2"
-
-    # Remove the current row to be replaced with
-    # a row from the conflict
-    body = %{"foo:2" => [rev2]}
-    resp = Couch.post("/#{db_name}/_purge", query: [w: 3], body: body)
-    assert resp.status_code == 201
-
-    resp = Couch.get(url, query: [key: 8192])
-    assert resp.status_code == 200
-    %{body: body} = resp
-    [row] = body["rows"]
-    assert row["id"] == "foo:2"
-  end
-end
diff --git a/test/elixir/test/reshard_all_docs_test.exs b/test/elixir/test/reshard_all_docs_test.exs
deleted file mode 100644
index 62b6e37..0000000
--- a/test/elixir/test/reshard_all_docs_test.exs
+++ /dev/null
@@ -1,79 +0,0 @@
-defmodule ReshardAllDocsTest do
-  use CouchTestCase
-  import ReshardHelpers
-
-  @moduledoc """
-  Test _all_docs interaction with resharding
-  """
-
-  setup do
-    db = random_db_name()
-    {:ok, _} = create_db(db, query: %{q: 2})
-
-    on_exit(fn ->
-      reset_reshard_state()
-      delete_db(db)
-    end)
-
-    {:ok, [db: db]}
-  end
-
-  test "all_docs after splitting all shards on node1", context do
-    db = context[:db]
-    node1 = get_first_node()
-    docs = add_docs(1..100, db)
-
-    before_split_all_docs = all_docs(db)
-    assert docs == before_split_all_docs
-
-    resp = post_job_node(db, node1)
-    assert resp.status_code == 201
-    jobid = hd(resp.body)["id"]
-    wait_job_completed(jobid)
-
-    assert before_split_all_docs == all_docs(db)
-
-    assert remove_job(jobid).status_code == 200
-  end
-
-  test "all_docs after splitting the same range on all nodes", context do
-    db = context[:db]
-    docs = add_docs(1..100, db)
-
-    before_split_all_docs = all_docs(db)
-    assert docs == before_split_all_docs
-
-    resp = post_job_range(db, "00000000-7fffffff")
-    assert resp.status_code == 201
-
-    resp.body
-    |> Enum.map(fn j -> j["id"] end)
-    |> Enum.each(fn id -> wait_job_completed(id) end)
-
-    assert before_split_all_docs == all_docs(db)
-
-    get_jobs()
-    |> Enum.map(fn j -> j["id"] end)
-    |> Enum.each(fn id -> remove_job(id) end)
-  end
-
-  defp add_docs(range, db) do
-    docs = create_docs(range)
-    w3 = %{:w => 3}
-    resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
-    assert resp.status_code == 201
-    assert length(resp.body) == length(docs)
-
-    docs
-    |> rev(resp.body)
-    |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
-  end
-
-  defp all_docs(db, query \\ %{}) do
-    resp = Couch.get("/#{db}/_all_docs", query: query)
-    assert resp.status_code == 200
-
-    resp.body["rows"]
-    |> Enum.into(%{}, fn %{"id" => id, "value" => v} -> {id, v["rev"]} end)
-  end
-end
diff --git a/test/elixir/test/reshard_basic_test.exs b/test/elixir/test/reshard_basic_test.exs
deleted file mode 100644
index 211dd6b..0000000
--- a/test/elixir/test/reshard_basic_test.exs
+++ /dev/null
@@ -1,174 +0,0 @@
-defmodule ReshardBasicTest do
-  use CouchTestCase
-  import ReshardHelpers
-
-  @moduledoc """
-  Test resharding basic functionality
-  """
-
-  setup_all do
-    db1 = random_db_name()
-    {:ok, _} = create_db(db1, query: %{q: 1})
-    db2 = random_db_name()
-    {:ok, _} = create_db(db2, query: %{q: 2})
-
-    on_exit(fn ->
-      reset_reshard_state()
-      delete_db(db1)
-      delete_db(db2)
-    end)
-
-    {:ok, [db1: db1, db2: db2]}
-  end
-
-  test "basic api querying, no jobs present" do
-    summary = get_summary()
-    assert summary["state"] == "running"
-    assert summary["state_reason"] == :null
-    assert summary["total"] == 0
-    assert summary["completed"] == 0
-    assert summary["failed"] == 0
-    assert summary["stopped"] == 0
-    assert get_state() == %{"state" => "running", "reason" => :null}
-    assert get_jobs() == []
-  end
-
-  test "check validation of invalid parameters", context do
-    db1 = context[:db1]
-    node1 = get_first_node()
-
-    resp = post_job_node(db1, "badnode")
-    assert resp.status_code == 400
-
-    resp = post_job_node("badresharddb", node1)
-    assert resp.status_code == 400
-
-    resp = post_job_db("badresharddb")
-    assert resp.status_code == 400
-
-    resp = post_job_range("badresharddb", "randomgarbage")
-    assert resp.status_code == 400
-
-    resp = get_job("badjobid")
-    assert resp.status_code == 404
-
-    resp = remove_job("badjobid")
-    assert resp.status_code == 404
-  end
-
-  test "toggle global state" do
-    assert get_state() == %{"state" => "running", "reason" => :null}
-    put_state_stopped("xyz")
-    assert get_state() == %{"state" => "stopped", "reason" => "xyz"}
-    put_state_running()
-    assert get_state() == %{"state" => "running", "reason" => :null}
-  end
-
-  test "split q=1 db shards on node1 (1 job)", context do
-    db = context[:db1]
-    node1 = get_first_node()
-
-    resp = post_job_node(db, node1)
-    assert resp.status_code == 201
-
-    body = resp.body
-    assert is_list(body)
-    assert length(body) == 1
-
-    [job] = body
-    id = job["id"]
-    assert is_binary(id)
-    node = job["node"]
-    assert is_binary(node)
-    assert node == node1
-    assert job["ok"] == true
-    shard = job["shard"]
-    assert is_binary(shard)
-
-    resp = get_job(id)
-    assert resp.status_code == 200
-
-    body = resp.body
-    assert body["type"] == "split"
-    assert body["id"] == id
-    assert body["source"] == shard
-    assert is_list(body["history"])
-    assert body["job_state"] in ["new", "running", "completed"]
-    assert is_list(body["target"])
-    assert length(body["target"]) == 2
-
-    wait_job_completed(id)
-
-    resp = get_job(id)
-    assert resp.status_code == 200
-
-    body = resp.body
-    assert body["job_state"] == "completed"
-    assert body["split_state"] == "completed"
-
-    resp = Couch.get("/#{db}/_shards")
-    assert resp.status_code == 200
-    shards = resp.body["shards"]
-    assert node1 not in shards["00000000-ffffffff"]
-    assert shards["00000000-7fffffff"] == [node1]
-    assert shards["80000000-ffffffff"] == [node1]
-
-    summary = get_summary()
-    assert summary["total"] == 1
-    assert summary["completed"] == 1
-
-    resp = remove_job(id)
-    assert resp.status_code == 200
-
-    assert get_jobs() == []
-
-    summary = get_summary()
-    assert summary["total"] == 0
-    assert summary["completed"] == 0
-  end
-
-  test "split q=2 shards on node1 (2 jobs)", context do
-    db = context[:db2]
-    node1 = get_first_node()
-
-    resp = post_job_node(db, node1)
-    assert resp.status_code == 201
-
-    body = resp.body
-    assert is_list(body)
-    assert length(body) == 2
-
-    [job1, job2] = Enum.sort(body)
-    {id1, id2} = {job1["id"], job2["id"]}
-
-    assert get_job(id1).body["id"] == id1
-    assert get_job(id2).body["id"] == id2
-
-    summary = get_summary()
-    assert summary["total"] == 2
-
-    wait_job_completed(id1)
-    wait_job_completed(id2)
-
-    summary = get_summary()
-    assert summary["completed"] == 2
-
-    resp = Couch.get("/#{db}/_shards")
-    assert resp.status_code == 200
-    shards = resp.body["shards"]
-    assert node1 not in shards["00000000-7fffffff"]
-    assert node1 not in shards["80000000-ffffffff"]
-    assert shards["00000000-3fffffff"] == [node1]
-    assert shards["40000000-7fffffff"] == [node1]
-    assert shards["80000000-bfffffff"] == [node1]
-    assert shards["c0000000-ffffffff"] == [node1]
-
-    # deleting the source db should remove the jobs
-    delete_db(db)
-    wait_job_removed(id1)
-    wait_job_removed(id2)
-
-    summary = get_summary()
-    assert summary["total"] == 0
-  end
-end
diff --git a/test/elixir/test/reshard_changes_feed.exs b/test/elixir/test/reshard_changes_feed.exs
deleted file mode 100644
index a4a39fe..0000000
--- a/test/elixir/test/reshard_changes_feed.exs
+++ /dev/null
@@ -1,81 +0,0 @@
-defmodule ReshardChangesFeedTest do
-  use CouchTestCase
-  import ReshardHelpers
-
-  @moduledoc """
-  Test _changes interaction with resharding
-  """
-
-  setup do
-    db = random_db_name()
-    {:ok, _} = create_db(db, query: %{q: 2})
-
-    on_exit(fn ->
-      reset_reshard_state()
-      delete_db(db)
-    end)
-
-    {:ok, [db: db]}
-  end
-
-  test "all_docs after splitting all shards on node1", context do
-    db = context[:db]
-    add_docs(1..3, db)
-
-    all_before = changes(db)
-    first_seq = hd(all_before["results"])["seq"]
-    last_seq = all_before["last_seq"]
-    since_1_before = docset(changes(db, %{:since => first_seq}))
-    since_last_before = docset(changes(db, %{:since => last_seq}))
-
-    resp = post_job_range(db, "00000000-7fffffff")
-    assert resp.status_code == 201
-
-    resp.body
-    |> Enum.map(fn j -> j["id"] end)
-    |> Enum.each(fn id -> wait_job_completed(id) end)
-
-    all_after = changes(db)
-    since_1_after = docset(changes(db, %{:since => first_seq}))
-    since_last_after = docset(changes(db, %{:since => last_seq}))
-
-    assert docset(all_before) == docset(all_after)
-    assert MapSet.subset?(since_1_before, since_1_after)
-    assert MapSet.subset?(since_last_before, since_last_after)
-
-    get_jobs()
-    |> Enum.map(fn j -> j["id"] end)
-    |> Enum.each(fn id -> remove_job(id) end)
-  end
-
-  defp docset(changes) do
-    changes["results"]
-    |> Enum.map(fn %{"id" => id} -> id end)
-    |> MapSet.new()
-  end
-
-  defp changes(db, query \\ %{}) do
-    resp = Couch.get("/#{db}/_changes", query: query)
-    assert resp.status_code == 200
-    resp.body
-  end
-
-  defp add_docs(range, db) do
-    docs = create_docs(range)
-    w3 = %{:w => 3}
-    resp = Couch.post("/#{db}/_bulk_docs", body: %{docs: docs}, query: w3)
-    assert resp.status_code == 201
-    assert length(resp.body) == length(docs)
-
-    docs
-    |> rev(resp.body)
-    |> Enum.into(%{}, fn %{:_id => id, :_rev => rev} -> {id, rev} end)
-  end
-
-  # (Keep for debugging)
-  # defp unpack_seq(seq) when is_binary(seq) do
-  #   [_, opaque] = String.split(seq, "-")
-  #   {:ok, binblob} = Base.url_decode64(opaque, padding: false)
-  #   :erlang.binary_to_term(binblob)
-  # end
-end
diff --git a/test/elixir/test/reshard_helpers.exs b/test/elixir/test/reshard_helpers.exs
deleted file mode 100644
index 52ce301..0000000
--- a/test/elixir/test/reshard_helpers.exs
+++ /dev/null
@@ -1,114 +0,0 @@
-defmodule ReshardHelpers do
-  use CouchTestCase
-
-  def get_summary do
-    resp = Couch.get("/_reshard")
-    assert resp.status_code == 200
-    resp.body
-  end
-
-  def get_state do
-    resp = Couch.get("/_reshard/state")
-    assert resp.status_code == 200
-    resp.body
-  end
-
-  def put_state_running do
-    resp = Couch.put("/_reshard/state", body: %{:state => "running"})
-    assert resp.status_code == 200
-    resp
-  end
-
-  def put_state_stopped(reason \\ "") do
-    body = %{:state => "stopped", :reason => reason}
-    resp = Couch.put("/_reshard/state", body: body)
-    assert resp.status_code == 200
-    resp
-  end
-
-  def get_jobs do
-    resp = Couch.get("/_reshard/jobs")
-    assert resp.status_code == 200
-    resp.body["jobs"]
-  end
-
-  def post_job_db(db) do
-    body = %{:type => :split, :db => db}
-    Couch.post("/_reshard/jobs", body: body)
-  end
-
-  def post_job_node(db, node) do
-    body = %{:type => :split, :db => db, :node => node}
-    Couch.post("/_reshard/jobs", body: body)
-  end
-
-  def post_job_range(db, range) do
-    body = %{:type => :split, :db => db, :range => range}
-    Couch.post("/_reshard/jobs", body: body)
-  end
-
-  def post_job_node_and_range(db, node, range) do
-    body = %{:type => :split, :db => db, :node => node, :range => range}
-    Couch.post("/_reshard/jobs", body: body)
-  end
-
-  def get_job(id) when is_binary(id) do
-    Couch.get("/_reshard/jobs/#{id}")
-  end
-
-  def remove_job(id) when is_binary(id) do
-    Couch.delete("/_reshard/jobs/#{id}")
-  end
-
-  def get_job_state(id) when is_binary(id) do
-    resp = Couch.get("/_reshard/jobs/#{id}/state")
-    assert resp.status_code == 200
-    resp.body["state"]
-  end
-
-  def stop_job(id, reason \\ "") when is_binary(id) do
-    body = %{:state => "stopped", :reason => reason}
-    Couch.post("/_reshard/jobs/#{id}/state", body: body)
-  end
-
-  def resume_job(id) when is_binary(id) do
-    body = %{:state => "running"}
-    Couch.post("/_reshard/jobs/#{id}/state", body: body)
-  end
-
-  def job_ids(jobs) do
-    Enum.map(jobs, fn job -> job["id"] end)
-  end
-
-  def get_first_node do
-    mresp = Couch.get("/_membership")
-    assert mresp.status_code == 200
-    all_nodes = mresp.body["all_nodes"]
-
-    mresp.body["cluster_nodes"]
-    |> Enum.filter(fn n -> n in all_nodes end)
-    |> Enum.sort()
-    |> hd()
-  end
-
-  def wait_job_removed(id) do
-    retry_until(fn -> get_job(id).status_code == 404 end, 200, 10_000)
-  end
-
-  def wait_job_completed(id) do
-    wait_job_state(id, "completed")
-  end
-
-  def wait_job_state(id, state) do
-    retry_until(fn -> get_job_state(id) == state end, 200, 10_000)
-  end
-
-  def reset_reshard_state do
-    get_jobs()
-    |> Enum.map(fn j -> j["id"] end)
-    |> Enum.each(fn id -> remove_job(id) end)
-
-    assert get_jobs() == []
-    put_state_running()
-  end
-end
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
index 4bf65bc..6311fca 100644
--- a/test/elixir/test/test_helper.exs
+++ b/test/elixir/test/test_helper.exs
@@ -14,5 +14,3 @@ ExUnit.configure(
 )
 
 ExUnit.start()
-Code.require_file("partition_helpers.exs", __DIR__)
-Code.require_file("reshard_helpers.exs", __DIR__)


[couchdb] 03/34: Initial fabric2 implementation on FoundationDB

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 373b42eaf34ecb6b10c6da5c8ec3026b583c9624
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:29:33 2019 -0500

    Initial fabric2 implementation on FoundationDB
    
    This provides a base implementation of a fabric API backed by
    FoundationDB. While a lot of functionality is provided there are a
    number of places that still require work. An incomplete list includes:
    
      1. Document bodies are currently a single key/value (see the
         sketch after this list)
      2. Attachments are stored as a range of key/value pairs
      3. There is no support for indexing
      4. Request size limits are not enforced directly
      5. Auth is still backed by a legacy CouchDB database
      6. No support for before_doc_update/after_doc_read
      7. Various implementation shortcuts need to be expanded for full API
         support.
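    
    As a rough illustration of items 1 and 2 (hypothetical code, not part
    of this patch; the module name and subspace layout are invented, and
    it assumes the erlfdb bindings this branch builds on), body and
    attachment storage look something like:
    
        %% Sketch only: one doc body per FDB value, attachments chunked
        %% into a key range.
        -module(fdb_layout_sketch).
        -export([put_body/4, get_body/3, put_att/5]).
        
        %% Item 1: a single value holds the whole body. FoundationDB caps
        %% values at 100KB, which is why item 7 flags this for expansion.
        put_body(Db, DbPrefix, DocId, BodyTerm) ->
            erlfdb:transactional(Db, fun(Tx) ->
                Key = erlfdb_tuple:pack({<<"docs">>, DocId}, DbPrefix),
                erlfdb:set(Tx, Key, term_to_binary(BodyTerm))
            end).
        
        get_body(Db, DbPrefix, DocId) ->
            erlfdb:transactional(Db, fun(Tx) ->
                Key = erlfdb_tuple:pack({<<"docs">>, DocId}, DbPrefix),
                case erlfdb:wait(erlfdb:get(Tx, Key)) of
                    not_found -> not_found;
                    Bin -> binary_to_term(Bin)
                end
            end).
        
        %% Item 2: one key/value per chunk, so an attachment occupies a
        %% contiguous key range under its (docid, name) prefix.
        put_att(Db, DbPrefix, DocId, AttName, AttBin) ->
            erlfdb:transactional(Db, fun(Tx) ->
                lists:foldl(fun(Chunk, Idx) ->
                    Key = erlfdb_tuple:pack(
                        {<<"atts">>, DocId, AttName, Idx}, DbPrefix),
                    erlfdb:set(Tx, Key, Chunk),
                    Idx + 1
                end, 0, chunk(AttBin, 100000))
            end).
        
        chunk(Bin, Size) when byte_size(Bin) > Size ->
            <<Chunk:Size/binary, Rest/binary>> = Bin,
            [Chunk | chunk(Rest, Size)];
        chunk(Bin, _Size) ->
            [Bin].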
---
 FDB_NOTES.md                                       |   57 +
 src/couch/src/couch_att.erl                        |  661 ++++------
 src/couch/src/couch_doc.erl                        |   11 +
 src/fabric/src/fabric.app.src                      |    8 +-
 src/fabric/src/fabric2.hrl                         |   66 +
 src/fabric/src/{fabric.app.src => fabric2_app.erl} |   35 +-
 src/fabric/src/fabric2_db.erl                      | 1299 ++++++++++++++++++++
 src/fabric/src/fabric2_events.erl                  |   84 ++
 src/fabric/src/fabric2_fdb.erl                     | 1187 ++++++++++++++++++
 src/fabric/src/fabric2_server.erl                  |  104 ++
 src/fabric/src/fabric2_sup.erl                     |   47 +
 src/fabric/src/fabric2_txids.erl                   |  144 +++
 src/fabric/src/fabric2_util.erl                    |  203 +++
 13 files changed, 3490 insertions(+), 416 deletions(-)

diff --git a/FDB_NOTES.md b/FDB_NOTES.md
new file mode 100644
index 0000000..c0cdc8c
--- /dev/null
+++ b/FDB_NOTES.md
@@ -0,0 +1,57 @@
+Things of Note
+===
+
+
+1. If a replication sends us two revisions A and B where one is an
+   ancestor of the other, we likely have divergent behavior. However,
+   this should never happen, in theory.
+
+2. For multiple updates to the same document in a _bulk_docs request (or
+   updates that just happen to land in the same update batch in non-fdb
+   CouchDB), we likely have subtly different behavior.
+
+3. I'm relying on repeated reads in an fdb transaction to be "cheap"
+   in that the reads would be cached in the fdb_transaction object.
+   This still needs to be verified (see the probe sketch after this
+   file's diff), but that appeared to be how things behaved in testing.
+
+4. When attempting to create a doc from scratch in an interactive_edit
+   update, with revisions specified *and* attachment stubs, the reported
+   error is now a conflict. Previously the missing_stubs error was
+   raised earlier.
+
+5. There may be a difference in behavior when there are no VDU functions
+   set on a db and no design documents in a batch. This is because in
+   that situation we don't run the prep_and_validate code on pre-fdb
+   CouchDB. The new code always checks stubs before merging revision
+   trees. I'm sure the old way would fail somehow, but it would fail
+   further on, which means we may have failed with a different reason
+   (conflict, etc.) before we got to the next place we check for missing
+   stubs.
+
+6. For multi-doc updates we'll need to investigate user versions on
+   versionstamps within a transaction. Also this likely prevents the
+   ability to have multiple updates to the same doc in a single
+   _bulk_docs transaction.
+
+7. Document body storage needs to be implemented beyond the single
+   key/value approach.
+
+8. We'll want to look at how we currently apply open options to individual
+   elements of an open_revs call. It might turn out that we have to grab a
+   full FDI even if we could look up a rev directly (i.e., revs_info would
+   require us to have the entire FDI; it'd be wasteful to return all of
+   that in an open_revs call, but bug compatibility ftw!).
+
+9. Is it possible that a server_admin can delete a db without being able
+   to open it? If so, that's probably changed behavior.
+
+10. Getting the doc count for _all_docs on large, active databases might
+    be a problem. If we allow range requests up to 5s, and we continue to
+    return the total doc count, we may have to play games with snapshot
+    reads on the doc count key, or else it'll whack any _all_docs range
+    requests.
+
+11. Revision infos need to track their size: if we want to maintain a database
+    size counter, we'll want to store the size of a given doc body for each
+    revision so that we don't have to read the old body when updating the tree.
+
+12. Update sequences do not yet include an incarnation value.
\ No newline at end of file
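
As a minimal sketch of the repeated-read assumption in note 3 above
(assuming erlfdb's transactional API over a database handle; the key and
function name are illustrative), both reads below run in the same
transaction, so the second one should be served from the fdb_transaction
object's cache rather than the cluster:

    repeated_read(Db, Key) ->
        erlfdb:transactional(Db, fun(Tx) ->
            V1 = erlfdb:wait(erlfdb:get(Tx, Key)),
            % Same key, same transaction: expected to be cheap.
            V1 = erlfdb:wait(erlfdb:get(Tx, Key)),
            V1
        end).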
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index a24de21..0dc5fa5 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -29,7 +29,7 @@
 -export([
     size_info/1,
     to_disk_term/1,
-    from_disk_term/2
+    from_disk_term/3
 ]).
 
 -export([
@@ -38,7 +38,7 @@
 ]).
 
 -export([
-    flush/2,
+    flush/3,
     foldl/3,
     range_foldl/5,
     foldl_decode/3,
@@ -46,11 +46,6 @@
 ]).
 
 -export([
-    upgrade/1,
-    downgrade/1
-]).
-
--export([
     max_attachment_size/0,
     validate_attachment_size/3
 ]).
@@ -58,137 +53,61 @@
 -compile(nowarn_deprecated_type).
 -export_type([att/0]).
 
--include_lib("couch/include/couch_db.hrl").
-
-
-%% Legacy attachment record. This is going to be phased out by the new proplist
-%% based structure. It's needed for now to allow code to perform lazy upgrades
-%% while the patch is rolled out to the cluster. Attachments passed as records
-%% will remain so until they are required to be represented as property lists.
-%% Once this has been widely deployed, this record will be removed entirely and
-%% property lists will be the main format.
--record(att, {
-    name :: binary(),
-    type :: binary(),
-    att_len :: non_neg_integer(),
-
-    %% length of the attachment in its identity form
-    %% (that is, without a content encoding applied to it)
-    %% differs from att_len when encoding /= identity
-    disk_len :: non_neg_integer(),
-
-    md5 = <<>> :: binary(),
-    revpos = 0 :: non_neg_integer(),
-    data :: stub | follows | binary() | {any(), any()} |
-            {follows, pid(), reference()} | fun(() -> binary()),
-
-    %% Encoding of the attachment
-    %% currently supported values are:
-    %%     identity, gzip
-    %% additional values to support in the future:
-    %%     deflate, compress
-    encoding = identity :: identity | gzip
-}).
-
-
-%% Extensible Attachment Type
-%%
-%% The following types describe the known properties for attachment fields
-%% encoded as property lists to allow easier upgrades. Values not in this list
-%% should be accepted at runtime but should be treated as opaque data as might
-%% be used by upgraded code. If you plan on operating on new data, please add
-%% an entry here as documentation.
-
-
-%% The name of the attachment is also used as the mime-part name for file
-%% downloads. These must be unique per document.
--type name_prop() :: {name, binary()}.
-
-
-%% The mime type of the attachment. This does affect compression of certain
-%% attachments if the type is found to be configured as a compressable type.
-%% This is commonly reserved for text/* types but could include other custom
-%% cases as well. See definition and use of couch_util:compressable_att_type/1.
--type type_prop() :: {type, binary()}.
-
-
-%% The attachment length is similar to disk-length but ignores additional
-%% encoding that may have occurred.
--type att_len_prop() :: {att_len, non_neg_integer()}.
-
-
-%% The size of the attachment as stored in a disk stream.
--type disk_len_prop() :: {disk_len, non_neg_integer()}.
-
-
-%% This is a digest of the original attachment data as uploaded by the client.
-%% it's useful for checking validity of contents against other attachment data
-%% as well as quick digest computation of the enclosing document.
--type md5_prop() :: {md5, binary()}.
-
 
--type revpos_prop() :: {revpos, 0}.
+-include_lib("couch/include/couch_db.hrl").
 
 
-%% This field is currently overloaded with just about everything. The
-%% {any(), any()} type is just there until I have time to check the actual
-%% values expected. Over time this should be split into more than one property
-%% to allow simpler handling.
--type data_prop() :: {
-    data, stub | follows | binary() | {any(), any()} |
-    {follows, pid(), reference()} | fun(() -> binary())
-}.
+-define(CURRENT_ATT_FORMAT, 0).
 
 
-%% We will occasionally compress our data. See type_prop() for more information
-%% on when this happens.
--type encoding_prop() :: {encoding, identity | gzip}.
+-type prop_name() ::
+    name |
+    type |
+    att_len |
+    disk_len |
+    md5 |
+    revpos |
+    data |
+    encoding.
 
 
--type attachment() :: [
-    name_prop() | type_prop() |
-    att_len_prop() | disk_len_prop() |
-    md5_prop() | revpos_prop() |
-    data_prop() | encoding_prop()
-].
+-type data_prop_type() ::
+    {loc, #{}, binary(), binary()} |
+    stub |
+    follows |
+    binary() |
+    {follows, pid(), reference()} |
+    fun(() -> binary()).
 
--type disk_att_v1() :: {
-    Name :: binary(),
-    Type :: binary(),
-    Sp :: any(),
-    AttLen :: non_neg_integer(),
-    RevPos :: non_neg_integer(),
-    Md5 :: binary()
-}.
 
--type disk_att_v2() :: {
-    Name :: binary(),
-    Type :: binary(),
-    Sp :: any(),
-    AttLen :: non_neg_integer(),
-    DiskLen :: non_neg_integer(),
-    RevPos :: non_neg_integer(),
-    Md5 :: binary(),
-    Enc :: identity | gzip
+-type att() :: #{
+    name := binary(),
+    type := binary(),
+    att_len := non_neg_integer() | undefined,
+    disk_len := non_neg_integer() | undefined,
+    md5 := binary() | undefined,
+    revpos := non_neg_integer(),
+    data := data_prop_type(),
+    encoding := identity | gzip | undefined,
+    headers := [{binary(), binary()}] | undefined
 }.
 
--type disk_att_v3() :: {Base :: tuple(), Extended :: list()}.
-
--type disk_att() :: disk_att_v1() | disk_att_v2() | disk_att_v3().
-
--type att() :: #att{} | attachment() | disk_att().
 
 new() ->
-    %% We construct a record by default for compatability. This will be
-    %% upgraded on demand. A subtle effect this has on all attachments
-    %% constructed via new is that it will pick up the proper defaults
-    %% from the #att record definition given above. Newer properties do
-    %% not support special default values and will all be treated as
-    %% undefined.
-    #att{}.
+    #{
+        name => <<>>,
+        type => <<>>,
+        att_len => undefined,
+        disk_len => undefined,
+        md5 => undefined,
+        revpos => 0,
+        data => undefined,
+        encoding => undefined,
+        headers => undefined
+    }.
 
 
--spec new([{atom(), any()}]) -> att().
+-spec new([{prop_name(), any()}]) -> att().
 new(Props) ->
     store(Props, new()).
 
@@ -197,71 +116,28 @@ new(Props) ->
            (atom(), att()) -> any().
 fetch(Fields, Att) when is_list(Fields) ->
     [fetch(Field, Att) || Field <- Fields];
-fetch(Field, Att) when is_list(Att) ->
-    case lists:keyfind(Field, 1, Att) of
-        {Field, Value} -> Value;
-        false -> undefined
-    end;
-fetch(name, #att{name = Name}) ->
-    Name;
-fetch(type, #att{type = Type}) ->
-    Type;
-fetch(att_len, #att{att_len = AttLen}) ->
-    AttLen;
-fetch(disk_len, #att{disk_len = DiskLen}) ->
-    DiskLen;
-fetch(md5, #att{md5 = Digest}) ->
-    Digest;
-fetch(revpos, #att{revpos = RevPos}) ->
-    RevPos;
-fetch(data, #att{data = Data}) ->
-    Data;
-fetch(encoding, #att{encoding = Encoding}) ->
-    Encoding;
-fetch(_, _) ->
-    undefined.
+fetch(Field, Att) ->
+    maps:get(Field, Att).
 
 
 -spec store([{atom(), any()}], att()) -> att().
 store(Props, Att0) ->
     lists:foldl(fun({Field, Value}, Att) ->
-        store(Field, Value, Att)
+        maps:update(Field, Value, Att)
     end, Att0, Props).
 
 
--spec store(atom(), any(), att()) -> att().
-store(Field, undefined, Att) when is_list(Att) ->
-    lists:keydelete(Field, 1, Att);
-store(Field, Value, Att) when is_list(Att) ->
-    lists:keystore(Field, 1, Att, {Field, Value});
-store(name, Name, Att) ->
-    Att#att{name = Name};
-store(type, Type, Att) ->
-    Att#att{type = Type};
-store(att_len, AttLen, Att) ->
-    Att#att{att_len = AttLen};
-store(disk_len, DiskLen, Att) ->
-    Att#att{disk_len = DiskLen};
-store(md5, Digest, Att) ->
-    Att#att{md5 = Digest};
-store(revpos, RevPos, Att) ->
-    Att#att{revpos = RevPos};
-store(data, Data, Att) ->
-    Att#att{data = Data};
-store(encoding, Encoding, Att) ->
-    Att#att{encoding = Encoding};
 store(Field, Value, Att) ->
-    store(Field, Value, upgrade(Att)).
+    maps:update(Field, Value, Att).
 
 
 -spec transform(atom(), fun(), att()) -> att().
 transform(Field, Fun, Att) ->
-    NewValue = Fun(fetch(Field, Att)),
-    store(Field, NewValue, Att).
+    maps:update_with(Field, Fun, Att).
 
 
-is_stub(Att) ->
-    stub == fetch(data, Att).
+is_stub(#{data := stub}) -> true;
+is_stub(#{}) -> false.
 
 
 %% merge_stubs takes all stub attachments and replaces them with on disk
@@ -275,8 +151,7 @@ merge_stubs(MemAtts, DiskAtts) ->
     merge_stubs(MemAtts, OnDisk, []).
 
 
-%% restore spec when R14 support is dropped
-%% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
+-spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
 merge_stubs([Att | Rest], OnDisk, Merged) ->
     case fetch(data, Att) of
         stub ->
@@ -308,14 +183,8 @@ size_info([]) ->
     {ok, []};
 size_info(Atts) ->
     Info = lists:map(fun(Att) ->
-        AttLen = fetch(att_len, Att),
-        case fetch(data, Att) of
-             {stream, StreamEngine} ->
-                 {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
-                 {SPos, AttLen};
-             {_, SPos} ->
-                 {SPos, AttLen}
-        end
+        [{loc, _Db, _DocId, AttId}, AttLen] = fetch([data, att_len], Att),
+        {AttId, AttLen}
     end, Atts),
     {ok, lists:usort(Info)}.
 
@@ -324,89 +193,44 @@ size_info(Atts) ->
 %% old format when possible. This should help make the attachment lazy upgrade
 %% as safe as possible, avoiding the need for complicated disk versioning
 %% schemes.
-to_disk_term(#att{} = Att) ->
-    {stream, StreamEngine} = fetch(data, Att),
-    {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
-    {
+to_disk_term(Att) ->
+    {loc, #{}, _DocId, AttId} = fetch(data, Att),
+    {?CURRENT_ATT_FORMAT, {
         fetch(name, Att),
         fetch(type, Att),
-        Sp,
+        AttId,
         fetch(att_len, Att),
         fetch(disk_len, Att),
         fetch(revpos, Att),
         fetch(md5, Att),
-        fetch(encoding, Att)
-    };
-to_disk_term(Att) ->
-    BaseProps = [name, type, data, att_len, disk_len, revpos, md5, encoding],
-    {Extended, Base} = lists:foldl(
-        fun
-            (data, {Props, Values}) ->
-                case lists:keytake(data, 1, Props) of
-                    {value, {_, {stream, StreamEngine}}, Other} ->
-                        {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
-                        {Other, [Sp | Values]};
-                    {value, {_, Value}, Other} ->
-                        {Other, [Value | Values]};
-                    false ->
-                        {Props, [undefined | Values]}
-                end;
-            (Key, {Props, Values}) ->
-                case lists:keytake(Key, 1, Props) of
-                    {value, {_, Value}, Other} -> {Other, [Value | Values]};
-                    false -> {Props, [undefined | Values]}
-                end
-        end,
-        {Att, []},
-        BaseProps
-    ),
-    {list_to_tuple(lists:reverse(Base)), Extended}.
-
-
-%% The new disk term format is a simple wrapper around the legacy format. Base
-%% properties will remain in a tuple while the new fields and possibly data from
-%% future extensions will be stored in a list of atom/value pairs. While this is
-%% slightly less efficient, future work should be able to make use of
-%% compression to remove these sorts of common bits (block level compression
-%% with something like a shared dictionary that is checkpointed every now and
-%% then).
-from_disk_term(StreamSrc, {Base, Extended})
-        when is_tuple(Base), is_list(Extended) ->
-    store(Extended, from_disk_term(StreamSrc, Base));
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=DiskLen,
-        md5=Md5,
-        revpos=RevPos,
-        data={stream, Stream},
-        encoding=upgrade_encoding(Enc)
-    };
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=AttLen,
-        md5=Md5,
-        revpos=RevPos,
-        data={stream, Stream}
-    };
-from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=AttLen,
-        md5= <<>>,
-        revpos=0,
-        data={stream, Stream}
-    }.
+        fetch(encoding, Att),
+        fetch(headers, Att)
+    }}.
+
+
+from_disk_term(#{} = Db, DocId, {?CURRENT_ATT_FORMAT, Props}) ->
+    {
+        Name,
+        Type,
+        AttId,
+        AttLen,
+        DiskLen,
+        RevPos,
+        Md5,
+        Encoding,
+        Headers
+    } = Props,
+    new([
+        {name, Name},
+        {type, Type},
+        {data, {loc, Db#{tx := undefined}, DocId, AttId}},
+        {att_len, AttLen},
+        {disk_len, DiskLen},
+        {revpos, RevPos},
+        {md5, Md5},
+        {encoding, Encoding},
+        {headers, Headers}
+    ]).
 
 
 %% from_json reads in embedded JSON attachments and creates usable attachment
@@ -433,8 +257,12 @@ stub_from_json(Att, Props) ->
     %% json object. See merge_stubs/3 for the stub check.
     RevPos = couch_util:get_value(<<"revpos">>, Props),
     store([
-        {md5, Digest}, {revpos, RevPos}, {data, stub}, {disk_len, DiskLen},
-        {att_len, EncodedLen}, {encoding, Encoding}
+        {data, stub},
+        {disk_len, DiskLen},
+        {att_len, EncodedLen},
+        {revpos, RevPos},
+        {md5, Digest},
+        {encoding, Encoding}
     ], Att).
 
 
@@ -443,8 +271,12 @@ follow_from_json(Att, Props) ->
     Digest = digest_from_json(Props),
     RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
     store([
-        {md5, Digest}, {revpos, RevPos}, {data, follows}, {disk_len, DiskLen},
-        {att_len, EncodedLen}, {encoding, Encoding}
+        {data, follows},
+        {disk_len, DiskLen},
+        {att_len, EncodedLen},
+        {revpos, RevPos},
+        {md5, Digest},
+        {encoding, Encoding}
     ], Att).
 
 
@@ -455,8 +287,10 @@ inline_from_json(Att, Props) ->
             Length = size(Data),
             RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
             store([
-                {data, Data}, {revpos, RevPos}, {disk_len, Length},
-                {att_len, Length}
+                {data, Data},
+                {disk_len, Length},
+                {att_len, Length},
+                {revpos, RevPos}
             ], Att)
     catch
         _:_ ->
@@ -466,7 +300,6 @@ inline_from_json(Att, Props) ->
     end.
 
 
-
 encoded_lengths_from_json(Props) ->
     Len = couch_util:get_value(<<"length">>, Props),
     case couch_util:get_value(<<"encoding">>, Props) of
@@ -488,9 +321,17 @@ digest_from_json(Props) ->
 
 
 to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
-    [Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch(
-        [name, data, disk_len, att_len, encoding, type, revpos, md5], Att
-    ),
+    #{
+        name := Name,
+        type := Type,
+        data := Data,
+        disk_len := DiskLen,
+        att_len := AttLen,
+        revpos := RevPos,
+        md5 := Md5,
+        encoding := Encoding,
+        headers := Headers
+    } = Att,
     Props = [
         {<<"content_type">>, Type},
         {<<"revpos">>, RevPos}
@@ -505,71 +346,74 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
         DataToFollow ->
             [{<<"length">>, DiskLen}, {<<"follows">>, true}];
         true ->
-            AttData = case Enc of
+            AttData = case Encoding of
                 gzip -> zlib:gunzip(to_binary(Att));
                 identity -> to_binary(Att)
             end,
             [{<<"data">>, base64:encode(AttData)}]
     end,
     EncodingProps = if
-        ShowEncoding andalso Enc /= identity ->
+        ShowEncoding andalso Encoding /= identity ->
             [
-                {<<"encoding">>, couch_util:to_binary(Enc)},
+                {<<"encoding">>, couch_util:to_binary(Encoding)},
                 {<<"encoded_length">>, AttLen}
             ];
         true ->
             []
     end,
-    HeadersProp = case fetch(headers, Att) of
+    HeadersProp = case Headers of
         undefined -> [];
         Headers -> [{<<"headers">>, Headers}]
     end,
     {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
 
 
-flush(Db, Att) ->
-    flush_data(Db, fetch(data, Att), Att).
+flush(Db, DocId, Att1) ->
+    Att2 = read_data(fetch(data, Att1), Att1),
+    [
+        Data,
+        AttLen,
+        DiskLen,
+        ReqMd5,
+        Encoding
+    ] = fetch([data, att_len, disk_len, md5, encoding], Att2),
+
+    % Eventually, we'll check if we can compress this
+    % attachment here and do so if possible.
+
+    % If we were sent a gzip'ed attachment with no
+    % length data, we have to set it here.
+    Att3 = case AttLen of
+        undefined -> store(att_len, DiskLen, Att2);
+        _ -> Att2
+    end,
+
+    % If no encoding has been set, default to
+    % identity
+    Att4 = case Encoding of
+        undefined -> store(encoding, identity, Att3);
+        _ -> Att3
+    end,
+
+    case Data of
+        {loc, _, _, _} ->
+            % Already flushed
+            Att1;
+        _ when is_binary(Data) ->
+            IdentMd5 = get_identity_md5(Data, fetch(encoding, Att4)),
+            if ReqMd5 == undefined -> ok; true ->
+                couch_util:check_md5(IdentMd5, ReqMd5)
+            end,
+            Att5 = store(md5, IdentMd5, Att4),
+            fabric2_db:write_attachment(Db, DocId, Att5)
+    end.
 
 
-flush_data(Db, Data, Att) when is_binary(Data) ->
-    couch_db:with_stream(Db, Att, fun(OutputStream) ->
-        couch_stream:write(OutputStream, Data)
-    end);
-flush_data(Db, Fun, Att) when is_function(Fun) ->
-    AttName = fetch(name, Att),
-    MaxAttSize = max_attachment_size(),
-    case fetch(att_len, Att) of
-        undefined ->
-            couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                % Fun(MaxChunkSize, WriterFun) must call WriterFun
-                % once for each chunk of the attachment,
-                Fun(4096,
-                    % WriterFun({Length, Binary}, State)
-                    % WriterFun({0, _Footers}, State)
-                    % Called with Length == 0 on the last time.
-                    % WriterFun returns NewState.
-                    fun({0, Footers}, _Total) ->
-                        F = mochiweb_headers:from_binary(Footers),
-                        case mochiweb_headers:get_value("Content-MD5", F) of
-                        undefined ->
-                            ok;
-                        Md5 ->
-                            {md5, base64:decode(Md5)}
-                        end;
-                    ({Length, Chunk}, Total0) ->
-                        Total = Total0 + Length,
-                        validate_attachment_size(AttName, Total, MaxAttSize),
-                        couch_stream:write(OutputStream, Chunk),
-                        Total
-                    end, 0)
-            end);
-        AttLen ->
-            validate_attachment_size(AttName, AttLen, MaxAttSize),
-            couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                write_streamed_attachment(OutputStream, Fun, AttLen)
-            end)
-    end;
-flush_data(Db, {follows, Parser, Ref}, Att) ->
+read_data({loc, #{}, _DocId, _AttId}, Att) ->
+    % Attachment already written to fdb
+    Att;
+
+read_data({follows, Parser, Ref}, Att) ->
     ParserRef = erlang:monitor(process, Parser),
     Fun = fun() ->
         Parser ! {get_bytes, Ref, self()},
@@ -583,41 +427,72 @@ flush_data(Db, {follows, Parser, Ref}, Att) ->
         end
     end,
     try
-        flush_data(Db, Fun, store(data, Fun, Att))
+        read_data(Fun, store(data, Fun, Att))
     after
         erlang:demonitor(ParserRef, [flush])
     end;
-flush_data(Db, {stream, StreamEngine}, Att) ->
-    case couch_db:is_active_stream(Db, StreamEngine) of
-        true ->
-            % Already written
-            Att;
-        false ->
-            NewAtt = couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                couch_stream:copy(StreamEngine, OutputStream)
-            end),
-            InMd5 = fetch(md5, Att),
-            OutMd5 = fetch(md5, NewAtt),
-            couch_util:check_md5(OutMd5, InMd5),
-            NewAtt
+
+read_data(Data, Att) when is_binary(Data) ->
+    Att;
+
+read_data(Fun, Att) when is_function(Fun) ->
+    [AttName, AttLen, InMd5] = fetch([name, att_len, md5], Att),
+    MaxAttSize = max_attachment_size(),
+    case AttLen of
+        undefined ->
+            % Fun(MaxChunkSize, WriterFun) must call WriterFun
+            % once for each chunk of the attachment.
+            WriterFun = fun
+                ({0, Footers}, {Len, Acc}) ->
+                    F = mochiweb_headers:from_binary(Footers),
+                    Md5 = case mochiweb_headers:get_value("Content-MD5", F) of
+                        undefined -> undefined;
+                        Value -> base64:decode(Value)
+                    end,
+                    Props0 = [
+                        {data, iolist_to_binary(lists:reverse(Acc))},
+                        {disk_len, Len}
+                    ],
+                    Props1 = if InMd5 /= md5_in_footer -> Props0; true ->
+                        [{md5, Md5} | Props0]
+                    end,
+                    store(Props1, Att);
+                ({ChunkLen, Chunk}, {Len, Acc}) ->
+                    NewLen = Len + ChunkLen,
+                    validate_attachment_size(AttName, NewLen, MaxAttSize),
+                    {NewLen, [Chunk | Acc]}
+            end,
+            Fun(8192, WriterFun, {0, []});
+        AttLen ->
+            validate_attachment_size(AttName, AttLen, MaxAttSize),
+            read_streamed_attachment(Att, Fun, AttLen, [])
     end.
 
 
-write_streamed_attachment(_Stream, _F, 0) ->
-    ok;
-write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 ->
+read_streamed_attachment(Att, _F, 0, Acc) ->
+    Bin = iolist_to_binary(lists:reverse(Acc)),
+    store([
+        {data, Bin},
+        {disk_len, size(Bin)}
+    ], Att);
+
+read_streamed_attachment(_Att, _F, LenLeft, _Acc) when LenLeft < 0 ->
     throw({bad_request, <<"attachment longer than expected">>});
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
-    Bin = try read_next_chunk(F, LenLeft)
+
+read_streamed_attachment(Att, F, LenLeft, Acc) when LenLeft > 0 ->
+    Bin = try
+        read_next_chunk(F, LenLeft)
     catch
         {mp_parser_died, normal} ->
             throw({bad_request, <<"attachment shorter than expected">>})
     end,
-    ok = couch_stream:write(Stream, Bin),
-    write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)).
+    Size = iolist_size(Bin),
+    read_streamed_attachment(Att, F, LenLeft - Size, [Bin | Acc]).
+
 
 read_next_chunk(F, _) when is_function(F, 0) ->
     F();
+
 read_next_chunk(F, LenLeft) when is_function(F, 1) ->
     F(lists:min([LenLeft, 16#2000])).
 
@@ -626,14 +501,17 @@ foldl(Att, Fun, Acc) ->
     foldl(fetch(data, Att), Att, Fun, Acc).
 
 
+foldl({loc, Db, DocId, AttId}, _Att, Fun, Acc) ->
+    Bin = fabric2_db:read_attachment(Db#{tx := undefined}, DocId, AttId),
+    Fun(Bin, Acc);
+
 foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
     Fun(Bin, Acc);
-foldl({stream, StreamEngine}, Att, Fun, Acc) ->
-    Md5 = fetch(md5, Att),
-    couch_stream:foldl(StreamEngine, Md5, Fun, Acc);
+
 foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
     Len = fetch(att_len, Att),
     fold_streamed_data(DataFun, Len, Fun, Acc);
+
 foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
     ParserRef = erlang:monitor(process, Parser),
     DataFun = fun() ->
@@ -654,19 +532,26 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
     end.
 
 
+range_foldl(Bin1, From, To, Fun, Acc) when is_binary(Bin1) ->
+    ReadLen = To - From,
+    Bin2 = case Bin1 of
+        _ when size(Bin1) < From -> <<>>;
+        <<_:From/binary, B2/binary>> -> B2
+    end,
+    Bin3 = case Bin2 of
+        _ when size(Bin2) < ReadLen -> Bin2;
+        <<B3:ReadLen/binary, _/binary>> -> B3
+    end,
+    Fun(Bin3, Acc);
+
 range_foldl(Att, From, To, Fun, Acc) ->
-    {stream, StreamEngine} = fetch(data, Att),
-    couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
+    {loc, Db, DocId, AttId} = fetch(data, Att),
+    Bin = fabric2_db:read_attachment(Db, DocId, AttId),
+    range_foldl(Bin, From, To, Fun, Acc).
 
 
-foldl_decode(Att, Fun, Acc) ->
-    case fetch([data, encoding], Att) of
-        [{stream, StreamEngine}, Enc] ->
-            couch_stream:foldl_decode(
-                    StreamEngine, fetch(md5, Att), Enc, Fun, Acc);
-        [Fun2, identity] ->
-            fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
-    end.
+foldl_decode(_Att, _Fun, _Acc) ->
+    erlang:error(not_supported).
 
 
 to_binary(Att) ->
@@ -677,10 +562,8 @@ to_binary(Bin, _Att) when is_binary(Bin) ->
     Bin;
 to_binary(Iolist, _Att) when is_list(Iolist) ->
     iolist_to_binary(Iolist);
-to_binary({stream, _StreamEngine}, Att) ->
-    iolist_to_binary(
-        lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, []))
-    );
+to_binary({loc, Db, DocId, AttId}, _Att) ->
+    fabric2_db:read_attachment(Db, DocId, AttId);
 to_binary(DataFun, Att) when is_function(DataFun)->
     Len = fetch(att_len, Att),
     iolist_to_binary(
@@ -695,46 +578,22 @@ to_binary(DataFun, Att) when is_function(DataFun)->
 
 fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
     Acc;
+
 fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
     Bin = RcvFun(),
     ResultAcc = Fun(Bin, Acc),
     fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
 
 
-%% Upgrade an attachment record to a property list on demand. This is a one-way
-%% operation as downgrading potentially truncates fields with important data.
--spec upgrade(#att{}) -> attachment().
-upgrade(#att{} = Att) ->
-    Map = lists:zip(
-        record_info(fields, att),
-        lists:seq(2, record_info(size, att))
-    ),
-    %% Don't store undefined elements since that is default
-    [{F, element(I, Att)} || {F, I} <- Map, element(I, Att) /= undefined];
-upgrade(Att) ->
-    Att.
-
-
-%% Downgrade is exposed for interactive convenience. In practice, unless done
-%% manually, upgrades are always one-way.
-downgrade(#att{} = Att) ->
-    Att;
-downgrade(Att) ->
-    #att{
-        name = fetch(name, Att),
-        type = fetch(type, Att),
-        att_len = fetch(att_len, Att),
-        disk_len = fetch(disk_len, Att),
-        md5 = fetch(md5, Att),
-        revpos = fetch(revpos, Att),
-        data = fetch(data, Att),
-        encoding = fetch(encoding, Att)
-    }.
-
-
-upgrade_encoding(true) -> gzip;
-upgrade_encoding(false) -> identity;
-upgrade_encoding(Encoding) -> Encoding.
+get_identity_md5(Bin, gzip) ->
+    Z = zlib:open(),
+    ok = zlib:inflateInit(Z, 16 + 15),
+    Inflated = zlib:inflate(Z, Bin),
+    ok = zlib:inflateEnd(Z),
+    ok = zlib:close(Z),
+    couch_hash:md5_hash(Inflated);
+get_identity_md5(Bin, _) ->
+    couch_hash:md5_hash(Bin).
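+
+% For example (illustrative bytes), a gzip'ed attachment is digested
+% over its inflated, identity-form content:
+%
+%     IdentMd5 = get_identity_md5(zlib:gzip(<<"data">>), gzip),
+%     IdentMd5 = couch_hash:md5_hash(<<"data">>).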
 
 
 max_attachment_size() ->
 validate_attachment_size(_AttName, _AttSize, _MaxAttSize) ->
     ok.
 
 
-open_stream(StreamSrc, Data) ->
-    case couch_db:is_db(StreamSrc) of
-        true ->
-            couch_db:open_read_stream(StreamSrc, Data);
-        false ->
-            case is_function(StreamSrc, 1) of
-                true ->
-                    StreamSrc(Data);
-                false ->
-                    erlang:error({invalid_stream_source, StreamSrc})
-            end
-    end.
+%% is_compressible(Type) when is_binary(Type) ->
+%%     is_compressible(binary_to_list(Type));
+%% is_compressible(Type) ->
+%%     TypeExpList = re:split(
+%%         config:get("attachments", "compressible_types", ""),
+%%         "\\s*,\\s*",
+%%         [{return, list}]
+%%     ),
+%%     lists:any(
+%%         fun(TypeExp) ->
+%%             Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+%%                 "(?:\\s*;.*?)?\\s*", $$],
+%%             re:run(Type, Regexp, [caseless]) =/= nomatch
+%%         end,
+%%         [T || T <- TypeExpList, T /= []]
+%%     ).
 
 
 -ifdef(TEST).
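
Since attachments are now plain maps, the accessor API above can be
exercised directly. A minimal usage sketch (names and values are
illustrative):

    Att0 = couch_att:new([{name, <<"logo.png">>}, {type, <<"image/png">>}]),
    <<"logo.png">> = couch_att:fetch(name, Att0),
    Att1 = couch_att:store(revpos, 1, Att0),
    false = couch_att:is_stub(Att1).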
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 4a49372..d33325e 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -374,6 +374,17 @@ rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) ->
         body_sp = undefined,
         seq = undefined,
         rev = {Pos, RevId}
+    };
+rev_info({#{} = RevInfo, {Pos, [RevId | _]}}) ->
+    #{
+        deleted := Deleted,
+        sequence := Sequence
+    } = RevInfo,
+    #rev_info{
+        deleted = Deleted,
+        body_sp = undefined,
+        seq = Sequence,
+        rev = {Pos, RevId}
     }.
 
 is_deleted(#full_doc_info{rev_tree=Tree}) ->
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
index d7686ca..20fbb1e 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric.app.src
@@ -13,7 +13,10 @@
 {application, fabric, [
     {description, "Routing and proxying layer for CouchDB cluster"},
     {vsn, git},
-    {registered, []},
+    {mod, {fabric2_app, []}},
+    {registered, [
+        fabric_server
+    ]},
     {applications, [
         kernel,
         stdlib,
@@ -22,6 +25,7 @@
         rexi,
         mem3,
         couch_log,
-        couch_stats
+        couch_stats,
+        erlfdb
     ]}
 ]}.
diff --git a/src/fabric/src/fabric2.hrl b/src/fabric/src/fabric2.hrl
new file mode 100644
index 0000000..de1d3d1
--- /dev/null
+++ b/src/fabric/src/fabric2.hrl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-define(uint2bin(I), binary:encode_unsigned(I, little)).
+-define(bin2uint(I), binary:decode_unsigned(I, little)).
+
+% This will eventually be the `\xFFmetadataVersion` key that is
+% currently only available in FoundationDB master.
+%
+%  https://forums.foundationdb.org/t/a-new-tool-for-managing-layer-metadata/1191
+%
+% Until then we'll fake the same behavior using a randomish
+% key for tracking metadata changes. Once we get to the
+% new feature, this will be made more performant by updating
+% this define.
+-define(METADATA_VERSION_KEY, <<"$metadata_version_key$">>).
+
+
+% Prefix Definitions
+
+% Layer Level: (LayerPrefix, X, ...)
+
+-define(CLUSTER_CONFIG, 0).
+-define(ALL_DBS, 1).
+-define(DBS, 15).
+-define(TX_IDS, 255).
+
+% Database Level: (LayerPrefix, ?DBS, DbPrefix, X, ...)
+
+-define(DB_VERSION, 0).
+-define(DB_CONFIG, 16).
+-define(DB_STATS, 17).
+-define(DB_ALL_DOCS, 18).
+-define(DB_CHANGES, 19).
+-define(DB_REVS, 20).
+-define(DB_DOCS, 21).
+-define(DB_LOCAL_DOCS, 22).
+-define(DB_ATTS, 23).
+
+
+% Versions
+
+-define(CURR_REV_FORMAT, 0).
+
+
+% Misc constants
+
+-define(PDICT_DB_KEY, '$fabric_db_handle').
+-define(PDICT_LAYER_CACHE, '$fabric_layer_id').
+-define(PDICT_CHECKED_DB_IS_CURRENT, '$fabric_checked_db_is_current').
+-define(PDICT_TX_ID_KEY, '$fabric_tx_id').
+-define(PDICT_TX_RES_KEY, '$fabric_tx_result').
+-define(COMMIT_UNKNOWN_RESULT, 1021).
+
+
+-define(ATTACHMENT_CHUNK_SIZE, 100000).
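
As a rough sketch of how the prefix defines above compose into keys
(assuming the erlfdb_tuple encoding; DbId, DocId and the bindings are
illustrative):

    % Database level: (LayerPrefix, ?DBS, DbPrefix, ?DB_ALL_DOCS, DocId)
    DbPrefix = erlfdb_tuple:pack({?DBS, DbId}, LayerPrefix),
    AllDocsKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix).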
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric2_app.erl
similarity index 64%
copy from src/fabric/src/fabric.app.src
copy to src/fabric/src/fabric2_app.erl
index d7686ca..da95acb 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric2_app.erl
@@ -10,18 +10,23 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
-{application, fabric, [
-    {description, "Routing and proxying layer for CouchDB cluster"},
-    {vsn, git},
-    {registered, []},
-    {applications, [
-        kernel,
-        stdlib,
-        config,
-        couch,
-        rexi,
-        mem3,
-        couch_log,
-        couch_stats
-    ]}
-]}.
+-module(fabric2_app).
+-behaviour(application).
+
+
+-export([
+    start/2,
+    stop/1
+]).
+
+
+start(_Type, StartArgs) ->
+    fabric2_sup:start_link(StartArgs).
+
+
+stop(_State) ->
+    case application:get_env(erlfdb, test_cluster_pid) of
+        {ok, Pid} -> Pid ! close;
+        _ -> ok
+    end,
+    ok.
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
new file mode 100644
index 0000000..02a18fa
--- /dev/null
+++ b/src/fabric/src/fabric2_db.erl
@@ -0,0 +1,1299 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db).
+
+
+-export([
+    create/2,
+    open/2,
+    delete/2,
+
+    list_dbs/0,
+    list_dbs/1,
+
+    is_admin/1,
+    check_is_admin/1,
+    check_is_member/1,
+
+    name/1,
+    get_after_doc_read_fun/1,
+    get_before_doc_update_fun/1,
+    get_committed_update_seq/1,
+    get_compacted_seq/1,
+    get_compactor_pid/1,
+    get_db_info/1,
+    %% get_partition_info/2,
+    get_del_doc_count/1,
+    get_doc_count/1,
+    get_doc_count/2,
+    %% get_epochs/1,
+    %% get_filepath/1,
+    get_instance_start_time/1,
+    get_pid/1,
+    get_revs_limit/1,
+    get_security/1,
+    get_update_seq/1,
+    get_user_ctx/1,
+    get_uuid/1,
+    %% get_purge_seq/1,
+    %% get_oldest_purge_seq/1,
+    %% get_purge_infos_limit/1,
+
+    is_clustered/1,
+    is_db/1,
+    is_partitioned/1,
+    is_system_db/1,
+    is_system_db_name/1,
+
+    set_revs_limit/2,
+    %% set_purge_infos_limit/2,
+    set_security/2,
+    set_user_ctx/2,
+
+    ensure_full_commit/1,
+    ensure_full_commit/2,
+
+    %% load_validation_funs/1,
+    %% reload_validation_funs/1,
+
+    open_doc/2,
+    open_doc/3,
+    open_doc_revs/4,
+    %% open_doc_int/3,
+    get_doc_info/2,
+    get_full_doc_info/2,
+    get_full_doc_infos/2,
+    get_missing_revs/2,
+    %% get_design_doc/2,
+    %% get_design_docs/1,
+    %% get_design_doc_count/1,
+    %% get_purge_infos/2,
+
+    %% get_minimum_purge_seq/1,
+    %% purge_client_exists/3,
+
+    %% validate_docid/2,
+    %% doc_from_json_obj_validate/2,
+
+    update_doc/2,
+    update_doc/3,
+    update_docs/2,
+    update_docs/3,
+    %% delete_doc/3,
+
+    %% purge_docs/2,
+    %% purge_docs/3,
+
+    read_attachment/3,
+    write_attachment/3,
+
+    fold_docs/3,
+    fold_docs/4,
+    %% fold_local_docs/4,
+    %% fold_design_docs/4,
+    fold_changes/4,
+    fold_changes/5,
+    %% count_changes_since/2,
+    %% fold_purge_infos/4,
+    %% fold_purge_infos/5,
+
+    %% calculate_start_seq/3,
+    %% owner_of/2,
+
+    %% start_compact/1,
+    %% cancel_compact/1,
+    %% wait_for_compaction/1,
+    %% wait_for_compaction/2,
+
+    %% dbname_suffix/1,
+    %% normalize_dbname/1,
+    %% validate_dbname/1,
+
+    %% make_doc/5,
+    new_revid/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+-define(DBNAME_REGEX,
+    "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
+    "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
+).
+
+
+-define(RETURN(Term), throw({?MODULE, Term})).
+
+
+create(DbName, Options) ->
+    Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+        case fabric2_fdb:exists(TxDb) of
+            true ->
+                {error, file_exists};
+            false ->
+                fabric2_fdb:create(TxDb, Options)
+        end
+    end),
+    % We cache outside of the transaction so that we're sure
+    % that the transaction was committed.
+    case Result of
+        #{} = Db ->
+            ok = fabric2_server:store(Db),
+            {ok, Db#{tx := undefined}};
+        Error ->
+            Error
+    end.
+
+
+open(DbName, Options) ->
+    case fabric2_server:fetch(DbName) of
+        #{} = Db ->
+            {ok, maybe_set_user_ctx(Db, Options)};
+        undefined ->
+            Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+                fabric2_fdb:open(TxDb, Options)
+            end),
+            % Cache outside the transaction retry loop
+            case Result of
+                #{} = Db ->
+                    ok = fabric2_server:store(Db),
+                    {ok, Db#{tx := undefined}};
+                Error ->
+                    Error
+            end
+    end.
+
+
+delete(DbName, Options) ->
+    % This will throw if the db does not exist
+    {ok, Db} = open(DbName, Options),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:delete(TxDb)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:remove(DbName)
+    end.
+
+
+list_dbs() ->
+    list_dbs([]).
+
+
+list_dbs(Options) ->
+    fabric2_fdb:transactional(fun(Tx) ->
+        fabric2_fdb:list_dbs(Tx, Options)
+    end).
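+
+% A minimal usage sketch of the lifecycle functions above (the db name
+% and empty option lists are illustrative):
+%
+%     {ok, Db1} = fabric2_db:create(<<"demo">>, []),
+%     {error, file_exists} = fabric2_db:create(<<"demo">>, []),
+%     {ok, Db2} = fabric2_db:open(<<"demo">>, []),
+%     true = lists:member(<<"demo">>, fabric2_db:list_dbs()),
+%     ok = fabric2_db:delete(<<"demo">>, []).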
+
+
+is_admin(Db) ->
+    % TODO: Need to re-consider couch_db_plugin:check_is_admin/1
+    {SecProps} = get_security(Db),
+    UserCtx = get_user_ctx(Db),
+    {Admins} = get_admins(SecProps),
+    is_authorized(Admins, UserCtx).
+
+
+check_is_admin(Db) ->
+    case is_admin(Db) of
+        true ->
+            ok;
+        false ->
+            UserCtx = get_user_ctx(Db),
+            Reason = <<"You are not a db or server admin.">>,
+            throw_security_error(UserCtx, Reason)
+    end.
+
+
+check_is_member(Db) ->
+    case is_member(Db) of
+        true ->
+            ok;
+        false ->
+            UserCtx = get_user_ctx(Db),
+            throw_security_error(UserCtx)
+    end.
+
+
+name(#{name := DbName}) ->
+    DbName.
+
+
+get_after_doc_read_fun(#{after_doc_read := AfterDocRead}) ->
+    AfterDocRead.
+
+
+get_before_doc_update_fun(#{before_doc_update := BeforeDocUpdate}) ->
+    BeforeDocUpdate.
+
+
+get_committed_update_seq(#{} = Db) ->
+    get_update_seq(Db).
+
+
+get_compacted_seq(#{} = Db) ->
+    get_update_seq(Db).
+
+
+get_compactor_pid(#{} = _Db) ->
+    nil.
+
+
+get_db_info(#{} = Db) ->
+    DbProps = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_info(TxDb)
+    end),
+
+    BaseProps = [
+        {cluster, {[{n, 0}, {q, 0}, {r, 0}, {w, 0}]}},
+        {compact_running, false},
+        {data_size, 0},
+        {db_name, name(Db)},
+        {disk_format_version, 0},
+        {disk_size, 0},
+        {instance_start_time, <<"0">>},
+        {purge_seq, 0}
+    ],
+
+    {ok, lists:foldl(fun({Key, Val}, Acc) ->
+        lists:keystore(Key, 1, Acc, {Key, Val})
+    end, BaseProps, DbProps)}.
+
+
+get_del_doc_count(#{} = Db) ->
+    get_doc_count(Db, <<"doc_del_count">>).
+
+
+get_doc_count(Db) ->
+    get_doc_count(Db, <<"doc_count">>).
+
+
+get_doc_count(Db, <<"_all_docs">>) ->
+    get_doc_count(Db, <<"doc_count">>);
+
+get_doc_count(Db, <<"_design">>) ->
+    get_doc_count(Db, <<"doc_design_count">>);
+
+get_doc_count(Db, <<"_local">>) ->
+    get_doc_count(Db, <<"doc_local_count">>);
+
+get_doc_count(Db, Key) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_stat(TxDb, Key)
+    end).
+
+
+get_instance_start_time(#{}) ->
+    0.
+
+
+get_pid(#{}) ->
+    nil.
+
+
+get_revs_limit(#{revs_limit := RevsLimit}) ->
+    RevsLimit.
+
+
+get_security(#{security_doc := SecurityDoc}) ->
+    SecurityDoc.
+
+
+get_update_seq(#{} = Db) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_last_change(TxDb)
+    end).
+
+
+get_user_ctx(#{user_ctx := UserCtx}) ->
+    UserCtx.
+
+
+get_uuid(#{uuid := UUID}) ->
+    UUID.
+
+
+is_clustered(#{}) ->
+    false.
+
+
+is_db(#{name := _}) ->
+    true;
+is_db(_) ->
+    false.
+
+
+is_partitioned(#{}) ->
+    false.
+
+
+is_system_db(#{name := DbName}) ->
+    is_system_db_name(DbName).
+
+
+is_system_db_name(DbName) when is_list(DbName) ->
+    is_system_db_name(?l2b(DbName));
+is_system_db_name(DbName) when is_binary(DbName) ->
+    Suffix = filename:basename(DbName),
+    case {filename:dirname(DbName), lists:member(Suffix, ?SYSTEM_DATABASES)} of
+        {<<".">>, Result} -> Result;
+        {_Prefix, false} -> false;
+        {Prefix, true} ->
+            ReOpts = [{capture, none}, dollar_endonly],
+            re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
+    end.
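+
+% For example:
+%
+%     true = is_system_db_name(<<"_users">>),
+%     false = is_system_db_name(<<"plain_db">>).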
+
+
+set_revs_limit(#{} = Db, RevsLimit) ->
+    check_is_admin(Db),
+    RevsLimBin = ?uint2bin(RevsLimit),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:set_config(TxDb, <<"revs_limit">>, RevsLimBin)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:store(Db#{revs_limit := RevsLimit})
+    end.
+
+
+set_security(#{} = Db, Security) ->
+    check_is_admin(Db),
+    ok = fabric2_util:validate_security_object(Security),
+    SecBin = ?JSON_ENCODE(Security),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:set_config(TxDb, <<"security_doc">>, SecBin)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:store(Db#{security_doc := Security})
+    end.
+
+
+set_user_ctx(#{} = Db, UserCtx) ->
+    Db#{user_ctx := UserCtx}.
+
+
+ensure_full_commit(#{}) ->
+    {ok, 0}.
+
+
+ensure_full_commit(#{}, _Timeout) ->
+    {ok, 0}.
+
+
+open_doc(#{} = Db, DocId) ->
+    open_doc(Db, DocId, []).
+
+
+open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        case fabric2_fdb:get_local_doc(TxDb, DocId) of
+            #doc{} = Doc -> {ok, Doc};
+            Else -> Else
+        end
+    end);
+
+open_doc(#{} = Db, DocId, Options) ->
+    NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
+    NeedsTree = (Options -- NeedsTreeOpts /= Options),
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        Revs = case NeedsTree of
+            true -> fabric2_fdb:get_all_revs(TxDb, DocId);
+            false -> fabric2_fdb:get_winning_revs(TxDb, DocId, 1)
+        end,
+        if Revs == [] -> {not_found, missing}; true ->
+            #{winner := true} = RI = lists:last(Revs),
+            case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
+                #doc{} = Doc ->
+                    apply_open_doc_opts(Doc, Revs, Options);
+                Else ->
+                    Else
+            end
+        end
+    end).
+
+
+open_doc_revs(Db, DocId, Revs, Options) ->
+    Latest = lists:member(latest, Options),
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        AllRevInfos = fabric2_fdb:get_all_revs(TxDb, DocId),
+        RevTree = lists:foldl(fun(RI, TreeAcc) ->
+            RIPath = fabric2_util:revinfo_to_path(RI),
+            {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+            Merged
+        end, [], AllRevInfos),
+        {Found, Missing} = case Revs of
+            all ->
+                {couch_key_tree:get_all_leafs(RevTree), []};
+            _ when Latest ->
+                couch_key_tree:get_key_leafs(RevTree, Revs);
+            _ ->
+                couch_key_tree:get(RevTree, Revs)
+        end,
+        Docs = lists:map(fun({Value, {Pos, [Rev | RevPath]}}) ->
+            case Value of
+                ?REV_MISSING ->
+                    % We have the rev in our list but know nothing about it
+                    {{not_found, missing}, {Pos, Rev}};
+                _ ->
+                    RevInfo = #{
+                        rev_id => {Pos, Rev},
+                        rev_path => RevPath
+                    },
+                    case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
+                        #doc{} = Doc -> {ok, Doc};
+                        Else -> {Else, {Pos, Rev}}
+                    end
+            end
+        end, Found),
+        MissingDocs = [{{not_found, missing}, MRev} || MRev <- Missing],
+        {ok, Docs ++ MissingDocs}
+    end).
+
+
+get_doc_info(Db, DocId) ->
+    case get_full_doc_info(Db, DocId) of
+        not_found -> not_found;
+        FDI -> couch_doc:to_doc_info(FDI)
+    end.
+
+
+get_full_doc_info(Db, DocId) ->
+    RevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_all_revs(TxDb, DocId)
+    end),
+    if RevInfos == [] -> not_found; true ->
+        #{winner := true} = Winner = lists:last(RevInfos),
+        RevTree = lists:foldl(fun(RI, TreeAcc) ->
+            RIPath = fabric2_util:revinfo_to_path(RI),
+            {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+            Merged
+        end, [], RevInfos),
+        #full_doc_info{
+            id = DocId,
+            update_seq = fabric2_fdb:vs_to_seq(maps:get(sequence, Winner)),
+            deleted = maps:get(deleted, Winner),
+            rev_tree = RevTree
+        }
+    end.
+
+
+get_full_doc_infos(Db, DocIds) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        lists:map(fun(DocId) ->
+            get_full_doc_info(TxDb, DocId)
+        end, DocIds)
+    end).
+
+
+get_missing_revs(Db, JsonIdRevs) ->
+    IdRevs = [idrevs(IdR) || IdR <- JsonIdRevs],
+    AllRevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        lists:foldl(fun({Id, _Revs}, Acc) ->
+            case maps:is_key(Id, Acc) of
+                true ->
+                    Acc;
+                false ->
+                    RevInfos = fabric2_fdb:get_all_revs(TxDb, Id),
+                    Acc#{Id => RevInfos}
+            end
+        end, #{}, IdRevs)
+    end),
+    AllMissing = lists:flatmap(fun({Id, Revs}) ->
+        #{Id := RevInfos} = AllRevInfos,
+        Missing = try
+            lists:foldl(fun(RevInfo, RevAcc) ->
+                if RevAcc /= [] -> ok; true ->
+                    throw(all_found)
+                end,
+                filter_found_revs(RevInfo, RevAcc)
+            end, Revs, RevInfos)
+        catch throw:all_found ->
+            []
+        end,
+        if Missing == [] -> []; true ->
+            PossibleAncestors = find_possible_ancestors(RevInfos, Missing),
+            [{Id, Missing, PossibleAncestors}]
+        end
+    end, IdRevs),
+    {ok, AllMissing}.
+
+
+update_doc(Db, Doc) ->
+    update_doc(Db, Doc, []).
+
+
+update_doc(Db, Doc, Options) ->
+    case update_docs(Db, [Doc], Options) of
+        {ok, [{ok, NewRev}]} ->
+            {ok, NewRev};
+        {ok, [{{_Id, _Rev}, Error}]} ->
+            throw(Error);
+        {error, [{{_Id, _Rev}, Error}]} ->
+            throw(Error);
+        {error, [Error]} ->
+            throw(Error);
+        {ok, []} ->
+            % replication success
+            {Pos, [RevId | _]} = Doc#doc.revs,
+            {ok, {Pos, RevId}}
+    end.
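+
+% For example (illustrative id and body), creating a brand new doc
+% returns its first revision:
+%
+%     Doc = #doc{id = <<"foo">>, body = {[{<<"k">>, <<"v">>}]}},
+%     {ok, {1, _Rev}} = fabric2_db:update_doc(Db, Doc, []).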
+
+
+update_docs(Db, Docs) ->
+    update_docs(Db, Docs, []).
+
+
+update_docs(Db, Docs, Options) ->
+    Resps0 = case lists:member(replicated_changes, Options) of
+        false ->
+            fabric2_fdb:transactional(Db, fun(TxDb) ->
+                update_docs_interactive(TxDb, Docs, Options)
+            end);
+        true ->
+            lists:map(fun(Doc) ->
+                fabric2_fdb:transactional(Db, fun(TxDb) ->
+                    update_doc_int(TxDb, Doc, Options)
+                end)
+            end, Docs)
+    end,
+    % Convert errors
+    Resps1 = lists:map(fun(Resp) ->
+        case Resp of
+            {#doc{} = Doc, Error} ->
+                #doc{
+                    id = DocId,
+                    revs = Revs
+                } = Doc,
+                RevId = case Revs of
+                    {RevPos, [Rev | _]} -> {RevPos, Rev};
+                    {0, []} -> {0, <<>>}
+                end,
+                {{DocId, RevId}, Error};
+            Else ->
+                Else
+        end
+    end, Resps0),
+    case lists:member(replicated_changes, Options) of
+        true ->
+            {ok, [R || R <- Resps1, R /= {ok, []}]};
+        false ->
+            Status = lists:foldl(fun(Resp, Acc) ->
+                case Resp of
+                    {ok, _} -> Acc;
+                    _ -> error
+                end
+            end, ok, Resps1),
+            {Status, Resps1}
+    end.
+
+
+read_attachment(Db, DocId, AttId) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:read_attachment(TxDb, DocId, AttId)
+    end).
+
+
+write_attachment(Db, DocId, Att) ->
+    Data = couch_att:fetch(data, Att),
+    {ok, AttId} = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:write_attachment(TxDb, DocId, Data)
+    end),
+    couch_att:store(data, {loc, Db, DocId, AttId}, Att).
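+
+% A sketch of the attachment round trip through the two functions above
+% (the location tuple is produced by write_attachment via couch_att):
+%
+%     Att1 = fabric2_db:write_attachment(Db, DocId, Att0),
+%     {loc, Db, DocId, AttId} = couch_att:fetch(data, Att1),
+%     Bin = fabric2_db:read_attachment(Db, DocId, AttId).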
+
+
+fold_docs(Db, UserFun, UserAcc) ->
+    fold_docs(Db, UserFun, UserAcc, []).
+
+
+fold_docs(Db, UserFun, UserAcc, Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:fold_docs(TxDb, UserFun, UserAcc, Options)
+    end).
+
+
+fold_changes(Db, SinceSeq, UserFun, UserAcc) ->
+    fold_changes(Db, SinceSeq, UserFun, UserAcc, []).
+
+
+fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:fold_changes(TxDb, SinceSeq, UserFun, UserAcc, Options)
+    end).
+
+
+new_revid(Doc) ->
+    #doc{
+        body = Body,
+        revs = {OldStart, OldRevs},
+        atts = Atts,
+        deleted = Deleted
+    } = Doc,
+
+    DigestedAtts = lists:foldl(fun(Att, Acc) ->
+        [N, T, M] = couch_att:fetch([name, type, md5], Att),
+        case M == <<>> of
+            true -> Acc;
+            false -> [{N, T, M} | Acc]
+        end
+    end, [], Atts),
+
+    Rev = case DigestedAtts of
+        Atts2 when length(Atts) =/= length(Atts2) ->
+            % We must have old style non-md5 attachments
+            list_to_binary(integer_to_list(couch_util:rand32()));
+        Atts2 ->
+            OldRev = case OldRevs of [] -> 0; [OldRev0 | _] -> OldRev0 end,
+            SigTerm = [Deleted, OldStart, OldRev, Body, Atts2],
+            couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]))
+    end,
+
+    Doc#doc{revs = {OldStart + 1, [Rev | OldRevs]}}.
+
+
+maybe_set_user_ctx(Db, Options) ->
+    case fabric2_util:get_value(user_ctx, Options) of
+        #user_ctx{} = UserCtx ->
+            set_user_ctx(Db, UserCtx);
+        undefined ->
+            Db
+    end.
+
+
+is_member(Db) ->
+    {SecProps} = get_security(Db),
+    case is_admin(Db) of
+        true ->
+            true;
+        false ->
+            case is_public_db(SecProps) of
+                true ->
+                    true;
+                false ->
+                    {Members} = get_members(SecProps),
+                    UserCtx = get_user_ctx(Db),
+                    is_authorized(Members, UserCtx)
+            end
+    end.
+
+
+is_authorized(Group, UserCtx) ->
+    #user_ctx{
+        name = UserName,
+        roles = UserRoles
+    } = UserCtx,
+    Names = fabric2_util:get_value(<<"names">>, Group, []),
+    Roles = fabric2_util:get_value(<<"roles">>, Group, []),
+    case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
+        true ->
+            true;
+        false ->
+            check_security(names, UserName, Names)
+    end.
+
+
+check_security(roles, [], _) ->
+    false;
+check_security(roles, UserRoles, Roles) ->
+    UserRolesSet = ordsets:from_list(UserRoles),
+    RolesSet = ordsets:from_list(Roles),
+    not ordsets:is_disjoint(UserRolesSet, RolesSet);
+check_security(names, _, []) ->
+    false;
+check_security(names, null, _) ->
+    false;
+check_security(names, UserName, Names) ->
+    lists:member(UserName, Names).
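+
+% For example, a user carrying the implicit <<"_admin">> role is
+% authorized against any group, even an empty one:
+%
+%     Ctx = #user_ctx{name = <<"bob">>, roles = [<<"_admin">>]},
+%     true = is_authorized([], Ctx).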
+
+
+throw_security_error(#user_ctx{name = null} = UserCtx) ->
+    Reason = <<"You are not authorized to access this db.">>,
+    throw_security_error(UserCtx, Reason);
+throw_security_error(#user_ctx{name = _} = UserCtx) ->
+    Reason = <<"You are not allowed to access this db.">>,
+    throw_security_error(UserCtx, Reason).
+
+
+throw_security_error(#user_ctx{} = UserCtx, Reason) ->
+    Error = security_error_type(UserCtx),
+    throw({Error, Reason}).
+
+
+security_error_type(#user_ctx{name = null}) ->
+    unauthorized;
+security_error_type(#user_ctx{name = _}) ->
+    forbidden.
+
+
+is_public_db(SecProps) ->
+    {Members} = get_members(SecProps),
+    Names = fabric2_util:get_value(<<"names">>, Members, []),
+    Roles = fabric2_util:get_value(<<"roles">>, Members, []),
+    Names =:= [] andalso Roles =:= [].
+
+
+get_admins(SecProps) ->
+    fabric2_util:get_value(<<"admins">>, SecProps, {[]}).
+
+
+get_members(SecProps) ->
+    % We fall back to readers here for backwards compatibility.
+    case fabric2_util:get_value(<<"members">>, SecProps) of
+        undefined ->
+            fabric2_util:get_value(<<"readers">>, SecProps, {[]});
+        Members ->
+            Members
+    end.
+
+
+apply_open_doc_opts(Doc, Revs, Options) ->
+    IncludeRevsInfo = lists:member(revs_info, Options),
+    IncludeConflicts = lists:member(conflicts, Options),
+    IncludeDelConflicts = lists:member(deleted_conflicts, Options),
+    IncludeLocalSeq = lists:member(local_seq, Options),
+    ReturnDeleted = lists:member(deleted, Options),
+
+    % This revs_info becomes fairly useless now that we're
+    % not keeping old document bodies around...
+    Meta1 = if not IncludeRevsInfo -> []; true ->
+        {Pos, [Rev | RevPath]} = Doc#doc.revs,
+        RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
+        [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
+    end,
+
+    Meta2 = if not IncludeConflicts -> []; true ->
+        Conflicts = [RI || RI = #{winner := false, deleted := false} <- Revs],
+        if Conflicts == [] -> []; true ->
+            ConflictRevs = [maps:get(rev_id, RI) || RI <- Conflicts],
+            [{conflicts, ConflictRevs}]
+        end
+    end,
+
+    Meta3 = if not IncludeDelConflicts -> []; true ->
+        DelConflicts = [RI || RI = #{winner := false, deleted := true} <- Revs],
+        if DelConflicts == [] -> []; true ->
+            DelConflictRevs = [maps:get(rev_id, RI) || RI <- DelConflicts],
+            [{deleted_conflicts, DelConflictRevs}]
+        end
+    end,
+
+    Meta4 = if not IncludeLocalSeq -> []; true ->
+        #{winner := true, sequence := SeqVS} = lists:last(Revs),
+        [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
+    end,
+
+    case Doc#doc.deleted and not ReturnDeleted of
+        true ->
+            {not_found, deleted};
+        false ->
+            {ok, Doc#doc{
+                meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4
+            }}
+    end.
+
+
+filter_found_revs(RevInfo, Revs) ->
+    #{
+        rev_id := {Pos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    FullRevPath = [Rev | RevPath],
+    lists:flatmap(fun({FindPos, FindRev} = RevIdToFind) ->
+        if FindPos > Pos -> [RevIdToFind]; true ->
+            % Add 1 because lists:nth is 1 based
+            Idx = Pos - FindPos + 1,
+            case Idx > length(FullRevPath) of
+                true ->
+                    [RevIdToFind];
+                false ->
+                    case lists:nth(Idx, FullRevPath) == FindRev of
+                        true -> [];
+                        false -> [RevIdToFind]
+                    end
+            end
+        end
+    end, Revs).
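+
+% For example, if the database holds rev {2, B} with rev_path [A], a
+% requested {1, A} lies on the stored path and is filtered out, while
+% {2, C} survives as missing.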
+
+
+find_possible_ancestors(RevInfos, MissingRevs) ->
+    % Find any revinfos that are possible ancestors
+    % of the missing revs. A possible ancestor is
+    % any rev that has a start position less than
+    % any missing revision. Stated alternatively,
+    % find any revinfo that could theoretically be
+    % extended to be one or more of the missing
+    % revisions.
+    %
+    % Since we are looking at any missing revision
+    % we can just compare against the maximum missing
+    % start position.
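+    %
+    % For example, if the missing revs are [{3, R3}, {5, R5}]
+    % then any revinfo with a start position less than 5 is
+    % reported as a possible ancestor.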
+    MaxMissingPos = case MissingRevs of
+        [] -> 0;
+        [_ | _] -> lists:max([Start || {Start, _Rev} <- MissingRevs])
+    end,
+    lists:flatmap(fun(RevInfo) ->
+        #{rev_id := {RevPos, _} = RevId} = RevInfo,
+        case RevPos < MaxMissingPos of
+            true -> [RevId];
+            false -> []
+        end
+    end, RevInfos).
+
+
+update_doc_int(#{} = Db, #doc{} = Doc, Options) ->
+    IsLocal = case Doc#doc.id of
+        <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+    IsReplicated = lists:member(replicated_changes, Options),
+    try
+        case {IsLocal, IsReplicated} of
+            {false, false} -> update_doc_interactive(Db, Doc, Options);
+            {false, true} -> update_doc_replicated(Db, Doc, Options);
+            {true, _} -> update_local_doc(Db, Doc, Options)
+        end
+    catch throw:{?MODULE, Return} ->
+        Return
+    end.
+
+
+update_docs_interactive(Db, Docs0, Options) ->
+    Docs = tag_docs(Docs0),
+    Futures = get_winning_rev_futures(Db, Docs),
+    {Result, _} = lists:mapfoldl(fun(Doc, SeenIds) ->
+        try
+            update_docs_interactive(Db, Doc, Options, Futures, SeenIds)
+        catch throw:{?MODULE, Return} ->
+            {Return, SeenIds}
+        end
+    end, [], Docs),
+    Result.
+
+
+update_docs_interactive(Db, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
+        Options, _Futures, SeenIds) ->
+    {update_local_doc(Db, Doc, Options), SeenIds};
+
+update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
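+    % A given DocId may only be updated once per interactive
+    % batch; any repeat of the same id later in the request is
+    % reported as a conflict.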
+    case lists:member(Doc#doc.id, SeenIds) of
+        true ->
+            {{error, conflict}, SeenIds};
+        false ->
+            Future = maps:get(doc_tag(Doc), Futures),
+            case update_doc_interactive(Db, Doc, Future, Options) of
+                {ok, _} = Resp ->
+                    {Resp, [Doc#doc.id | SeenIds]};
+                _ = Resp ->
+                    {Resp, SeenIds}
+            end
+    end.
+
+
+update_doc_interactive(Db, Doc0, Options) ->
+    % Get the current winning revision. This is needed
+    % regardless of which branch we're updating. The extra
+    % revision we're grabbing is an optimization to
+    % save us a round trip if we end up deleting
+    % the winning revision branch.
+    NumRevs = if Doc0#doc.deleted -> 2; true -> 1 end,
+    Future = fabric2_fdb:get_winning_revs_future(Db, Doc0#doc.id, NumRevs),
+    update_doc_interactive(Db, Doc0, Future, Options).
+
+
+update_doc_interactive(Db, Doc0, Future, _Options) ->
+    RevInfos = fabric2_fdb:get_winning_revs_wait(Db, Future),
+    {Winner, SecondPlace} = case RevInfos of
+        [] -> {not_found, not_found};
+        [WRI] -> {WRI, not_found};
+        [WRI, SPRI] -> {WRI, SPRI}
+    end,
+    WinnerRevId = case Winner of
+        not_found ->
+            {0, <<>>};
+        _ ->
+            case maps:get(deleted, Winner) of
+                true -> {0, <<>>};
+                false -> maps:get(rev_id, Winner)
+            end
+    end,
+
+    % Check that a revision was specified if required
+    Doc0RevId = doc_to_revid(Doc0),
+    if Doc0RevId /= {0, <<>>} orelse WinnerRevId == {0, <<>>} -> ok; true ->
+        ?RETURN({error, conflict})
+    end,
+
+    % Check that we're not trying to create a deleted doc
+    if Doc0RevId /= {0, <<>>} orelse not Doc0#doc.deleted -> ok; true ->
+        ?RETURN({error, conflict})
+    end,
+
+    % Get the target revision to update
+    Target = case Doc0RevId == WinnerRevId of
+        true ->
+            Winner;
+        false ->
+            case fabric2_fdb:get_non_deleted_rev(Db, Doc0#doc.id, Doc0RevId) of
+                #{deleted := false} = Target0 ->
+                    Target0;
+                not_found ->
+                    % Either a missing revision or a deleted
+                    % revision. Either way a conflict. Note
+                    % that we get not_found for a deleted revision
+                    % because we only check for the non-deleted
+                    % key in fdb
+                    ?RETURN({error, conflict})
+            end
+    end,
+
+    % When recreating a deleted document we want to extend
+    % the winning revision branch rather than create a
+    % new branch. If we did not do this we could be
+    % recreating into a state that previously existed.
+    Doc1 = case Winner of
+        #{deleted := true} when not Doc0#doc.deleted ->
+            {WinnerRevPos, WinnerRev} = maps:get(rev_id, Winner),
+            WinnerRevPath = maps:get(rev_path, Winner),
+            Doc0#doc{revs = {WinnerRevPos, [WinnerRev | WinnerRevPath]}};
+        _ ->
+            Doc0
+    end,
+
+    % Validate the doc update and create the
+    % new revinfo map
+    Doc2 = prep_and_validate(Db, Doc1, Target),
+    #doc{
+        deleted = NewDeleted,
+        revs = {NewRevPos, [NewRev | NewRevPath]}
+    } = Doc3 = new_revid(Doc2),
+
+    Doc4 = update_attachment_revpos(Doc3),
+
+    NewRevInfo = #{
+        winner => undefined,
+        deleted => NewDeleted,
+        rev_id => {NewRevPos, NewRev},
+        rev_path => NewRevPath,
+        sequence => undefined,
+        branch_count => undefined
+    },
+
+    % Gather the list of possible winning revisions
+    Possible = case Target == Winner of
+        true when not Doc4#doc.deleted ->
+            [NewRevInfo];
+        true when Doc4#doc.deleted ->
+            case SecondPlace of
+                #{} -> [NewRevInfo, SecondPlace];
+                not_found -> [NewRevInfo]
+            end;
+        false ->
+            [NewRevInfo, Winner]
+    end,
+
+    % Sort the rev infos such that the winner is first
+    {NewWinner0, NonWinner} = case fabric2_util:sort_revinfos(Possible) of
+        [W] -> {W, not_found};
+        [W, NW] -> {W, NW}
+    end,
+
+    BranchCount = case Winner of
+        not_found -> 1;
+        #{branch_count := BC} -> BC
+    end,
+    NewWinner = NewWinner0#{branch_count := BranchCount},
+    ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+    ToRemove = if Target == not_found -> []; true -> [Target] end,
+
+    ok = fabric2_fdb:write_doc(
+            Db,
+            Doc4,
+            NewWinner,
+            Winner,
+            ToUpdate,
+            ToRemove
+        ),
+
+    {ok, {NewRevPos, NewRev}}.
+
+
+update_doc_replicated(Db, Doc0, _Options) ->
+    #doc{
+        id = DocId,
+        deleted = Deleted,
+        revs = {RevPos, [Rev | RevPath]}
+    } = Doc0,
+
+    DocRevInfo0 = #{
+        winner => undefined,
+        deleted => Deleted,
+        rev_id => {RevPos, Rev},
+        rev_path => RevPath,
+        sequence => undefined,
+        branch_count => undefined
+    },
+
+    AllRevInfos = fabric2_fdb:get_all_revs(Db, DocId),
+
+    RevTree = lists:foldl(fun(RI, TreeAcc) ->
+        RIPath = fabric2_util:revinfo_to_path(RI),
+        {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+        Merged
+    end, [], AllRevInfos),
+
+    DocRevPath = fabric2_util:revinfo_to_path(DocRevInfo0),
+    {NewTree, Status} = couch_key_tree:merge(RevTree, DocRevPath),
+    if Status /= internal_node -> ok; true ->
+        % We already know this revision so nothing
+        % left to do.
+        ?RETURN({ok, []})
+    end,
+
+    % It's possible for a replication to send fewer than
+    % revs_limit revisions while still extending an existing
+    % branch. To avoid losing revision history we extract the
+    % new node from the tree and use the combined path after
+    % stemming.
+    {[{_, {RevPos, UnstemmedRevs}}], []}
+            = couch_key_tree:get(NewTree, [{RevPos, Rev}]),
+    RevsLimit = fabric2_db:get_revs_limit(Db),
+    Doc1 = Doc0#doc{
+        revs = {RevPos, lists:sublist(UnstemmedRevs, RevsLimit)}
+    },
+    {RevPos, [Rev | NewRevPath]} = Doc1#doc.revs,
+    DocRevInfo1 = DocRevInfo0#{rev_path := NewRevPath},
+
+    % Find any previous revision we knew about for
+    % validation and attachment handling.
+    AllLeafsFull = couch_key_tree:get_all_leafs_full(NewTree),
+    LeafPath = get_leaf_path(RevPos, Rev, AllLeafsFull),
+    PrevRevInfo = find_prev_revinfo(RevPos, LeafPath),
+    Doc2 = prep_and_validate(Db, Doc1, PrevRevInfo),
+
+    % Possible winners are the previous winner and
+    % the new DocRevInfo
+    Winner = case fabric2_util:sort_revinfos(AllRevInfos) of
+        [#{winner := true} = WRI | _] -> WRI;
+        [] -> not_found
+    end,
+    {NewWinner0, NonWinner} = case Winner == PrevRevInfo of
+        true ->
+            {DocRevInfo1, not_found};
+        false ->
+            [W, NW] = fabric2_util:sort_revinfos([Winner, DocRevInfo1]),
+            {W, NW}
+    end,
+
+    NewWinner = NewWinner0#{branch_count := length(AllLeafsFull)},
+    ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+    ToRemove = if PrevRevInfo == not_found -> []; true -> [PrevRevInfo] end,
+
+    ok = fabric2_fdb:write_doc(
+            Db,
+            Doc2,
+            NewWinner,
+            Winner,
+            ToUpdate,
+            ToRemove
+        ),
+
+    {ok, []}.
+
+
+update_local_doc(Db, Doc0, _Options) ->
+    Doc1 = case increment_local_doc_rev(Doc0) of
+        {ok, Updated} -> Updated;
+        {error, _} = Error -> ?RETURN(Error)
+    end,
+
+    ok = fabric2_fdb:write_local_doc(Db, Doc1),
+
+    #doc{revs = {0, [Rev]}} = Doc1,
+    {ok, {0, integer_to_binary(Rev)}}.
+
+
+update_attachment_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) ->
+    Atts = lists:map(fun(Att) ->
+        case couch_att:fetch(data, Att) of
+            {loc, _Db, _DocId, _AttId} ->
+                % Attachment was already on disk
+                Att;
+            _ ->
+                % We will write this attachment with this update
+                % so mark it with the RevPos that will be written
+                couch_att:store(revpos, RevPos, Att)
+        end
+    end, Atts0),
+    Doc#doc{atts = Atts}.
+
+
+get_winning_rev_futures(Db, Docs) ->
+    lists:foldl(fun(Doc, Acc) ->
+        #doc{
+            id = DocId,
+            deleted = Deleted
+        } = Doc,
+        IsLocal = case DocId of
+            <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+            _ -> false
+        end,
+        if IsLocal -> Acc; true ->
+            NumRevs = if Deleted -> 2; true -> 1 end,
+            Future = fabric2_fdb:get_winning_revs_future(Db, DocId, NumRevs),
+            DocTag = doc_tag(Doc),
+            Acc#{DocTag => Future}
+        end
+    end, #{}, Docs).
+
+
+prep_and_validate(Db, NewDoc, PrevRevInfo) ->
+    HasStubs = couch_doc:has_stubs(NewDoc),
+    HasVDUs = [] /= maps:get(validate_doc_update_funs, Db),
+    IsDDoc = case NewDoc#doc.id of
+        <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+
+    PrevDoc = case HasStubs orelse (HasVDUs and not IsDDoc) of
+        true when PrevRevInfo /= not_found ->
+            case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
+                #doc{} = PDoc -> PDoc;
+                {not_found, _} -> nil
+            end;
+        _ ->
+            nil
+    end,
+
+    MergedDoc = if not HasStubs -> NewDoc; true ->
+        % This will throw an error if we have any
+        % attachment stubs missing data
+        couch_doc:merge_stubs(NewDoc, PrevDoc)
+    end,
+    check_duplicate_attachments(MergedDoc),
+    validate_doc_update(Db, MergedDoc, PrevDoc),
+    MergedDoc.
+
+
+validate_doc_update(Db, #doc{id = <<"_design/", _/binary>>} = Doc, _) ->
+    case catch check_is_admin(Db) of
+        ok -> validate_ddoc(Db, Doc);
+        Error -> ?RETURN({Doc, Error})
+    end;
+validate_doc_update(Db, Doc, PrevDoc) ->
+    #{
+        security_doc := Security,
+        validate_doc_update_funs := VDUs
+    } = Db,
+    Fun = fun() ->
+        JsonCtx = fabric2_util:user_ctx_to_json(Db),
+        lists:map(fun(VDU) ->
+            try
+                case VDU(Doc, PrevDoc, JsonCtx, Security) of
+                    ok -> ok;
+                    Error1 -> throw(Error1)
+                end
+            catch throw:Error2 ->
+                ?RETURN({Doc, Error2})
+            end
+        end, VDUs)
+    end,
+    Stat = [couchdb, query_server, vdu_process_time],
+    if VDUs == [] -> ok; true ->
+        couch_stats:update_histogram(Stat, Fun)
+    end.
+
+
+validate_ddoc(Db, DDoc) ->
+    try
+        ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
+    catch
+        throw:{invalid_design_doc, Reason} ->
+            throw({bad_request, invalid_design_doc, Reason});
+        throw:{compilation_error, Reason} ->
+            throw({bad_request, compilation_error, Reason});
+        throw:Error ->
+            ?RETURN({DDoc, Error})
+    end.
+
+
+check_duplicate_attachments(#doc{atts = Atts}) ->
+    lists:foldl(fun(Att, Names) ->
+        Name = couch_att:fetch(name, Att),
+        case ordsets:is_element(Name, Names) of
+            true -> throw({bad_request, <<"Duplicate attachments">>});
+            false -> ordsets:add_element(Name, Names)
+        end
+    end, ordsets:new(), Atts).
+
+
+get_leaf_path(Pos, Rev, [{Pos, [{Rev, _RevInfo} | LeafPath]} | _]) ->
+    LeafPath;
+get_leaf_path(Pos, Rev, [_WrongLeaf | RestLeafs]) ->
+    get_leaf_path(Pos, Rev, RestLeafs).
+
+
+find_prev_revinfo(_Pos, []) ->
+    not_found;
+find_prev_revinfo(Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
+    find_prev_revinfo(Pos - 1, RestPath);
+find_prev_revinfo(_Pos, [{_Rev, #{} = RevInfo} | _]) ->
+    RevInfo.
+
+
+increment_local_doc_rev(#doc{deleted = true} = Doc) ->
+    {ok, Doc#doc{revs = {0, [0]}}};
+increment_local_doc_rev(#doc{revs = {0, []}} = Doc) ->
+    {ok, Doc#doc{revs = {0, [1]}}};
+increment_local_doc_rev(#doc{revs = {0, [RevStr | _]}} = Doc) ->
+    try
+        PrevRev = binary_to_integer(RevStr),
+        {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
+    catch error:badarg ->
+        {error, <<"Invalid rev format">>}
+    end;
+increment_local_doc_rev(#doc{}) ->
+    {error, <<"Invalid rev format">>}.
+
+
+doc_to_revid(#doc{revs = Revs}) ->
+    case Revs of
+        {0, []} -> {0, <<>>};
+        {RevPos, [Rev | _]} -> {RevPos, Rev}
+    end.
+
+
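+% Tag each doc with a unique reference so the winning rev
+% futures from get_winning_rev_futures/2 can be matched back
+% to their docs even if a DocId repeats within the batch.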
+tag_docs([]) ->
+    [];
+tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+    NewDoc = Doc#doc{
+        meta = [{ref, make_ref()} | Meta]
+    },
+    [NewDoc | tag_docs(Rest)].
+
+
+doc_tag(#doc{meta = Meta}) ->
+    fabric2_util:get_value(ref, Meta).
+
+
+idrevs({Id, Revs}) when is_list(Revs) ->
+    {docid(Id), [rev(R) || R <- Revs]}.
+
+
+docid(DocId) when is_list(DocId) ->
+    list_to_binary(DocId);
+docid(DocId) ->
+    DocId.
+
+
+rev(Rev) when is_list(Rev); is_binary(Rev) ->
+    couch_doc:parse_rev(Rev);
+rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
+    Rev.
+
diff --git a/src/fabric/src/fabric2_events.erl b/src/fabric/src/fabric2_events.erl
new file mode 100644
index 0000000..a571714
--- /dev/null
+++ b/src/fabric/src/fabric2_events.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_events).
+
+
+-export([
+    link_listener/4,
+    stop_listener/1
+]).
+
+-export([
+    init/5,
+    poll/5
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
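+% Usage sketch (hypothetical callback names; `my_mod` and
+% `db_event/3` are assumed to follow the Mod:Fun(DbName,
+% Event, St) contract expected by poll/5 below):
+%
+%     {ok, Pid} = fabric2_events:link_listener(
+%             my_mod, db_event, nil, [{dbname, <<"dbname">>}]),
+%     ...
+%     fabric2_events:stop_listener(Pid).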
+link_listener(Mod, Fun, St, Options) ->
+    DbName = fabric2_util:get_value(dbname, Options),
+    Pid = spawn_link(?MODULE, init, [self(), DbName, Mod, Fun, St]),
+    receive
+        {Pid, initialized} -> ok
+    end,
+    {ok, Pid}.
+
+
+stop_listener(Pid) ->
+    Pid ! stop_listening.
+
+
+init(Parent, DbName, Mod, Fun, St) ->
+    {ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+    Since = fabric2_db:get_update_seq(Db),
+    couch_log:error("XKCD: START LISTENER: ~s : ~p for ~p", [DbName, Since, Parent]),
+    erlang:monitor(process, Parent),
+    Parent ! {self(), initialized},
+    poll(DbName, Since, Mod, Fun, St),
+    couch_log:error("XKCD: STOP LISTENER for ~p", [Parent]).
+
+
+poll(DbName, Since, Mod, Fun, St) ->
+    {Resp, NewSince} = try
+        case fabric2_db:open(DbName, [?ADMIN_CTX]) of
+            {ok, Db} ->
+                case fabric2_db:get_update_seq(Db) of
+                    Since ->
+                        couch_log:error("XKCD: NO UPDATE: ~s :: ~p", [DbName, Since]),
+                        {{ok, St}, Since};
+                    Other ->
+                        couch_log:error("XKCD: UPDATED: ~s :: ~p -> ~p", [DbName, Since, Other]),
+                        {Mod:Fun(DbName, updated, St), Other}
+                end;
+            Error ->
+                exit(Error)
+        end
+    catch error:database_does_not_exist ->
+        % Keep the {Resp, NewSince} shape so the receive
+        % block below can dispatch on the callback result.
+        {Mod:Fun(DbName, deleted, St), Since}
+    end,
+    receive
+        stop_listening ->
+            ok;
+        {'DOWN', _, _, _, _} ->
+            ok
+    after 0 ->
+        case Resp of
+            {ok, NewSt} ->
+                timer:sleep(1000),
+                ?MODULE:poll(DbName, NewSince, Mod, Fun, NewSt);
+            {stop, _} ->
+                ok
+        end
+    end.
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
new file mode 100644
index 0000000..0a4f298
--- /dev/null
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -0,0 +1,1187 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb).
+
+
+-export([
+    transactional/1,
+    transactional/3,
+    transactional/2,
+
+    create/2,
+    open/2,
+    reopen/1,
+    delete/1,
+    exists/1,
+
+    list_dbs/2,
+
+    get_info/1,
+    get_config/1,
+    set_config/3,
+
+    get_stat/2,
+    incr_stat/3,
+
+    get_all_revs/2,
+    get_winning_revs/3,
+    get_winning_revs_future/3,
+    get_winning_revs_wait/2,
+    get_non_deleted_rev/3,
+
+    get_doc_body/3,
+    get_doc_body_future/3,
+    get_doc_body_wait/4,
+    get_local_doc/2,
+
+    write_doc/6,
+    write_local_doc/2,
+
+    read_attachment/3,
+    write_attachment/3,
+
+    fold_docs/4,
+    fold_changes/5,
+    get_last_change/1,
+
+    vs_to_seq/1,
+
+    debug_cluster/0,
+    debug_cluster/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+transactional(Fun) ->
+    do_transaction(Fun, undefined).
+
+
+transactional(DbName, Options, Fun) when is_binary(DbName) ->
+    transactional(fun(Tx) ->
+        Fun(init_db(Tx, DbName, Options))
+    end).
+
+
+transactional(#{tx := undefined} = Db, Fun) ->
+    #{layer_prefix := LayerPrefix} = Db,
+    do_transaction(fun(Tx) ->
+        Fun(Db#{tx => Tx})
+    end, LayerPrefix);
+
+transactional(#{tx := {erlfdb_transaction, _}} = Db, Fun) ->
+    Fun(Db).
+
+
+do_transaction(Fun, LayerPrefix) when is_function(Fun, 1) ->
+    Db = get_db_handle(),
+    try
+        erlfdb:transactional(Db, fun(Tx) ->
+            case get(erlfdb_trace) of
+                Name when is_binary(Name) ->
+                    erlfdb:set_option(Tx, transaction_logging_enable, Name);
+                _ ->
+                    ok
+            end,
+            case is_transaction_applied(Tx) of
+                true ->
+                    get_previous_transaction_result();
+                false ->
+                    execute_transaction(Tx, Fun, LayerPrefix)
+            end
+        end)
+    after
+        clear_transaction()
+    end.
+
+
+create(#{} = Db0, Options) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = Db = ensure_current(Db0, false),
+
+    % Eventually DbPrefix will be HCA allocated. For now
+    % we're just using the DbName so that debugging is easier.
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    DbPrefix = erlfdb_tuple:pack({?DBS, DbName}, LayerPrefix),
+    erlfdb:set(Tx, DbKey, DbPrefix),
+
+    % This key is responsible for telling us when something in
+    % the database cache (i.e., fabric2_server's ets table) has
+    % changed and requires re-loading. This currently includes
+    % revs_limit and validate_doc_update functions. There's
+    % no ordering to the versioning; it's just an opaque value
+    % that changes, which the ensure_current check compares.
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = fabric2_util:uuid(),
+    erlfdb:set(Tx, DbVersionKey, DbVersion),
+
+    UUID = fabric2_util:uuid(),
+
+    Defaults = [
+        {?DB_CONFIG, <<"uuid">>, UUID},
+        {?DB_CONFIG, <<"revs_limit">>, ?uint2bin(1000)},
+        {?DB_CONFIG, <<"security_doc">>, <<"{}">>},
+        {?DB_STATS, <<"doc_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_del_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_design_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_local_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"size">>, ?uint2bin(2)}
+    ],
+    lists:foreach(fun({P, K, V}) ->
+        Key = erlfdb_tuple:pack({P, K}, DbPrefix),
+        erlfdb:set(Tx, Key, V)
+    end, Defaults),
+
+    UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+
+    Db#{
+        uuid => UUID,
+        db_prefix => DbPrefix,
+        db_version => DbVersion,
+
+        revs_limit => 1000,
+        security_doc => {[]},
+        user_ctx => UserCtx,
+
+        validate_doc_update_funs => [],
+        before_doc_update => undefined,
+        after_doc_read => undefined,
+        % All other db things will land here as we add features.
+
+        db_options => Options
+    }.
+
+
+open(#{} = Db0, Options) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = Db1 = ensure_current(Db0, false),
+
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    DbPrefix = case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+        Bin when is_binary(Bin) -> Bin;
+        not_found -> erlang:error(database_does_not_exist)
+    end,
+
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = erlfdb:wait(erlfdb:get(Tx, DbVersionKey)),
+
+    UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+
+    Db2 = Db1#{
+        db_prefix => DbPrefix,
+        db_version => DbVersion,
+
+        revs_limit => 1000,
+        security_doc => {[]},
+        user_ctx => UserCtx,
+
+        % Placeholders until we implement these bits.
+        validate_doc_update_funs => [],
+        before_doc_update => undefined,
+        after_doc_read => undefined,
+
+        db_options => Options
+    },
+
+    Db3 = lists:foldl(fun({Key, Val}, DbAcc) ->
+        case Key of
+            <<"uuid">> ->
+                DbAcc#{uuid => Val};
+            <<"revs_limit">> ->
+                DbAcc#{revs_limit => ?bin2uint(Val)};
+            <<"security_doc">> ->
+                DbAcc#{security_doc => ?JSON_DECODE(Val)}
+        end
+    end, Db2, get_config(Db2)),
+
+    load_validate_doc_funs(Db3).
+
+
+reopen(#{} = OldDb) ->
+    require_transaction(OldDb),
+    #{
+        tx := Tx,
+        name := DbName,
+        db_options := Options
+    } = OldDb,
+    open(init_db(Tx, DbName, Options), Options).
+
+
+delete(#{} = Db) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    erlfdb:clear(Tx, DbKey),
+    erlfdb:clear_range_startswith(Tx, DbPrefix),
+    bump_metadata_version(Tx),
+    ok.
+
+
+exists(#{name := DbName} = Db) when is_binary(DbName) ->
+    #{
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = ensure_current(Db, false),
+
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+        Bin when is_binary(Bin) -> true;
+        not_found -> false
+    end.
+
+
+list_dbs(Tx, _Options) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    LayerPrefix = erlfdb_directory:get_name(CouchDB),
+    {Start, End} = erlfdb_tuple:range({?ALL_DBS}, LayerPrefix),
+    Future = erlfdb:get_range(Tx, Start, End),
+    lists:map(fun({K, _V}) ->
+        {?ALL_DBS, DbName} = erlfdb_tuple:unpack(K, LayerPrefix),
+        DbName
+    end, erlfdb:wait(Future)).
+
+
+get_info(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {CStart, CEnd} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+    ChangesFuture = erlfdb:get_range(Tx, CStart, CEnd, [
+        {streaming_mode, exact},
+        {limit, 1},
+        {reverse, true}
+    ]),
+
+    StatsPrefix = erlfdb_tuple:pack({?DB_STATS}, DbPrefix),
+    MetaFuture = erlfdb:get_range_startswith(Tx, StatsPrefix),
+
+    RawSeq = case erlfdb:wait(ChangesFuture) of
+        [] ->
+            vs_to_seq(fabric2_util:seq_zero_vs());
+        [{SeqKey, _}] ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(SeqKey, DbPrefix),
+            vs_to_seq(SeqVS)
+    end,
+    CProp = {update_seq, RawSeq},
+
+    MProps = lists:flatmap(fun({K, V}) ->
+        case erlfdb_tuple:unpack(K, DbPrefix) of
+            {?DB_STATS, <<"doc_count">>} ->
+                [{doc_count, ?bin2uint(V)}];
+            {?DB_STATS, <<"doc_del_count">>} ->
+                [{doc_del_count, ?bin2uint(V)}];
+            {?DB_STATS, <<"size">>} ->
+                Val = ?bin2uint(V),
+                [
+                    {other, {[{data_size, Val}]}},
+                    {sizes, {[
+                        {active, 0},
+                        {external, Val},
+                        {file, 0}
+                    ]}}
+                ];
+            {?DB_STATS, _} ->
+                []
+        end
+    end, erlfdb:wait(MetaFuture)),
+
+    [CProp | MProps].
+
+
+get_config(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db),
+
+    {Start, End} = erlfdb_tuple:range({?DB_CONFIG}, DbPrefix),
+    Future = erlfdb:get_range(Tx, Start, End),
+
+    lists:map(fun({K, V}) ->
+        {?DB_CONFIG, Key} = erlfdb_tuple:unpack(K, DbPrefix),
+        {Key, V}
+    end, erlfdb:wait(Future)).
+
+
+set_config(#{} = Db, ConfigKey, ConfigVal) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_CONFIG, ConfigKey}, DbPrefix),
+    erlfdb:set(Tx, Key, ConfigVal),
+    bump_metadata_version(Tx).
+
+
+get_stat(#{} = Db, StatKey) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+
+    % We may need a proper type scheme here eventually. For
+    % now all stats are unsigned ints because they are
+    % maintained with atomic add operations.
+    ?bin2uint(erlfdb:wait(erlfdb:get(Tx, Key))).
+
+
+incr_stat(_Db, _StatKey, 0) ->
+    ok;
+
+incr_stat(#{} = Db, StatKey, Increment) when is_integer(Increment) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+    erlfdb:add(Tx, Key, Increment).
+
+
+get_all_revs(#{} = Db, DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Prefix = erlfdb_tuple:pack({?DB_REVS, DocId}, DbPrefix),
+    Options = [{streaming_mode, want_all}],
+    Future = erlfdb:get_range_startswith(Tx, Prefix, Options),
+    lists:map(fun({K, V}) ->
+        Key = erlfdb_tuple:unpack(K, DbPrefix),
+        Val = erlfdb_tuple:unpack(V),
+        fdb_to_revinfo(Key, Val)
+    end, erlfdb:wait(Future)).
+
+
+get_winning_revs(Db, DocId, NumRevs) ->
+    Future = get_winning_revs_future(Db, DocId, NumRevs),
+    get_winning_revs_wait(Db, Future).
+
+
+get_winning_revs_future(#{} = Db, DocId, NumRevs) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {StartKey, EndKey} = erlfdb_tuple:range({?DB_REVS, DocId}, DbPrefix),
+    Options = [{reverse, true}, {limit, NumRevs}],
+    erlfdb:get_range_raw(Tx, StartKey, EndKey, Options).
+
+
+get_winning_revs_wait(#{} = Db, Future) ->
+    #{
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+    {Rows, _, _} = erlfdb:wait(Future),
+    lists:map(fun({K, V}) ->
+        Key = erlfdb_tuple:unpack(K, DbPrefix),
+        Val = erlfdb_tuple:unpack(V),
+        fdb_to_revinfo(Key, Val)
+    end, Rows).
+
+
+get_non_deleted_rev(#{} = Db, DocId, RevId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {RevPos, Rev} = RevId,
+
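+    % The `true` element means NotDeleted; deleted revisions
+    % are stored under a different key, so a miss here covers
+    % both unknown and deleted revs.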
+    BaseKey = {?DB_REVS, DocId, true, RevPos, Rev},
+    Key = erlfdb_tuple:pack(BaseKey, DbPrefix),
+    case erlfdb:wait(erlfdb:get(Tx, Key)) of
+        not_found ->
+            not_found;
+        Val ->
+            fdb_to_revinfo(BaseKey, erlfdb_tuple:unpack(Val))
+    end.
+
+
+get_doc_body(Db, DocId, RevInfo) ->
+    Future = get_doc_body_future(Db, DocId, RevInfo),
+    get_doc_body_wait(Db, DocId, RevInfo, Future).
+
+
+get_doc_body_future(#{} = Db, DocId, RevInfo) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    #{
+        rev_id := {RevPos, Rev}
+    } = RevInfo,
+
+    Key = erlfdb_tuple:pack({?DB_DOCS, DocId, RevPos, Rev}, DbPrefix),
+    erlfdb:get(Tx, Key).
+
+
+get_doc_body_wait(#{} = Db0, DocId, RevInfo, Future) ->
+    Db = ensure_current(Db0),
+
+    #{
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+
+    Val = erlfdb:wait(Future),
+    fdb_to_doc(Db, DocId, RevPos, [Rev | RevPath], Val).
+
+
+get_local_doc(#{} = Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db0),
+
+    Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, DocId}, DbPrefix),
+    Val = erlfdb:wait(erlfdb:get(Tx, Key)),
+    fdb_to_local_doc(Db, DocId, Val).
+
+
+write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db0),
+
+    #doc{
+        id = DocId,
+        deleted = Deleted
+    } = Doc,
+
+    % Revision tree
+
+    NewWinner = NewWinner0#{winner := true},
+    NewRevId = maps:get(rev_id, NewWinner),
+
+    {WKey, WVal, WinnerVS} = revinfo_to_fdb(Tx, DbPrefix, DocId, NewWinner),
+    ok = erlfdb:set_versionstamped_value(Tx, WKey, WVal),
+
+    lists:foreach(fun(RI0) ->
+        RI = RI0#{winner := false},
+        {K, V, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+        ok = erlfdb:set(Tx, K, V)
+    end, ToUpdate),
+
+    lists:foreach(fun(RI0) ->
+        RI = RI0#{winner := false},
+        {K, _, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+        ok = erlfdb:clear(Tx, K)
+    end, ToRemove),
+
+    % _all_docs
+
+    UpdateStatus = case {OldWinner, NewWinner} of
+        {not_found, #{deleted := false}} ->
+            created;
+        {#{deleted := true}, #{deleted := false}} ->
+            recreated;
+        {#{deleted := false}, #{deleted := false}} ->
+            updated;
+        {#{deleted := false}, #{deleted := true}} ->
+            deleted
+    end,
+
+    case UpdateStatus of
+        Status when Status == created orelse Status == recreated ->
+            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+            ADVal = erlfdb_tuple:pack(NewRevId),
+            ok = erlfdb:set(Tx, ADKey, ADVal);
+        deleted ->
+            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+            ok = erlfdb:clear(Tx, ADKey);
+        updated ->
+            ok
+    end,
+
+    % _changes
+
+    if OldWinner == not_found -> ok; true ->
+        OldSeq = maps:get(sequence, OldWinner),
+        OldSeqKey = erlfdb_tuple:pack({?DB_CHANGES, OldSeq}, DbPrefix),
+        erlfdb:clear(Tx, OldSeqKey)
+    end,
+
+    NewSeqKey = erlfdb_tuple:pack_vs({?DB_CHANGES, WinnerVS}, DbPrefix),
+    NewSeqVal = erlfdb_tuple:pack({DocId, Deleted, NewRevId}),
+    erlfdb:set_versionstamped_key(Tx, NewSeqKey, NewSeqVal),
+
+    % And all the rest...
+
+    ok = write_doc_body(Db, Doc),
+
+    IsDDoc = case Doc#doc.id of
+        <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+
+    if not IsDDoc -> ok; true ->
+        bump_db_version(Db)
+    end,
+
+    case UpdateStatus of
+        created ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, 1)
+            end,
+            incr_stat(Db, <<"doc_count">>, 1);
+        recreated ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, 1)
+            end,
+            incr_stat(Db, <<"doc_count">>, 1),
+            incr_stat(Db, <<"doc_del_count">>, -1);
+        deleted ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, -1)
+            end,
+            incr_stat(Db, <<"doc_count">>, -1),
+            incr_stat(Db, <<"doc_del_count">>, 1);
+        updated ->
+            ok
+    end,
+
+    ok.
+
+
+write_local_doc(#{} = Db0, Doc) ->
+    #{
+        tx := Tx
+    } = Db = ensure_current(Db0),
+
+    {LDocKey, LDocVal} = local_doc_to_fdb(Db, Doc),
+
+    WasDeleted = case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
+        <<_/binary>> -> false;
+        not_found -> true
+    end,
+
+    case Doc#doc.deleted of
+        true -> erlfdb:clear(Tx, LDocKey);
+        false -> erlfdb:set(Tx, LDocKey, LDocVal)
+    end,
+
+    case {WasDeleted, Doc#doc.deleted} of
+        {true, false} ->
+            incr_stat(Db, <<"doc_local_count">>, 1);
+        {false, true} ->
+            incr_stat(Db, <<"doc_local_count">>, -1);
+        _ ->
+            ok
+    end,
+
+    ok.
+
+
+read_attachment(#{} = Db, DocId, AttId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+    case erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)) of
+        [] ->
+            throw({not_found, missing});
+        KVs ->
+            Vs = [V || {_K, V} <- KVs],
+            iolist_to_binary(Vs)
+    end.
+
+
+write_attachment(#{} = Db, DocId, Data) when is_binary(Data) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    AttId = fabric2_util:uuid(),
+    Chunks = chunkify_attachment(Data),
+
+    lists:foldl(fun(Chunk, ChunkId) ->
+        AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix),
+        ok = erlfdb:set(Tx, AttKey, Chunk),
+        ChunkId + 1
+    end, 0, Chunks),
+    {ok, AttId}.
+
+
+fold_docs(#{} = Db, UserFun, UserAcc0, Options) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {Reverse, Start, End} = get_dir_and_bounds(DbPrefix, Options),
+
+    DocCountKey = erlfdb_tuple:pack({?DB_STATS, <<"doc_count">>}, DbPrefix),
+    DocCountBin = erlfdb:wait(erlfdb:get(Tx, DocCountKey)),
+
+    try
+        UserAcc1 = maybe_stop(UserFun({meta, [
+            {total, ?bin2uint(DocCountBin)},
+            {offset, null}
+        ]}, UserAcc0)),
+
+        UserAcc2 = erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
+            {?DB_ALL_DOCS, DocId} = erlfdb_tuple:unpack(K, DbPrefix),
+            RevId = erlfdb_tuple:unpack(V),
+            maybe_stop(UserFun({row, [
+                {id, DocId},
+                {key, DocId},
+                {value, couch_doc:rev_to_str(RevId)}
+            ]}, UserAccIn))
+        end, UserAcc1, [{reverse, Reverse}] ++ Options),
+
+        {ok, maybe_stop(UserFun(complete, UserAcc2))}
+    catch throw:{stop, FinalUserAcc} ->
+        {ok, FinalUserAcc}
+    end.
+
+
+fold_changes(#{} = Db, SinceSeq0, UserFun, UserAcc0, Options) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    SinceSeq1 = get_since_seq(SinceSeq0),
+
+    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
+        fwd -> false;
+        rev -> true
+    end,
+
+    {Start0, End0} = case Reverse of
+        false -> {SinceSeq1, fabric2_util:seq_max_vs()};
+        true -> {fabric2_util:seq_zero_vs(), SinceSeq1}
+    end,
+
+    Start1 = erlfdb_tuple:pack({?DB_CHANGES, Start0}, DbPrefix),
+    End1 = erlfdb_tuple:pack({?DB_CHANGES, End0}, DbPrefix),
+
+    {Start, End} = case Reverse of
+        false -> {erlfdb_key:first_greater_than(Start1), End1};
+        true -> {Start1, erlfdb_key:first_greater_than(End1)}
+    end,
+
+    try
+        {ok, erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
+            {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
+
+            Change = #{
+                id => DocId,
+                sequence => vs_to_seq(SeqVS),
+                rev_id => RevId,
+                deleted => Deleted
+            },
+
+            maybe_stop(UserFun(Change, UserAccIn))
+        end, UserAcc0, [{reverse, Reverse}] ++ Options)}
+    catch throw:{stop, FinalUserAcc} ->
+        {ok, FinalUserAcc}
+    end.
+
+
+get_last_change(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {Start, End} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+    Options = [{limit, 1}, {reverse, true}],
+    case erlfdb:get_range(Tx, Start, End, Options) of
+        [] ->
+            vs_to_seq(fabric2_util:seq_zero_vs());
+        [{K, _V}] ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
+            vs_to_seq(SeqVS)
+    end.
+
+
+maybe_stop({ok, Acc}) ->
+    Acc;
+maybe_stop({stop, Acc}) ->
+    throw({stop, Acc}).
+
+
+vs_to_seq(VS) ->
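+    % 51 (16#33) is the FDB tuple type tag for a 96-bit
+    % versionstamp; drop it and hex-encode the remaining
+    % 12 bytes to produce the external update sequence.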
+    <<51:8, SeqBin:12/binary>> = erlfdb_tuple:pack({VS}),
+    fabric2_util:to_hex(SeqBin).
+
+
+debug_cluster() ->
+    debug_cluster(<<>>, <<16#FE, 16#FF, 16#FF>>).
+
+
+debug_cluster(Start, End) ->
+    transactional(fun(Tx) ->
+        lists:foreach(fun({Key, Val}) ->
+            io:format("~s => ~s~n", [
+                    string:pad(erlfdb_util:repr(Key), 60),
+                    erlfdb_util:repr(Val)
+                ])
+        end, erlfdb:get_range(Tx, Start, End))
+    end).
+
+
+init_db(Tx, DbName, Options) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
+    #{
+        name => DbName,
+        tx => Tx,
+        layer_prefix => Prefix,
+        md_version => Version,
+
+        db_options => Options
+    }.
+
+
+load_validate_doc_funs(#{} = Db) ->
+    FoldFun = fun
+        ({row, Row}, Acc) ->
+            DDocInfo = #{id => fabric2_util:get_value(id, Row)},
+            {ok, [DDocInfo | Acc]};
+        (_, Acc) ->
+            {ok, Acc}
+    end,
+
+    Options = [
+        {start_key, <<"_design/">>},
+        {end_key, <<"_design0">>}
+    ],
+
+    {ok, Infos1} = fold_docs(Db, FoldFun, [], Options),
+
+    Infos2 = lists:map(fun(Info) ->
+        #{
+            id := DDocId = <<"_design/", _/binary>>
+        } = Info,
+        Info#{
+            rev_info => get_winning_revs_future(Db, DDocId, 1)
+        }
+    end, Infos1),
+
+    Infos3 = lists:flatmap(fun(Info) ->
+        #{
+            id := DDocId,
+            rev_info := RevInfoFuture
+        } = Info,
+        [RevInfo] = get_winning_revs_wait(Db, RevInfoFuture),
+        #{deleted := Deleted} = RevInfo,
+        if Deleted -> []; true ->
+            [Info#{
+                rev_info := RevInfo,
+                body => get_doc_body_future(Db, DDocId, RevInfo)
+            }]
+        end
+    end, Infos2),
+
+    VDUs = lists:flatmap(fun(Info) ->
+        #{
+            id := DDocId,
+            rev_info := RevInfo,
+            body := BodyFuture
+        } = Info,
+        #doc{} = Doc = get_doc_body_wait(Db, DDocId, RevInfo, BodyFuture),
+        case couch_doc:get_validate_doc_fun(Doc) of
+            nil -> [];
+            Fun -> [Fun]
+        end
+    end, Infos3),
+
+    Db#{
+        validate_doc_update_funs := VDUs
+    }.
+
+
+bump_metadata_version(Tx) ->
+    % The 14 zero bytes are pulled from the PR that added the
+    % metadata version key. Not sure why 14 bytes when
+    % versionstamps are only 80 bits (10 bytes), but it
+    % matches upstream for now.
+    erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>).
+
+
+bump_db_version(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db,
+
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = fabric2_util:uuid(),
+    ok = erlfdb:set(Tx, DbVersionKey, DbVersion).
+
+
+write_doc_body(#{} = Db0, #doc{} = Doc) ->
+    #{
+        tx := Tx
+    } = Db = ensure_current(Db0),
+
+    {NewDocKey, NewDocVal} = doc_to_fdb(Db, Doc),
+    erlfdb:set(Tx, NewDocKey, NewDocVal).
+
+
+revinfo_to_fdb(Tx, DbPrefix, DocId, #{winner := true} = RevInfo) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath,
+        branch_count := BranchCount
+    } = RevInfo,
+    VS = new_versionstamp(Tx),
+    Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+    Val = {?CURR_REV_FORMAT, VS, BranchCount, list_to_tuple(RevPath)},
+    KBin = erlfdb_tuple:pack(Key, DbPrefix),
+    VBin = erlfdb_tuple:pack_vs(Val),
+    {KBin, VBin, VS};
+
+revinfo_to_fdb(_Tx, DbPrefix, DocId, #{} = RevInfo) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+    Val = {?CURR_REV_FORMAT, list_to_tuple(RevPath)},
+    KBin = erlfdb_tuple:pack(Key, DbPrefix),
+    VBin = erlfdb_tuple:pack(Val),
+    {KBin, VBin, undefined}.
+
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
+    {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+    {_RevFormat, Sequence, BranchCount, RevPath} = Val,
+    #{
+        winner => true,
+        deleted => not NotDeleted,
+        rev_id => {RevPos, Rev},
+        rev_path => tuple_to_list(RevPath),
+        sequence => Sequence,
+        branch_count => BranchCount
+    };
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _} = Val)  ->
+    {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+    {_RevFormat, RevPath} = Val,
+    #{
+        winner => false,
+        deleted => not NotDeleted,
+        rev_id => {RevPos, Rev},
+        rev_path => tuple_to_list(RevPath),
+        sequence => undefined,
+        branch_count => undefined
+    }.
+
+
+doc_to_fdb(Db, #doc{} = Doc) ->
+    #{
+        db_prefix := DbPrefix
+    } = Db,
+
+    #doc{
+        id = Id,
+        revs = {Start, [Rev | _]},
+        body = Body,
+        atts = Atts,
+        deleted = Deleted
+    } = doc_flush_atts(Db, Doc),
+
+    Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev}, DbPrefix),
+    Val = {Body, Atts, Deleted},
+    {Key, term_to_binary(Val, [{minor_version, 1}])}.
+
+
+fdb_to_doc(_Db, DocId, Pos, Path, Bin) when is_binary(Bin) ->
+    {Body, Atts, Deleted} = binary_to_term(Bin, [safe]),
+    #doc{
+        id = DocId,
+        revs = {Pos, Path},
+        body = Body,
+        atts = Atts,
+        deleted = Deleted
+    };
+fdb_to_doc(_Db, _DocId, _Pos, _Path, not_found) ->
+    {not_found, missing}.
+
+
+local_doc_to_fdb(Db, #doc{} = Doc) ->
+    #{
+        db_prefix := DbPrefix
+    } = Db,
+
+    #doc{
+        id = Id,
+        revs = {0, [Rev]},
+        body = Body
+    } = Doc,
+
+    StoreRev = case Rev of
+        _ when is_integer(Rev) -> integer_to_binary(Rev);
+        _ when is_binary(Rev) -> Rev
+    end,
+
+    Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+    Val = {StoreRev, Body},
+    {Key, term_to_binary(Val, [{minor_version, 1}])}.
+
+
+fdb_to_local_doc(_Db, DocId, Bin) when is_binary(Bin) ->
+    {Rev, Body} = binary_to_term(Bin, [safe]),
+    #doc{
+        id = DocId,
+        revs = {0, [Rev]},
+        deleted = false,
+        body = Body
+    };
+fdb_to_local_doc(_Db, _DocId, not_found) ->
+    {not_found, missing}.
+
+
+doc_flush_atts(Db, Doc) ->
+    Atts = lists:map(fun(Att) ->
+        couch_att:flush(Db, Doc#doc.id, Att)
+    end, Doc#doc.atts),
+    Doc#doc{atts = Atts}.
+
+
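+% Split attachment data into ?ATTACHMENT_CHUNK_SIZE pieces.
+% For example, assuming a hypothetical 64 KiB chunk size, a
+% 150 KiB binary becomes chunks of 64, 64 and 22 KiB that
+% write_attachment/3 stores under consecutive ChunkIds.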
+chunkify_attachment(Data) ->
+    case Data of
+        <<>> ->
+            [];
+        <<Head:?ATTACHMENT_CHUNK_SIZE/binary, Rest/binary>> ->
+            [Head | chunkify_attachment(Rest)];
+        <<_/binary>> when byte_size(Data) < ?ATTACHMENT_CHUNK_SIZE ->
+            [Data]
+    end.
+
+
+get_dir_and_bounds(DbPrefix, Options) ->
+    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
+        fwd -> false;
+        rev -> true
+    end,
+    StartKey0 = fabric2_util:get_value(start_key, Options),
+    EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
+    EndKey0 = fabric2_util:get_value(end_key, Options, EndKeyGt),
+    InclusiveEnd = EndKeyGt == undefined,
+
+    % CouchDB swaps the key meanings based on the direction
+    % of the fold. FoundationDB does not so we have to
+    % swap back here.
+    {StartKey1, EndKey1} = case Reverse of
+        false -> {StartKey0, EndKey0};
+        true -> {EndKey0, StartKey0}
+    end,
+
+    % Set the maximum bounds for the start and endkey
+    StartKey2 = case StartKey1 of
+        undefined -> {?DB_ALL_DOCS};
+        SK2 when is_binary(SK2) -> {?DB_ALL_DOCS, SK2}
+    end,
+
+    EndKey2 = case EndKey1 of
+        undefined -> {?DB_ALL_DOCS, <<16#FF>>};
+        EK2 when is_binary(EK2) -> {?DB_ALL_DOCS, EK2}
+    end,
+
+    StartKey3 = erlfdb_tuple:pack(StartKey2, DbPrefix),
+    EndKey3 = erlfdb_tuple:pack(EndKey2, DbPrefix),
+
+    % FoundationDB ranges are applied as SK <= key < EK
+    % By default, CouchDB is SK <= key <= EK with the
+    % optional inclusive_end=false option changing that
+    % to SK <= key < EK. Also, remember that CouchDB
+    % swaps the meaning of SK and EK based on direction.
+    %
+    % Thus we have this wonderful bit of logic to account
+    % for all of those combinations.
+
+    StartKey4 = case {Reverse, InclusiveEnd} of
+        {true, false} ->
+            erlfdb_key:first_greater_than(StartKey3);
+        _ ->
+            StartKey3
+    end,
+
+    EndKey4 = case {Reverse, InclusiveEnd} of
+        {false, true} when EndKey0 /= undefined ->
+            erlfdb_key:first_greater_than(EndKey3);
+        {true, _} ->
+            erlfdb_key:first_greater_than(EndKey3);
+        _ ->
+            EndKey3
+    end,
+
+    {Reverse, StartKey4, EndKey4}.
+
+
+get_since_seq(Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0 ->
+    fabric2_util:seq_zero_vs();
+
+get_since_seq(Seq) when Seq == now; Seq == <<"now">> ->
+    fabric2_util:seq_max_vs();
+
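+% A 24-character hex seq is the 12-byte suffix produced by
+% vs_to_seq/1; re-attach the versionstamp type tag (51)
+% before unpacking.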
+get_since_seq(Seq) when is_binary(Seq), size(Seq) == 24 ->
+    Seq1 = fabric2_util:from_hex(Seq),
+    Seq2 = <<51:8, Seq1/binary>>,
+    {SeqVS} = erlfdb_tuple:unpack(Seq2),
+    SeqVS;
+
+get_since_seq(List) when is_list(List) ->
+    get_since_seq(list_to_binary(List));
+
+get_since_seq(Seq) ->
+    erlang:error({invalid_since_seq, Seq}).
+
+
+get_db_handle() ->
+    case get(?PDICT_DB_KEY) of
+        undefined ->
+            {ok, Db} = application:get_env(fabric, db),
+            put(?PDICT_DB_KEY, Db),
+            Db;
+        Db ->
+            Db
+    end.
+
+
+require_transaction(#{tx := {erlfdb_transaction, _}} = _Db) ->
+    ok;
+require_transaction(#{} = _Db) ->
+    erlang:error(transaction_required).
+
+
+ensure_current(Db) ->
+    ensure_current(Db, true).
+
+
+ensure_current(#{} = Db, CheckDbVersion) ->
+    require_transaction(Db),
+
+    #{
+        tx := Tx,
+        md_version := MetaDataVersion
+    } = Db,
+
+    Db1 = case erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)) of
+        MetaDataVersion -> Db;
+        _NewVersion -> reopen(Db)
+    end,
+
+    AlreadyChecked = get(?PDICT_CHECKED_DB_IS_CURRENT),
+    if not CheckDbVersion orelse AlreadyChecked == true -> Db1; true ->
+        #{
+            db_prefix := DbPrefix,
+            db_version := DbVersion
+        } = Db1,
+
+        DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+
+        case erlfdb:wait(erlfdb:get(Tx, DbVersionKey)) of
+            DbVersion ->
+                put(?PDICT_CHECKED_DB_IS_CURRENT, true),
+                Db1;
+            _NewDBVersion ->
+                fabric2_server:remove(maps:get(name, Db1)),
+                reopen(Db1)
+        end
+    end.
+
+
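+% On a commit_unknown_result error the transaction may or may
+% not have committed. Every write transaction records a unique
+% txid key; if a retry finds that key, the previous attempt
+% did commit and we return the cached result instead of
+% applying the writes twice.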
+is_transaction_applied(Tx) ->
+    is_commit_unknown_result()
+        andalso has_transaction_id()
+        andalso transaction_id_exists(Tx).
+
+
+get_previous_transaction_result() ->
+    get(?PDICT_TX_RES_KEY).
+
+
+execute_transaction(Tx, Fun, LayerPrefix) ->
+    put(?PDICT_CHECKED_DB_IS_CURRENT, false),
+    Result = Fun(Tx),
+    case erlfdb:is_read_only(Tx) of
+        true ->
+            ok;
+        false ->
+            erlfdb:set(Tx, get_transaction_id(Tx, LayerPrefix), <<>>),
+            put(?PDICT_TX_RES_KEY, Result)
+    end,
+    Result.
+
+
+clear_transaction() ->
+    fabric2_txids:remove(get(?PDICT_TX_ID_KEY)),
+    erase(?PDICT_CHECKED_DB_IS_CURRENT),
+    erase(?PDICT_TX_ID_KEY),
+    erase(?PDICT_TX_RES_KEY).
+
+
+is_commit_unknown_result() ->
+    erlfdb:get_last_error() == ?COMMIT_UNKNOWN_RESULT.
+
+
+has_transaction_id() ->
+    is_binary(get(?PDICT_TX_ID_KEY)).
+
+
+transaction_id_exists(Tx) ->
+    erlfdb:wait(erlfdb:get(Tx, get(?PDICT_TX_ID_KEY))) == <<>>.
+
+
+get_transaction_id(Tx, LayerPrefix) ->
+    case get(?PDICT_TX_ID_KEY) of
+        undefined ->
+            TxId = fabric2_txids:create(Tx, LayerPrefix),
+            put(?PDICT_TX_ID_KEY, TxId),
+            TxId;
+        TxId when is_binary(TxId) ->
+            TxId
+    end.
+
+
+new_versionstamp(Tx) ->
+    TxId = erlfdb:get_next_tx_id(Tx),
+    {versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF, TxId}.
+
diff --git a/src/fabric/src/fabric2_server.erl b/src/fabric/src/fabric2_server.erl
new file mode 100644
index 0000000..5b826cd
--- /dev/null
+++ b/src/fabric/src/fabric2_server.erl
@@ -0,0 +1,104 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_server).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    fetch/1,
+    store/1,
+    remove/1
+]).
+
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(CLUSTER_FILE, "/usr/local/etc/foundationdb/fdb.cluster").
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+fetch(DbName) when is_binary(DbName) ->
+    case ets:lookup(?MODULE, DbName) of
+        [{DbName, #{} = Db}] -> Db;
+        [] -> undefined
+    end.
+
+
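+% Cache a db handle, stripping per-request state (the open
+% transaction and the user context) so the cached copy is
+% safe to share across callers.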
+store(#{name := DbName} = Db0) when is_binary(DbName) ->
+    Db1 = Db0#{
+        tx := undefined,
+        user_ctx := #user_ctx{}
+    },
+    true = ets:insert(?MODULE, {DbName, Db1}),
+    ok.
+
+
+remove(DbName) when is_binary(DbName) ->
+    true = ets:delete(?MODULE, DbName),
+    ok.
+
+
+init(_) ->
+    ets:new(?MODULE, [
+            public,
+            named_table,
+            {read_concurrency, true},
+            {write_concurrency, true}
+        ]),
+
+    Db = case application:get_env(fabric, eunit_run) of
+        {ok, true} ->
+            erlfdb_util:get_test_db([empty]);
+        undefined ->
+            ClusterStr = config:get("erlfdb", "cluster_file", ?CLUSTER_FILE),
+            erlfdb:open(iolist_to_binary(ClusterStr))
+    end,
+    application:set_env(fabric, db, Db),
+
+    {ok, nil}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
diff --git a/src/fabric/src/fabric2_sup.erl b/src/fabric/src/fabric2_sup.erl
new file mode 100644
index 0000000..73c6c1f
--- /dev/null
+++ b/src/fabric/src/fabric2_sup.erl
@@ -0,0 +1,47 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_sup).
+-behaviour(supervisor).
+-vsn(1).
+
+
+-export([
+    start_link/1
+]).
+
+-export([
+    init/1
+]).
+
+
+start_link(Args) ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
+
+
+init([]) ->
+    Flags = #{
+        strategy => one_for_one,
+        intensity => 1,
+        period => 5
+    },
+    Children = [
+        #{
+            id => fabric2_server,
+            start => {fabric2_server, start_link, []}
+        },
+        #{
+            id => fabric2_txids,
+            start => {fabric2_txids, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/fabric/src/fabric2_txids.erl b/src/fabric/src/fabric2_txids.erl
new file mode 100644
index 0000000..bbb8bdf
--- /dev/null
+++ b/src/fabric/src/fabric2_txids.erl
@@ -0,0 +1,144 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_txids).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    create/2,
+    remove/1
+]).
+
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include("fabric2.hrl").
+
+
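+% ?ONE_HOUR is in microseconds to match timer:now_diff/2.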
+-define(ONE_HOUR, 3600000000).
+-define(MAX_TX_IDS, 1000).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+create(Tx, undefined) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    create(Tx, Prefix);
+
+create(_Tx, LayerPrefix) ->
+    {Mega, Secs, Micro} = os:timestamp(),
+    Key = {?TX_IDS, Mega, Secs, Micro, fabric2_util:uuid()},
+    erlfdb_tuple:pack(Key, LayerPrefix).
+
+
+remove(TxId) when is_binary(TxId) ->
+    gen_server:cast(?MODULE, {remove, TxId});
+
+remove(undefined) ->
+    ok.
+
+
+init(_) ->
+    {ok, #{
+        last_sweep => os:timestamp(),
+        txids => []
+    }}.
+
+
+terminate(_, #{txids := TxIds}) ->
+    if TxIds == [] -> ok; true ->
+        fabric2_fdb:transactional(fun(Tx) ->
+            lists:foreach(fun(TxId) ->
+                erlfdb:clear(Tx, TxId)
+            end, TxIds)
+        end)
+    end,
+    ok.
+
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast({remove, TxId}, St) ->
+    #{
+        last_sweep := LastSweep,
+        txids := TxIds
+    } = St,
+
+    NewTxIds = [TxId | TxIds],
+    NewSt = St#{txids := NewTxIds},
+
+    NeedsSweep = timer:now_diff(os:timestamp(), LastSweep) > ?ONE_HOUR,
+
+    case NeedsSweep orelse length(NewTxIds) >= ?MAX_TX_IDS of
+        true ->
+            {noreply, clean(NewSt, NeedsSweep)};
+        false ->
+            {noreply, NewSt}
+    end.
+
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+clean(St, NeedsSweep) ->
+    #{
+        last_sweep := LastSweep,
+        txids := TxIds
+    } = St,
+    fabric2_fdb:transactional(fun(Tx) ->
+        lists:foreach(fun(TxId) ->
+            erlfdb:clear(Tx, TxId)
+        end, TxIds),
+        case NeedsSweep of
+            true ->
+                sweep(Tx, LastSweep),
+                St#{
+                    last_sweep := os:timestamp(),
+                    txids := []
+                };
+            false ->
+                St#{txids := []}
+        end
+    end).
+
+
+sweep(Tx, {Mega, Secs, Micro}) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    StartKey = erlfdb_tuple:pack({?TX_IDS}, Prefix),
+    EndKey = erlfdb_tuple:pack({?TX_IDS, Mega, Secs, Micro}, Prefix),
+    erlfdb:set_option(Tx, next_write_no_write_conflict_range),
+    erlfdb:clear_range(Tx, StartKey, EndKey).
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
new file mode 100644
index 0000000..6e2df67
--- /dev/null
+++ b/src/fabric/src/fabric2_util.erl
@@ -0,0 +1,203 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_util).
+
+
+-export([
+    revinfo_to_path/1,
+    sort_revinfos/1,
+
+    seq_zero_vs/0,
+    seq_max_vs/0,
+
+    user_ctx_to_json/1,
+
+    validate_security_object/1,
+
+    get_value/2,
+    get_value/3,
+    to_hex/1,
+    from_hex/1,
+    uuid/0
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+revinfo_to_path(RevInfo) ->
+    #{
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    Revs = lists:reverse(RevPath, [Rev]),
+    Path = revinfo_to_path(RevInfo, Revs),
+    {RevPos - length(Revs) + 1, Path}.
+
+
+revinfo_to_path(RevInfo, [Rev]) ->
+    {Rev, RevInfo, []};
+
+revinfo_to_path(RevInfo, [Rev | Rest]) ->
+    {Rev, ?REV_MISSING, [revinfo_to_path(RevInfo, Rest)]}.
+
+
+sort_revinfos(RevInfos) ->
+    CmpFun = fun(A, B) -> rev_sort_key(A) > rev_sort_key(B) end,
+    lists:sort(CmpFun, RevInfos).
+
+
+rev_sort_key(#{} = RevInfo) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev}
+    } = RevInfo,
+    {not Deleted, RevPos, Rev}.
+
+
+seq_zero_vs() ->
+    {versionstamp, 0, 0, 0}.
+
+
+seq_max_vs() ->
+    {versionstamp, 18446744073709551615, 65535, 65535}.
+
+
+user_ctx_to_json(Db) ->
+    UserCtx = fabric2_db:get_user_ctx(Db),
+    {[
+        {<<"db">>, fabric2_db:name(Db)},
+        {<<"name">>, UserCtx#user_ctx.name},
+        {<<"roles">>, UserCtx#user_ctx.roles}
+    ]}.
+
+
+validate_security_object({SecProps}) ->
+    Admins = get_value(<<"admins">>, SecProps, {[]}),
+    ok = validate_names_and_roles(Admins),
+
+    % we fallback to readers here for backwards compatibility
+    Readers = get_value(<<"readers">>, SecProps, {[]}),
+    Members = get_value(<<"members">>, SecProps, Readers),
+    ok = validate_names_and_roles(Members).
+
+
+validate_names_and_roles({Props}) when is_list(Props) ->
+    validate_json_list_of_strings(<<"names">>, Props),
+    validate_json_list_of_strings(<<"roles">>, Props);
+validate_names_and_roles(_) ->
+    throw("admins or members must be a JSON list of strings").
+
+
+validate_json_list_of_strings(Member, Props) ->
+    case get_value(Member, Props, []) of
+        Values when is_list(Values) ->
+            NonBinary = lists:filter(fun(V) -> not is_binary(V) end, Values),
+            if NonBinary == [] -> ok; true ->
+                MemberStr = binary_to_list(Member),
+                throw(MemberStr ++ " must be a JSON list of strings")
+            end;
+        _ ->
+            MemberStr = binary_to_list(Member),
+            throw(MemberStr ++ " must be a JSON list of strings")
+    end.
+
+
+get_value(Key, List) ->
+    get_value(Key, List, undefined).
+
+
+get_value(Key, List, Default) ->
+    case lists:keysearch(Key, 1, List) of
+        {value, {Key,Value}} ->
+            Value;
+        false ->
+            Default
+    end.
+
+
+to_hex(Bin) ->
+    list_to_binary(to_hex_int(Bin)).
+
+
+to_hex_int(<<>>) ->
+    [];
+to_hex_int(<<Hi:4, Lo:4, Rest/binary>>) ->
+    [nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex_int(Rest)].
+
+
+nibble_to_hex(I) ->
+    case I of
+        0 -> $0;
+        1 -> $1;
+        2 -> $2;
+        3 -> $3;
+        4 -> $4;
+        5 -> $5;
+        6 -> $6;
+        7 -> $7;
+        8 -> $8;
+        9 -> $9;
+        10 -> $a;
+        11 -> $b;
+        12 -> $c;
+        13 -> $d;
+        14 -> $e;
+        15 -> $f
+    end.
+
+
+from_hex(Bin) ->
+    iolist_to_binary(from_hex_int(Bin)).
+
+
+from_hex_int(<<>>) ->
+    [];
+from_hex_int(<<Hi:8, Lo:8, RestBinary/binary>>) ->
+    HiNib = hex_to_nibble(Hi),
+    LoNib = hex_to_nibble(Lo),
+    [<<HiNib:4, LoNib:4>> | from_hex_int(RestBinary)];
+from_hex_int(<<BadHex/binary>>) ->
+    erlang:error({invalid_hex, BadHex}).
+
+
+hex_to_nibble(N) ->
+    case N of
+        $0 -> 0;
+        $1 -> 1;
+        $2 -> 2;
+        $3 -> 3;
+        $4 -> 4;
+        $5 -> 5;
+        $6 -> 6;
+        $7 -> 7;
+        $8 -> 8;
+        $9 -> 9;
+        $a -> 10;
+        $A -> 10;
+        $b -> 11;
+        $B -> 11;
+        $c -> 12;
+        $C -> 12;
+        $d -> 13;
+        $D -> 13;
+        $e -> 14;
+        $E -> 14;
+        $f -> 15;
+        $F -> 15;
+        _ -> erlang:error({invalid_hex, N})
+    end.
+
+
+uuid() ->
+    to_hex(crypto:strong_rand_bytes(16)).
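+
+
+% Round-trip sketch (illustrative only):
+%
+%     Uuid = fabric2_util:uuid(),           % 32 lowercase hex characters
+%     Bytes = fabric2_util:from_hex(Uuid),  % the original 16 random bytes
+%     Uuid = fabric2_util:to_hex(Bytes).    % to_hex/1 and from_hex/1 invert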


[couchdb] 17/34: Fix arity in changes timeout callback

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 920e1ffbce2ba0b0fedb65cfabd334770016d0fe
Author: Eric Avdey <ei...@eiri.ca>
AuthorDate: Thu Jun 20 10:42:30 2019 -0300

    Fix arity in changes timeout callback
---
 src/chttpd/src/chttpd_auth_cache.erl | 2 +-
 src/chttpd/src/chttpd_changes.erl    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index d947fe6..fc1ee62 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -176,7 +176,7 @@ changes_callback({change, {Change}}, _) ->
             ets_lru:remove(?CACHE, UserName)
     end,
     {ok, couch_util:get_value(seq, Change)};
-changes_callback(timeout, Acc) ->
+changes_callback({timeout, _ResponseType}, Acc) ->
     {ok, Acc};
 changes_callback({error, _}, EndSeq) ->
     exit({seq, EndSeq}).
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index d27bbad..0e03482 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -485,10 +485,10 @@ get_changes_timeout(Args, Callback) ->
         end;
     true ->
         {DefaultTimeout,
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
+            fun(UserAcc) -> {ok, Callback({timeout, ResponseType}, UserAcc)} end};
     _ ->
         {lists:min([DefaultTimeout, Heartbeat]),
-            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+            fun(UserAcc) -> {ok, Callback({timeout, ResponseType}, UserAcc)} end}
     end.
 
 start_sending_changes(Callback, UserAcc) ->


[couchdb] 19/34: CouchDB background jobs

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 0c2d674d6aff8fe6d3458db65c14594dce5e48dc
Author: Nick Vatamaniuc <va...@apache.org>
AuthorDate: Wed Jun 12 16:11:56 2019 -0400

    CouchDB background jobs
    
    RFC: https://github.com/apache/couchdb-documentation/pull/409
    
    Main API is in the `couch_jobs` module. Additional description of internals is
    in the README.md file.
---
 rebar.config.script                                |   1 +
 rel/overlay/etc/default.ini                        |  23 +-
 rel/reltool.config                                 |   2 +
 src/couch_jobs/.gitignore                          |   4 +
 src/couch_jobs/README.md                           |  62 ++
 src/couch_jobs/rebar.config                        |  14 +
 src/couch_jobs/src/couch_jobs.app.src              |  31 +
 src/couch_jobs/src/couch_jobs.erl                  | 378 ++++++++++++
 src/couch_jobs/src/couch_jobs.hrl                  |  52 ++
 src/couch_jobs/src/couch_jobs_activity_monitor.erl | 133 ++++
 .../src/couch_jobs_activity_monitor_sup.erl        |  64 ++
 src/couch_jobs/src/couch_jobs_app.erl              |  26 +
 src/couch_jobs/src/couch_jobs_fdb.erl              | 679 +++++++++++++++++++++
 src/couch_jobs/src/couch_jobs_notifier.erl         | 285 +++++++++
 src/couch_jobs/src/couch_jobs_notifier_sup.erl     |  64 ++
 src/couch_jobs/src/couch_jobs_pending.erl          | 143 +++++
 src/couch_jobs/src/couch_jobs_server.erl           | 193 ++++++
 src/couch_jobs/src/couch_jobs_sup.erl              |  66 ++
 src/couch_jobs/src/couch_jobs_type_monitor.erl     |  84 +++
 src/couch_jobs/test/couch_jobs_tests.erl           | 606 ++++++++++++++++++
 20 files changed, 2909 insertions(+), 1 deletion(-)

diff --git a/rebar.config.script b/rebar.config.script
index d7c0d9a..14fdf28 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -87,6 +87,7 @@ SubDirs = [
     "src/ddoc_cache",
     "src/dreyfus",
     "src/fabric",
+    "src/couch_jobs",
     "src/global_changes",
     "src/mango",
     "src/rexi",
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index dbb0744..69f57ff 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -520,4 +520,25 @@ min_priority = 2.0
 ; value will be rejected. If this config setting is not defined,
 ; CouchDB will use the value of `max_limit` instead. If neither is
 ; defined, the default is 2000 as stated here.
-; max_limit_partitions = 2000
\ No newline at end of file
+; max_limit_partitions = 2000
+
+[couch_jobs]
+;
+; Maximum jitter used when checking for active job timeouts
+;activity_monitor_max_jitter_msec = 10000
+;
+; Hold-off applied before notifying subscribers. Since active jobs can be
+; queried more efficiently using a range read, increasing this value should
+; make notifications more performant; however, it also increases notification
+; latency.
+;type_monitor_holdoff_msec = 50
+;
+; Timeout used when waiting for the job type notification watches. The default
+; value of "infinity" should work well in most cases.
+;type_monitor_timeout_msec = infinity
+;
+; How often to check for the presence of new job types.
+;type_check_period_msec = 15000
+;
+; Jitter applied when checking for new job types.
+;type_check_max_jitter_msec = 5000
diff --git a/rel/reltool.config b/rel/reltool.config
index da85f36..2f03e61 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -33,6 +33,7 @@
         config,
         couch,
         couch_epi,
+        couch_jobs,
         couch_index,
         couch_log,
         couch_mrview,
@@ -91,6 +92,7 @@
     {app, config, [{incl_cond, include}]},
     {app, couch, [{incl_cond, include}]},
     {app, couch_epi, [{incl_cond, include}]},
+    {app, couch_jobs, [{incl_cond, include}]},
     {app, couch_index, [{incl_cond, include}]},
     {app, couch_log, [{incl_cond, include}]},
     {app, couch_mrview, [{incl_cond, include}]},
diff --git a/src/couch_jobs/.gitignore b/src/couch_jobs/.gitignore
new file mode 100644
index 0000000..6ef4c52
--- /dev/null
+++ b/src/couch_jobs/.gitignore
@@ -0,0 +1,4 @@
+*.beam
+.eunit
+ebin/couch_jobs.app
+.DS_Store
\ No newline at end of file
diff --git a/src/couch_jobs/README.md b/src/couch_jobs/README.md
new file mode 100644
index 0000000..bc45d32
--- /dev/null
+++ b/src/couch_jobs/README.md
@@ -0,0 +1,62 @@
+CouchDB Jobs Application
+========================
+
+Run background jobs in CouchDB
+
+Design (RFC) discussion: https://github.com/apache/couchdb-documentation/pull/409/files
+
+This is a description of some of the modules:
+
+ * `couch_jobs`: The main API module. It contains functions for creating,
+   accepting, executing, and monitoring jobs. A common pattern in this module
+   is to get a jobs transaction object (named `JTx` throughout the code), then
+   start a transaction and call a bunch of functions from `couch_jobs_fdb` in
+   that transaction.
+
+ * `couch_jobs_fdb`: This is the layer that talks to FDB. It handles the
+   tuple packing and unpacking, range reads, and transaction object
+   management.
+
+ * `couch_jobs_pending`: This module implements the pending jobs queue. These
+   functions could all go in `couch_jobs_fdb` but the implementation was fairly
+   self-contained, with its own private helper functions, so it made sense to
+   move to a separate module.
+
+ * `couch_jobs_activity_monitor`: Here is where the "activity monitor"
+   functionality is implemented. That's done with a `gen_server` instance
+   running for each type. This `gen_server` periodically checks if there are
+   inactive jobs for its type and, if there are, re-enqueues them. If the
+   timeout value changes, it skips the pending check until the new
+   timeout expires.
+
+ * `couch_jobs_activity_monitor_sup` : This is a simple one-for-one supervisor
+   to spawn `couch_jobs_activity_monitor` instances for each type.
+
+ * `couch_jobs_type_monitor` : This is a helper process meant to be
+   `spawn_link`-ed from a parent `gen_server`. It then monitors activity for a
+   particular job type. If any jobs of that type have an update, it notifies
+   the parent process.
+
+ * `couch_jobs_notifier`: Responsible for subscriptions. Just like with the
+   activity monitor, there is a `gen_server` instance running per type. It
+   uses a linked `couch_jobs_type_monitor` process to wait for any job
+   updates. When an update notification arrives, it can efficiently find out
+   if any active jobs have been updated by reading the `(?JOBS, ?ACTIVITY,
+   Type, Sequence)` range. That should account for the bulk of changes. Jobs
+   that are no longer active are queried individually.
+   Subscriptions are managed in an ordered set ETS table.
+
+ * `couch_jobs_notifier_sup`: A simple one-for-one supervisor to spawn
+   `couch_jobs_notifier` processes for each type.
+
+ * `couch_jobs_server`: This is a `gen_server` which keeps track of job
+   types. It then starts or stops activity monitors and notifiers for each
+   type. To do that it periodically queries the `(?JOBS, ?ACTIVITY_TIMEOUT)`
+   range.
+
+ * `couch_jobs_sup`: This is the main application supervisor. The restart
+   strategy is `rest_for_one`, meaning that when a child restarts, the
+   siblings following it restart as well. One interesting entry there is the
+   first child, which exists just to create an ETS table used by
+   `couch_jobs_fdb` to cache the transaction object (`JTx` mentioned above).
+   That child calls `init_cache/0`, which creates the ETS table and then
+   returns `ignore`, so it doesn't actually spawn a process. The ETS table
+   ends up owned by the supervisor process.
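+
+A minimal usage sketch (the type name, job id, and data below are made-up
+examples; passing `undefined` for the transaction argument lets `couch_jobs`
+start its own):
+
+```erlang
+ok = couch_jobs:set_type_timeout(<<"xyz">>, 30),
+ok = couch_jobs:add(undefined, <<"xyz">>, <<"job1">>, #{<<"x">> => 1}),
+{ok, Job, JobData} = couch_jobs:accept(<<"xyz">>),
+% ... perform the work ...
+ok = couch_jobs:finish(undefined, Job, JobData#{<<"done">> => true}).
+```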
diff --git a/src/couch_jobs/rebar.config b/src/couch_jobs/rebar.config
new file mode 100644
index 0000000..362c878
--- /dev/null
+++ b/src/couch_jobs/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_jobs/src/couch_jobs.app.src b/src/couch_jobs/src/couch_jobs.app.src
new file mode 100644
index 0000000..8ded14c
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.app.src
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{application, couch_jobs, [
+    {description, "CouchDB Jobs"},
+    {vsn, git},
+    {mod, {couch_jobs_app, []}},
+    {registered, [
+        couch_jobs_sup,
+        couch_jobs_activity_monitor_sup,
+        couch_jobs_notifier_sup,
+        couch_jobs_server
+    ]},
+    {applications, [
+        kernel,
+        stdlib,
+        erlfdb,
+        couch_log,
+        config,
+        fabric
+    ]}
+]}.
diff --git a/src/couch_jobs/src/couch_jobs.erl b/src/couch_jobs/src/couch_jobs.erl
new file mode 100644
index 0000000..d469ed4
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.erl
@@ -0,0 +1,378 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs).
+
+-export([
+    % Job creation
+    add/4,
+    add/5,
+    remove/3,
+    get_job_data/3,
+    get_job_state/3,
+
+    % Job processing
+    accept/1,
+    accept/2,
+    finish/2,
+    finish/3,
+    resubmit/2,
+    resubmit/3,
+    is_resubmitted/1,
+    update/2,
+    update/3,
+
+    % Subscriptions
+    subscribe/2,
+    subscribe/3,
+    unsubscribe/1,
+    wait/2,
+    wait/3,
+
+    % Type timeouts
+    set_type_timeout/2,
+    clear_type_timeout/1,
+    get_type_timeout/1
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(MIN_ACCEPT_WAIT_MSEC, 100).
+
+
+%% Job Creation API
+
+-spec add(jtx(), job_type(), job_id(), job_data()) -> ok | {error, any()}.
+add(Tx, Type, JobId, JobData) ->
+    add(Tx, Type, JobId, JobData, 0).
+
+
+-spec add(jtx(), job_type(), job_id(), job_data(), scheduled_time()) ->
+    ok | {error, any()}.
+add(Tx, Type, JobId, JobData, ScheduledTime) when is_binary(JobId),
+        is_map(JobData), is_integer(ScheduledTime) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        case couch_jobs_fdb:add(JTx, Type, JobId, JobData, ScheduledTime) of
+            {ok, _, _, _} -> ok;
+            {error, Error} -> {error, Error}
+        end
+    end).
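+
+
+% For example (sketch; the type and id here are illustrative):
+%
+%     ok = couch_jobs:add(undefined, <<"xyz">>, <<"job1">>, #{}, 0)
+%
+% Note that a type timeout must have been registered via set_type_timeout/2
+% first, otherwise this returns {error, no_type_timeout}.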
+
+
+-spec remove(jtx(), job_type(), job_id()) -> ok | {error, any()}.
+remove(Tx, Type, JobId) when is_binary(JobId) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        couch_jobs_fdb:remove(JTx, job(Type, JobId))
+    end).
+
+
+-spec get_job_data(jtx(), job_type(), job_id()) -> {ok, job_data()} | {error,
+    any()}.
+get_job_data(Tx, Type, JobId) when is_binary(JobId) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
+            {ok, _Seq, _State, Data} ->
+                {ok, couch_jobs_fdb:decode_data(Data)};
+            {error, Error} ->
+                {error, Error}
+        end
+    end).
+
+
+-spec get_job_state(jtx(), job_type(), job_id()) -> {ok, job_state()} | {error,
+    any()}.
+get_job_state(Tx, Type, JobId) when is_binary(JobId) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
+            {ok, _Seq, State, _Data} ->
+                {ok, State};
+            {error, Error} ->
+                {error, Error}
+        end
+    end).
+
+
+%% Job processor API
+
+-spec accept(job_type()) -> {ok, job(), job_data()} | {error, any()}.
+accept(Type) ->
+    accept(Type, #{}).
+
+
+-spec accept(job_type(), job_accept_opts()) -> {ok, job(), job_data()} |
+    {error, any()}.
+accept(Type, #{} = Opts) ->
+    NoSched = maps:get(no_schedule, Opts, false),
+    MaxSchedTimeDefault = case NoSched of
+        true -> 0;
+        false -> ?UNDEFINED_MAX_SCHEDULED_TIME
+    end,
+    MaxSchedTime = maps:get(max_sched_time, Opts, MaxSchedTimeDefault),
+    Timeout = maps:get(timeout, Opts, infinity),
+    case NoSched andalso MaxSchedTime =/= 0 of
+        true ->
+            {error, no_schedule_require_0_max_sched_time};
+        false ->
+            accept_loop(Type, NoSched, MaxSchedTime, Timeout)
+    end.
+
+
+-spec finish(jtx(), job()) -> ok | {error, any()}.
+finish(Tx, Job) ->
+    finish(Tx, Job, undefined).
+
+
+-spec finish(jtx(), job(), job_data()) -> ok | {error, any()}.
+finish(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        couch_jobs_fdb:finish(JTx, Job, JobData)
+    end).
+
+
+-spec resubmit(jtx(), job()) -> {ok, job()} | {error, any()}.
+resubmit(Tx, Job) ->
+    resubmit(Tx, Job, ?UNDEFINED_MAX_SCHEDULED_TIME).
+
+
+-spec resubmit(jtx(), job(), scheduled_time()) -> {ok, job()} | {error, any()}.
+resubmit(Tx, #{jlock := <<_/binary>>} = Job, SchedTime) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        couch_jobs_fdb:resubmit(JTx, Job, SchedTime)
+    end).
+
+
+-spec is_resubmitted(job()) -> true | false.
+is_resubmitted(#{job := true} = Job) ->
+    maps:get(resubmit, Job, false).
+
+
+-spec update(jtx(), job()) -> {ok, job()} | {error, any()}.
+update(Tx, Job) ->
+    update(Tx, Job, undefined).
+
+
+-spec update(jtx(), job(), job_data()) -> {ok, job()} | {error, any()}.
+update(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        couch_jobs_fdb:update(JTx, Job, JobData)
+    end).
+
+
+%% Subscription API
+
+% Receive events as messages. Wait for them using `wait/2,3`
+% functions.
+%
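+% A typical flow (sketch; assumes the job exists and is not yet finished):
+%
+%     {ok, Sub, _State, _Data} = couch_jobs:subscribe(Type, JobId),
+%     case couch_jobs:wait(Sub, finished, Timeout) of
+%         {Type, JobId, finished, Data} -> couch_jobs:unsubscribe(Sub);
+%         timeout -> couch_jobs:unsubscribe(Sub)
+%     end.
+%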
+
+-spec subscribe(job_type(), job_id()) -> {ok, job_subscription(), job_state(),
+    job_data()} | {ok, finished, job_data()} | {error, any()}.
+subscribe(Type, JobId) ->
+    subscribe(undefined, Type, JobId).
+
+
+-spec subscribe(jtx(), job_type(), job_id()) -> {ok, job_subscription(),
+    job_state(), job_data()} | {ok, finished, job_data()} | {error, any()}.
+subscribe(Tx, Type, JobId) ->
+    StateData = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
+        Job = #{job => true, type => Type, id => JobId},
+        couch_jobs_fdb:get_job_state_and_data(JTx, Job)
+    end),
+    case StateData of
+        {ok, _Seq, finished, Data} ->
+            {ok, finished, couch_jobs_fdb:decode_data(Data)};
+        {ok, Seq, State, Data} ->
+            case couch_jobs_notifier:subscribe(Type, JobId, State, Seq) of
+                {ok, SubRef} ->
+                    Data1 = couch_jobs_fdb:decode_data(Data),
+                    {ok, SubRef, State, Data1};
+                {error, Error} ->
+                    {error, Error}
+            end;
+        {error, Error} ->
+            {error, Error}
+    end.
+
+
+% Unsubscribe from getting notifications based on a particular subscription.
+% Each subscription should be followed by its own unsubscription call. However,
+% subscriber processes are also monitored and auto-unsubscribed if they exit.
+% If the subscribing process is exiting, calling this function is optional.
+%
+-spec unsubscribe(job_subscription()) -> ok.
+unsubscribe({Server, Ref}) when is_pid(Server), is_reference(Ref) ->
+    try
+        couch_jobs_notifier:unsubscribe(Server, Ref)
+    after
+        flush_notifications(Ref)
+    end.
+
+
+% Wait to receive job state updates
+%
+-spec wait(job_subscription() | [job_subscription()], timeout()) ->
+    {job_type(), job_id(), job_state(), job_data()} | timeout.
+wait({_, Ref}, Timeout) ->
+    receive
+        {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data} ->
+            {Type, Id, State, couch_jobs_fdb:decode_data(Data)}
+    after
+        Timeout -> timeout
+    end;
+
+wait(Subs, Timeout) when is_list(Subs) ->
+    {Result, ResendQ} = wait_any(Subs, Timeout, []),
+    lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
+    Result.
+
+
+-spec wait(job_subscription() | [job_subscription()], job_state(), timeout())
+    -> {job_type(), job_id(), job_state(), job_data()} | timeout.
+wait({_, Ref} = Sub, State, Timeout) when is_atom(State) ->
+    receive
+        {?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} ->
+            case MsgState =:= State of
+                true ->
+                    Data = couch_jobs_fdb:decode_data(Data0),
+                    {Type, Id, State, Data};
+                false ->
+                    wait(Sub, State, Timeout)
+            end
+    after
+        Timeout -> timeout
+    end;
+
+wait(Subs, State, Timeout) when is_list(Subs),
+        is_atom(State) ->
+    {Result, ResendQ} = wait_any(Subs, State, Timeout, []),
+    lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
+    Result.
+
+
+%% Job type timeout API
+
+% These functions manipulate the activity timeout for each job type.
+
+-spec set_type_timeout(job_type(), timeout()) -> ok.
+set_type_timeout(Type, Timeout) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+        couch_jobs_fdb:set_type_timeout(JTx, Type, Timeout)
+    end).
+
+
+-spec clear_type_timeout(job_type()) -> ok.
+clear_type_timeout(Type) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+        couch_jobs_fdb:clear_type_timeout(JTx, Type)
+    end).
+
+
+-spec get_type_timeout(job_type()) -> timeout().
+get_type_timeout(Type) ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+        couch_jobs_fdb:get_type_timeout(JTx, Type)
+    end).
+
+
+%% Private utilities
+
+accept_loop(Type, NoSched, MaxSchedTime, Timeout) ->
+    TxFun = fun(JTx) ->
+        couch_jobs_fdb:accept(JTx, Type, MaxSchedTime, NoSched)
+    end,
+    case couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), TxFun) of
+        {ok, Job, Data} ->
+            {ok, Job, Data};
+        {not_found, PendingWatch} ->
+            case wait_pending(PendingWatch, MaxSchedTime, Timeout) of
+                {error, not_found} ->
+                    {error, not_found};
+                ok ->
+                    accept_loop(Type, NoSched, MaxSchedTime, Timeout)
+            end
+    end.
+
+
+job(Type, JobId) ->
+    #{job => true, type => Type, id => JobId}.
+
+
+wait_pending(PendingWatch, _MaxSTime, 0) ->
+    erlfdb:cancel(PendingWatch, [flush]),
+    {error, not_found};
+
+wait_pending(PendingWatch, MaxSTime, UserTimeout) ->
+    NowMSec = erlang:system_time(millisecond),
+    Timeout0 = max(?MIN_ACCEPT_WAIT_MSEC, MaxSTime * 1000 - NowMSec),
+    Timeout = min(limit_timeout(Timeout0), UserTimeout),
+    try
+        erlfdb:wait(PendingWatch, [{timeout, Timeout}]),
+        ok
+    catch
+        error:{timeout, _} ->
+            erlfdb:cancel(PendingWatch, [flush]),
+            {error, not_found}
+    end.
+
+
+wait_any(Subs, Timeout0, ResendQ) when is_list(Subs) ->
+    Timeout = limit_timeout(Timeout0),
+    receive
+        {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data0} = Msg ->
+            case lists:keyfind(Ref, 2, Subs) of
+                false ->
+                    wait_any(Subs, Timeout, [Msg | ResendQ]);
+                {_, Ref} ->
+                    Data = couch_jobs_fdb:decode_data(Data0),
+                    {{Type, Id, State, Data}, ResendQ}
+            end
+    after
+        Timeout -> {timeout, ResendQ}
+    end.
+
+
+wait_any(Subs, State, Timeout0, ResendQ) when
+        is_list(Subs) ->
+    Timeout = limit_timeout(Timeout0),
+    receive
+        {?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} = Msg ->
+            case lists:keyfind(Ref, 2, Subs) of
+                false ->
+                    wait_any(Subs, Timeout, [Msg | ResendQ]);
+                {_, Ref} ->
+                    case MsgState =:= State of
+                        true ->
+                            Data = couch_jobs_fdb:decode_data(Data0),
+                            {{Type, Id, State, Data}, ResendQ};
+                        false ->
+                            wait_any(Subs, Timeout, ResendQ)
+                    end
+            end
+    after
+        Timeout -> {timeout, ResendQ}
+    end.
+
+
+limit_timeout(Timeout) when is_integer(Timeout), Timeout < 16#FFFFFFFF ->
+    Timeout;
+
+limit_timeout(_Timeout) ->
+    infinity.
+
+
+flush_notifications(Ref) ->
+    receive
+        {?COUCH_JOBS_EVENT, Ref, _, _, _, _} ->
+            flush_notifications(Ref)
+    after
+        0 -> ok
+    end.
diff --git a/src/couch_jobs/src/couch_jobs.hrl b/src/couch_jobs/src/couch_jobs.hrl
new file mode 100644
index 0000000..2a02d76
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs.hrl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+% Job map/json field definitions
+%
+-define(OPT_PRIORITY, <<"priority">>).
+-define(OPT_DATA, <<"data">>).
+-define(OPT_CANCEL, <<"cancel">>).
+-define(OPT_RESUBMIT, <<"resubmit">>).
+
+% These might be in a fabric public hrl eventually
+%
+-define(uint2bin(I), binary:encode_unsigned(I, little)).
+-define(bin2uint(I), binary:decode_unsigned(I, little)).
+-define(UNSET_VS, {versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF}).
+-define(METADATA_VERSION_KEY, <<"$metadata_version_key$">>).
+
+% Data model definitions
+%
+-define(JOBS, 51).  % coordinate with fabric2.hrl
+-define(DATA, 1).
+-define(PENDING, 2).
+-define(WATCHES_PENDING, 3).
+-define(WATCHES_ACTIVITY, 4).
+-define(ACTIVITY_TIMEOUT, 5).
+-define(ACTIVITY, 6).
+
+
+-define(COUCH_JOBS_EVENT, '$couch_jobs_event').
+-define(COUCH_JOBS_CURRENT, '$couch_jobs_current').
+-define(UNDEFINED_MAX_SCHEDULED_TIME, 1 bsl 36).
+
+
+-type jtx() :: map() | undefined | tuple().
+-type job_id() :: binary().
+-type job_type() :: tuple() | binary() | non_neg_integer().
+-type job() :: map().
+-type job_data() :: map() | undefined.
+-type job_accept_opts() :: map().
+-type scheduled_time() :: non_neg_integer() | undefined.
+-type job_state() :: running | pending | finished.
+-type job_subscription() :: {pid(), reference()}.
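+
+% For orientation (informal; derived from usage in couch_jobs.erl): a job()
+% map always carries `job => true` plus its type and id, for example
+% #{job => true, type => <<"xyz">>, id => <<"job1">>}, and gains a `jlock`
+% binary while an acceptor is processing it.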
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor.erl b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
new file mode 100644
index 0000000..ef82e6b
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
@@ -0,0 +1,133 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_activity_monitor).
+
+-behaviour(gen_server).
+
+
+-export([
+    start_link/1
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+-record(st, {
+    jtx,
+    type,
+    tref,
+    timeout = 0,
+    vs = not_found
+}).
+
+
+-define(MAX_JITTER_DEFAULT, 10000).
+-define(MISSING_TIMEOUT_CHECK, 5000).
+
+
+start_link(Type) ->
+    gen_server:start_link(?MODULE, [Type], []).
+
+
+%% gen_server callbacks
+
+init([Type]) ->
+    St = #st{jtx = couch_jobs_fdb:get_jtx(), type = Type},
+    {ok, schedule_check(St)}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(check_activity, St) ->
+    St1 = check_activity(St),
+    St2 = schedule_check(St1),
+    {noreply, St2};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+    % Don't crash out this process (and with it the whole application); the
+    % erlfdb:wait timeout code would need to eventually do proper cleanup of
+    % these spurious future messages.
+    LogMsg = "~p : spurious erlfdb future ready message ~p",
+    couch_log:error(LogMsg, [?MODULE, Ref]),
+    {noreply, St};
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+% Private helper functions
+
+check_activity(#st{jtx = JTx, type = Type, vs = not_found} = St) ->
+    NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_activity_vs(JTx1, Type)
+    end),
+    St#st{vs = NewVS};
+
+check_activity(#st{jtx = JTx, type = Type, vs = VS} = St) ->
+    NewVS = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        NewVS = couch_jobs_fdb:get_activity_vs(JTx1, Type),
+        JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS),
+        couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
+        NewVS
+    end),
+    St#st{vs = NewVS}.
+
+
+get_timeout_msec(JTx, Type) ->
+    TimeoutVal = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_type_timeout(JTx1, Type)
+    end),
+    case TimeoutVal of
+        not_found -> not_found;
+        ValSeconds -> timer:seconds(ValSeconds)
+    end.
+
+
+schedule_check(#st{jtx = JTx, type = Type, timeout = OldTimeout} = St) ->
+    % Reset versionstamp if timeout changed.
+    St1 = case get_timeout_msec(JTx, Type) of
+        not_found ->
+            St#st{vs = not_found, timeout = ?MISSING_TIMEOUT_CHECK};
+        OldTimeout ->
+            St;
+        NewTimeout ->
+            St#st{vs = not_found, timeout = NewTimeout}
+    end,
+    #st{timeout = Timeout} = St1,
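+    % e.g. with a 30000 msec type timeout and the default 10000 msec max
+    % jitter, the scheduled wait lands between 30001 and 40000 msec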
+    MaxJitter = min(Timeout div 2, get_max_jitter_msec()),
+    Wait = Timeout + rand:uniform(max(1, MaxJitter)),
+    St1#st{tref = erlang:send_after(Wait, self(), check_activity)}.
+
+
+get_max_jitter_msec() ->
+    config:get_integer("couch_jobs", "activity_monitor_max_jitter_msec",
+        ?MAX_JITTER_DEFAULT).
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
new file mode 100644
index 0000000..b11161a
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
@@ -0,0 +1,64 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_activity_monitor_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+    start_link/0,
+
+    start_monitor/1,
+    stop_monitor/1,
+    get_child_pids/0
+]).
+
+-export([
+    init/1
+]).
+
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+start_monitor(Type) ->
+    supervisor:start_child(?MODULE, [Type]).
+
+
+stop_monitor(Pid) ->
+    supervisor:terminate_child(?MODULE, Pid).
+
+
+get_child_pids() ->
+    lists:map(fun({_Id, Pid, _Type, _Mod}) ->
+        Pid
+    end, supervisor:which_children(?MODULE)).
+
+
+init(_) ->
+    Flags = #{
+        strategy => simple_one_for_one,
+        intensity => 10,
+        period => 3
+    },
+    Children = [
+        #{
+            id => couch_jobs_monitor,
+            restart => temporary,
+            start => {couch_jobs_activity_monitor, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_app.erl b/src/couch_jobs/src/couch_jobs_app.erl
new file mode 100644
index 0000000..720b948
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_app.erl
@@ -0,0 +1,26 @@
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_app).
+
+
+-behaviour(application).
+
+
+-export([
+    start/2,
+    stop/1
+]).
+
+
+start(_Type, []) ->
+    couch_jobs_sup:start_link().
+
+
+stop([]) ->
+    ok.
diff --git a/src/couch_jobs/src/couch_jobs_fdb.erl b/src/couch_jobs/src/couch_jobs_fdb.erl
new file mode 100644
index 0000000..1317d03
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_fdb.erl
@@ -0,0 +1,679 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_fdb).
+
+
+-export([
+    add/5,
+    remove/2,
+    get_job_state_and_data/2,
+    get_jobs/2,
+    get_jobs/3,
+
+    accept/4,
+    finish/3,
+    resubmit/3,
+    update/3,
+
+    set_type_timeout/3,
+    clear_type_timeout/2,
+    get_type_timeout/2,
+    get_types/1,
+
+    get_activity_vs/2,
+    get_activity_vs_and_watch/2,
+    get_active_since/3,
+    get_inactive_since/3,
+    re_enqueue_inactive/3,
+
+    init_cache/0,
+
+    encode_data/1,
+    decode_data/1,
+
+    get_jtx/0,
+    get_jtx/1,
+    tx/2,
+
+    get_job/2,
+    get_jobs/0
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-record(jv, {
+    seq,
+    jlock,
+    stime,
+    resubmit,
+    data
+}).
+
+
+-define(JOBS_ETS_KEY, jobs).
+-define(MD_TIMESTAMP_ETS_KEY, md_timestamp).
+-define(MD_VERSION_MAX_AGE_SEC, 10).
+-define(PENDING_SEQ, 0).
+
+
+% Data model
+%
+% (?JOBS, ?DATA, Type, JobId) = (Sequence, Lock, SchedTime, Resubmit, JobData)
+% (?JOBS, ?PENDING, Type, ScheduledTime, JobId) = ""
+% (?JOBS, ?WATCHES_PENDING, Type) = Counter
+% (?JOBS, ?WATCHES_ACTIVITY, Type) = Sequence
+% (?JOBS, ?ACTIVITY_TIMEOUT, Type) = ActivityTimeout
+% (?JOBS, ?ACTIVITY, Type, Sequence) = JobId
+%
+% In the ?DATA row Sequence can have these values:
+%  0 - when the job is pending
+%  null - when the job is finished
+%  Versionstamp - when the job is running
+
+
+% Job creation API
+
+add(#{jtx := true} = JTx0, Type, JobId, Data, STime) ->
+    #{tx := Tx} = JTx = get_jtx(JTx0),
+    Job = #{job => true, type => Type, id => JobId},
+    case get_type_timeout(JTx, Type) of
+        not_found ->
+            {error, no_type_timeout};
+        Int when is_integer(Int) ->
+            Key = job_key(JTx, Job),
+            case erlfdb:wait(erlfdb:get(Tx, Key)) of
+                <<_/binary>> ->
+                    {ok, Job1} = resubmit(JTx, Job, STime),
+                    #{seq := Seq, state := State, data := Data1} = Job1,
+                    {ok, State, Seq, Data1};
+                not_found ->
+                    try
+                        maybe_enqueue(JTx, Type, JobId, STime, true, Data),
+                        {ok, pending, ?PENDING_SEQ, Data}
+                    catch
+                        error:{json_encoding_error, Error} ->
+                            {error, {json_encoding_error, Error}}
+                    end
+            end
+    end.
+
+
+remove(#{jtx := true} = JTx0, #{job := true} = Job) ->
+    #{tx := Tx} = JTx = get_jtx(JTx0),
+    #{type := Type, id := JobId} = Job,
+    Key = job_key(JTx, Job),
+    case get_job_val(Tx, Key) of
+        #jv{stime = STime} ->
+            couch_jobs_pending:remove(JTx, Type, JobId, STime),
+            erlfdb:clear(Tx, Key),
+            ok;
+        not_found ->
+            {error, not_found}
+    end.
+
+
+get_job_state_and_data(#{jtx := true} = JTx, #{job := true} = Job) ->
+    case get_job_val(get_jtx(JTx), Job) of
+        #jv{seq = Seq, jlock = JLock, data = Data} ->
+            {ok, Seq, job_state(JLock, Seq), Data};
+        not_found ->
+            {error, not_found}
+    end.
+
+
+get_jobs(JTx, Type) ->
+    get_jobs(JTx, Type, fun(_) -> true end).
+
+
+get_jobs(#{jtx := true} = JTx, Type, Filter) when is_function(Filter, 1) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Prefix = erlfdb_tuple:pack({?DATA, Type}, Jobs),
+    Opts = [{streaming_mode, want_all}],
+    Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+    lists:foldl(fun({K, V}, #{} = Acc) ->
+        {JobId} = erlfdb_tuple:unpack(K, Prefix),
+        case Filter(JobId) of
+            true ->
+                {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+                Acc#{JobId => {Seq, job_state(JLock, Seq), Data}};
+            false ->
+                Acc
+        end
+    end, #{}, Result).
+
+
+% Job processor API
+
+accept(#{jtx := true} = JTx0, Type, MaxSTime, NoSched)
+        when is_integer(MaxSTime), is_boolean(NoSched) ->
+    #{jtx := true, tx := Tx} = JTx = get_jtx(JTx0),
+    case couch_jobs_pending:dequeue(JTx, Type, MaxSTime, NoSched) of
+        {not_found, PendingWatch} ->
+            {not_found, PendingWatch};
+        {ok, JobId} ->
+            JLock = fabric2_util:uuid(),
+            Key = job_key(JTx, Type, JobId),
+            JV0 = get_job_val(Tx, Key),
+            #jv{jlock = null, data = Data} = JV0,
+            JV = JV0#jv{seq = ?UNSET_VS, jlock = JLock, resubmit = false},
+            set_job_val(Tx, Key, JV),
+            update_activity(JTx, Type, JobId, null, Data),
+            Job = #{
+                job => true,
+                type => Type,
+                id => JobId,
+                jlock => JLock
+            },
+            {ok, Job, decode_data(Data)}
+    end.
+
+
+finish(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data) when
+        is_map(Data) orelse Data =:= undefined ->
+    #{tx := Tx} = JTx = get_jtx(JTx0),
+    #{type := Type, jlock := JLock, id := JobId} = Job,
+    case get_job_or_halt(Tx, job_key(JTx, Job), JLock) of
+        #jv{seq = Seq, stime = STime, resubmit = Resubmit, data = OldData} ->
+            NewData = case Data =:= undefined of
+                true -> OldData;
+                false -> Data
+            end,
+            try maybe_enqueue(JTx, Type, JobId, STime, Resubmit, NewData) of
+                ok ->
+                    clear_activity(JTx, Type, Seq),
+                    update_watch(JTx, Type)
+            catch
+                error:{json_encoding_error, Error} ->
+                    {error, {json_encoding_error, Error}}
+            end;
+        halt ->
+            {error, halt}
+    end.
+
+
+resubmit(#{jtx := true} = JTx0, #{job := true} = Job, NewSTime) ->
+    #{tx := Tx} = JTx = get_jtx(JTx0),
+    #{type := Type, id := JobId} = Job,
+    Key = job_key(JTx, Job),
+    case get_job_val(Tx, Key) of
+        #jv{seq = Seq, jlock = JLock, stime = OldSTime, data = Data} = JV ->
+            STime = case NewSTime =:= undefined of
+                true -> OldSTime;
+                false -> NewSTime
+            end,
+            case job_state(JLock, Seq) of
+                finished ->
+                    ok = maybe_enqueue(JTx, Type, JobId, STime, true, Data),
+                    Job1 = Job#{
+                        seq => ?PENDING_SEQ,
+                        state => pending,
+                        data => Data
+                    },
+                    {ok, Job1};
+                pending ->
+                    JV1 = JV#jv{seq = ?PENDING_SEQ, stime = STime},
+                    set_job_val(Tx, Key, JV1),
+                    couch_jobs_pending:remove(JTx, Type, JobId, OldSTime),
+                    couch_jobs_pending:enqueue(JTx, Type, STime, JobId),
+                    Job1 = Job#{
+                        stime => STime,
+                        seq => ?PENDING_SEQ,
+                        state => pending,
+                        data => Data
+                    },
+                    {ok, Job1};
+                running ->
+                    JV1 = JV#jv{stime = STime, resubmit = true},
+                    set_job_val(Tx, Key, JV1),
+                    {ok, Job#{resubmit => true, stime => STime,
+                        state => running, seq => Seq, data => Data}}
+            end;
+        not_found ->
+            {error, not_found}
+    end.
+
+
+update(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data0) when
+        is_map(Data0) orelse Data0 =:= undefined ->
+    #{tx := Tx} = JTx = get_jtx(JTx0),
+    #{jlock := JLock, type := Type, id := JobId} = Job,
+    Key = job_key(JTx, Job),
+    case get_job_or_halt(Tx, Key, JLock) of
+        #jv{seq = Seq, stime = STime, resubmit = Resubmit} = JV0 ->
+            Data = case Data0 =:= undefined of
+                true -> JV0#jv.data;
+                false -> Data0
+            end,
+            JV = JV0#jv{seq = ?UNSET_VS, data = Data},
+            try set_job_val(Tx, Key, JV) of
+                ok ->
+                    update_activity(JTx, Type, JobId, Seq, Data),
+                    {ok, Job#{resubmit => Resubmit, stime => STime}}
+            catch
+                error:{json_encoding_error, Error} ->
+                    {error, {json_encoding_error, Error}}
+            end;
+        halt ->
+            {error, halt}
+    end.
+
+
+% Type and activity monitoring API
+
+set_type_timeout(#{jtx := true} = JTx, Type, Timeout) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+    Val = erlfdb_tuple:pack({Timeout}),
+    erlfdb:set(Tx, Key, Val).
+
+
+clear_type_timeout(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+    erlfdb:clear(Tx, Key).
+
+
+get_type_timeout(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
+    case erlfdb:wait(erlfdb:get_ss(Tx, Key)) of
+        not_found ->
+            not_found;
+        Val ->
+            {Timeout} = erlfdb_tuple:unpack(Val),
+            Timeout
+    end.
+
+
+get_types(#{jtx := true} = JTx) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Prefix = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT}, Jobs),
+    Opts = [{streaming_mode, want_all}],
+    Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+    lists:map(fun({K, _V}) ->
+        {Type} = erlfdb_tuple:unpack(K, Prefix),
+        Type
+    end, Result).
+
+
+get_activity_vs(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+    case erlfdb:wait(erlfdb:get(Tx, Key)) of
+        not_found ->
+            not_found;
+        Val ->
+            {VS} = erlfdb_tuple:unpack(Val),
+            VS
+    end.
+
+
+get_activity_vs_and_watch(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+    Future = erlfdb:get(Tx, Key),
+    Watch = erlfdb:watch(Tx, Key),
+    case erlfdb:wait(Future) of
+        not_found ->
+            {not_found, Watch};
+        Val ->
+            {VS} = erlfdb_tuple:unpack(Val),
+            {VS, Watch}
+    end.
+
+
+get_active_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
+    StartKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
+    StartKeySel = erlfdb_key:first_greater_or_equal(StartKey),
+    {_, EndKey} = erlfdb_tuple:range({Type}, Prefix),
+    Opts = [{streaming_mode, want_all}],
+    Future = erlfdb:get_range(Tx, StartKeySel, EndKey, Opts),
+    maps:from_list(lists:map(fun({_K, V}) ->
+        erlfdb_tuple:unpack(V)
+    end, erlfdb:wait(Future))).
+
+
+get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp) ->
+    #{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
+    Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
+    {StartKey, _} = erlfdb_tuple:range({Type}, Prefix),
+    EndKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
+    EndKeySel = erlfdb_key:first_greater_than(EndKey),
+    Opts = [{streaming_mode, want_all}],
+    Future = erlfdb:get_range(Tx, StartKey, EndKeySel, Opts),
+    lists:map(fun({_K, V}) ->
+        {JobId, _} = erlfdb_tuple:unpack(V),
+        JobId
+    end, erlfdb:wait(Future)).
+
+
+re_enqueue_inactive(#{jtx := true} = JTx, Type, JobIds) when is_list(JobIds) ->
+    #{tx := Tx} = get_jtx(JTx),
+    lists:foreach(fun(JobId) ->
+        case get_job_val(Tx, job_key(JTx, Type, JobId)) of
+            #jv{seq = Seq, stime = STime, data = Data} ->
+                clear_activity(JTx, Type, Seq),
+                maybe_enqueue(JTx, Type, JobId, STime, true, Data);
+            not_found ->
+                ok
+        end
+    end, JobIds),
+    case length(JobIds) > 0 of
+        true -> update_watch(JTx, Type);
+        false -> ok
+    end.
+
+
+% Cache initialization API. Called from the supervisor just to create the ETS
+% table. It returns `ignore` to tell supervisor it won't actually start any
+% process, which is what we want here.
+%
+init_cache() ->
+    ConcurrencyOpts = [{read_concurrency, true}, {write_concurrency, true}],
+    ets:new(?MODULE, [public, named_table] ++ ConcurrencyOpts),
+    ignore.
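+
+
+% A child spec for the above might look like this (sketch; the actual spec
+% lives in couch_jobs_sup):
+%
+%     #{id => couch_jobs_fdb, start => {couch_jobs_fdb, init_cache, []}}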
+
+
+% Functions to encode / decode JobData
+%
+encode_data(#{} = JobData) ->
+    try
+        jiffy:encode(JobData)
+    catch
+        throw:{error, Error} ->
+            % legacy clause since new versions of jiffy raise error instead
+            error({json_encoding_error, Error});
+        error:{error, Error} ->
+            error({json_encoding_error, Error})
+    end.
+
+
+decode_data(#{} = JobData) ->
+    JobData;
+
+decode_data(<<_/binary>> = JobData) ->
+    jiffy:decode(JobData, [return_maps]).
+
+
+% Cached job transaction object. This object wraps a transaction, caches the
+% directory lookup path, and the metadata version. The function can be used
+% from inside or outside the transaction. When used from a transaction it will
+% verify if the metadata was changed, and will refresh automatically.
+%
+get_jtx() ->
+    get_jtx(undefined).
+
+
+get_jtx(#{tx := Tx} = _TxDb) ->
+    get_jtx(Tx);
+
+get_jtx(undefined = _Tx) ->
+    case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
+        [{_, #{} = JTx}] ->
+            JTx;
+        [] ->
+            JTx = update_jtx_cache(init_jtx(undefined)),
+            JTx#{tx := undefined}
+    end;
+
+get_jtx({erlfdb_transaction, _} = Tx) ->
+    case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
+        [{_, #{} = JTx}] ->
+            ensure_current(JTx#{tx := Tx});
+        [] ->
+            update_jtx_cache(init_jtx(Tx))
+    end.
+
+
+% Transaction processing to be used with couch jobs' specific transaction
+% contexts
+%
+tx(#{jtx := true} = JTx, Fun) when is_function(Fun, 1) ->
+    fabric2_fdb:transactional(JTx, Fun).
+
+
+% Debug and testing API
+
+get_job(Type, JobId) ->
+    fabric2_fdb:transactional(fun(Tx) ->
+        JTx = init_jtx(Tx),
+        case get_job_val(Tx, job_key(JTx, Type, JobId)) of
+            #jv{seq = Seq, jlock = JLock} = JV ->
+                #{
+                    job => true,
+                    type => Type,
+                    id => JobId,
+                    seq => Seq,
+                    jlock => JLock,
+                    stime => JV#jv.stime,
+                    resubmit => JV#jv.resubmit,
+                    data => decode_data(JV#jv.data),
+                    state => job_state(JLock, Seq)
+                };
+            not_found ->
+                not_found
+        end
+    end).
+
+
+get_jobs() ->
+    fabric2_fdb:transactional(fun(Tx) ->
+        #{jobs_path := Jobs} = init_jtx(Tx),
+        Prefix = erlfdb_tuple:pack({?DATA}, Jobs),
+        Opts = [{streaming_mode, want_all}],
+        Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
+        lists:map(fun({K, V}) ->
+            {Type, JobId} = erlfdb_tuple:unpack(K, Prefix),
+            {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+            JobState = job_state(JLock, Seq),
+            {Type, JobId, JobState, decode_data(Data)}
+        end, Result)
+    end).
+
+
+% Private helper functions
+
+maybe_enqueue(#{jtx := true} = JTx, Type, JobId, STime, Resubmit, Data) ->
+    #{tx := Tx} = JTx,
+    Key = job_key(JTx, Type, JobId),
+    JV = #jv{
+        seq = null,
+        jlock = null,
+        stime = STime,
+        resubmit = false,
+        data = Data
+    },
+    case Resubmit of
+        true ->
+            set_job_val(Tx, Key, JV#jv{seq = ?PENDING_SEQ}),
+            couch_jobs_pending:enqueue(JTx, Type, STime, JobId);
+        false ->
+            set_job_val(Tx, Key, JV)
+    end,
+    ok.
+
+
+job_key(#{jtx := true, jobs_path := Jobs}, Type, JobId) ->
+    erlfdb_tuple:pack({?DATA, Type, JobId}, Jobs).
+
+
+job_key(JTx, #{type := Type, id := JobId}) ->
+    job_key(JTx, Type, JobId).
+
+
+get_job_val(#{jtx := true, tx := Tx} = JTx, #{job := true} = Job) ->
+    get_job_val(Tx, job_key(JTx, Job));
+
+get_job_val(Tx = {erlfdb_transaction, _}, Key) ->
+    case erlfdb:wait(erlfdb:get(Tx, Key)) of
+        <<_/binary>> = Val ->
+            {Seq, JLock, STime, Resubmit, Data} = erlfdb_tuple:unpack(Val),
+            #jv{
+                seq = Seq,
+                jlock = JLock,
+                stime = STime,
+                resubmit = Resubmit,
+                data = Data
+            };
+        not_found ->
+            not_found
+    end.
+
+
+set_job_val(Tx = {erlfdb_transaction, _}, Key, #jv{} = JV) ->
+    #jv{
+        seq = Seq,
+        jlock = JLock,
+        stime = STime,
+        resubmit = Resubmit,
+        data = Data0
+    } = JV,
+    Data = case Data0 of
+        #{} -> encode_data(Data0);
+        <<_/binary>> -> Data0
+    end,
+    case Seq of
+        ?UNSET_VS ->
+            Val = erlfdb_tuple:pack_vs({Seq, JLock, STime, Resubmit, Data}),
+            erlfdb:set_versionstamped_value(Tx, Key, Val);
+        _Other ->
+            Val = erlfdb_tuple:pack({Seq, JLock, STime, Resubmit, Data}),
+            erlfdb:set(Tx, Key, Val)
+    end,
+    ok.
+
+
+get_job_or_halt(Tx, Key, JLock) ->
+    case get_job_val(Tx, Key) of
+        #jv{jlock = CurJLock} when CurJLock =/= JLock ->
+            halt;
+        #jv{} = Res ->
+            Res;
+        not_found ->
+            halt
+    end.
+
+
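+% Replace the job's entry in the ?ACTIVITY subspace: clear the old sequence
+% (if any), write a fresh versionstamped entry, then bump the per-type watch
+% key so watchers wake up.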
+update_activity(#{jtx := true} = JTx, Type, JobId, Seq, Data0) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    case Seq =/= null of
+        true -> clear_activity(JTx, Type, Seq);
+        false -> ok
+    end,
+    Key = erlfdb_tuple:pack_vs({?ACTIVITY, Type, ?UNSET_VS}, Jobs),
+    Data = case Data0 of
+        #{} -> encode_data(Data0);
+        <<_/binary>> -> Data0
+    end,
+    Val = erlfdb_tuple:pack({JobId, Data}),
+    erlfdb:set_versionstamped_key(Tx, Key, Val),
+    update_watch(JTx, Type).
+
+
+clear_activity(#{jtx := true} = JTx, Type, Seq) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Key = erlfdb_tuple:pack({?ACTIVITY, Type, Seq}, Jobs),
+    erlfdb:clear(Tx, Key).
+
+
+update_watch(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
+    Val = erlfdb_tuple:pack_vs({?UNSET_VS}),
+    erlfdb:set_versionstamped_value(Tx, Key, Val),
+    ok.
+
+
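+% Derive the job state from its lock and sequence: no lock and no sequence
+% means finished, a held lock means running, and a sequence without a lock
+% means pending.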
+job_state(JLock, Seq) ->
+    case {JLock, Seq} of
+        {null, null} -> finished;
+        {JLock, _} when JLock =/= null -> running;
+        {null, Seq} when Seq =/= null -> pending
+    end.
+
+
+% This is a transaction context object similar to the Db = #{} one from
+% fabric2_fdb. It is used to cache the jobs path directory (to avoid extra
+% lookups on every operation) and to check for metadata changes (in case the
+% directory changes).
+%
+init_jtx(undefined) ->
+    fabric2_fdb:transactional(fun(Tx) -> init_jtx(Tx) end);
+
+init_jtx({erlfdb_transaction, _} = Tx) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    LayerPrefix = erlfdb_directory:get_name(CouchDB),
+    Jobs = erlfdb_tuple:pack({?JOBS}, LayerPrefix),
+    Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
+    % layer_prefix, md_version and tx here match db map fields in fabric2_fdb
+    % but we also assert that this is a job transaction using the jtx => true
+    % field
+    #{
+        jtx => true,
+        tx => Tx,
+        layer_prefix => LayerPrefix,
+        jobs_path => Jobs,
+        md_version => Version
+    }.
+
+
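+% Cache the current transaction in the process dictionary so that repeated
+% operations within the same transaction skip the metadata version check.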
+ensure_current(#{jtx := true, tx := Tx} = JTx) ->
+    case get(?COUCH_JOBS_CURRENT) of
+        Tx ->
+            JTx;
+        _ ->
+            JTx1 = update_current(JTx),
+            put(?COUCH_JOBS_CURRENT, Tx),
+            JTx1
+    end.
+
+
+update_current(#{tx := Tx, md_version := Version} = JTx) ->
+    case get_md_version_age(Version) of
+        Age when Age =< ?MD_VERSION_MAX_AGE_SEC ->
+            % Looked it up not too long ago. Avoid looking it up too frequently
+            JTx;
+        _ ->
+            case erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)) of
+                Version ->
+                    update_md_version_timestamp(Version),
+                    JTx;
+                _NewVersion ->
+                    update_jtx_cache(init_jtx(Tx))
+            end
+    end.
+
+
+update_jtx_cache(#{jtx := true, md_version := Version} = JTx) ->
+    CachedJTx = JTx#{tx := undefined},
+    ets:insert(?MODULE, {?JOBS_ETS_KEY, CachedJTx}),
+    update_md_version_timestamp(Version),
+    JTx.
+
+
+get_md_version_age(Version) ->
+    Timestamp = case ets:lookup(?MODULE, ?MD_TIMESTAMP_ETS_KEY) of
+        [{_, Version, Ts}] -> Ts;
+        _ -> 0
+    end,
+    erlang:system_time(second) - Timestamp.
+
+
+update_md_version_timestamp(Version) ->
+    Ts = erlang:system_time(second),
+    ets:insert(?MODULE, {?MD_TIMESTAMP_ETS_KEY, Version, Ts}).
diff --git a/src/couch_jobs/src/couch_jobs_notifier.erl b/src/couch_jobs/src/couch_jobs_notifier.erl
new file mode 100644
index 0000000..1c554a0
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_notifier.erl
@@ -0,0 +1,285 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_notifier).
+
+-behaviour(gen_server).
+
+
+-export([
+    start_link/1,
+    subscribe/4,
+    unsubscribe/2
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(TYPE_MONITOR_HOLDOFF_DEFAULT, 50).
+-define(TYPE_MONITOR_TIMEOUT_DEFAULT, "infinity").
+-define(GET_JOBS_RANGE_RATIO, 0.5).
+
+
+-record(st, {
+    jtx,
+    type,
+    monitor_pid,
+    subs, % #{JobId => #{Ref => {Pid, State, Seq}}}
+    pidmap, % #{{JobId, Pid} => Ref}
+    refmap % #{Ref => JobId}
+}).
+
+
+start_link(Type) ->
+    gen_server:start_link(?MODULE, [Type], []).
+
+
+subscribe(Type, JobId, State, Seq) ->
+    case couch_jobs_server:get_notifier_server(Type) of
+        {ok, Server} ->
+            CallArgs = {subscribe, JobId, State, Seq, self()},
+            Ref = gen_server:call(Server, CallArgs, infinity),
+            {ok, {Server, Ref}};
+        {error, Error} ->
+            {error, Error}
+    end.
+
+
+unsubscribe(Server, Ref) when is_reference(Ref) ->
+    gen_server:call(Server, {unsubscribe, Ref, self()}, infinity).
+
+
+init([Type]) ->
+    JTx = couch_jobs_fdb:get_jtx(),
+    St = #st{
+        jtx = JTx,
+        type = Type,
+        subs = #{},
+        pidmap = #{},
+        refmap = #{}
+    },
+    VS = get_type_vs(St),
+    HoldOff = get_holdoff(),
+    Timeout = get_timeout(),
+    Pid = couch_jobs_type_monitor:start(Type, VS, HoldOff, Timeout),
+    {ok, St#st{monitor_pid = Pid}}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+handle_call({subscribe, JobId, State, Seq, Pid}, _From, #st{} = St) ->
+    #st{pidmap = PidMap, refmap = RefMap} = St,
+    case maps:get({JobId, Pid}, PidMap, not_found) of
+        not_found ->
+            Ref = erlang:monitor(process, Pid),
+            St1 = update_sub(JobId, Ref, Pid, State, Seq, St),
+            St2 = St1#st{pidmap = PidMap#{{JobId, Pid} => Ref}},
+            St3 = St2#st{refmap = RefMap#{Ref => JobId}},
+            {reply, Ref, St3};
+        Ref when is_reference(Ref) ->
+            St1 = update_sub(JobId, Ref, Pid, State, Seq, St),
+            {reply, Ref, St1}
+    end;
+
+handle_call({unsubscribe, Ref, Pid}, _From, #st{} = St) ->
+    {reply, ok, unsubscribe_int(Ref, Pid, St)};
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info({type_updated, VS}, St) ->
+    VSMax = flush_type_updated_messages(VS),
+    {noreply, notify_subscribers(VSMax, St)};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+    % Don't crash out couch_jobs_notifier and the whole application; proper
+    % cleanup would eventually need to happen in the erlfdb:wait timeout code.
+    LogMsg = "~p : spurious erlfdb future ready message ~p",
+    couch_log:error(LogMsg, [?MODULE, Ref]),
+    {noreply, St};
+
+handle_info({'DOWN', Ref, process, Pid, _}, #st{} = St) ->
+    {noreply, unsubscribe_int(Ref, Pid, St)};
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) =:= 0 ->
+    St#st{subs = maps:remove(JobId, Subs)};
+
+update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) > 0 ->
+    St#st{subs = Subs#{JobId => Refs}}.
+
+
+update_sub(JobId, Ref, Pid, State, Seq, #st{subs = Subs} = St) ->
+    Refs = maps:get(JobId, Subs, #{}),
+    update_subs(JobId, Refs#{Ref => {Pid, State, Seq}}, St).
+
+
+remove_sub(JobId, Ref, #st{subs = Subs} = St) ->
+    case maps:get(JobId, Subs, not_found) of
+        not_found -> St;
+        #{} = Refs -> update_subs(JobId, maps:remove(Ref, Refs), St)
+    end.
+
+
+unsubscribe_int(Id, Ref, Pid, #st{pidmap = PidMap, refmap = RefMap} = St) ->
+    St1 = remove_sub(Id, Ref, St),
+    erlang:demonitor(Ref, [flush]),
+    St1#st{
+        pidmap = maps:remove({Id, Pid}, PidMap),
+        refmap = maps:remove(Ref, RefMap)
+    }.
+
+
+unsubscribe_int(Ref, Pid, #st{refmap = RefMap} = St) ->
+    case maps:get(Ref, RefMap, not_found) of
+        not_found -> St;
+        Id -> unsubscribe_int(Id, Ref, Pid, St)
+    end.
+
+
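+% Drain any queued {type_updated, VS} messages, keeping only the highest
+% versionstamp, so a burst of updates results in a single notification pass.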
+flush_type_updated_messages(VSMax) ->
+    receive
+        {type_updated, VS} ->
+            flush_type_updated_messages(max(VS, VSMax))
+    after
+        0 -> VSMax
+    end.
+
+
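+% Fetch state and data for the given inactive job IDs. When the inactive
+% subscriptions make up a large enough fraction of all subscriptions, one
+% filtered read over the whole type is cheaper than a point lookup per job.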
+get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, Ratio)
+        when Ratio >= ?GET_JOBS_RANGE_RATIO ->
+    Filter = fun(JobId) -> maps:is_key(JobId, InactiveIdMap) end,
+    JobMap = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_jobs(JTx1, Type, Filter)
+    end),
+    maps:map(fun(JobId, _) ->
+        case maps:is_key(JobId, JobMap) of
+            true -> maps:get(JobId, JobMap);
+            false -> {null, not_found, not_found}
+        end
+    end, InactiveIdMap);
+
+get_jobs(#st{jtx = JTx, type = Type}, InactiveIdMap, _) ->
+    couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        maps:map(fun(JobId, _) ->
+            Job = #{job => true, type => Type, id => JobId},
+            case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
+                {ok, Seq, State, Data} ->
+                    {Seq, State, Data};
+                {error, not_found} ->
+                    {null, not_found, not_found}
+            end
+        end, InactiveIdMap)
+    end).
+
+
+get_type_vs(#st{jtx = JTx, type = Type}) ->
+    couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_activity_vs(JTx1, Type)
+    end).
+
+
+% "Active since" is the set of jobs that have been active (running)
+% and updated at least once since the given versionstamp. These are relatively
+% cheap to find as it's just a range read in the ?ACTIVITY subspace.
+%
+get_active_since(#st{} = _St, not_found) ->
+    #{};
+
+get_active_since(#st{jtx = JTx, type = Type, subs = Subs}, VS) ->
+    AllUpdated = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_active_since(JTx1, Type, VS)
+    end),
+    maps:map(fun(_JobId, Data) ->
+        {VS, running, Data}
+    end, maps:with(maps:keys(Subs), AllUpdated)).
+
+
+notify_subscribers(_, #st{subs = Subs} = St) when map_size(Subs) =:= 0 ->
+    St;
+
+notify_subscribers(ActiveVS, #st{} = St1) ->
+    % First gather the easy (cheap) active jobs. Then, with those out of the
+    % way, inspect each remaining job to get its state.
+    Active = get_active_since(St1, ActiveVS),
+    St2 = notify_job_ids(Active, St1),
+    ActiveIds = maps:keys(Active),
+    Subs = St2#st.subs,
+    InactiveIdMap = maps:without(ActiveIds, Subs),
+    InactiveRatio = maps:size(InactiveIdMap) / maps:size(Subs),
+    Inactive = get_jobs(St2, InactiveIdMap, InactiveRatio),
+    notify_job_ids(Inactive, St2).
+
+
+notify_job_ids(#{} = Jobs, #st{type = Type} = St0) ->
+    maps:fold(fun(Id, {VS, State, Data}, #st{} = StAcc) ->
+        DoUnsub = lists:member(State, [finished, not_found]),
+        maps:fold(fun
+            (_Ref, {_Pid, running, OldVS}, St) when State =:= running,
+                    OldVS >= VS ->
+                St;
+            (Ref, {Pid, running, OldVS}, St) when State =:= running,
+                    OldVS < VS ->
+                % For running state send updates even if state doesn't change
+                notify(Pid, Ref, Type, Id, State, Data),
+                update_sub(Id, Ref, Pid, running, VS, St);
+            (_Ref, {_Pid, OldState, _VS}, St) when OldState =:= State ->
+                St;
+            (Ref, {Pid, _State, _VS}, St) ->
+                notify(Pid, Ref, Type, Id, State, Data),
+                case DoUnsub of
+                    true -> unsubscribe_int(Id, Ref, Pid, St);
+                    false -> update_sub(Id, Ref, Pid, State, VS, St)
+                end
+        end, StAcc, maps:get(Id, StAcc#st.subs, #{}))
+    end, St0, Jobs).
+
+
+notify(Pid, Ref, Type, Id, State, Data) ->
+    Pid ! {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data}.
+
+
+get_holdoff() ->
+    config:get_integer("couch_jobs", "type_monitor_holdoff_msec",
+        ?TYPE_MONITOR_HOLDOFF_DEFAULT).
+
+
+get_timeout() ->
+    Default = ?TYPE_MONITOR_TIMEOUT_DEFAULT,
+    case config:get("couch_jobs", "type_monitor_timeout_msec", Default) of
+        "infinity" -> infinity;
+        Milliseconds -> list_to_integer(Milliseconds)
+    end.
diff --git a/src/couch_jobs/src/couch_jobs_notifier_sup.erl b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
new file mode 100644
index 0000000..81d9349
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
@@ -0,0 +1,64 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_notifier_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+    start_link/0,
+
+    start_notifier/1,
+    stop_notifier/1,
+    get_child_pids/0
+]).
+
+-export([
+    init/1
+]).
+
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+start_notifier(Type) ->
+    supervisor:start_child(?MODULE, [Type]).
+
+
+stop_notifier(Pid) ->
+    supervisor:terminate_child(?MODULE, Pid).
+
+
+get_child_pids() ->
+    lists:map(fun({_Id, Pid, _Type, _Mod}) ->
+        Pid
+    end, supervisor:which_children(?MODULE)).
+
+
+init(_) ->
+    Flags = #{
+        strategy => simple_one_for_one,
+        intensity => 10,
+        period => 3
+    },
+    Children = [
+        #{
+            id => couch_jobs_notifier,
+            restart => temporary,
+            start => {couch_jobs_notifier, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_pending.erl b/src/couch_jobs/src/couch_jobs_pending.erl
new file mode 100644
index 0000000..ab53c59
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_pending.erl
@@ -0,0 +1,143 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_pending).
+
+
+-export([
+    enqueue/4,
+    dequeue/4,
+    remove/4
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-define(RANGE_LIMIT, 1024).
+
+
+enqueue(#{jtx := true} = JTx, Type, STime, JobId) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
+    erlfdb:set(Tx, Key, <<>>),
+    WatchKey = erlfdb_tuple:pack({?WATCHES_PENDING, Type}, Jobs),
+    erlfdb:add(Tx, WatchKey, 1),
+    ok.
+
+
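+% Dequeue a pending job. With no_schedule (last argument true), all jobs were
+% enqueued with STime = 0 and UUID-like IDs, so probe around a randomly
+% generated UUID. Otherwise, pick a random key from the range of jobs
+% scheduled at or before MaxPriority.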
+dequeue(#{jtx := true} = JTx, Type, _, true) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Prefix = erlfdb_tuple:pack({?PENDING, Type, 0}, Jobs),
+    case get_random_item(Tx, Prefix) of
+        {error, not_found} ->
+            {not_found, get_pending_watch(JTx, Type)};
+        {ok, PendingKey} ->
+            erlfdb:clear(Tx, PendingKey),
+            {JobId} = erlfdb_tuple:unpack(PendingKey, Prefix),
+            {ok, JobId}
+    end;
+
+dequeue(#{jtx := true} = JTx, Type, MaxPriority, _) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Prefix = erlfdb_tuple:pack({?PENDING, Type}, Jobs),
+    StartKeySel = erlfdb_key:first_greater_than(Prefix),
+    End = erlfdb_tuple:pack({MaxPriority, <<16#FF>>}, Prefix),
+    EndKeySel = erlfdb_key:first_greater_or_equal(End),
+    case clear_random_key_from_range(Tx, StartKeySel, EndKeySel) of
+        {error, not_found} ->
+            {not_found, get_pending_watch(JTx, Type)};
+        {ok, PendingKey} ->
+            {_, JobId} = erlfdb_tuple:unpack(PendingKey, Prefix),
+            {ok, JobId}
+    end.
+
+
+remove(#{jtx := true} = JTx, Type, JobId, STime) ->
+    #{tx := Tx, jobs_path := Jobs} = JTx,
+    Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
+    erlfdb:clear(Tx, Key).
+
+
+%% Private functions
+
+
+% Pick a random item from the range without reading in the keys first. The
+% constraint is that IDs should look like random UUIDs.
+get_random_item(Tx, Prefix) ->
+    Id = fabric2_util:uuid(),
+    Snapshot = erlfdb:snapshot(Tx),
+    % Try to be fair and switch evenly between trying IDs before or after the
+    % randomly generated one. Otherwise, always trying "before" first would
+    % leave a lot of <<"fff...">> IDs in the queue for too long, and trying
+    % "after" first would leave a lot of <<"00...">> ones waiting.
+    case rand:uniform() > 0.5 of
+        true ->
+            case get_after(Snapshot, Prefix, Id) of
+                {error, not_found} -> get_before(Snapshot, Prefix, Id);
+                {ok, Key} -> {ok, Key}
+            end;
+        false ->
+            case get_before(Snapshot, Prefix, Id) of
+                {error, not_found} -> get_after(Snapshot, Prefix, Id);
+                {ok, Key} -> {ok, Key}
+            end
+    end.
+
+
+get_before(Snapshot, Prefix, Id) ->
+    KSel = erlfdb_key:last_less_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
+    PrefixSize = byte_size(Prefix),
+    case erlfdb:wait(erlfdb:get_key(Snapshot, KSel)) of
+        <<Prefix:PrefixSize/binary, _/binary>> = Key ->  {ok, Key};
+        _ -> {error, not_found}
+    end.
+
+
+get_after(Snapshot, Prefix, Id) ->
+    KSel = erlfdb_key:first_greater_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
+    PrefixSize = byte_size(Prefix),
+    case erlfdb:wait(erlfdb:get_key(Snapshot, KSel)) of
+        <<Prefix:PrefixSize/binary, _/binary>> = Key -> {ok, Key};
+        _ -> {error, not_found}
+    end.
+
+
+% Read a limited snapshot of the range, then randomly pick a key to clear.
+% Before clearing, add a read conflict on the key in case other workers have
+% picked the same key.
+%
+clear_random_key_from_range(Tx, Start, End) ->
+    Opts = [
+        {limit, ?RANGE_LIMIT},
+        {snapshot, true}
+    ],
+    case erlfdb:wait(erlfdb:get_range(Tx, Start, End, Opts)) of
+        [] ->
+            {error, not_found};
+        [{Key, _}] ->
+            erlfdb:add_read_conflict_key(Tx, Key),
+            erlfdb:clear(Tx, Key),
+            {ok, Key};
+        [{_, _} | _] = KVs ->
+            Index = rand:uniform(length(KVs)),
+            {Key, _} = lists:nth(Index, KVs),
+            erlfdb:add_read_conflict_key(Tx, Key),
+            erlfdb:clear(Tx, Key),
+            {ok, Key}
+    end.
+
+
+get_pending_watch(#{jtx := true} = JTx, Type) ->
+    #{tx := Tx, jobs_path := Jobs} = couch_jobs_fdb:get_jtx(JTx),
+    Key = erlfdb_tuple:pack({?WATCHES_PENDING, Type}, Jobs),
+    erlfdb:watch(Tx, Key).
diff --git a/src/couch_jobs/src/couch_jobs_server.erl b/src/couch_jobs/src/couch_jobs_server.erl
new file mode 100644
index 0000000..2e03c7d
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_server.erl
@@ -0,0 +1,193 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_server).
+
+-behaviour(gen_server).
+
+
+-export([
+    start_link/0,
+    get_notifier_server/1,
+    force_check_types/0
+]).
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-define(TYPE_CHECK_PERIOD_DEFAULT, 15000).
+-define(MAX_JITTER_DEFAULT, 5000).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
+
+
+get_notifier_server(Type) ->
+    case get_type_pid_refs(Type) of
+        {{_, _}, {NotifierPid, _}} ->
+            {ok, NotifierPid};
+        not_found ->
+            force_check_types(),
+            case get_type_pid_refs(Type) of
+                {{_, _}, {NotifierPid, _}} ->
+                    {ok, NotifierPid};
+                not_found ->
+                    {error, not_found}
+            end
+    end.
+
+
+force_check_types() ->
+    gen_server:call(?MODULE, check_types, infinity).
+
+
+init(_) ->
+    % couch_jobs_server starts after the notifier and activity monitor
+    % supervisors, so if it restarts there could be stale notifiers or
+    % activity monitors around. Kill those, as we'd start new ones anyway.
+    reset_monitors(),
+    reset_notifiers(),
+    ets:new(?MODULE, [protected, named_table]),
+    check_types(),
+    schedule_check(),
+    {ok, nil}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+handle_call(check_types, _From, St) ->
+    check_types(),
+    {reply, ok, St};
+
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(check_types, St) ->
+    check_types(),
+    schedule_check(),
+    {noreply, St};
+
+handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
+    LogMsg = "~p : process ~p exited with ~p",
+    couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
+    {stop, {unexpected_process_exit, Pid, Reason}, St};
+
+handle_info({Ref, ready}, St) when is_reference(Ref) ->
+    % Don't crash out couch_jobs_server and the whole application; proper
+    % cleanup would eventually need to happen in the erlfdb:wait timeout code.
+    LogMsg = "~p : spurious erlfdb future ready message ~p",
+    couch_log:error(LogMsg, [?MODULE, Ref]),
+    {noreply, St};
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
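+% Compare the job types registered in FDB with the ones we have monitors for
+% locally, then start or stop monitor/notifier pairs to match.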
+check_types() ->
+    FdbTypes = fdb_types(),
+    EtsTypes = ets_types(),
+    ToStart = FdbTypes -- EtsTypes,
+    ToStop = EtsTypes -- FdbTypes,
+    lists:foreach(fun(Type) -> start_monitors(Type) end, ToStart),
+    lists:foreach(fun(Type) -> stop_monitors(Type) end, ToStop).
+
+
+start_monitors(Type) ->
+    MonPidRef = case couch_jobs_activity_monitor_sup:start_monitor(Type) of
+        {ok, Pid1} -> {Pid1, monitor(process, Pid1)};
+        {error, Error1} -> error({failed_to_start_monitor, Type, Error1})
+    end,
+    NotifierPidRef = case couch_jobs_notifier_sup:start_notifier(Type) of
+        {ok, Pid2} -> {Pid2, monitor(process, Pid2)};
+        {error, Error2} -> error({failed_to_start_notifier, Type, Error2})
+    end,
+    ets:insert_new(?MODULE, {Type, MonPidRef, NotifierPidRef}).
+
+
+stop_monitors(Type) ->
+    {{MonPid, MonRef}, {NotifierPid, NotifierRef}} = get_type_pid_refs(Type),
+    ok = couch_jobs_activity_monitor_sup:stop_monitor(MonPid),
+    demonitor(MonRef, [flush]),
+    ok = couch_jobs_notifier_sup:stop_notifier(NotifierPid),
+    demonitor(NotifierRef, [flush]),
+    ets:delete(?MODULE, Type).
+
+
+reset_monitors() ->
+    lists:foreach(fun(Pid) ->
+        couch_jobs_activity_monitor_sup:stop_monitor(Pid)
+    end, couch_jobs_activity_monitor_sup:get_child_pids()).
+
+
+reset_notifiers() ->
+    lists:foreach(fun(Pid) ->
+        couch_jobs_notifier_sup:stop_notifier(Pid)
+    end, couch_jobs_notifier_sup:get_child_pids()).
+
+
+get_type_pid_refs(Type) ->
+    case ets:lookup(?MODULE, Type) of
+        [{_, MonPidRef, NotifierPidRef}] -> {MonPidRef, NotifierPidRef};
+        [] -> not_found
+    end.
+
+
+ets_types() ->
+    lists:flatten(ets:match(?MODULE, {'$1', '_', '_'})).
+
+
+fdb_types() ->
+    try
+        couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+            couch_jobs_fdb:get_types(JTx)
+        end)
+    catch
+        error:{timeout, _} ->
+            couch_log:warning("~p : Timed out connecting to FDB", [?MODULE]),
+            []
+    end.
+
+
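+% Schedule the next type check with some random jitter so nodes don't all
+% scan for type changes at the same time.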
+schedule_check() ->
+    Timeout = get_period_msec(),
+    MaxJitter = max(Timeout div 2, get_max_jitter_msec()),
+    Wait = Timeout + rand:uniform(max(1, MaxJitter)),
+    erlang:send_after(Wait, self(), check_types).
+
+
+get_period_msec() ->
+    config:get_integer("couch_jobs", "type_check_period_msec",
+        ?TYPE_CHECK_PERIOD_DEFAULT).
+
+
+get_max_jitter_msec() ->
+    config:get_integer("couch_jobs", "type_check_max_jitter_msec",
+        ?MAX_JITTER_DEFAULT).
diff --git a/src/couch_jobs/src/couch_jobs_sup.erl b/src/couch_jobs/src/couch_jobs_sup.erl
new file mode 100644
index 0000000..d790237
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_sup.erl
@@ -0,0 +1,66 @@
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_sup).
+
+
+-behaviour(supervisor).
+
+
+-export([
+    start_link/0
+]).
+
+-export([
+    init/1
+]).
+
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+    Flags = #{
+        strategy => rest_for_one,
+        intensity => 3,
+        period => 10
+    },
+    Children = [
+        #{
+            id => couch_jobs_fdb,
+            restart => transient,
+            start => {couch_jobs_fdb, init_cache, []}
+        },
+        #{
+            id => couch_jobs_activity_monitor_sup,
+            restart => permanent,
+            shutdown => brutal_kill,
+            type => supervisor,
+            start => {couch_jobs_activity_monitor_sup, start_link, []}
+        },
+        #{
+            id => couch_jobs_notifier_sup,
+            restart => permanent,
+            shutdown => brutal_kill,
+            type => supervisor,
+            start => {couch_jobs_notifier_sup, start_link, []}
+        },
+        #{
+            id => couch_jobs_server,
+            restart => permanent,
+            shutdown => brutal_kill,
+            start => {couch_jobs_server, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/couch_jobs/src/couch_jobs_type_monitor.erl b/src/couch_jobs/src/couch_jobs_type_monitor.erl
new file mode 100644
index 0000000..562a866
--- /dev/null
+++ b/src/couch_jobs/src/couch_jobs_type_monitor.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_type_monitor).
+
+
+-export([
+    start/4
+]).
+
+
+-include("couch_jobs.hrl").
+
+
+-record(st, {
+    jtx,
+    type,
+    vs,
+    parent,
+    timestamp,
+    holdoff,
+    timeout
+}).
+
+
+start(Type, VS, HoldOff, Timeout) ->
+    Parent = self(),
+    spawn_link(fun() ->
+        loop(#st{
+            jtx = couch_jobs_fdb:get_jtx(),
+            type = Type,
+            vs = VS,
+            parent = Parent,
+            timestamp = 0,
+            holdoff = HoldOff,
+            timeout = Timeout
+        })
+    end).
+
+
+loop(#st{vs = VS, timeout = Timeout} = St) ->
+    {St1, Watch} = case get_vs_and_watch(St) of
+        {VS1, W} when VS1 =/= VS -> {notify(St#st{vs = VS1}), W};
+        {VS, W} -> {St, W}
+    end,
+    try
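+        % The watch may fail with a transient FDB error (code 1009) or time
+        % out; in either case cancel it and go around the loop again.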
+        erlfdb:wait(Watch, [{timeout, Timeout}])
+    catch
+        error:{erlfdb_error, 1009} ->
+            erlfdb:cancel(Watch, [flush]),
+            ok;
+        error:{timeout, _} ->
+            erlfdb:cancel(Watch, [flush]),
+            ok
+    end,
+    loop(St1).
+
+
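+% Send a {type_updated, VS} message to the parent, first sleeping off any
+% remaining holdoff time so notifications are rate-limited.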
+notify(#st{} = St) ->
+    #st{holdoff = HoldOff, parent = Pid, timestamp = Ts, vs = VS} = St,
+    Now = erlang:system_time(millisecond),
+    case Now - Ts of
+        Dt when Dt < HoldOff ->
+            timer:sleep(max(HoldOff - Dt, 0));
+        _ ->
+            ok
+    end,
+    Pid ! {type_updated, VS},
+    St#st{timestamp = Now}.
+
+
+get_vs_and_watch(#st{jtx = JTx, type = Type}) ->
+    couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+        couch_jobs_fdb:get_activity_vs_and_watch(JTx1, Type)
+    end).
diff --git a/src/couch_jobs/test/couch_jobs_tests.erl b/src/couch_jobs/test/couch_jobs_tests.erl
new file mode 100644
index 0000000..a7e085e
--- /dev/null
+++ b/src/couch_jobs/test/couch_jobs_tests.erl
@@ -0,0 +1,606 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_jobs_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+% The job creation API can take an undefined Tx object; in that case it will
+% start its own transaction
+-define(TX, undefined).
+
+
+couch_jobs_basic_test_() ->
+    {
+        "Test couch jobs basics",
+        {
+            setup,
+            fun setup_couch/0, fun teardown_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun add_remove_pending/1,
+                    fun add_remove_errors/1,
+                    fun get_job_data_and_state/1,
+                    fun resubmit_as_job_creator/1,
+                    fun type_timeouts_and_server/1,
+                    fun dead_notifier_restarts_jobs_server/1,
+                    fun bad_messages_restart_couch_jobs_server/1,
+                    fun bad_messages_restart_notifier/1,
+                    fun bad_messages_restart_activity_monitor/1,
+                    fun basic_accept_and_finish/1,
+                    fun accept_blocking/1,
+                    fun job_processor_update/1,
+                    fun resubmit_enqueues_job/1,
+                    fun resubmit_custom_schedtime/1,
+                    fun accept_max_schedtime/1,
+                    fun accept_no_schedule/1,
+                    fun subscribe/1,
+                    fun subscribe_wait_multiple/1,
+                    fun enqueue_inactive/1,
+                    fun remove_running_job/1,
+                    fun check_get_jobs/1,
+                    fun use_fabric_transaction_object/1
+                ]
+            }
+        }
+    }.
+
+
+setup_couch() ->
+    test_util:start_couch([fabric]).
+
+
+teardown_couch(Ctx) ->
+    test_util:stop_couch(Ctx),
+    meck:unload().
+
+
+setup() ->
+    application:start(couch_jobs),
+    clear_jobs(),
+    T1 = {<<"t1">>, 1024}, % a complex type should work
+    T2 = 42, % a number should work as well
+    T1Timeout = 2,
+    T2Timeout = 3,
+    couch_jobs:set_type_timeout(T1, T1Timeout),
+    couch_jobs:set_type_timeout(T2, T2Timeout),
+    #{
+        t1 => T1,
+        t2 => T2,
+        t1_timeout => T1Timeout,
+        j1 => <<"j1">>,
+        j2 => <<"j2">>,
+        dbname => ?tempdb()
+    }.
+
+
+teardown(#{dbname := DbName}) ->
+    clear_jobs(),
+    application:stop(couch_jobs),
+    AllDbs = fabric2_db:list_dbs(),
+    case lists:member(DbName, AllDbs) of
+        true -> ok = fabric2_db:delete(DbName, []);
+        false -> ok
+    end,
+    meck:unload().
+
+
+clear_jobs() ->
+    couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
+        #{jobs_path := Jobs, tx := Tx} = JTx,
+        erlfdb:clear_range_startswith(Tx, Jobs)
+    end).
+
+
+restart_app() ->
+    application:stop(couch_jobs),
+    application:start(couch_jobs),
+    couch_jobs_server:force_check_types().
+
+
+get_job(Type, JobId) ->
+    couch_jobs_fdb:get_job(Type, JobId).
+
+
+add_remove_pending(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
+    ?_test(begin
+        ?assertEqual(ok, couch_jobs:add(?TX, T1, J1, #{})),
+        ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+        ?assertEqual(ok, couch_jobs:remove(?TX, T1, J1)),
+        % Data payloads and numeric job types should work as well. Also do it
+        % inside a transaction
+        Data = #{<<"x">> => 42},
+        ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+            couch_jobs:add(Tx, T2, J2, Data)
+        end)),
+        ?assertMatch(#{state := pending, data := Data}, get_job(T2, J2)),
+        ?assertEqual(ok, couch_jobs:remove(?TX, T2, J2))
+    end).
+
+
+get_job_data_and_state(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        Data = #{<<"x">> => 42},
+        ok = couch_jobs:add(?TX, T, J, Data),
+        ?assertEqual({ok, Data}, couch_jobs:get_job_data(?TX, T, J)),
+        ?assertEqual({ok, pending}, couch_jobs:get_job_state(?TX, T, J)),
+        ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+        ?assertEqual({error, not_found}, couch_jobs:get_job_data(?TX, T, J)),
+        ?assertEqual({error, not_found}, couch_jobs:get_job_state(?TX, T, J))
+    end).
+
+
+add_remove_errors(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ?assertEqual({error, not_found}, couch_jobs:remove(?TX, 999, <<"x">>)),
+        ?assertMatch({error, {json_encoding_error, _}}, couch_jobs:add(?TX, T,
+            J, #{1 => 2})),
+        ?assertEqual({error, no_type_timeout}, couch_jobs:add(?TX, <<"x">>, J,
+            #{})),
+        ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+        ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{})),
+        ?assertEqual(ok, couch_jobs:remove(?TX, T, J))
+    end).
+
+
+resubmit_as_job_creator(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        Data = #{<<"x">> => 42},
+        ok = couch_jobs:add(?TX, T, J, Data, 15),
+
+        % Job was pending, doesn't get resubmitted
+        ok = couch_jobs:add(?TX, T, J, Data, 16),
+        ?assertMatch(#{state := pending, stime := 16}, get_job(T, J)),
+
+        {ok, Job1, Data} = couch_jobs:accept(T),
+
+        % If it is running, it gets flagged to be resubmitted
+        ok = couch_jobs:add(?TX, T, J, Data, 17),
+        ?assertMatch(#{state := running, stime := 17}, get_job(T, J)),
+        ?assertEqual(true, couch_jobs:is_resubmitted(get_job(T, J))),
+
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+        % It should be pending according to the resubmit flag
+        ?assertMatch(#{state := pending, stime := 17}, get_job(T, J)),
+
+        % A finished job will be re-enqueued
+        {ok, Job2, _} = couch_jobs:accept(T),
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+        ?assertMatch(#{state := finished, stime := 17}, get_job(T, J)),
+        ok = couch_jobs:add(?TX, T, J, Data, 18),
+        ?assertMatch(#{state := pending, stime := 18}, get_job(T, J))
+    end).
+
+
+type_timeouts_and_server(#{t1 := T, t1_timeout := T1Timeout}) ->
+    ?_test(begin
+        couch_jobs_server:force_check_types(),
+
+        ?assertEqual(T1Timeout, couch_jobs:get_type_timeout(T)),
+
+        ?assertEqual(2,
+            length(couch_jobs_activity_monitor_sup:get_child_pids())),
+        ?assertEqual(2, length(couch_jobs_notifier_sup:get_child_pids())),
+        ?assertMatch({ok, _}, couch_jobs_server:get_notifier_server(T)),
+
+        ?assertEqual(ok, couch_jobs:set_type_timeout(<<"t3">>, 8)),
+        couch_jobs_server:force_check_types(),
+        ?assertEqual(3,
+            length(couch_jobs_activity_monitor_sup:get_child_pids())),
+        ?assertEqual(3, length(couch_jobs_notifier_sup:get_child_pids())),
+
+        ?assertEqual(ok, couch_jobs:clear_type_timeout(<<"t3">>)),
+        couch_jobs_server:force_check_types(),
+        ?assertEqual(2,
+            length(couch_jobs_activity_monitor_sup:get_child_pids())),
+        ?assertEqual(2,
+            length(couch_jobs_notifier_sup:get_child_pids())),
+        ?assertMatch({error, _},
+            couch_jobs_server:get_notifier_server(<<"t3">>)),
+
+        ?assertEqual(not_found, couch_jobs:get_type_timeout(<<"t3">>))
+    end).
+
+
+dead_notifier_restarts_jobs_server(#{}) ->
+    ?_test(begin
+        couch_jobs_server:force_check_types(),
+
+        ServerPid = whereis(couch_jobs_server),
+        Ref = monitor(process, ServerPid),
+
+        [Notifier1, _Notifier2] = couch_jobs_notifier_sup:get_child_pids(),
+        exit(Notifier1, kill),
+
+        % Killing a notifier should kill the server as well
+        receive {'DOWN', Ref, _, _, _} -> ok end
+    end).
+
+
+bad_messages_restart_couch_jobs_server(#{}) ->
+    ?_test(begin
+        % couch_jobs_server dies on bad cast
+        ServerPid1 = whereis(couch_jobs_server),
+        Ref1 = monitor(process, ServerPid1),
+        gen_server:cast(ServerPid1, bad_cast),
+        receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % couch_jobs_server dies on bad call
+        ServerPid2 = whereis(couch_jobs_server),
+        Ref2 = monitor(process, ServerPid2),
+        catch gen_server:call(ServerPid2, bad_call),
+        receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % couch_jobs_server dies on bad info
+        ServerPid3 = whereis(couch_jobs_server),
+        Ref3 = monitor(process, ServerPid3),
+        ServerPid3 ! a_random_message,
+        receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+        restart_app()
+    end).
+
+
+bad_messages_restart_notifier(#{}) ->
+    ?_test(begin
+        couch_jobs_server:force_check_types(),
+
+        % bad cast kills the notifier
+        [AMon1, _] = couch_jobs_notifier_sup:get_child_pids(),
+        Ref1 = monitor(process, AMon1),
+        gen_server:cast(AMon1, bad_cast),
+        receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % bad call kills the notifier
+        [AMon2, _] = couch_jobs_notifier_sup:get_child_pids(),
+        Ref2 = monitor(process, AMon2),
+        catch gen_server:call(AMon2, bad_call),
+        receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % bad info message kills the notifier
+        [AMon3, _] = couch_jobs_notifier_sup:get_child_pids(),
+        Ref3 = monitor(process, AMon3),
+        AMon3 ! a_bad_message,
+        receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+        restart_app()
+    end).
+
+
+bad_messages_restart_activity_monitor(#{}) ->
+    ?_test(begin
+        couch_jobs_server:force_check_types(),
+
+        % bad cast kills the activity monitor
+        [AMon1, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+        Ref1 = monitor(process, AMon1),
+        gen_server:cast(AMon1, bad_cast),
+        receive {'DOWN', Ref1, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % bad call kills the activity monitor
+        [AMon2, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+        Ref2 = monitor(process, AMon2),
+        catch gen_server:call(AMon2, bad_call),
+        receive {'DOWN', Ref2, _, _, _} -> ok end,
+
+        restart_app(),
+
+        % bad info message kills the activity monitor
+        [AMon3, _] = couch_jobs_activity_monitor_sup:get_child_pids(),
+        Ref3 = monitor(process, AMon3),
+        AMon3 ! a_bad_message,
+        receive {'DOWN', Ref3, _, _, _} -> ok end,
+
+        restart_app()
+    end).
+
+
+basic_accept_and_finish(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J, #{}),
+        {ok, Job, #{}} = couch_jobs:accept(T),
+        ?assertMatch(#{state := running}, get_job(T, J)),
+        % check json validation for bad data in finish
+        ?assertMatch({error, {json_encoding_error, _}},
+            fabric2_fdb:transactional(fun(Tx) ->
+                couch_jobs:finish(Tx, Job, #{1 => 1})
+            end)),
+        Data = #{<<"x">> => 42},
+        ?assertEqual(ok, fabric2_fdb:transactional(fun(Tx) ->
+            couch_jobs:finish(Tx, Job, Data)
+        end)),
+        ?assertMatch(#{state := finished, data := Data}, get_job(T, J))
+    end).
+
+
+accept_blocking(#{t1 := T, j1 := J1, j2 := J2}) ->
+    ?_test(begin
+        Accept = fun() -> exit(couch_jobs:accept(T)) end,
+        WaitAccept = fun(Ref) ->
+            receive
+                {'DOWN', Ref, _, _, Res} -> Res
+            after
+                500 -> timeout
+            end
+        end,
+        {_, Ref1} = spawn_monitor(Accept),
+        ok = couch_jobs:add(?TX, T, J1, #{}),
+        ?assertMatch({ok, #{id := J1}, #{}}, WaitAccept(Ref1)),
+        {_, Ref2} = spawn_monitor(Accept),
+        ?assertEqual(timeout, WaitAccept(Ref2)),
+        ok = couch_jobs:add(?TX, T, J2, #{}),
+        ?assertMatch({ok, #{id := J2}, #{}}, WaitAccept(Ref2))
+    end).
+
+
+job_processor_update(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J, #{}),
+        {ok, Job, #{}} = couch_jobs:accept(T),
+
+        % Use proper transactions in a few places here instead of passing in
+        % ?TX. This is mostly to increase code coverage.
+
+        ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+            couch_jobs:update(Tx, Job, #{<<"x">> => 1})
+        end)),
+
+        ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+            get_job(T, J)),
+
+        ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+            couch_jobs:update(Tx, Job)
+        end)),
+
+        ?assertMatch(#{data := #{<<"x">> := 1}, state := running},
+            get_job(T, J)),
+
+        ?assertMatch({ok, #{job := true}}, fabric2_fdb:transactional(fun(Tx) ->
+            couch_jobs:update(Tx, Job, #{<<"x">> => 2})
+        end)),
+
+        % check json validation for bad data in update
+        ?assertMatch({error, {json_encoding_error, _}},
+            fabric2_fdb:transactional(fun(Tx) ->
+                couch_jobs:update(Tx, Job, #{1 => 1})
+            end)),
+
+        ?assertMatch(#{data := #{<<"x">> := 2}, state := running},
+            get_job(T, J)),
+
+        % Finish may update the data as well
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"x">> => 3})),
+        ?assertMatch(#{data := #{<<"x">> := 3}, state := finished},
+            get_job(T, J))
+    end).
+
+
+resubmit_enqueues_job(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J, #{}),
+        {ok, Job1, #{}} = couch_jobs:accept(T),
+        ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job1, 6)),
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job1)),
+        ?assertMatch(#{state := pending, stime := 6}, get_job(T, J)),
+        {ok, Job2, #{}} = couch_jobs:accept(T),
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job2)),
+        ?assertMatch(#{state := finished}, get_job(T, J))
+    end).
+
+
+resubmit_custom_schedtime(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ?assertEqual(ok, couch_jobs:add(?TX, T, J, #{}, 7)),
+        {ok, Job, #{}} = couch_jobs:accept(T),
+        ?assertMatch({ok, _}, couch_jobs:resubmit(?TX, Job, 9)),
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job)),
+        ?assertMatch(#{stime := 9, state := pending}, get_job(T, J))
+    end).
+
+
+accept_max_schedtime(#{t1 := T, j1 := J1, j2 := J2}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J1, #{}, 5000),
+        ok = couch_jobs:add(?TX, T, J2, #{}, 3000),
+        ?assertEqual({error, not_found}, couch_jobs:accept(T,
+            #{max_sched_time => 1000})),
+        ?assertMatch({ok, #{id := J2}, _}, couch_jobs:accept(T,
+            #{max_sched_time => 3000})),
+        ?assertMatch({ok, #{id := J1}, _}, couch_jobs:accept(T,
+            #{max_sched_time => 9000}))
+    end).
+
+
+accept_no_schedule(#{t1 := T}) ->
+    ?_test(begin
+        JobCount = 25,
+        Jobs = [fabric2_util:uuid() || _ <- lists:seq(1, JobCount)],
+        [couch_jobs:add(?TX, T, J, #{}) || J <- Jobs],
+        InvalidOpts = #{no_schedule => true, max_sched_time => 1},
+        ?assertMatch({error, _}, couch_jobs:accept(T, InvalidOpts)),
+        AcceptOpts = #{no_schedule => true},
+        Accepted = [begin
+            {ok, #{id := J}, _} = couch_jobs:accept(T, AcceptOpts),
+            J
+        end || _ <- lists:seq(1, JobCount)],
+        ?assertEqual(lists:sort(Jobs), lists:sort(Accepted))
+    end).
+
+
+subscribe(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J, #{<<"z">> => 1}),
+
+        ?assertEqual({error, not_found}, couch_jobs:subscribe(<<"xyz">>, J)),
+        ?assertEqual({error, not_found}, couch_jobs:subscribe(T, <<"j5">>)),
+
+        SubRes0 = couch_jobs:subscribe(T, J),
+        ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes0),
+        {ok, SubId0, pending, _} = SubRes0,
+
+        SubRes1 = couch_jobs:subscribe(T, J),
+        ?assertEqual(SubRes0, SubRes1),
+
+        ?assertEqual(ok, couch_jobs:unsubscribe(SubId0)),
+
+        SubRes = couch_jobs:subscribe(T, J),
+        ?assertMatch({ok, {_, _}, pending, #{<<"z">> := 1}}, SubRes),
+        {ok, SubId, pending, _} = SubRes,
+
+        {ok, Job, _} = couch_jobs:accept(T),
+        ?assertMatch({T, J, running, #{<<"z">> := 1}},
+            couch_jobs:wait(SubId, 5000)),
+
+        % Make sure we get intermediate `running` updates
+        ?assertMatch({ok, _}, couch_jobs:update(?TX, Job, #{<<"z">> => 2})),
+        ?assertMatch({T, J, running, #{<<"z">> := 2}},
+            couch_jobs:wait(SubId, 5000)),
+
+        ?assertEqual(ok, couch_jobs:finish(?TX, Job, #{<<"z">> => 3})),
+        ?assertMatch({T, J, finished, #{<<"z">> := 3}},
+            couch_jobs:wait(SubId, finished, 5000)),
+
+        ?assertEqual(timeout, couch_jobs:wait(SubId, 50)),
+
+        ?assertEqual({ok, finished, #{<<"z">> => 3}},
+            couch_jobs:subscribe(T, J)),
+
+        ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+        ?assertEqual({error, not_found}, couch_jobs:subscribe(T, J))
+    end).
+
+
+subscribe_wait_multiple(#{t1 := T, j1 := J1, j2 := J2}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J1, #{}),
+        ok = couch_jobs:add(?TX, T, J2, #{}),
+
+        {ok, S1, pending, #{}} = couch_jobs:subscribe(T, J1),
+        {ok, S2, pending, #{}} = couch_jobs:subscribe(T, J2),
+
+        Subs = [S1, S2],
+
+        % Accept one job. Only one running update is expected. PJob1 and
+        % PJob2 do not necessarily correspond to J1 and J2; they could be
+        % accepted as J2 and J1 respectively.
+        {ok, PJob1, _} = couch_jobs:accept(T),
+        ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+        ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+        % Accept another job. Expect another update.
+        {ok, PJob2, _} = couch_jobs:accept(T),
+        ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+        ?assertMatch(timeout, couch_jobs:wait(Subs, 50)),
+
+        ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob1, #{<<"q">> => 5})),
+        ?assertMatch({ok, _}, couch_jobs:update(?TX, PJob2, #{<<"r">> => 6})),
+
+        % Each job was updated once, expect two running updates.
+        ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+        ?assertMatch({_, _, running, _}, couch_jobs:wait(Subs, 5000)),
+
+        % Finish one job. Expect one finished update only.
+        ?assertEqual(ok, couch_jobs:finish(?TX, PJob1)),
+
+        ?assertMatch({_, _, finished, #{<<"q">> := 5}},
+            couch_jobs:wait(Subs, finished, 5000)),
+        ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50)),
+
+        % Finish another job. However, unsubscribe should flush the message
+        % and we should not get it.
+        ?assertEqual(ok, couch_jobs:finish(?TX, PJob2)),
+        ?assertEqual(ok, couch_jobs:unsubscribe(S1)),
+        ?assertEqual(ok, couch_jobs:unsubscribe(S2)),
+        ?assertMatch(timeout, couch_jobs:wait(Subs, finished, 50))
+    end).
+
+
+enqueue_inactive(#{t1 := T, j1 := J, t1_timeout := Timeout}) ->
+    {timeout, 10, ?_test(begin
+        couch_jobs_server:force_check_types(),
+
+        ok = couch_jobs:add(?TX, T, J, #{<<"y">> => 1}),
+        {ok, Job, _} = couch_jobs:accept(T),
+
+        {ok, SubId, running, #{<<"y">> := 1}} = couch_jobs:subscribe(T, J),
+        Wait = 3 * Timeout * 1000,
+        ?assertEqual({T, J, pending, #{<<"y">> => 1}},
+            couch_jobs:wait(SubId, pending, Wait)),
+        ?assertMatch(#{state := pending}, get_job(T, J)),
+
+        % After the job is re-enqueued, the old job processor can't update it
+        % anymore
+        ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+        ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
+    end)}.
+
+
+remove_running_job(#{t1 := T, j1 := J}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T, J, #{}),
+        {ok, Job, _} = couch_jobs:accept(T),
+        ?assertEqual(ok, couch_jobs:remove(?TX, T, J)),
+        ?assertEqual({error, not_found}, couch_jobs:remove(?TX, T, J)),
+        ?assertEqual({error, halt}, couch_jobs:update(?TX, Job)),
+        ?assertEqual({error, halt}, couch_jobs:finish(?TX, Job))
+    end).
+
+
+check_get_jobs(#{t1 := T1, j1 := J1, t2 := T2, j2 := J2}) ->
+    ?_test(begin
+        ok = couch_jobs:add(?TX, T1, J1, #{}),
+        ok = couch_jobs:add(?TX, T2, J2, #{}),
+        ?assertMatch([
+            {T2, J2, pending, #{}},
+            {T1, J1, pending, #{}}
+        ], lists:sort(couch_jobs_fdb:get_jobs())),
+        {ok, _, _} = couch_jobs:accept(T1),
+        ?assertMatch([
+            {T2, J2, pending, #{}},
+            {T1, J1, running, #{}}
+        ], lists:sort(couch_jobs_fdb:get_jobs()))
+    end).
+
+
+use_fabric_transaction_object(#{t1 := T1, j1 := J1, dbname := DbName}) ->
+    ?_test(begin
+        {ok, Db} = fabric2_db:create(DbName, []),
+        ?assertEqual(ok, couch_jobs:add(Db, T1, J1, #{})),
+        ?assertMatch(#{state := pending, data := #{}}, get_job(T1, J1)),
+        {ok, Job, _} = couch_jobs:accept(T1),
+        ?assertEqual(ok, fabric2_fdb:transactional(Db, fun(Db1) ->
+            {ok, #{}} = couch_jobs:get_job_data(Db1, T1, J1),
+            Doc1 = #doc{id = <<"1">>, body = {[]}},
+            {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc1),
+            Doc2 = #doc{id = <<"2">>, body = {[]}},
+            {ok, {_, _}} = fabric2_db:update_doc(Db1, Doc2),
+            couch_jobs:finish(Db1, Job, #{<<"d">> => 1})
+        end)),
+        ok = couch_jobs:remove(#{tx => undefined}, T1, J1),
+        ok = fabric2_db:delete(DbName, [])
+    end).