You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@couchdb.apache.org by da...@apache.org on 2019/06/05 19:03:15 UTC

[couchdb] branch prototype/fdb-layer updated (b348b95 -> 767c83d)

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a change to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git.


    omit b348b95  Add _all_dbs, _all_docs, and _changes
    omit 4e16c96  WIP: Bugs and tweaks
    omit 971bb98  Bug fixes
    omit 628795a  Fix compiler errors
    omit f6e64ec  WIP: Refactoring fdb layer code
    omit 6221795  Add more bits from couch_db_int.hrl
    omit 265111c  First draft of fabric2:open_revs/4
    omit 70a51f0  More notes on loose ends
    omit 7f41f52  Add doc read checks
    omit b43ae78  Implement doc reads
    omit 6a82373  Fix versionstamp handling
    omit 41d82c7  Allow for specifying the range when dumping a cluster
    omit 36a863c  Only run VDUs when they exist
    omit e75e68e  A simple test script for probing fdb progress
    omit b24a593  Implement update_docs
    omit 730d671  Implement db info blobs
    omit 4d106db  Implement get_db_info
    omit 1d0c2fa  Implement database deletion
    omit 729f22a  Implement db creation
    omit 392374f  Initial CouchDB directories on node boot
    omit 8df4483  First contact with FoundationDB
    omit 730d360  Add erlfdb dependency
    omit 211bf62  Ignore new dependency directories
     add b81ca74  fixes to elixir tests (#1939)
     add ca74d12  Update folsom to support newer erlang
     add c90c453  Merge pull request #1938 from cloudant/update-folsom
     add 13104aa  upgrade ken to 1.0.3
     add a2b88a3  Merge pull request #1941 from apache/upgrade-ken-1.0.3
     add f751dda  Update smoosh to 1.0.1
     add 931d77d  Merge pull request #1942 from cloudant/update-smoosh-1.0.1
     add 6b48a46  Fail make eunit upon eunit app suite failure
     add c39852e  Merge pull request #1951 from apache/fail-make-on-eunit-failure
     add 8d65907  Make PropEr an optional (test) dependency
     add 5765a54  Merge pull request #1955 from apache/optional-proper
     add 92c004b  Warn people to edit both Makefiles. (#1952)
     add d8eec70  Jenkins add attachment test (#1953)
     add 0c7111c  test: port multiple_rows.js to Elixir test suite (#1958)
     add a36ec91  Improve chttpd_socket_buffer_size_test
     add aee2fd9  Add stats in fabric for partition and normal views (#1963)
     add c151a32  Ignore weak ETag part
     add 8ef42f7  Merge pull request #1971 from apache/weak-etag-comparison
     add d98fd88  Added more info for a mango sort error (#1970)
     add f88d35b  Add security item to the RFC template (#1914)
     add 8efe9b2  test: port invalid_docids to Elixir test suite (#1968)
     add 0957f90  Update ioq to 2.1.1
     add 9f924e4  Merge pull request #1981 from cloudant/update/ioq-2.1.1
     add 941578d  Improve elixir test stability
     add cbc0dd4  Merge pull request #1991 from cloudant/improve-elixir-test-stability
     add 9d7a56e  Skip running PropEr's own unit tests
     add 56a9d1b  Use couch_ejson_size for calculation of doc's ejson size
     add 4f31cdc  Reuse pre-calculated external docs' size on compaction
     add 3227e61  Merge pull request #1983 from cloudant/fix-external-docs-size
     add d10b795  Uneven shard copy handling in mem3 and fabric
     add 13db67e  Implement initial shard splitting data copy
     add b7d5b5d  Update internal replicator to handle split shards
     add 9f9a6fc  Shard splitting job implementation
     add bcdd994  Resharding supervisor and job manager
     add a6db7d5  Implement resharding HTTP API
     add 5d19926  Port javascript attachment test suite into elixir (#1999)
     add 52189ee  Port copy doc tests into elixir test suite (#2000)
     add 17bdde4  Promote ibrowse 4.0.1-1
     add 9865932  Merge pull request #2001 from cloudant/promote-ibrowse-4.0.1-1
     add ae261e4  In the resharding API test pick the first live node
     add bb30e98  Fix upgrade clause for mem3_rpc:load_checkpoint/4,5
     add e9fc291  Change _security object for new dbs to admin-only by default
     add 5010b2f  Don't reset_index if read_header fails
     add 91b299d  Merge pull request #2003 from apache/dont-reset-index
     add b1f65df  Expose node name via /_node/_local, closes #2005 (#2006)
     add 3a54280  Allow restricting resharding parameters
     add 85617a7  Increase max number of resharding jobs
     add 3714ff5  Handle database re-creation edge case in internal replicator
     add 762d7a8  Use individual rexi kill messages by default
     add 3c958a9  Fix full ring assertion in fabric stream shard replacements
     add af26397  Jenkins: Add ARM64, drop trusty, improve Jenkinsfile (#2023)
     add a4145c2  Fix epoch mismatch errors (#2027)
     add eee1484  Ignore new subprojects
     add 04c37fc  fix: remove restart handlers for obsolete config settings
     add 57bf82e  feat: move fauxton docroot config to OS env-vars for consistency
     new 51a3960  Update build system for FoundationDB
     new d7b015c  Disable eunit test suite in fabric
     new ee2e4c8  Initial fabric2 implementation on FoundationDB
     new 1db8a43  Initial test suite for the fabric2 implementation
     new fc1ffeb  Update ddoc_cache to use fabric2
     new 767c83d  Start switching chttpd HTTP endpoints to fabric2

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (b348b95)
            \
             N -- N -- N   refs/heads/prototype/fdb-layer (767c83d)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 6 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/ISSUE_TEMPLATE/rfc.md                      |    8 +-
 .gitignore                                         |    4 +-
 FDB_NOTES.md                                       |   31 +-
 Jenkinsfile                                        |  796 +++++++--------
 Makefile                                           |   14 +-
 Makefile.win                                       |    6 +-
 configure                                          |    8 +
 dev/run                                            |   23 +-
 fdb-test.py                                        |   64 --
 rebar.config.script                                |   32 +-
 rel/files/couchdb.in                               |    1 +
 rel/files/eunit.config                             |    3 +-
 rel/overlay/etc/default.ini                        |    1 -
 src/chttpd/src/chttpd.erl                          |   56 +-
 src/chttpd/src/chttpd_auth_request.erl             |    2 +
 .../src/chttpd_changes.erl}                        |  331 +++---
 src/chttpd/src/chttpd_db.erl                       |  170 +++-
 src/chttpd/src/chttpd_external.erl                 |   35 +-
 src/chttpd/src/chttpd_misc.erl                     |   11 +-
 src/chttpd/src/chttpd_show.erl                     |    5 +-
 src/chttpd/test/chttpd_socket_buffer_size_test.erl |  152 ++-
 src/couch/priv/stats_descriptions.cfg              |   48 +
 src/couch/src/couch_att.erl                        |  661 +++++-------
 src/couch/src/couch_bt_engine.erl                  |   18 +-
 src/couch/src/couch_bt_engine_compactor.erl        |   12 +-
 src/couch/src/couch_db.erl                         |   37 +-
 src/couch/src/couch_db_engine.erl                  |   32 +
 src/couch/src/couch_db_split.erl                   |  495 +++++++++
 src/couch/src/couch_doc.erl                        |   11 +
 src/couch/src/couch_multidb_changes.erl            |    6 +-
 src/couch/src/couch_server.erl                     |   17 +-
 src/couch/test/couch_db_split_tests.erl            |  302 ++++++
 src/couch/test/couch_server_tests.erl              |   30 +
 src/couch_mrview/src/couch_mrview.erl              |   16 +-
 src/couch_mrview/src/couch_mrview_index.erl        |    4 +-
 .../src/cpse_test_copy_purge_infos.erl             |   82 ++
 src/couch_pse_tests/src/cpse_util.erl              |    1 +
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     |    2 +-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl |    2 +-
 src/fabric/include/fabric.hrl                      |    5 +-
 src/fabric/src/fabric.erl                          |  100 +-
 src/fabric/src/fabric2.hrl                         |   50 +
 src/fabric/src/fabric2_app.erl                     |    4 +
 src/fabric/src/fabric2_db.erl                      | 1027 +++++++++++++------
 src/fabric/src/fabric2_events.erl                  |   84 ++
 src/fabric/src/fabric2_fdb.erl                     | 1061 +++++++++++++++-----
 src/fabric/src/fabric2_server.erl                  |   17 +-
 src/fabric/src/fabric2_sup.erl                     |    4 +
 src/fabric/src/fabric2_txids.erl                   |  144 +++
 src/fabric/src/fabric2_util.erl                    |  144 ++-
 src/fabric/src/fabric_db_create.erl                |   60 +-
 src/fabric/src/fabric_db_doc_count.erl             |   51 +-
 src/fabric/src/fabric_db_info.erl                  |  137 ++-
 src/fabric/src/fabric_db_meta.erl                  |    4 +-
 src/fabric/src/fabric_db_partition_info.erl        |    2 +-
 src/fabric/src/fabric_db_update_listener.erl       |    4 +-
 src/fabric/src/fabric_design_doc_count.erl         |   51 +-
 src/fabric/src/fabric_dict.erl                     |    3 +
 src/fabric/src/fabric_doc_open.erl                 |  821 +++++++--------
 src/fabric/src/fabric_doc_open_revs.erl            |  932 ++++++++---------
 src/fabric/src/fabric_doc_purge.erl                |  692 ++++++-------
 src/fabric/src/fabric_doc_update.erl               |  282 +++---
 src/fabric/src/fabric_group_info.erl               |   73 +-
 src/fabric/src/fabric_ring.erl                     |  519 ++++++++++
 src/fabric/src/fabric_rpc.erl                      |   38 +-
 src/fabric/src/fabric_streams.erl                  |  284 +++---
 src/fabric/src/fabric_util.erl                     |   60 +-
 src/fabric/src/fabric_view.erl                     |  275 +++--
 src/fabric/src/fabric_view_all_docs.erl            |    4 +-
 src/fabric/src/fabric_view_changes.erl             |  454 +++++++--
 src/fabric/src/fabric_view_map.erl                 |    7 +-
 src/fabric/src/fabric_view_reduce.erl              |    7 +-
 src/fabric/test/fabric2_changes_fold_tests.erl     |  114 +++
 src/fabric/test/fabric2_db_crud_tests.erl          |   88 ++
 src/fabric/test/fabric2_db_misc_tests.erl          |  113 +++
 src/fabric/test/fabric2_db_security_tests.erl      |  162 +++
 src/fabric/test/fabric2_doc_count_tests.erl        |  251 +++++
 src/fabric/test/fabric2_doc_crud_tests.erl         |  770 ++++++++++++++
 src/fabric/test/fabric2_doc_fold_tests.erl         |  209 ++++
 src/fabric/test/fabric2_fdb_tx_retry_tests.erl     |  178 ++++
 .../test/fabric2_trace_db_create_tests.erl}        |   46 +-
 .../test/fabric2_trace_db_delete_tests.erl}        |   49 +-
 src/fabric/test/fabric2_trace_db_open_tests.erl    |   50 +
 src/fabric/test/fabric2_trace_doc_create_tests.erl |   86 ++
 src/fabric/test/fabric_rpc_purge_tests.erl         |  307 ------
 src/mango/src/mango_error.erl                      |   16 +-
 src/mango/src/mango_httpd.erl                      |    9 +-
 src/mango/src/mango_idx.erl                        |   27 +-
 src/mem3/README.md                                 |    6 +-
 src/mem3/README_reshard.md                         |   93 ++
 src/mem3/include/mem3.hrl                          |    5 +
 src/mem3/src/mem3.app.src                          |    1 +
 src/mem3/src/mem3.erl                              |   23 +-
 src/mem3/src/mem3_httpd.erl                        |    2 +-
 src/mem3/src/mem3_httpd_handlers.erl               |    1 +
 src/mem3/src/mem3_rep.erl                          |  678 +++++++++----
 src/mem3/src/mem3_reshard.erl                      |  918 +++++++++++++++++
 src/mem3/src/mem3_reshard.hrl                      |   74 ++
 src/mem3/src/mem3_reshard_api.erl                  |  217 ++++
 src/mem3/src/mem3_reshard_dbdoc.erl                |  275 +++++
 src/mem3/src/mem3_reshard_httpd.erl                |  317 ++++++
 src/mem3/src/mem3_reshard_index.erl                |  164 +++
 src/mem3/src/mem3_reshard_job.erl                  |  722 +++++++++++++
 .../src/mem3_reshard_job_sup.erl}                  |   47 +-
 src/mem3/src/mem3_reshard_store.erl                |  288 ++++++
 .../src/mem3_reshard_sup.erl}                      |   35 +-
 src/mem3/src/mem3_reshard_validate.erl             |  126 +++
 src/mem3/src/mem3_rpc.erl                          |   53 +-
 src/mem3/src/mem3_shards.erl                       |   45 +-
 src/mem3/src/mem3_sup.erl                          |    6 +-
 src/mem3/src/mem3_sync.erl                         |   11 +-
 src/mem3/src/mem3_sync_event_listener.erl          |    2 +-
 src/mem3/src/mem3_util.erl                         |  282 +++++-
 src/mem3/test/mem3_rep_test.erl                    |  320 ++++++
 src/mem3/test/mem3_reshard_api_test.erl            |  846 ++++++++++++++++
 src/mem3/test/mem3_reshard_changes_feed_test.erl   |  388 +++++++
 src/mem3/test/mem3_reshard_test.erl                |  804 +++++++++++++++
 src/mem3/test/mem3_ring_prop_tests.erl             |  144 +++
 src/rexi/src/rexi.erl                              |   26 +-
 src/rexi/src/rexi_server.erl                       |   28 +-
 test/elixir/README.md                              |   28 +-
 test/elixir/lib/couch/db_test.ex                   |  146 ++-
 test/elixir/run-only                               |    3 +
 test/elixir/test/attachment_names_test.exs         |   97 ++
 test/elixir/test/attachment_paths_test.exs         |  177 ++++
 test/elixir/test/attachment_ranges_test.exs        |  143 +++
 test/elixir/test/attachment_views_test.exs         |  142 +++
 test/elixir/test/attachments_multipart_test.exs    |  409 ++++++++
 test/elixir/test/attachments_test.exs              |  482 +++++++++
 test/elixir/test/basics_test.exs                   |    2 +-
 test/elixir/test/copy_doc_test.exs                 |   71 ++
 test/elixir/test/invalid_docids_test.exs           |   85 ++
 test/elixir/test/multiple_rows_test.exs            |  136 +++
 test/elixir/test/partition_all_docs_test.exs       |    1 +
 test/elixir/test/partition_ddoc_test.exs           |   26 +-
 test/elixir/test/partition_mango_test.exs          |   27 +
 test/elixir/test/partition_size_test.exs           |    7 +-
 test/elixir/test/reshard_all_docs_test.exs         |   79 ++
 test/elixir/test/reshard_basic_test.exs            |  174 ++++
 test/elixir/test/reshard_changes_feed.exs          |   81 ++
 test/elixir/test/reshard_helpers.exs               |  114 +++
 test/elixir/test/test_helper.exs                   |    1 +
 test/javascript/tests/etags_head.js                |    4 +
 143 files changed, 18244 insertions(+), 4542 deletions(-)
 delete mode 100755 fdb-test.py
 copy src/{couch/src/couch_changes.erl => chttpd/src/chttpd_changes.erl} (78%)
 create mode 100644 src/couch/src/couch_db_split.erl
 create mode 100644 src/couch/test/couch_db_split_tests.erl
 create mode 100644 src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
 create mode 100644 src/fabric/src/fabric2_events.erl
 create mode 100644 src/fabric/src/fabric2_txids.erl
 create mode 100644 src/fabric/src/fabric_ring.erl
 create mode 100644 src/fabric/test/fabric2_changes_fold_tests.erl
 create mode 100644 src/fabric/test/fabric2_db_crud_tests.erl
 create mode 100644 src/fabric/test/fabric2_db_misc_tests.erl
 create mode 100644 src/fabric/test/fabric2_db_security_tests.erl
 create mode 100644 src/fabric/test/fabric2_doc_count_tests.erl
 create mode 100644 src/fabric/test/fabric2_doc_crud_tests.erl
 create mode 100644 src/fabric/test/fabric2_doc_fold_tests.erl
 create mode 100644 src/fabric/test/fabric2_fdb_tx_retry_tests.erl
 copy src/{mem3/test/mem3_sync_security_test.erl => fabric/test/fabric2_trace_db_create_tests.erl} (56%)
 copy src/{mem3/test/mem3_sync_security_test.erl => fabric/test/fabric2_trace_db_delete_tests.erl} (50%)
 create mode 100644 src/fabric/test/fabric2_trace_db_open_tests.erl
 create mode 100644 src/fabric/test/fabric2_trace_doc_create_tests.erl
 delete mode 100644 src/fabric/test/fabric_rpc_purge_tests.erl
 create mode 100644 src/mem3/README_reshard.md
 create mode 100644 src/mem3/src/mem3_reshard.erl
 create mode 100644 src/mem3/src/mem3_reshard.hrl
 create mode 100644 src/mem3/src/mem3_reshard_api.erl
 create mode 100644 src/mem3/src/mem3_reshard_dbdoc.erl
 create mode 100644 src/mem3/src/mem3_reshard_httpd.erl
 create mode 100644 src/mem3/src/mem3_reshard_index.erl
 create mode 100644 src/mem3/src/mem3_reshard_job.erl
 copy src/{ddoc_cache/src/ddoc_cache_sup.erl => mem3/src/mem3_reshard_job_sup.erl} (55%)
 create mode 100644 src/mem3/src/mem3_reshard_store.erl
 copy src/{ddoc_cache/src/ddoc_cache_sup.erl => mem3/src/mem3_reshard_sup.erl} (60%)
 create mode 100644 src/mem3/src/mem3_reshard_validate.erl
 create mode 100644 src/mem3/test/mem3_rep_test.erl
 create mode 100644 src/mem3/test/mem3_reshard_api_test.erl
 create mode 100644 src/mem3/test/mem3_reshard_changes_feed_test.erl
 create mode 100644 src/mem3/test/mem3_reshard_test.erl
 create mode 100644 src/mem3/test/mem3_ring_prop_tests.erl
 create mode 100755 test/elixir/run-only
 create mode 100644 test/elixir/test/attachment_names_test.exs
 create mode 100644 test/elixir/test/attachment_paths_test.exs
 create mode 100644 test/elixir/test/attachment_ranges_test.exs
 create mode 100644 test/elixir/test/attachment_views_test.exs
 create mode 100644 test/elixir/test/attachments_multipart_test.exs
 create mode 100644 test/elixir/test/attachments_test.exs
 create mode 100644 test/elixir/test/copy_doc_test.exs
 create mode 100644 test/elixir/test/invalid_docids_test.exs
 create mode 100644 test/elixir/test/multiple_rows_test.exs
 create mode 100644 test/elixir/test/reshard_all_docs_test.exs
 create mode 100644 test/elixir/test/reshard_basic_test.exs
 create mode 100644 test/elixir/test/reshard_changes_feed.exs
 create mode 100644 test/elixir/test/reshard_helpers.exs


[couchdb] 01/06: Update build system for FoundationDB

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 51a396017a224f629154e579f591a3fe86bc7fc9
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:04:56 2019 -0500

    Update build system for FoundationDB
---
 .gitignore             | 3 +++
 Makefile               | 6 +++++-
 dev/run                | 9 ++++++++-
 rebar.config.script    | 7 ++++++-
 rel/files/eunit.config | 3 ++-
 test/elixir/run-only   | 3 +++
 6 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/.gitignore b/.gitignore
index 36bc130..bbb12a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 .venv
 .DS_Store
 .rebar/
+.erlfdb/
 .eunit/
 log
 apache-couchdb-*/
@@ -44,6 +45,7 @@ src/couch/priv/couch_js/util.d
 src/couch/priv/icu_driver/couch_icu_driver.d
 src/mango/src/mango_cursor_text.nocompile
 src/docs/
+src/erlfdb/
 src/ets_lru/
 src/fauxton/
 src/folsom/
@@ -51,6 +53,7 @@ src/hqueue/
 src/hyper/
 src/ibrowse/
 src/ioq/
+src/hqueue/
 src/jiffy/
 src/ken/
 src/khash/
diff --git a/Makefile b/Makefile
index 0acf828..0006d2a 100644
--- a/Makefile
+++ b/Makefile
@@ -213,7 +213,11 @@ python-black-update: .venv/bin/black
 
 .PHONY: elixir
 elixir: elixir-init elixir-check-formatted elixir-credo devclean
-	@dev/run -a adm:pass --no-eval 'test/elixir/run --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+	@dev/run --erlang-config=rel/files/eunit.config -n 1 -a adm:pass --no-eval 'test/elixir/run --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-only
+elixir-only: devclean
+	@dev/run --erlang-config=rel/files/eunit.config -n 1 -a adm:pass --no-eval 'test/elixir/run-only --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
 
 .PHONY: elixir-init
 elixir-init:
diff --git a/dev/run b/dev/run
index 60e7d5c..72f5a47 100755
--- a/dev/run
+++ b/dev/run
@@ -181,6 +181,12 @@ def setup_argparse():
         help="Optional key=val config overrides. Can be repeated",
     )
     parser.add_option(
+        "--erlang-config",
+        dest="erlang_config",
+        default="rel/files/sys.config",
+        help="Specify an alternative Erlang application configuration"
+    )
+    parser.add_option(
         "--degrade-cluster",
         dest="degrade_cluster",
         type=int,
@@ -222,6 +228,7 @@ def setup_context(opts, args):
         "haproxy": opts.haproxy,
         "haproxy_port": opts.haproxy_port,
         "config_overrides": opts.config_overrides,
+        "erlang_config": opts.erlang_config,
         "no_eval": opts.no_eval,
         "reset_logs": True,
         "procs": [],
@@ -559,7 +566,7 @@ def boot_node(ctx, node):
         "-args_file",
         os.path.join(node_etcdir, "vm.args"),
         "-config",
-        os.path.join(reldir, "files", "sys"),
+        os.path.join(ctx["rootdir"], ctx["erlang_config"]),
         "-couch_ini",
         os.path.join(node_etcdir, "default.ini"),
         os.path.join(node_etcdir, "local.ini"),
diff --git a/rebar.config.script b/rebar.config.script
index 254c674..3f3ef46 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -114,7 +114,10 @@ DepDescs = [
 {ibrowse,          "ibrowse",          {tag, "CouchDB-4.0.1-1"}},
 {jiffy,            "jiffy",            {tag, "CouchDB-0.14.11-2"}},
 {mochiweb,         "mochiweb",         {tag, "v2.19.0"}},
-{meck,             "meck",             {tag, "0.8.8"}}
+{meck,             "meck",             {tag, "0.8.8"}},
+
+%% TMP - Until this is moved to a proper Apache repo
+{erlfdb,           "erlfdb",           {branch, "master"}}
 ],
 
 WithProper = lists:keyfind(with_proper, 1, CouchConfig) == {with_proper, true},
@@ -129,6 +132,8 @@ end,
 BaseUrl = "https://github.com/apache/",
 
 MakeDep = fun
+    ({erlfdb, _, Version}) ->
+        {erlfdb, ".*", {git, "https://github.com/cloudant-labs/couchdb-erlfdb", {branch, "master"}}};
     ({AppName, {url, Url}, Version}) ->
         {AppName, ".*", {git, Url, Version}};
     ({AppName, {url, Url}, Version, Options}) ->
diff --git a/rel/files/eunit.config b/rel/files/eunit.config
index 3c7457d..5e96fae 100644
--- a/rel/files/eunit.config
+++ b/rel/files/eunit.config
@@ -12,5 +12,6 @@
 
 [
     {kernel, [{error_logger, silent}]},
-    {sasl, [{sasl_error_logger, false}]}
+    {sasl, [{sasl_error_logger, false}]},
+    {fabric, [{eunit_run, true}]}
 ].
diff --git a/test/elixir/run-only b/test/elixir/run-only
new file mode 100755
index 0000000..7c2a4ae
--- /dev/null
+++ b/test/elixir/run-only
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+cd "$(dirname "$0")"
+mix test --trace "$@"


[couchdb] 06/06: Start switching chttpd HTTP endpoints to fabric2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 767c83dd614a5f3c850e2434bb4d4c1a8eef0dde
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:43:20 2019 -0500

    Start switching chttpd HTTP endpoints to fabric2
    
    This is not an exhaustive port of the entire chttpd API. However, this
    is enough to support basic CRUD operations far enough that replication
    works.
---
 src/chttpd/src/chttpd.erl              |  11 +-
 src/chttpd/src/chttpd_auth_request.erl |   7 +-
 src/chttpd/src/chttpd_changes.erl      | 973 +++++++++++++++++++++++++++++++++
 src/chttpd/src/chttpd_db.erl           | 328 +++++------
 src/chttpd/src/chttpd_external.erl     |  35 +-
 src/chttpd/src/chttpd_misc.erl         |  62 +--
 src/chttpd/src/chttpd_show.erl         |   5 +-
 src/couch_mrview/src/couch_mrview.erl  |  16 +-
 test/elixir/test/basics_test.exs       |   2 +-
 9 files changed, 1183 insertions(+), 256 deletions(-)

diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 1e1d638..4d32c03 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -25,7 +25,7 @@
     error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
     doc_etag/1, make_etag/1, etag_respond/3, etag_match/2,
     partition/1, serve_file/3, serve_file/4,
-    server_header/0, start_chunked_response/3,send_chunk/2,
+    server_header/0, start_chunked_response/3,send_chunk/2,last_chunk/1,
     start_response_length/4, send/2, start_json_response/2,
     start_json_response/3, end_json_response/1, send_response/4,
     send_response_no_cors/4,
@@ -743,7 +743,14 @@ start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
     {ok, Resp}.
 
 send_chunk(Resp, Data) ->
-    Resp:write_chunk(Data),
+    case iolist_size(Data) of
+        0 -> ok; % do nothing
+        _ -> Resp:write_chunk(Data)
+    end,
+    {ok, Resp}.
+
+last_chunk(Resp) ->
+    Resp:write_chunk([]),
     {ok, Resp}.
 
 send_response(Req, Code, Headers0, Body) ->
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 96dbf98..7210905 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -103,7 +103,8 @@ server_authorization_check(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
     require_admin(Req).
 
 db_authorization_check(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
-    {_} = fabric:get_security(DbName, [{user_ctx, Ctx}]),
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
+    fabric2_db:check_is_member(Db),
     Req.
 
 require_admin(Req) ->
@@ -111,8 +112,8 @@ require_admin(Req) ->
     Req.
 
 require_db_admin(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
-    Sec = fabric:get_security(DbName, [{user_ctx, Ctx}]),
-
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
+    Sec = fabric2_db:get_security(Db),
     case is_db_admin(Ctx,Sec) of
         true -> Req;
         false ->  throw({unauthorized, <<"You are not a server or db admin.">>})
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
new file mode 100644
index 0000000..30caab2
--- /dev/null
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -0,0 +1,973 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_changes).
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-export([
+    handle_db_changes/3,
+    handle_changes/4,
+    get_changes_timeout/2,
+    wait_updated/3,
+    get_rest_updated/1,
+    configure_filter/4,
+    filter/3,
+    handle_db_event/3,
+    handle_view_event/3,
+    view_filter/3,
+    send_changes_doc_ids/6,
+    send_changes_design_docs/6
+]).
+
+-export([changes_enumerator/2]).
+
+%% export so we can use fully qualified call to facilitate hot-code upgrade
+-export([
+    keep_sending_changes/3
+]).
+
+-record(changes_acc, {
+    db,
+    view_name,
+    ddoc_name,
+    view,
+    seq,
+    prepend,
+    filter,
+    callback,
+    user_acc,
+    resp_type,
+    limit,
+    include_docs,
+    doc_options,
+    conflicts,
+    timeout,
+    timeout_fun,
+    aggregation_kvs,
+    aggregation_results
+}).
+
+handle_db_changes(Args, Req, Db) ->
+    handle_changes(Args, Req, Db, db).
+
+handle_changes(Args1, Req, Db, Type) ->
+    ReqPid = chttpd:header_value(Req, "XKCD", "<unknown>"),
+    #changes_args{
+        style = Style,
+        filter = FilterName,
+        feed = Feed,
+        dir = Dir,
+        since = Since
+    } = Args1,
+    couch_log:error("XKCD: STARTING CHANGES FEED ~p for ~s : ~p", [self(), ReqPid, Since]),
+    Filter = configure_filter(FilterName, Style, Req, Db),
+    Args = Args1#changes_args{filter_fun = Filter},
+    % The type of changes feed depends on the supplied filter. If the query is
+    % for an optimized view-filtered db changes, we need to use the view
+    % sequence tree.
+    {UseViewChanges, DDocName, ViewName} = case {Type, Filter} of
+        {{view, DDocName0, ViewName0}, _} ->
+            {true, DDocName0, ViewName0};
+        {_, {fast_view, _, DDoc, ViewName0}} ->
+            {true, DDoc#doc.id, ViewName0};
+        _ ->
+            {false, undefined, undefined}
+    end,
+    DbName = fabric2_db:name(Db),
+    {StartListenerFun, View} = if UseViewChanges ->
+        {ok, {_, View0, _}, _, _} = couch_mrview_util:get_view(
+                DbName, DDocName, ViewName, #mrargs{}),
+        case View0#mrview.seq_btree of
+            #btree{} ->
+                ok;
+            _ ->
+                throw({bad_request, "view changes not enabled"})
+        end,
+        SNFun = fun() ->
+            couch_event:link_listener(
+                 ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, DbName}]
+            )
+        end,
+        {SNFun, View0};
+    true ->
+        SNFun = fun() ->
+            fabric2_events:link_listener(
+                    ?MODULE, handle_db_event, self(), [{dbname, DbName}]
+                )
+        end,
+        {SNFun, undefined}
+    end,
+    Start = fun() ->
+        StartSeq = case Dir of
+        rev ->
+            fabric2_fdb:get_update_seq(Db);
+        fwd ->
+            Since
+        end,
+        View2 = if UseViewChanges ->
+            {ok, {_, View1, _}, _, _} = couch_mrview_util:get_view(
+                    DbName, DDocName, ViewName, #mrargs{}),
+            View1;
+        true ->
+            undefined
+        end,
+        {Db, View2, StartSeq}
+    end,
+    % begin timer to deal with heartbeat when filter function fails
+    case Args#changes_args.heartbeat of
+    undefined ->
+        erlang:erase(last_changes_heartbeat);
+    Val when is_integer(Val); Val =:= true ->
+        put(last_changes_heartbeat, os:timestamp())
+    end,
+
+    case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
+    true ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            {ok, Listener} = StartListenerFun(),
+
+            {Db, View, StartSeq} = Start(),
+            UserAcc2 = start_sending_changes(Callback, UserAcc),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
+                             <<"">>, Timeout, TimeoutFun, DDocName, ViewName,
+                             View),
+            try
+                keep_sending_changes(
+                    Args#changes_args{dir=fwd},
+                    Acc0,
+                    true)
+            after
+                fabric2_events:stop_listener(Listener),
+                get_rest_updated(ok) % clean out any remaining update messages
+            end
+        end;
+    false ->
+        fun(CallbackAcc) ->
+            {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+            UserAcc2 = start_sending_changes(Callback, UserAcc),
+            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+            {Db, View, StartSeq} = Start(),
+            Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
+                             UserAcc2, Db, StartSeq, <<>>, Timeout, TimeoutFun,
+                             DDocName, ViewName, View),
+            {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+                send_changes(
+                    Acc0,
+                    Dir,
+                    true),
+            end_sending_changes(Callback, UserAcc3, LastSeq)
+        end
+    end.
+
+
+handle_db_event(_DbName, updated, Parent) ->
+    Parent ! updated,
+    {ok, Parent};
+handle_db_event(_DbName, deleted, Parent) ->
+    Parent ! deleted,
+    {ok, Parent};
+handle_db_event(_DbName, _Event, Parent) ->
+    {ok, Parent}.
+
+
+handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
+    case Msg of
+        {index_commit, DDocId} ->
+            Parent ! updated;
+        {index_delete, DDocId} ->
+            Parent ! deleted;
+        _ ->
+            ok
+    end,
+    {ok, {Parent, DDocId}}.
+
+get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 2) ->
+    Pair;
+get_callback_acc(Callback) when is_function(Callback, 1) ->
+    {fun(Ev, _) -> Callback(Ev) end, ok}.
+
+
+configure_filter("_doc_ids", Style, Req, _Db) ->
+    {doc_ids, Style, get_doc_ids(Req)};
+configure_filter("_selector", Style, Req, _Db) ->
+    {selector, Style,  get_selector_and_fields(Req)};
+configure_filter("_design", Style, _Req, _Db) ->
+    {design_docs, Style};
+configure_filter("_view", Style, Req, Db) ->
+    ViewName = get_view_qs(Req),
+    if ViewName /= "" -> ok; true ->
+        throw({bad_request, "`view` filter parameter is not provided."})
+    end,
+    ViewNameParts = string:tokens(ViewName, "/"),
+    case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
+        [DName, VName] ->
+            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+            check_member_exists(DDoc, [<<"views">>, VName]),
+            FilterType = try
+                true = couch_util:get_nested_json_value(
+                        DDoc#doc.body,
+                        [<<"options">>, <<"seq_indexed">>]
+                ),
+                fast_view
+            catch _:_ ->
+                view
+            end,
+            case fabric2_db:is_clustered(Db) of
+                true ->
+                    DIR = fabric_util:doc_id_and_rev(DDoc),
+                    {fetch, FilterType, Style, DIR, VName};
+                false ->
+                    {FilterType, Style, DDoc, VName}
+            end;
+        [] ->
+            Msg = "`view` must be of the form `designname/viewname`",
+            throw({bad_request, Msg})
+    end;
+configure_filter([$_ | _], _Style, _Req, _Db) ->
+    throw({bad_request, "unknown builtin filter name"});
+configure_filter("", main_only, _Req, _Db) ->
+    {default, main_only};
+configure_filter("", all_docs, _Req, _Db) ->
+    {default, all_docs};
+configure_filter(FilterName, Style, Req, Db) ->
+    FilterNameParts = string:tokens(FilterName, "/"),
+    case [?l2b(couch_httpd:unquote(Part)) || Part <- FilterNameParts] of
+        [DName, FName] ->
+            {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
+            check_member_exists(DDoc, [<<"filters">>, FName]),
+            {custom, Style, Req, DDoc, FName};
+        [] ->
+            {default, Style};
+        _Else ->
+            Msg = "`filter` must be of the form `designname/filtername`",
+            throw({bad_request, Msg})
+    end.
+
+
+filter(Db, Change, {default, Style}) ->
+    apply_style(Db, Change, Style);
+filter(Db, Change, {doc_ids, Style, DocIds}) ->
+    case lists:member(maps:get(id, Change), DocIds) of
+        true ->
+            apply_style(Db, Change, Style);
+        false ->
+            []
+    end;
+filter(Db, Change, {selector, Style, {Selector, _Fields}}) ->
+    Docs = open_revs(Db, Change, Style),
+    Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
+        || Doc <- Docs],
+    filter_revs(Passes, Docs);
+filter(Db, Change, {design_docs, Style}) ->
+    case maps:get(id, Change) of
+        <<"_design", _/binary>> ->
+            apply_style(Db, Change, Style);
+        _ ->
+            []
+    end;
+filter(Db, Change, {FilterType, Style, DDoc, VName})
+        when FilterType == view; FilterType == fast_view ->
+    Docs = open_revs(Db, Change, Style),
+    {ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
+    filter_revs(Passes, Docs);
+filter(Db, Change, {custom, Style, Req0, DDoc, FName}) ->
+    Req = case Req0 of
+        {json_req, _} -> Req0;
+        #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
+    end,
+    Docs = open_revs(Db, Change, Style),
+    {ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
+    filter_revs(Passes, Docs);
+filter(A, B, C) ->
+    erlang:error({filter_error, A, B, C}).
+
+fast_view_filter(Db, {{Seq, _}, {ID, _, _}}, {fast_view, Style, _, _}) ->
+    case fabric2_db:get_doc_info(Db, ID) of
+        {ok, #doc_info{high_seq=Seq}=DocInfo} ->
+            Docs = open_revs(Db, DocInfo, Style),
+            Changes = lists:map(fun(#doc{revs={RevPos, [RevId | _]}}) ->
+                RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+                {[{<<"rev">>, RevStr}]}
+            end, Docs),
+            {DocInfo, Changes};
+        {ok, #doc_info{high_seq=HighSeq}} when Seq > HighSeq ->
+            % If the view seq tree is out of date (or if the view seq tree
+            % was opened before the db) seqs may come by from the seq tree
+% which correspond to a stale (non-current) revision of a document.
+            % The proper thing to do is to not send this old revision, but wait
+            % until we reopen the up-to-date view seq tree and continue the
+            % fold.
+            % I left the Seq > HighSeq guard in so if (for some godforsaken
+            % reason) the seq in the view is more current than the database,
+            % we'll throw an error.
+            {undefined, []};
+        {error, not_found} ->
+            {undefined, []}
+    end.
+
+
+
+view_filter(Db, KV, {default, Style}) ->
+    apply_view_style(Db, KV, Style).
+
+
+get_view_qs({json_req, {Props}}) ->
+    {Query} = couch_util:get_value(<<"query">>, Props, {[]}),
+    binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
+get_view_qs(Req) ->
+    couch_httpd:qs_value(Req, "view", "").
+
+get_doc_ids({json_req, {Props}}) ->
+    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='POST'}=Req) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {Props} = couch_httpd:json_body_obj(Req),
+    check_docids(couch_util:get_value(<<"doc_ids">>, Props));
+get_doc_ids(#httpd{method='GET'}=Req) ->
+    DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
+    check_docids(DocIds);
+get_doc_ids(_) ->
+    throw({bad_request, no_doc_ids_provided}).
+
+
+get_selector_and_fields({json_req, {Props}}) ->
+    Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
+    Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
+    {Selector, Fields};
+get_selector_and_fields(#httpd{method='POST'}=Req) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    get_selector_and_fields({json_req,  couch_httpd:json_body_obj(Req)});
+get_selector_and_fields(_) ->
+    throw({bad_request, "Selector must be specified in POST payload"}).
+
+
+check_docids(DocIds) when is_list(DocIds) ->
+    lists:foreach(fun
+        (DocId) when not is_binary(DocId) ->
+            Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+            throw({bad_request, Msg});
+        (_) -> ok
+    end, DocIds),
+    DocIds;
+check_docids(_) ->
+    Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+    throw({bad_request, Msg}).
+
+
+check_selector(Selector={_}) ->
+    try
+        mango_selector:normalize(Selector)
+    catch
+        {mango_error, Mod, Reason0} ->
+            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+            throw({bad_request, Reason})
+    end;
+check_selector(_Selector) ->
+    throw({bad_request, "Selector error: expected a JSON object"}).
+
+
+check_fields(nil) ->
+    nil;
+check_fields(Fields) when is_list(Fields) ->
+    try
+        {ok, Fields1} = mango_fields:new(Fields),
+        Fields1
+    catch
+        {mango_error, Mod, Reason0} ->
+            {_StatusCode, _Error, Reason} = mango_error:info(Mod, Reason0),
+            throw({bad_request, Reason})
+    end;
+check_fields(_Fields) ->
+    throw({bad_request, "Selector error: fields must be JSON array"}).
+
+
+open_ddoc(Db, DDocId) ->
+    case ddoc_cache:open_doc(Db, DDocId) of
+        {ok, _} = Resp -> Resp;
+        Else -> throw(Else)
+    end.
+
+
+check_member_exists(#doc{body={Props}}, Path) ->
+    couch_util:get_nested_json_value({Props}, Path).
+
+
+apply_style(_Db, Change, main_only) ->
+    #{rev_id := RevId} = Change,
+    [{[{<<"rev">>, couch_doc:rev_to_str(RevId)}]}];
+apply_style(Db, Change, all_docs) ->
+    % We have to fetch all revs for this row
+    #{id := DocId} = Change,
+    {ok, Resps} = fabric2_db:open_doc_revs(Db, DocId, all, [deleted]),
+    lists:flatmap(fun(Resp) ->
+        case Resp of
+            {ok, #doc{revs = {Pos, [Rev | _]}}} ->
+                [{[{<<"rev">>, couch_doc:rev_to_str({Pos, Rev})}]}];
+            _ ->
+                []
+        end
+    end, Resps);
+apply_style(A, B, C) ->
+    erlang:error({changes_apply_style, A, B, C}).
+
+apply_view_style(_Db, {{_Seq, _Key}, {_ID, _Value, Rev}}, main_only) ->
+    [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
+apply_view_style(Db, {{_Seq, _Key}, {ID, _Value, _Rev}}, all_docs) ->
+    case couch_db:get_doc_info(Db, ID) of
+        {ok, DocInfo} ->
+            apply_style(Db, DocInfo, all_docs);
+        {error, not_found} ->
+            []
+    end.
+
+
+open_revs(Db, Change, Style) ->
+    #{id := DocId} = Change,
+    Options = [deleted, conflicts],
+    try
+        case Style of
+            main_only ->
+                {ok, Doc} = fabric2_db:open_doc(Db, DocId, Options),
+                [Doc];
+            all_docs ->
+                {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, Options),
+                [Doc || {ok, Doc} <- Docs]
+        end
+    catch _:_ ->
+        % We didn't log this before, should we now?
+        []
+    end.
+
+
+filter_revs(Passes, Docs) ->
+    lists:flatmap(fun
+        ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
+            RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+            Change = {[{<<"rev">>, RevStr}]},
+            [Change];
+        (_) ->
+            []
+    end, lists:zip(Passes, Docs)).
+
+
+get_changes_timeout(Args, Callback) ->
+    #changes_args{
+        heartbeat = Heartbeat,
+        timeout = Timeout,
+        feed = ResponseType
+    } = Args,
+    DefaultTimeout = list_to_integer(
+        config:get("httpd", "changes_timeout", "60000")
+    ),
+    case Heartbeat of
+    undefined ->
+        case Timeout of
+        undefined ->
+            {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+        infinity ->
+            {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+        _ ->
+            {lists:min([DefaultTimeout, Timeout]),
+                fun(UserAcc) -> {stop, UserAcc} end}
+        end;
+    true ->
+        {DefaultTimeout,
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
+    _ ->
+        {lists:min([DefaultTimeout, Heartbeat]),
+            fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+    end.
+
+start_sending_changes(Callback, UserAcc) ->
+    {_, NewUserAcc} = Callback(start, UserAcc),
+    NewUserAcc.
+
+build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun, DDocName, ViewName, View) ->
+    #changes_args{
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        limit = Limit,
+        feed = ResponseType,
+        filter_fun = Filter
+    } = Args,
+    #changes_acc{
+        db = Db,
+        seq = StartSeq,
+        prepend = Prepend,
+        filter = Filter,
+        callback = Callback,
+        user_acc = UserAcc,
+        resp_type = ResponseType,
+        limit = Limit,
+        include_docs = IncludeDocs,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        timeout = Timeout,
+        timeout_fun = TimeoutFun,
+        ddoc_name = DDocName,
+        view_name = ViewName,
+        view = View,
+        aggregation_results=[],
+        aggregation_kvs=[]
+    }.
+
+send_changes(Acc, Dir, FirstRound) ->
+    #changes_acc{
+        db = Db,
+        seq = StartSeq,
+        filter = Filter,
+        view = View
+    } = Acc,
+    DbEnumFun = fun changes_enumerator/2,
+    case can_optimize(FirstRound, Filter) of
+        {true, Fun} ->
+            Fun(Db, StartSeq, Dir, DbEnumFun, Acc, Filter);
+        _ ->
+            case {View, Filter}  of
+                {#mrview{}, {fast_view, _, _, _}} ->
+                    couch_mrview:view_changes_since(View, StartSeq, DbEnumFun, [{dir, Dir}], Acc);
+                {undefined, _} ->
+                    Opts = [{dir, Dir}],
+                    fabric2_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts);
+                {#mrview{}, _} ->
+                    ViewEnumFun = fun view_changes_enumerator/2,
+                    {Go, Acc0} = couch_mrview:view_changes_since(View, StartSeq, ViewEnumFun, [{dir, Dir}], Acc),
+                    case Acc0 of
+                        #changes_acc{aggregation_results=[]} ->
+                            {Go, Acc0};
+                        _ ->
+                            #changes_acc{
+                                aggregation_results = AggResults,
+                                aggregation_kvs = AggKVs,
+                                user_acc = UserAcc,
+                                callback = Callback,
+                                resp_type = ResponseType,
+                                prepend = Prepend
+                            } = Acc0,
+                            ChangesRow = view_changes_row(AggResults, AggKVs, Acc0),
+                            UserAcc0 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+                            reset_heartbeat(),
+                            {Go, Acc0#changes_acc{user_acc=UserAcc0}}
+                    end
+            end
+    end.
+
+
+can_optimize(true, {doc_ids, _Style, DocIds}) ->
+    MaxDocIds = config:get_integer("couchdb",
+        "changes_doc_ids_optimization_threshold", 100),
+    if length(DocIds) =< MaxDocIds ->
+        {true, fun send_changes_doc_ids/6};
+    true ->
+        false
+    end;
+can_optimize(true, {design_docs, _Style}) ->
+    {true, fun send_changes_design_docs/6};
+can_optimize(_, _) ->
+    false.
+
+
+send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
+    Results = fabric2_db:get_full_doc_infos(Db, DocIds),
+    FullInfos = lists:foldl(fun
+        (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
+        (not_found, Acc) -> Acc
+    end, [], Results),
+    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
+    FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+    Opts = [
+        include_deleted,
+        {start_key, <<"_design/">>},
+        {end_key_gt, <<"_design0">>}
+    ],
+    {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
+    send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
+
+
+send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
+    FoldFun = case Dir of
+        fwd -> fun lists:foldl/3;
+        rev -> fun lists:foldr/3
+    end,
+    GreaterFun = case Dir of
+        fwd -> fun(A, B) -> A > B end;
+        rev -> fun(A, B) -> A =< B end
+    end,
+    DocInfos = lists:foldl(fun(FDI, Acc) ->
+        DI = couch_doc:to_doc_info(FDI),
+        case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+            true -> [DI | Acc];
+            false -> Acc
+        end
+    end, [], FullDocInfos),
+    SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+    FinalAcc = try
+        FoldFun(fun(DocInfo, Acc) ->
+            % Kinda gross that we're munging this back to a map
            % that the reader will then have to re-read to rebuild the FDI
+            % for all_docs style. But c'est la vie.
+            #doc_info{
+                id = DocId,
+                high_seq = Seq,
+                revs = [#rev_info{rev = Rev, deleted = Deleted} | _]
+            } = DocInfo,
+            Change = #{
+                id => DocId,
+                sequence => Seq,
+                rev_id => Rev,
+                deleted => Deleted
+            },
+            case Fun(Change, Acc) of
+                {ok, NewAcc} ->
+                    NewAcc;
+                {stop, NewAcc} ->
+                    throw({stop, NewAcc})
+            end
+        end, Acc0, SortedDocInfos)
+    catch
+        {stop, Acc} -> Acc
+    end,
+    case Dir of
+        fwd ->
+            FinalAcc0 = case element(1, FinalAcc) of
+                changes_acc -> % we came here via couch_http or internal call
+                    FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)};
+                fabric_changes_acc -> % we came here via chttpd / fabric / rexi
+                    FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+            end,
+            {ok, FinalAcc0};
+        rev -> {ok, FinalAcc}
+    end.
+
+
+keep_sending_changes(Args, Acc0, FirstRound) ->
+    #changes_args{
+        feed = ResponseType,
+        limit = Limit,
+        db_open_options = DbOptions
+    } = Args,
+
+    {ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
+
+    #changes_acc{
+        db = Db, callback = Callback,
+        timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
+        prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit,
+        ddoc_name = DDocName, view_name = ViewName
+    } = ChangesAcc,
+
+    if Limit > NewLimit, ResponseType == "longpoll" ->
+        end_sending_changes(Callback, UserAcc2, EndSeq);
+    true ->
+        {Go, UserAcc3} = notify_waiting_for_updates(Callback, UserAcc2),
+        if Go /= ok -> end_sending_changes(Callback, UserAcc3, EndSeq); true ->
+            case wait_updated(Timeout, TimeoutFun, UserAcc3) of
+            {updated, UserAcc4} ->
+                UserCtx = fabric2_db:get_user_ctx(Db),
+                DbOptions1 = [{user_ctx, UserCtx} | DbOptions],
+                case fabric2_db:open(fabric2_db:name(Db), DbOptions1) of
+                {ok, Db2} ->
+                    ?MODULE:keep_sending_changes(
+                      Args#changes_args{limit=NewLimit},
+                      ChangesAcc#changes_acc{
+                        db = Db2,
+                        view = maybe_refresh_view(Db2, DDocName, ViewName),
+                        user_acc = UserAcc4,
+                        seq = EndSeq,
+                        prepend = Prepend2,
+                        timeout = Timeout,
+                        timeout_fun = TimeoutFun},
+                      false);
+                _Else ->
+                    end_sending_changes(Callback, UserAcc3, EndSeq)
+                end;
+            {stop, UserAcc4} ->
+                end_sending_changes(Callback, UserAcc4, EndSeq)
+            end
+        end
+    end.
+
+maybe_refresh_view(_, undefined, undefined) ->
+    undefined;
+maybe_refresh_view(Db, DDocName, ViewName) ->
+    DbName = couch_db:name(Db),
+    {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(DbName, DDocName, ViewName, #mrargs{}),
+    View.
+
+notify_waiting_for_updates(Callback, UserAcc) ->
+    Callback(waiting_for_updates, UserAcc).
+
+end_sending_changes(Callback, UserAcc, EndSeq) ->
+    Callback({stop, EndSeq, null}, UserAcc).
+
+view_changes_enumerator(Value, Acc) ->
+    #changes_acc{
+        filter = Filter, callback = Callback, prepend = Prepend,
+        user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
+        timeout = Timeout, timeout_fun = TimeoutFun, seq = CurrentSeq,
+        aggregation_kvs=AggKVs, aggregation_results=AggResults
+    } = Acc,
+
+    Results0 = view_filter(Db, Value, Filter),
+    Results = [Result || Result <- Results0, Result /= null],
+    {{Seq, _}, _} = Value,
+
+    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+
+    if CurrentSeq =:= Seq ->
+        NewAggKVs = case Results of
+            [] -> AggKVs;
+            _ -> [Value|AggKVs]
+        end,
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        Acc0 = Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            aggregation_kvs=NewAggKVs
+        },
+        case Done of
+            stop -> {stop, Acc0};
+            ok -> {Go, Acc0}
+        end;
+    AggResults =/= [] ->
+        {NewAggKVs, NewAggResults} = case Results of
+            [] -> {[], []};
+            _ -> {[Value], Results}
+        end,
+        if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
+            ChangesRow = view_changes_row(AggResults, AggKVs, Acc),
+            UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
+            reset_heartbeat(),
+            {Go, Acc#changes_acc{
+                seq = Seq, user_acc = UserAcc2, limit = Limit - 1,
+                aggregation_kvs=NewAggKVs, aggregation_results=NewAggResults}};
+        true ->
+            ChangesRow = view_changes_row(AggResults, AggKVs, Acc),
+            UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+            reset_heartbeat(),
+            {Go, Acc#changes_acc{
+                seq = Seq, prepend = <<",\n">>, user_acc = UserAcc2,
+                limit = Limit - 1, aggregation_kvs=[Value],
+                aggregation_results=Results}}
+        end;
+    true ->
+        {NewAggKVs, NewAggResults} = case Results of
+            [] -> {[], []};
+            _ -> {[Value], Results}
+        end,
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        Acc0 = Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            aggregation_kvs=NewAggKVs,
+            aggregation_results=NewAggResults
+        },
+        case Done of
+            stop -> {stop, Acc0};
+            ok -> {Go, Acc0}
+        end
+    end.
+
+changes_enumerator(Change0, Acc) ->
+    #changes_acc{
+        filter = Filter,
+        callback = Callback,
+        user_acc = UserAcc,
+        limit = Limit,
+        db = Db,
+        timeout = Timeout,
+        timeout_fun = TimeoutFun
+    } = Acc,
+    {Change1, Results0} = case Filter of
+        {fast_view, _, _, _} ->
+            fast_view_filter(Db, Change0, Filter);
+        _ ->
+            {Change0, filter(Db, Change0, Filter)}
+    end,
+    Results = [Result || Result <- Results0, Result /= null],
+    Seq = maps:get(sequence, Change1),
+    Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+    case Results of
+    [] ->
+        {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+        case Done of
+        stop ->
+            {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+        ok ->
+            {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+        end;
+    _ ->
+        ChangesRow = changes_row(Results, Change1, Acc),
+        {UserGo, UserAcc2} = Callback({change, ChangesRow}, UserAcc),
+        RealGo = case UserGo of
+            ok -> Go;
+            stop -> stop
+        end,
+        reset_heartbeat(),
+        couch_log:error("XKCD: CHANGE SEQ: ~p", [Seq]),
+        {RealGo, Acc#changes_acc{
+            seq = Seq,
+            user_acc = UserAcc2,
+            limit = Limit - 1
+        }}
+    end.
+
+
+
+view_changes_row(Results, KVs, Acc) ->
+    {Add, Remove} = lists:foldl(fun(Row, {AddAcc, RemAcc}) ->
+        {{_Seq, Key}, {_Id, Value, _Rev}} = Row,
+        case Value of
+            removed ->
+                {AddAcc, [Key|RemAcc]};
+            {dups, DupValues} ->
+                AddAcc1 = lists:foldl(fun(DupValue, AddAcc0) ->
+                    [[Key, DupValue]|AddAcc0]
+                end, AddAcc, DupValues),
+                {AddAcc1, RemAcc};
+            _ ->
+                {[[Key, Value]|AddAcc], RemAcc}
+        end
+    end, {[], []}, KVs),
+
+    % Seq, Id, and Rev should be the same for all KVs, since we're aggregating
+    % by seq.
+    [{{Seq, _Key}, {Id, _Value, Rev}}|_] = KVs,
+
+    {[
+        {<<"seq">>, Seq}, {<<"id">>, Id}, {<<"add">>, Add},
+        {<<"remove">>, Remove}, {<<"changes">>, Results}
+    ] ++ maybe_get_changes_doc({Id, Rev}, Acc)}.
+
+
+changes_row(Results, Change, Acc) ->
+    #{
+        id := Id,
+        sequence := Seq,
+        deleted := Del
+    } = Change,
+    {[
+        {<<"seq">>, Seq},
+        {<<"id">>, Id},
+        {<<"changes">>, Results}
+    ] ++ deleted_item(Del) ++ maybe_get_changes_doc(Change, Acc)}.
+
+maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
+    #changes_acc{
+        db = Db,
+        doc_options = DocOpts,
+        conflicts = Conflicts,
+        filter = Filter
+    } = Acc,
+    Opts = case Conflicts of
+        true -> [deleted, conflicts];
+        false -> [deleted]
+    end,
+    load_doc(Db, Value, Opts, DocOpts, Filter);
+
+maybe_get_changes_doc(_Value, _Acc) ->
+    [].
+
+
+load_doc(Db, Value, Opts, DocOpts, Filter) ->
+    case load_doc(Db, Value, Opts) of
+        null ->
+            [{doc, null}];
+        Doc ->
+            [{doc, doc_to_json(Doc, DocOpts, Filter)}]
+    end.
+
+
+load_doc(Db, Change, Opts) ->
+    #{
+        id := Id,
+        rev_id := RevId
+    } = Change,
+    case fabric2_db:open_doc_revs(Db, Id, [RevId], Opts) of
+        {ok, [{ok, Doc}]} ->
+            Doc;
+        _ ->
+            null
+    end.
+
+
+doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
+    when Fields =/= nil ->
+    mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
+doc_to_json(Doc, DocOpts, _Filter) ->
+    couch_doc:to_json_obj(Doc, DocOpts).
+
+
+deleted_item(true) -> [{<<"deleted">>, true}];
+deleted_item(_) -> [].
+
+% Waits for an updated msg; if there are multiple msgs, collects them all.
+wait_updated(Timeout, TimeoutFun, UserAcc) ->
+    couch_log:error("XKCD: WAITING FOR UPDATE", []),
+    receive
+    updated ->
+        couch_log:error("XKCD: GOT UPDATED", []),
+        get_rest_updated(UserAcc);
+    deleted ->
+        couch_log:error("XKCD: DB DELETED", []),
+        {stop, UserAcc}
+    after Timeout ->
+        {Go, UserAcc2} = TimeoutFun(UserAcc),
+        case Go of
+        ok ->
+            couch_log:error("XKCD: WAIT UPDATED TIMEOUT, RETRY", []),
+            ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
+        stop ->
+            couch_log:error("XKCD: WAIT UPDATED TIMEOUT STOP", []),
+            {stop, UserAcc2}
+        end
+    end.
+
+get_rest_updated(UserAcc) ->
+    receive
+    updated ->
+        get_rest_updated(UserAcc)
+    after 0 ->
+        {updated, UserAcc}
+    end.
+
+reset_heartbeat() ->
+    case get(last_changes_heartbeat) of
+    undefined ->
+        ok;
+    _ ->
+        put(last_changes_heartbeat, os:timestamp())
+    end.
+
+maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
+    Before = get(last_changes_heartbeat),
+    case Before of
+    undefined ->
+        {ok, Acc};
+    _ ->
+        Now = os:timestamp(),
+        case timer:now_diff(Now, Before) div 1000 >= Timeout of
+        true ->
+            Acc2 = TimeoutFun(Acc),
+            put(last_changes_heartbeat, Now),
+            Acc2;
+        false ->
+            {ok, Acc}
+        end
+    end.
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index c6404b0..40c1a1e 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -93,18 +93,13 @@ handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
 handle_changes_req1(#httpd{}=Req, Db) ->
     #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
     ChangesArgs = Args0#changes_args{
-        filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
-        db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}]
+        db_open_options = [{user_ctx, fabric2_db:get_user_ctx(Db)}]
     },
+    ChangesFun = chttpd_changes:handle_db_changes(ChangesArgs, Req, Db),
     Max = chttpd:chunked_response_buffer_size(),
     case ChangesArgs#changes_args.feed of
     "normal" ->
-        T0 = os:timestamp(),
-        {ok, Info} = fabric:get_db_info(Db),
-        Suffix = mem3:shard_suffix(Db),
-        Etag = chttpd:make_etag({Info, Suffix}),
-        DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
-        couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
+        Etag = <<"foo">>,
         chttpd:etag_respond(Req, Etag, fun() ->
             Acc0 = #cacc{
                 feed = normal,
@@ -112,7 +107,7 @@ handle_changes_req1(#httpd{}=Req, Db) ->
                 mochi = Req,
                 threshold = Max
             },
-            fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+            ChangesFun({fun changes_callback/2, Acc0})
         end);
     Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource"  ->
         couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
@@ -122,7 +117,7 @@ handle_changes_req1(#httpd{}=Req, Db) ->
             threshold = Max
         },
         try
-            fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+            ChangesFun({fun changes_callback/2, Acc0})
         after
             couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
         end;
@@ -337,7 +332,7 @@ update_partition_stats(PathParts) ->
 handle_design_req(#httpd{
         path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
     }=Req, Db) ->
-    DbName = mem3:dbname(couch_db:name(Db)),
+    DbName = fabric2_db:name(Db),
     case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
     {ok, DDoc} ->
         Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
@@ -365,56 +360,33 @@ handle_design_info_req(Req, _Db, _DDoc) ->
 
 create_db_req(#httpd{}=Req, DbName) ->
     couch_httpd:verify_is_server_admin(Req),
-    N = chttpd:qs_value(Req, "n", config:get("cluster", "n", "3")),
-    Q = chttpd:qs_value(Req, "q", config:get("cluster", "q", "8")),
-    P = chttpd:qs_value(Req, "placement", config:get("cluster", "placement")),
-    EngineOpt = parse_engine_opt(Req),
-    DbProps = parse_partitioned_opt(Req),
-    Options = [
-        {n, N},
-        {q, Q},
-        {placement, P},
-        {props, DbProps}
-    ] ++ EngineOpt,
     DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
-    case fabric:create_db(DbName, Options) of
-    ok ->
-        send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
-    {error, file_exists} ->
-        chttpd:send_error(Req, file_exists);
-    Error ->
-        throw(Error)
+    case fabric2_db:create(DbName, []) of
+        {ok, _} ->
+            send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
+        {error, file_exists} ->
+            chttpd:send_error(Req, file_exists);
+        Error ->
+            throw(Error)
     end.
 
 delete_db_req(#httpd{}=Req, DbName) ->
     couch_httpd:verify_is_server_admin(Req),
-    case fabric:delete_db(DbName, []) of
-    ok ->
-        send_json(Req, 200, {[{ok, true}]});
-    accepted ->
-        send_json(Req, 202, {[{ok, true}]});
-    Error ->
-        throw(Error)
+    case fabric2_db:delete(DbName, []) of
+        ok ->
+            send_json(Req, 200, {[{ok, true}]});
+        Error ->
+            throw(Error)
     end.
 
 do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
-    Shard = hd(mem3:shards(DbName)),
-    Props = couch_util:get_value(props, Shard#shard.opts, []),
-    Opts = case Ctx of
-        undefined ->
-            [{props, Props}];
-        #user_ctx{} ->
-            [{user_ctx, Ctx}, {props, Props}]
-    end,
-    {ok, Db} = couch_db:clustered_db(DbName, Opts),
+    {ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
     Fun(Req, Db).
 
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
+db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
     % measure the time required to generate the etag, see if it's worth it
     T0 = os:timestamp(),
-    {ok, DbInfo} = fabric:get_db_info(DbName),
+    {ok, DbInfo} = fabric2_db:get_db_info(Db),
     DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
     couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
     send_json(Req, {DbInfo});
@@ -422,22 +394,22 @@ db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
 db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx,Ctx}],
 
-    Doc = couch_db:doc_from_json_obj_validate(Db, chttpd:json_body(Req)),
-    Doc2 = case Doc#doc.id of
+    Doc0 = chttpd:json_body(Req),
+    Doc1 = couch_doc:from_json_obj_validate(Doc0, fabric2_db:name(Db)),
+    Doc2 = case Doc1#doc.id of
         <<"">> ->
-            Doc#doc{id=couch_uuids:new(), revs={0, []}};
+            Doc1#doc{id=couch_uuids:new(), revs={0, []}};
         _ ->
-            Doc
+            Doc1
     end,
     DocId = Doc2#doc.id,
     case chttpd:qs_value(Req, "batch") of
     "ok" ->
         % async_batching
         spawn(fun() ->
-                case catch(fabric:update_doc(Db, Doc2, Options)) of
+                case catch(fabric2_db:update_doc(Db, Doc2, Options)) of
                 {ok, _} ->
                     chttpd_stats:incr_writes(),
                     ok;
@@ -457,7 +429,7 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
         % normal
         DocUrl = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
             $/, couch_util:url_encode(DocId)]),
-        case fabric:update_doc(Db, Doc2, Options) of
+        case fabric2_db:update_doc(Db, Doc2, Options) of
         {ok, NewRev} ->
             chttpd_stats:incr_writes(),
             HttpCode = 201;
@@ -475,13 +447,10 @@ db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
 db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
     send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
 
-db_req(#httpd{method='POST', path_parts=[DbName, <<"_ensure_full_commit">>],
-        user_ctx=Ctx}=Req, _Db) ->
+db_req(#httpd{method='POST', path_parts=[_DbName, <<"_ensure_full_commit">>],
+        user_ctx=Ctx}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
-    %% use fabric call to trigger a database_does_not_exist exception
-    %% for missing databases that'd return error 404 from chttpd
-    %% get_security used to prefer shards on the same node over other nodes
-    fabric:get_security(DbName, [{user_ctx, Ctx}]),
+    #{db_prefix := <<_/binary>>} = Db,
     send_json(Req, 201, {[
         {ok, true},
         {instance_start_time, <<"0">>}
@@ -503,22 +472,17 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
         DocsArray0
     end,
     couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
-    W = case couch_util:get_value(<<"w">>, JsonProps) of
-    Value when is_integer(Value) ->
-        integer_to_list(Value);
-    _ ->
-        chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
-    end,
     case chttpd:header_value(Req, "X-Couch-Full-Commit") of
     "true" ->
-        Options = [full_commit, {user_ctx,Ctx}, {w,W}];
+        Options = [full_commit, {user_ctx,Ctx}];
     "false" ->
-        Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
+        Options = [delay_commit, {user_ctx,Ctx}];
     _ ->
-        Options = [{user_ctx,Ctx}, {w,W}]
+        Options = [{user_ctx,Ctx}]
     end,
+    DbName = fabric2_db:name(Db),
     Docs = lists:map(fun(JsonObj) ->
-        Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+        Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
         validate_attachment_names(Doc),
         case Doc#doc.id of
             <<>> -> Doc#doc{id = couch_uuids:new()};
@@ -532,7 +496,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
         true  -> [all_or_nothing|Options];
         _ -> Options
         end,
-        case fabric:update_docs(Db, Docs, Options2) of
+        case fabric2_db:update_docs(Db, Docs, Options2) of
         {ok, Results} ->
             % output the results
             chttpd_stats:incr_writes(length(Results)),
@@ -551,7 +515,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
             send_json(Req, 417, ErrorsJson)
         end;
     false ->
-        case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
+        case fabric2_db:update_docs(Db, Docs, [replicated_changes|Options]) of
         {ok, Errors} ->
             chttpd_stats:incr_writes(length(Docs)),
             ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
@@ -647,8 +611,7 @@ db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
     couch_stats:increment_counter([couchdb, httpd, purge_requests]),
     chttpd:validate_ctype(Req, "application/json"),
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}],
+    Options = [{user_ctx, Req#httpd.user_ctx}],
     {IdsRevs} = chttpd:json_body_obj(Req),
     IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
     MaxIds = config:get_integer("purge", "max_document_id_number", 100),
@@ -723,7 +686,7 @@ db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
     {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+    case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
         {error, Reason} ->
             chttpd:send_error(Req, Reason);
         {ok, Results} ->
@@ -740,7 +703,7 @@ db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
 db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
     chttpd:validate_ctype(Req, "application/json"),
     {JsonDocIdRevs} = chttpd:json_body_obj(Req),
-    case fabric:get_missing_revs(Db, JsonDocIdRevs) of
+    case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
         {error, Reason} ->
             chttpd:send_error(Req, Reason);
         {ok, Results} ->
@@ -856,22 +819,22 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
         200, [], FirstChunk),
     VAcc1 = VAcc0#vacc{resp=Resp0},
     VAcc2 = lists:foldl(fun(Args, Acc0) ->
-        {ok, Acc1} = fabric:all_docs(Db, Options,
+        {ok, Acc1} = fabric2_db:fold_docs(Db, Options,
             fun view_cb/2, Acc0, Args),
         Acc1
     end, VAcc1, ArgQueries),
     {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
     chttpd:end_delayed_json_response(Resp1).
 
-all_docs_view(Req, Db, Keys, OP) ->
-    Args0 = couch_mrview_http:parse_params(Req, Keys),
-    Args1 = Args0#mrargs{view_type=map},
-    Args2 = fabric_util:validate_all_docs_args(Db, Args1),
-    Args3 = set_namespace(OP, Args2),
+all_docs_view(Req, Db, _Keys, _OP) ->
+    % Args0 = couch_mrview_http:parse_params(Req, Keys),
+    % Args1 = Args0#mrargs{view_type=map},
+    % Args2 = fabric_util:validate_all_docs_args(Db, Args1),
+    % Args3 = set_namespace(OP, Args2),
     Options = [{user_ctx, Req#httpd.user_ctx}],
     Max = chttpd:chunked_response_buffer_size(),
     VAcc = #vacc{db=Db, req=Req, threshold=Max},
-    {ok, Resp} = fabric:all_docs(Db, Options, fun view_cb/2, VAcc, Args3),
+    {ok, Resp} = fabric2_db:fold_docs(Db, fun view_cb/2, VAcc, Options),
     {ok, Resp#vacc.resp}.
 
 view_cb({row, Row} = Msg, Acc) ->
@@ -915,7 +878,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
         Doc = couch_doc_open(Db, DocId, Rev, Options2),
         send_doc(Req, Doc, Options2);
     _ ->
-        case fabric:open_revs(Db, DocId, Revs, Options) of
+        case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
             {ok, []} when Revs == all ->
                 chttpd:send_error(Req, {not_found, missing});
             {ok, Results} ->
@@ -956,8 +919,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     couch_db:validate_docid(Db, DocId),
     chttpd:validate_ctype(Req, "multipart/form-data"),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx,Ctx}],
 
     Form = couch_httpd:parse_form(Req),
     case proplists:is_defined("_doc", Form) of
@@ -966,7 +928,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
         Doc = couch_doc_from_req(Req, Db, DocId, Json);
     false ->
         Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
-        Doc = case fabric:open_revs(Db, DocId, [Rev], []) of
+        Doc = case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
             {ok, [{ok, Doc0}]} ->
                 chttpd_stats:incr_reads(),
                 Doc0;
@@ -995,7 +957,7 @@ db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
     NewDoc = Doc#doc{
         atts = UpdatedAtts ++ OldAtts2
     },
-    case fabric:update_doc(Db, NewDoc, Options) of
+    case fabric2_db:update_doc(Db, NewDoc, Options) of
     {ok, NewRev} ->
         chttpd_stats:incr_writes(),
         HttpCode = 201;
@@ -1013,11 +975,10 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
     #doc_query_args{
         update_type = UpdateType
     } = parse_doc_query(Req),
-    DbName = couch_db:name(Db),
-    couch_db:validate_docid(Db, DocId),
+    DbName = fabric2_db:name(Db),
+    couch_doc:validate_docid(DocId),
 
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    Options = [{user_ctx,Ctx}, {w,W}],
+    Options = [{user_ctx, Ctx}],
 
     Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
         $/, couch_util:url_encode(DocId)]),
@@ -1025,7 +986,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
     case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
     ("multipart/related;" ++ _) = ContentType ->
         couch_httpd:check_max_request_length(Req),
-        couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
+        couch_httpd_multipart:num_mp_writers(1),
         {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
                 fun() -> receive_request_data(Req) end),
         Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
@@ -1045,7 +1006,7 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
             Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
 
             spawn(fun() ->
-                    case catch(fabric:update_doc(Db, Doc, Options)) of
+                    case catch(fabric2_db:update_doc(Db, Doc, Options)) of
                     {ok, _} ->
                         chttpd_stats:incr_writes(),
                         ok;
@@ -1079,7 +1040,7 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
     % open old doc
     Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
     % save new doc
-    case fabric:update_doc(Db,
+    case fabric2_db:update_doc(Db,
         Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
     {ok, NewTargetRev} ->
         chttpd_stats:incr_writes(),
@@ -1180,7 +1141,7 @@ send_docs_multipart(Req, Results, Options1) ->
     CType = {"Content-Type",
         "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
     {ok, Resp} = start_chunked_response(Req, 200, [CType]),
-    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+    chttpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
     lists:foreach(
         fun({ok, #doc{atts=Atts}=Doc}) ->
             Refs = monitor_attachments(Doc#doc.atts),
@@ -1188,25 +1149,25 @@ send_docs_multipart(Req, Results, Options1) ->
             JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
             {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
                     InnerBoundary, JsonBytes, Atts, true),
-            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+            chttpd:send_chunk(Resp, <<"\r\nContent-Type: ",
                     ContentType/binary, "\r\n\r\n">>),
             couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
-                    fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+                    fun(Data) -> chttpd:send_chunk(Resp, Data)
                     end, true),
-             couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
+             chttpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
             after
                 demonitor_refs(Refs)
             end;
         ({{not_found, missing}, RevId}) ->
              RevStr = couch_doc:rev_to_str(RevId),
              Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
-             couch_httpd:send_chunk(Resp,
+             chttpd:send_chunk(Resp,
                 [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
                 Json,
                 <<"\r\n--", OuterBoundary/binary>>])
          end, Results),
-    couch_httpd:send_chunk(Resp, <<"--">>),
-    couch_httpd:last_chunk(Resp).
+    chttpd:send_chunk(Resp, <<"--">>),
+    chttpd:last_chunk(Resp).
 
 bulk_get_multipart_headers({0, []}, Id, Boundary) ->
     [
@@ -1276,15 +1237,14 @@ send_updated_doc(Req, Db, DocId, Doc, Headers) ->
 
 send_updated_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
         Headers, UpdateType) ->
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
     Options =
         case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
         "true" ->
-            [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+            [full_commit, UpdateType, {user_ctx,Ctx}];
         "false" ->
-            [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+            [delay_commit, UpdateType, {user_ctx,Ctx}];
         _ ->
-            [UpdateType, {user_ctx,Ctx}, {w,W}]
+            [UpdateType, {user_ctx,Ctx}]
         end,
     {Status, {etag, Etag}, Body} = update_doc(Db, DocId,
         #doc{deleted=Deleted}=Doc, Options),
@@ -1303,31 +1263,7 @@ http_code_from_status(Status) ->
     end.
 
 update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
-    {_, Ref} = spawn_monitor(fun() ->
-        try fabric:update_doc(Db, Doc, Options) of
-            Resp ->
-                exit({exit_ok, Resp})
-        catch
-            throw:Reason ->
-                exit({exit_throw, Reason});
-            error:Reason ->
-                exit({exit_error, Reason});
-            exit:Reason ->
-                exit({exit_exit, Reason})
-        end
-    end),
-    Result = receive
-        {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
-            Ret;
-        {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
-            throw(Reason);
-        {'DOWN', Ref, _, _, {exit_error, Reason}} ->
-            erlang:error(Reason);
-        {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
-            erlang:exit(Reason)
-    end,
-
-    case Result of
+    case fabric2_db:update_doc(Db, Doc, Options) of
     {ok, NewRev} ->
         Accepted = false;
     {accepted, NewRev} ->
@@ -1374,7 +1310,7 @@ couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs} = Doc) ->
     end,
     Doc#doc{id=DocId, revs=Revs2};
 couch_doc_from_req(Req, Db, DocId, Json) ->
-    Doc = couch_db:doc_from_json_obj_validate(Db, Json),
+    Doc = couch_doc:from_json_obj_validate(Json, fabric2_db:name(Db)),
     couch_doc_from_req(Req, Db, DocId, Doc).
 
 
@@ -1382,11 +1318,10 @@ couch_doc_from_req(Req, Db, DocId, Json) ->
 % couch_doc_open(Db, DocId) ->
 %   couch_doc_open(Db, DocId, nil, []).
 
-couch_doc_open(Db, DocId, Rev, Options0) ->
-    Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0],
+couch_doc_open(Db, DocId, Rev, Options) ->
     case Rev of
     nil -> % open most recent rev
-        case fabric:open_doc(Db, DocId, Options) of
+        case fabric2_db:open_doc(Db, DocId, Options) of
         {ok, Doc} ->
             chttpd_stats:incr_reads(),
             Doc;
@@ -1394,7 +1329,7 @@ couch_doc_open(Db, DocId, Rev, Options0) ->
              throw(Error)
          end;
     _ -> % open a specific rev (deletions come back as stubs)
-        case fabric:open_revs(Db, DocId, [Rev], Options) of
+        case fabric2_db:open_doc_revs(Db, DocId, [Rev], Options) of
         {ok, [{ok, Doc}]} ->
             chttpd_stats:incr_reads(),
             Doc;
@@ -1515,8 +1450,12 @@ db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNa
     end;
 
 
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
+db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
         when (Method == 'PUT') or (Method == 'DELETE') ->
+    #httpd{
+        user_ctx = Ctx,
+        mochi_req = MochiReq
+    } = Req,
     FileName = validate_attachment_name(
                     mochiweb_util:join(
                         lists:map(fun binary_to_list/1,
@@ -1526,16 +1465,45 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
         'DELETE' ->
             [];
         _ ->
-            MimeType = case couch_httpd:header_value(Req,"Content-Type") of
+            MimeType = case chttpd:header_value(Req,"Content-Type") of
                 % We could throw an error here or guess by the FileName.
                 % Currently, just giving it a default.
                 undefined -> <<"application/octet-stream">>;
                 CType -> list_to_binary(CType)
             end,
-            Data = fabric:att_receiver(Req, chttpd:body_length(Req)),
+            Data = case chttpd:body_length(Req) of
+                undefined ->
+                    <<"">>;
+                {unknown_transfer_encoding, Unknown} ->
+                    exit({unknown_transfer_encoding, Unknown});
+                chunked ->
+                    fun(MaxChunkSize, ChunkFun, InitState) ->
+                        chttpd:recv_chunked(
+                            Req, MaxChunkSize, ChunkFun, InitState
+                        )
+                    end;
+                0 ->
+                    <<"">>;
+                Length when is_integer(Length) ->
+                    Expect = case chttpd:header_value(Req, "expect") of
+                        undefined ->
+                            undefined;
+                        Value when is_list(Value) ->
+                            string:to_lower(Value)
+                    end,
+                    case Expect of
+                        "100-continue" ->
+                            MochiReq:start_raw_response({100, gb_trees:empty()});
+                        _Else ->
+                            ok
+                    end,
+                    fun() -> chttpd:recv(Req, 0) end;
+                Length ->
+                    exit({length_not_integer, Length})
+            end,
             ContentLen = case couch_httpd:header_value(Req,"Content-Length") of
                 undefined -> undefined;
-                Length -> list_to_integer(Length)
+                CL -> list_to_integer(CL)
             end,
             ContentEnc = string:to_lower(string:strip(
                 couch_httpd:header_value(Req, "Content-Encoding", "identity")
@@ -1570,7 +1538,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
             couch_db:validate_docid(Db, DocId),
             #doc{id=DocId};
         Rev ->
-            case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
+            case fabric2_db:open_doc_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
             {ok, [{ok, Doc0}]} ->
                 chttpd_stats:incr_reads(),
                 Doc0;
@@ -1585,8 +1553,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
     DocEdited = Doc#doc{
         atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
     },
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
-    case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}, {w,W}]) of
+    case fabric2_db:update_doc(Db, DocEdited, [{user_ctx,Ctx}]) of
     {ok, UpdatedRev} ->
         chttpd_stats:incr_writes(),
         HttpCode = 201;
@@ -1595,7 +1562,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
         HttpCode = 202
     end,
     erlang:put(mochiweb_request_recv, true),
-    DbName = couch_db:name(Db),
+    DbName = fabric2_db:name(Db),
 
     {Status, Headers} = case Method of
         'DELETE' ->
@@ -1682,46 +1649,6 @@ get_md5_header(Req) ->
 parse_doc_query(Req) ->
     lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)).
 
-parse_engine_opt(Req) ->
-    case chttpd:qs_value(Req, "engine") of
-        undefined ->
-            [];
-        Extension ->
-            Available = couch_server:get_engine_extensions(),
-            case lists:member(Extension, Available) of
-                true ->
-                    [{engine, iolist_to_binary(Extension)}];
-                false ->
-                    throw({bad_request, invalid_engine_extension})
-            end
-    end.
-
-
-parse_partitioned_opt(Req) ->
-    case chttpd:qs_value(Req, "partitioned") of
-        undefined ->
-            [];
-        "false" ->
-            [];
-        "true" ->
-            ok = validate_partitioned_db_enabled(Req),
-            [
-                {partitioned, true},
-                {hash, [couch_partition, hash, []]}
-            ];
-        _ ->
-            throw({bad_request, <<"Invalid `partitioned` parameter">>})
-    end.
-
-
-validate_partitioned_db_enabled(Req) ->
-    case couch_flags:is_enabled(partitioned, Req) of
-        true -> 
-            ok;
-        false ->
-            throw({bad_request, <<"Partitioned feature is not enabled.">>})
-    end.
-
 
 parse_doc_query({Key, Value}, Args) ->
     case {Key, Value} of
@@ -1791,7 +1718,7 @@ parse_changes_query(Req) ->
         {"descending", "true"} ->
             Args#changes_args{dir=rev};
         {"since", _} ->
-            Args#changes_args{since=Value};
+            Args#changes_args{since=parse_since_seq(Value)};
         {"last-event-id", _} ->
             Args#changes_args{since=Value};
         {"limit", _} ->
@@ -1845,6 +1772,27 @@ parse_changes_query(Req) ->
             ChangesArgs
     end.
 
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 30 ->
+    throw({bad_request, url_encoded_since_seq});
+
+parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 2 ->
+    % We have implicitly allowed the since seq to either be
+    % JSON encoded or a "raw" string. Here we just remove the
+    % surrounding quotes if they exist and are paired.
+    SeqSize = size(Seq) - 2,
+    case Seq of
+        <<"\"", S:SeqSize/binary, "\"">> -> S;
+        S -> S
+    end;
+
+parse_since_seq(Seq) when is_binary(Seq) ->
+    Seq;
+
+parse_since_seq(Seq) when is_list(Seq) ->
+    parse_since_seq(iolist_to_binary(Seq)).
+
+
 extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
     extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
 extract_header_rev(Req, ExplicitRev) ->
@@ -1885,6 +1833,8 @@ monitor_attachments(Atts) when is_list(Atts) ->
         case couch_att:fetch(data, Att) of
             {Fd, _} ->
                 [monitor(process, Fd) | Monitors];
+            {loc, _, _, _} ->
+                Monitors;
             stub ->
                 Monitors;
             Else ->
@@ -1982,7 +1932,7 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
             bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
     end;
 bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
-    case fabric:open_revs(Db, DocId, Revs, Options) of
+    case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
         {ok, []} ->
             RevStr = couch_util:get_value(<<"rev">>, Props),
             Error = {RevStr, <<"not_found">>, <<"missing">>},
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
index fa35c6b..3e59ffe 100644
--- a/src/chttpd/src/chttpd_external.erl
+++ b/src/chttpd/src/chttpd_external.erl
@@ -74,7 +74,7 @@ json_req_obj_fields() ->
      <<"peer">>, <<"form">>, <<"cookie">>, <<"userCtx">>, <<"secObj">>].
 
 json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
-    {ok, Info} = get_db_info(Db),
+    {ok, Info} = fabric2_db:get_db_info(Db),
     {Info};
 json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
     couch_uuids:new();
@@ -117,27 +117,18 @@ json_req_obj_field(<<"form">>, #httpd{mochi_req=Req, method=Method}=HttpReq, Db,
 json_req_obj_field(<<"cookie">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
     to_json_terms(Req:parse_cookie());
 json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
-    couch_util:json_user_ctx(Db);
-json_req_obj_field(<<"secObj">>, #httpd{user_ctx=UserCtx}, Db, _DocId) ->
-    get_db_security(Db, UserCtx).
-
-
-get_db_info(Db) ->
-    case couch_db:is_clustered(Db) of
-        true ->
-            fabric:get_db_info(Db);
-        false ->
-            couch_db:get_db_info(Db)
-    end.
-
-
-get_db_security(Db, #user_ctx{}) ->
-    case couch_db:is_clustered(Db) of
-        true ->
-            fabric:get_security(Db);
-        false ->
-            couch_db:get_security(Db)
-    end.
+    json_user_ctx(Db);
+json_req_obj_field(<<"secObj">>, #httpd{user_ctx = #user_ctx{}}, Db, _DocId) ->
+    fabric2_db:get_security(Db).
+
+
+json_user_ctx(Db) ->
+    Ctx = fabric2_db:get_user_ctx(Db),
+    {[
+        {<<"db">>, fabric2_db:name(Db)},
+        {<<"name">>, Ctx#user_ctx.name},
+        {<<"roles">>, Ctx#user_ctx.roles}
+    ]}.
 
 
 to_json_terms(Data) ->
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 819d782..b244e84 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -108,43 +108,39 @@ maybe_add_csp_headers(Headers, _) ->
     Headers.
 
 handle_all_dbs_req(#httpd{method='GET'}=Req) ->
-    Args = couch_mrview_http:parse_params(Req, undefined),
-    ShardDbName = config:get("mem3", "shards_db", "_dbs"),
-    %% shard_db is not sharded but mem3:shards treats it as an edge case
-    %% so it can be pushed thru fabric
-    {ok, Info} = fabric:get_db_info(ShardDbName),
-    Etag = couch_httpd:make_etag({Info}),
-    Options = [{user_ctx, Req#httpd.user_ctx}],
+    % TODO: Support args and options properly, transform
+    % this back into a fold call similar to the old
+    % version.
+    %% Args = couch_mrview_http:parse_params(Req, undefined),
+    % Eventually the Etag for this request will be derived
+    % from the \xFFmetadataVersion key in fdb
+    Etag = <<"foo">>,
+    %% Options = [{user_ctx, Req#httpd.user_ctx}],
     {ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
-        {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag",Etag}]),
-        VAcc = #vacc{req=Req,resp=Resp},
-        fabric:all_docs(ShardDbName, Options, fun all_dbs_callback/2, VAcc, Args)
-    end),
-    case is_record(Resp, vacc) of
-        true -> {ok, Resp#vacc.resp};
-        _ -> {ok, Resp}
-    end;
+        AllDbs = fabric2_db:list_dbs(),
+        chttpd:send_json(Req, AllDbs)
+    end);
 handle_all_dbs_req(Req) ->
     send_method_not_allowed(Req, "GET,HEAD").
 
-all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
-    {ok, Acc#vacc{resp=Resp1}};
-all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
-    Prepend = couch_mrview_http:prepend_val(Acc),
-    case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
-        {ok, Acc};
-    DbName ->
-        {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
-        {ok, Acc#vacc{prepend=",", resp=Resp1}}
-    end;
-all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
-    {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
-    {ok, Acc#vacc{resp=Resp2}};
-all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
-    {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
-    {ok, Acc#vacc{resp=Resp1}}.
+%% all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
+%%     {ok, Acc#vacc{resp=Resp1}};
+%% all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
+%%     Prepend = couch_mrview_http:prepend_val(Acc),
+%%     case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
+%%         {ok, Acc};
+%%     DbName ->
+%%         {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
+%%         {ok, Acc#vacc{prepend=",", resp=Resp1}}
+%%     end;
+%% all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
+%%     {ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
+%%     {ok, Acc#vacc{resp=Resp2}};
+%% all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
+%%     {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
+%%     {ok, Acc#vacc{resp=Resp1}}.
 
 handle_dbs_info_req(#httpd{method='POST'}=Req) ->
     chttpd:validate_ctype(Req, "application/json"),
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index c3bf119..2eb6dc3 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -123,15 +123,14 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
     JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
     JsonDoc = couch_query_servers:json_doc(Doc),
     Cmd = [<<"updates">>, UpdateName],
-    W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
     UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
     JsonResp = case UpdateResp of
         [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
             case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
             "true" ->
-                Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
+                Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
             _ ->
-                Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
+                Options = [{user_ctx, Req#httpd.user_ctx}]
             end,
             NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
             couch_doc:validate_docid(NewDoc#doc.id),
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index ae1d8d6..cf6f27f 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -173,8 +173,18 @@ join([H|[]], _, Acc) ->
 join([H|T], Sep, Acc) ->
     join(T, Sep, [Sep, H | Acc]).
 
+validate(#{} = Db, DDoc) ->
+    DbName = fabric2_db:name(Db),
+    IsPartitioned = fabric2_db:is_partitioned(Db),
+    validate(DbName, IsPartitioned, DDoc);
 
-validate(Db,  DDoc) ->
+validate(Db, DDoc) ->
+    DbName = couch_db:name(Db),
+    IsPartitioned = couch_db:is_partitioned(Db),
+    validate(DbName, IsPartitioned, DDoc).
+
+
+validate(DbName, IsDbPartitioned,  DDoc) ->
     ok = validate_ddoc_fields(DDoc#doc.body),
     GetName = fun
         (#mrview{map_names = [Name | _]}) -> Name;
@@ -203,9 +213,9 @@ validate(Db,  DDoc) ->
         language = Lang,
         views = Views,
         partitioned = Partitioned
-    }} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
+    }} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
 
-    case {couch_db:is_partitioned(Db), Partitioned} of
+    case {IsDbPartitioned, Partitioned} of
         {false, true} ->
             throw({invalid_design_doc,
                 <<"partitioned option cannot be true in a "
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index 3491ef5..c28c78c 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -100,7 +100,7 @@ defmodule BasicsTest do
     db_name = context[:db_name]
     {:ok, _} = create_doc(db_name, sample_doc_foo())
     resp = Couch.get("/#{db_name}/foo", query: %{:local_seq => true})
-    assert resp.body["_local_seq"] == 1, "Local seq value == 1"
+    assert is_binary(resp.body["_local_seq"]), "Local seq value is a binary"
   end
 
   @tag :with_db


[couchdb] 04/06: Initial test suite for the fabric2 implementation

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 1db8a4385208b4459fafd9f1f3e84b276f4f58f7
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:33:55 2019 -0500

    Initial test suite for the fabric2 implementation
    
    This provides a good bit of code coverage for the new implementation.
    We'll want to expand this to include relevant tests from the previous
    fabric test suite along with reading through the various other tests and
    ensuring that we cover the API as deeply as is appropriate for this
    layer.
---
 src/fabric/test/fabric2_changes_fold_tests.erl     | 114 +++
 src/fabric/test/fabric2_db_crud_tests.erl          |  88 +++
 src/fabric/test/fabric2_db_misc_tests.erl          | 113 +++
 src/fabric/test/fabric2_db_security_tests.erl      | 162 +++++
 src/fabric/test/fabric2_doc_count_tests.erl        | 251 +++++++
 src/fabric/test/fabric2_doc_crud_tests.erl         | 770 +++++++++++++++++++++
 src/fabric/test/fabric2_doc_fold_tests.erl         | 209 ++++++
 src/fabric/test/fabric2_fdb_tx_retry_tests.erl     | 178 +++++
 src/fabric/test/fabric2_trace_db_create_tests.erl  |  46 ++
 src/fabric/test/fabric2_trace_db_delete_tests.erl  |  49 ++
 src/fabric/test/fabric2_trace_db_open_tests.erl    |  50 ++
 src/fabric/test/fabric2_trace_doc_create_tests.erl |  86 +++
 12 files changed, 2116 insertions(+)

diff --git a/src/fabric/test/fabric2_changes_fold_tests.erl b/src/fabric/test/fabric2_changes_fold_tests.erl
new file mode 100644
index 0000000..892b448
--- /dev/null
+++ b/src/fabric/test/fabric2_changes_fold_tests.erl
@@ -0,0 +1,114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_changes_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 25).
+
+
+changes_fold_test_() ->
+    {
+        "Test changes fold operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun fold_changes_basic/1,
+                fun fold_changes_since_now/1,
+                fun fold_changes_since_seq/1,
+                fun fold_changes_basic_rev/1,
+                fun fold_changes_since_now_rev/1,
+                fun fold_changes_since_seq_rev/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    Rows = lists:map(fun(Val) ->
+        DocId = fabric2_util:uuid(),
+        Doc = #doc{
+            id = DocId,
+            body = {[{<<"value">>, Val}]}
+        },
+        {ok, RevId} = fabric2_db:update_doc(Db, Doc, []),
+        UpdateSeq = fabric2_db:get_update_seq(Db),
+        #{
+            id => DocId,
+            sequence => UpdateSeq,
+            deleted => false,
+            rev_id => RevId
+        }
+    end, lists:seq(1, ?DOC_COUNT)),
+    {Db, Rows, Ctx}.
+
+
+cleanup({Db, _DocIdRevs, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+fold_changes_basic({Db, DocRows, _}) ->
+    {ok, Rows} = fabric2_db:fold_changes(Db, 0, fun fold_fun/2, []),
+    ?assertEqual(lists:reverse(DocRows), Rows).
+
+
+fold_changes_since_now({Db, _, _}) ->
+    {ok, Rows} = fabric2_db:fold_changes(Db, now, fun fold_fun/2, []),
+    ?assertEqual([], Rows).
+
+
+fold_changes_since_seq({_, [], _}) ->
+    ok;
+
+fold_changes_since_seq({Db, [Row | RestRows], _}) ->
+    #{sequence := Since} = Row,
+    {ok, Rows} = fabric2_db:fold_changes(Db, Since, fun fold_fun/2, []),
+    ?assertEqual(lists:reverse(RestRows), Rows),
+    fold_changes_since_seq({Db, RestRows, nil}).
+
+
+fold_changes_basic_rev({Db, _, _}) ->
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, 0, fun fold_fun/2, [], Opts),
+    ?assertEqual([], Rows).
+
+
+fold_changes_since_now_rev({Db, DocRows, _}) ->
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, now, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocRows, Rows).
+
+
+fold_changes_since_seq_rev({_, [], _}) ->
+    ok;
+
+fold_changes_since_seq_rev({Db, DocRows, _}) ->
+    #{sequence := Since} = lists:last(DocRows),
+    Opts = [{dir, rev}],
+    {ok, Rows} = fabric2_db:fold_changes(Db, Since, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocRows, Rows),
+    RestRows = lists:sublist(DocRows, length(DocRows) - 1),
+    fold_changes_since_seq_rev({Db, RestRows, nil}).
+
+
+fold_fun(#{} = Change, Acc) ->
+    {ok, [Change | Acc]}.
diff --git a/src/fabric/test/fabric2_db_crud_tests.erl b/src/fabric/test/fabric2_db_crud_tests.erl
new file mode 100644
index 0000000..24deeb2
--- /dev/null
+++ b/src/fabric/test/fabric2_db_crud_tests.erl
@@ -0,0 +1,88 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_crud_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+crud_test_() ->
+    {
+        "Test database CRUD operations",
+        {
+            setup,
+            fun() -> test_util:start_couch([fabric]) end,
+            fun test_util:stop_couch/1,
+            [
+                ?TDEF(create_db),
+                ?TDEF(open_db),
+                ?TDEF(delete_db),
+                ?TDEF(list_dbs)
+            ]
+        }
+    }.
+
+
+create_db() ->
+    DbName = ?tempdb(),
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+    ?assertEqual({error, file_exists}, fabric2_db:create(DbName, [])).
+
+
+open_db() ->
+    DbName = ?tempdb(),
+    ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+    % Opening the cached version
+    ?assertMatch({ok, _}, fabric2_db:open(DbName, [])),
+
+    % Remove from cache and re-open
+    true = ets:delete(fabric2_server, DbName),
+    ?assertMatch({ok, _}, fabric2_db:open(DbName, [])).
+
+
+delete_db() ->
+    DbName = ?tempdb(),
+    ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+    ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+    ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+    ?assertError(database_does_not_exist, fabric2_db:open(DbName, [])).
+
+
+list_dbs() ->
+    DbName = ?tempdb(),
+    AllDbs1 = fabric2_db:list_dbs(),
+
+    ?assert(is_list(AllDbs1)),
+    ?assert(not lists:member(DbName, AllDbs1)),
+
+    ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+    AllDbs2 = fabric2_db:list_dbs(),
+    ?assert(lists:member(DbName, AllDbs2)),
+
+    ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+    AllDbs3 = fabric2_db:list_dbs(),
+    ?assert(not lists:member(DbName, AllDbs3)).
diff --git a/src/fabric/test/fabric2_db_misc_tests.erl b/src/fabric/test/fabric2_db_misc_tests.erl
new file mode 100644
index 0000000..8e64056
--- /dev/null
+++ b/src/fabric/test/fabric2_db_misc_tests.erl
@@ -0,0 +1,113 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_misc_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/1}).
+
+
+misc_test_() ->
+    {
+        "Test database miscellaney",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun empty_db_info/1,
+                fun accessors/1,
+                fun set_revs_limit/1,
+                fun set_security/1,
+                fun is_system_db/1,
+                fun ensure_full_commit/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    DbName = ?tempdb(),
+    {ok, Db} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+    {DbName, Db, Ctx}.
+
+
+cleanup({_DbName, Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+empty_db_info({DbName, Db, _}) ->
+    {ok, Info} = fabric2_db:get_db_info(Db),
+    ?assertEqual(DbName, fabric2_util:get_value(db_name, Info)),
+    ?assertEqual(0, fabric2_util:get_value(doc_count, Info)),
+    ?assertEqual(0, fabric2_util:get_value(doc_del_count, Info)),
+    ?assert(is_binary(fabric2_util:get_value(update_seq, Info))).
+
+
+accessors({DbName, Db, _}) ->
+    SeqZero = fabric2_fdb:vs_to_seq(fabric2_util:seq_zero_vs()),
+    ?assertEqual(DbName, fabric2_db:name(Db)),
+    ?assertEqual(0, fabric2_db:get_instance_start_time(Db)),
+    ?assertEqual(nil, fabric2_db:get_pid(Db)),
+    ?assertEqual(undefined, fabric2_db:get_before_doc_update_fun(Db)),
+    ?assertEqual(undefined, fabric2_db:get_after_doc_read_fun(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_committed_update_seq(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_compacted_seq(Db)),
+    ?assertEqual(SeqZero, fabric2_db:get_update_seq(Db)),
+    ?assertEqual(nil, fabric2_db:get_compactor_pid(Db)),
+    ?assertEqual(1000, fabric2_db:get_revs_limit(Db)),
+    ?assertMatch(<<_:32/binary>>, fabric2_db:get_uuid(Db)),
+    ?assertEqual(true, fabric2_db:is_db(Db)),
+    ?assertEqual(false, fabric2_db:is_db(#{})),
+    ?assertEqual(false, fabric2_db:is_partitioned(Db)),
+    ?assertEqual(false, fabric2_db:is_clustered(Db)).
+
+
+set_revs_limit({DbName, Db, _}) ->
+    ?assertEqual(ok, fabric2_db:set_revs_limit(Db, 500)),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    ?assertEqual(500, fabric2_db:get_revs_limit(Db2)).
+
+
+set_security({DbName, Db, _}) ->
+    SecObj = {[
+        {<<"admins">>, {[
+            {<<"names">>, []},
+            {<<"roles">>, []}
+        ]}}
+    ]},
+    ?assertEqual(ok, fabric2_db:set_security(Db, SecObj)),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    ?assertEqual(SecObj, fabric2_db:get_security(Db2)).
+
+
+is_system_db({DbName, Db, _}) ->
+    ?assertEqual(false, fabric2_db:is_system_db(Db)),
+    ?assertEqual(false, fabric2_db:is_system_db_name("foo")),
+    ?assertEqual(false, fabric2_db:is_system_db_name(DbName)),
+    ?assertEqual(true, fabric2_db:is_system_db_name(<<"_replicator">>)),
+    ?assertEqual(true, fabric2_db:is_system_db_name("_replicator")),
+    ?assertEqual(true, fabric2_db:is_system_db_name(<<"foo/_replicator">>)),
+    ?assertEqual(false, fabric2_db:is_system_db_name(<<"f.o/_replicator">>)),
+    ?assertEqual(false, fabric2_db:is_system_db_name(<<"foo/bar">>)).
+
+
+ensure_full_commit({_, Db, _}) ->
+    ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db)),
+    ?assertEqual({ok, 0}, fabric2_db:ensure_full_commit(Db, 5)).
diff --git a/src/fabric/test/fabric2_db_security_tests.erl b/src/fabric/test/fabric2_db_security_tests.erl
new file mode 100644
index 0000000..9796011
--- /dev/null
+++ b/src/fabric/test/fabric2_db_security_tests.erl
@@ -0,0 +1,162 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db_security_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+security_test_() ->
+    {
+        "Test database security operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun is_admin_name/1,
+                fun is_not_admin_name/1,
+                fun is_admin_role/1,
+                fun is_not_admin_role/1,
+                fun check_is_admin/1,
+                fun check_is_not_admin/1,
+                fun check_is_member_name/1,
+                fun check_is_not_member_name/1,
+                fun check_is_member_role/1,
+                fun check_is_not_member_role/1,
+                fun check_admin_is_member/1,
+                fun check_is_member_of_public_db/1,
+                fun check_set_user_ctx/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    DbName = ?tempdb(),
+    {ok, Db1} = fabric2_db:create(DbName, [{user_ctx, ?ADMIN_USER}]),
+    SecProps = {[
+        {<<"admins">>, {[
+            {<<"names">>, [<<"admin_name1">>, <<"admin_name2">>]},
+            {<<"roles">>, [<<"admin_role1">>, <<"admin_role2">>]}
+        ]}},
+        {<<"members">>, {[
+            {<<"names">>, [<<"member_name1">>, <<"member_name2">>]},
+            {<<"roles">>, [<<"member_role1">>, <<"member_role2">>]}
+        ]}}
+    ]},
+    ok = fabric2_db:set_security(Db1, SecProps),
+    {ok, Db2} = fabric2_db:open(DbName, []),
+    {Db2, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+is_admin_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(true, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_not_admin_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member1">>},
+    ?assertEqual(false, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_admin_role({Db, _}) ->
+    UserCtx = #user_ctx{roles = [<<"admin_role1">>]},
+    ?assertEqual(true, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+is_not_admin_role({Db, _}) ->
+    UserCtx = #user_ctx{roles = [<<"member_role1">>]},
+    ?assertEqual(false, fabric2_db:is_admin(Db#{user_ctx := UserCtx})).
+
+
+check_is_admin({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_admin(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_admin({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member_name1">>},
+    ?assertThrow(
+        {unauthorized, <<"You are not a db or server admin.">>},
+        fabric2_db:check_is_admin(Db#{user_ctx := #user_ctx{}})
+    ),
+    ?assertThrow(
+        {forbidden, <<"You are not a db or server admin.">>},
+        fabric2_db:check_is_admin(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_is_member_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"member_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_member_name({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>},
+    ?assertThrow(
+        {unauthorized, <<"You are not authorized", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := #user_ctx{}})
+    ),
+    ?assertThrow(
+        {forbidden, <<"You are not allowed to access", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_is_member_role({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"member_role1">>]},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_not_member_role({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    ?assertThrow(
+        {forbidden, <<"You are not allowed to access", _/binary>>},
+        fabric2_db:check_is_member(Db#{user_ctx := UserCtx})
+    ).
+
+
+check_admin_is_member({Db, _}) ->
+    UserCtx = #user_ctx{name = <<"admin_name1">>},
+    ?assertEqual(ok, fabric2_db:check_is_member(Db#{user_ctx := UserCtx})).
+
+
+check_is_member_of_public_db({Db, _}) ->
+    PublicDb = Db#{security_doc := {[]}},
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    ?assertEqual(
+        ok,
+        fabric2_db:check_is_member(PublicDb#{user_ctx := #user_ctx{}})
+    ),
+    ?assertEqual(
+        ok,
+        fabric2_db:check_is_member(PublicDb#{user_ctx := UserCtx})
+    ).
+
+
+check_set_user_ctx({Db0, _}) ->
+    DbName = fabric2_db:name(Db0),
+    UserCtx = #user_ctx{name = <<"foo">>, roles = [<<"bar">>]},
+    {ok, Db1} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+    ?assertEqual(UserCtx, fabric2_db:get_user_ctx(Db1)).
+
+
diff --git a/src/fabric/test/fabric2_doc_count_tests.erl b/src/fabric/test/fabric2_doc_count_tests.erl
new file mode 100644
index 0000000..37d0840
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_count_tests.erl
@@ -0,0 +1,251 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_count_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 10).
+
+
+doc_count_test_() ->
+    {
+        "Test document counting operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun normal_docs/1,
+                fun design_docs/1,
+                fun local_docs/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+normal_docs({Db, _}) ->
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        Doc = #doc{
+            id = integer_to_binary(Id),
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount,
+            LDocCount
+        ),
+
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT div 2,
+            DelDocCount + ?DOC_COUNT div 2,
+            DDocCount,
+            LDocCount
+        ),
+
+    lists:map(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount,
+            LDocCount
+        ).
+
+
+design_docs({Db, _}) ->
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        BinId = integer_to_binary(Id),
+        DDocId = <<?DESIGN_DOC_PREFIX, BinId/binary>>,
+        Doc = #doc{
+            id = DDocId,
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount + ?DOC_COUNT,
+            LDocCount
+        ),
+
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT div 2,
+            DelDocCount + ?DOC_COUNT div 2,
+            DDocCount + ?DOC_COUNT div 2,
+            LDocCount
+        ),
+
+    lists:map(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount + ?DOC_COUNT,
+            DelDocCount,
+            DDocCount + ?DOC_COUNT,
+            LDocCount
+        ).
+
+
+local_docs({Db, _}) ->
+    {DocCount, DelDocCount, DDocCount, LDocCount} = get_doc_counts(Db),
+
+    Docs1 = lists:map(fun(Id) ->
+        BinId = integer_to_binary(Id),
+        LDocId = <<?LOCAL_DOC_PREFIX, BinId/binary>>,
+        Doc = #doc{
+            id = LDocId,
+            body = {[{<<"value">>, Id}]}
+        },
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc, []),
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:seq(1, ?DOC_COUNT)),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT
+        ),
+
+    Docs2 = lists:map(fun(Doc) ->
+        {[{<<"value">>, V}]} = Doc#doc.body,
+        NewDoc = case V rem 2 of
+            0 -> Doc#doc{deleted = true};
+            1 -> Doc
+        end,
+        {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, NewDoc, []),
+        NewDoc#doc{revs = {RevPos, [Rev]}}
+    end, Docs1),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT div 2
+        ),
+
+    lists:map(fun(Doc) ->
+        case Doc#doc.deleted of
+            true ->
+                Undeleted = Doc#doc{
+                    revs = {0, []},
+                    deleted = false
+                },
+                {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Undeleted, []),
+                Undeleted#doc{revs = {RevPos, [Rev]}};
+            false ->
+                Doc
+        end
+    end, Docs2),
+
+    check_doc_counts(
+            Db,
+            DocCount,
+            DelDocCount,
+            DDocCount,
+            LDocCount + ?DOC_COUNT
+        ).
+
+
+get_doc_counts(Db) ->
+    DocCount = fabric2_db:get_doc_count(Db),
+    DelDocCount = fabric2_db:get_del_doc_count(Db),
+    DDocCount = fabric2_db:get_doc_count(Db, <<"_design">>),
+    LDocCount = fabric2_db:get_doc_count(Db, <<"_local">>),
+    {DocCount, DelDocCount, DDocCount, LDocCount}.
+
+
+check_doc_counts(Db, DocCount, DelDocCount, DDocCount, LDocCount) ->
+    ?assertEqual(DocCount, fabric2_db:get_doc_count(Db)),
+    ?assertEqual(DelDocCount, fabric2_db:get_del_doc_count(Db)),
+    ?assertEqual(DocCount, fabric2_db:get_doc_count(Db, <<"_all_docs">>)),
+    ?assertEqual(DDocCount, fabric2_db:get_doc_count(Db, <<"_design">>)),
+    ?assertEqual(LDocCount, fabric2_db:get_doc_count(Db, <<"_local">>)).
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
new file mode 100644
index 0000000..85b2766
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -0,0 +1,770 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_crud_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+% EUnit generator: runs every doc CRUD case against one shared database
+% created in setup/0 and deleted in cleanup/1. The {with, Funs} form
+% passes the setup result ({Db, Ctx}) to each test fun in order.
+doc_crud_test_() ->
+    {
+        "Test document CRUD operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun open_missing_doc/1,
+                fun create_new_doc/1,
+                fun create_ddoc_basic/1,
+                fun create_ddoc_requires_admin/1,
+                fun create_ddoc_requires_validation/1,
+                fun create_ddoc_requires_compilation/1,
+                fun update_doc_basic/1,
+                fun update_ddoc_basic/1,
+                fun update_doc_replicated/1,
+                fun update_doc_replicated_add_conflict/1,
+                fun update_doc_replicated_changes_winner/1,
+                fun update_doc_replicated_extension/1,
+                fun update_doc_replicate_existing_rev/1,
+                fun update_winning_conflict_branch/1,
+                fun update_non_winning_conflict_branch/1,
+                fun delete_doc_basic/1,
+                fun delete_changes_winner/1,
+                fun recreate_doc_basic/1,
+                fun conflict_on_create_new_with_rev/1,
+                fun conflict_on_update_with_no_rev/1,
+                fun conflict_on_create_as_deleted/1,
+                fun conflict_on_recreate_as_deleted/1,
+                fun conflict_on_extend_deleted/1,
+                fun open_doc_revs_basic/1,
+                fun open_doc_revs_all/1,
+                fun open_doc_revs_latest/1,
+                fun get_missing_revs_basic/1,
+                fun get_missing_revs_on_missing_doc/1,
+                fun open_missing_local_doc/1,
+                fun create_local_doc_basic/1,
+                fun update_local_doc_basic/1,
+                fun delete_local_doc_basic/1,
+                fun recreate_local_doc/1,
+                fun create_local_doc_bad_rev/1,
+                fun create_local_doc_random_rev/1
+            ]}
+        }
+    }.
+
+
+% Start the couch apps and create a fresh, uniquely-named database as an
+% admin user. Returns {Db, Ctx}; Ctx is needed by cleanup/1 to stop couch.
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+% Delete the test database and shut down the apps started in setup/0.
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+% Opening a doc id that was never written must report {not_found, missing}.
+open_missing_doc({Db, _}) ->
+    Result = fabric2_db:open_doc(Db, <<"foo">>),
+    ?assertEqual({not_found, missing}, Result).
+
+
+% A brand new doc (no revs) gets a server-assigned rev and reads back
+% with that rev installed.
+create_new_doc({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+    NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+    ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+% Design docs with no views/validation fields are accepted like normal docs.
+create_ddoc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {RevPos, Rev}} = fabric2_db:update_doc(Db, Doc),
+    NewDoc = Doc#doc{revs = {RevPos, [Rev]}},
+    ?assertEqual({ok, NewDoc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+% Writing a design doc with a plain (non-admin) user ctx must be rejected.
+create_ddoc_requires_admin({Db, _}) ->
+    Db2 = fabric2_db:set_user_ctx(Db, #user_ctx{}),
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({unauthorized, _}, fabric2_db:update_doc(Db2, Doc)).
+
+
+% A design doc whose view uses an unknown builtin reduce name must fail
+% design-doc validation at write time.
+create_ddoc_requires_validation({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[
+            {<<"views">>, {[
+                {<<"foo">>, {[
+                    {<<"map">>, <<"function(doc) {}">>},
+                    {<<"reduce">>, <<"_not_a_builtin_reduce">>}
+                ]}}
+            ]}}
+        ]}
+    },
+    ?assertThrow(
+            {bad_request, invalid_design_doc, _},
+            fabric2_db:update_doc(Db, Doc)
+        ).
+
+
+% A design doc whose map function is not valid JavaScript must be rejected
+% with a compilation error at write time.
+create_ddoc_requires_compilation({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc = #doc{
+        id = DDocId,
+        body = {[
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>, {[
+                {<<"foo">>, {[
+                    {<<"map">>, <<"Hopefully this is invalid JavaScript">>}
+                ]}}
+            ]}}
+        ]}
+    },
+    ?assertThrow(
+            {bad_request, compilation_error, _},
+            fabric2_db:update_doc(Db, Doc)
+        ).
+
+
+% Updating an existing doc extends the rev path; reading back returns the
+% doc with both revs ([NewRev, OldRev]) in its history.
+update_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{
+        revs = {Pos2, [Rev2, Rev1]}
+    },
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+% Same as update_doc_basic/1 but for a _design doc id.
+update_ddoc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    DDocId = <<"_design/", UUID/binary>>,
+    Doc1 = #doc{
+        id = DDocId,
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{
+        revs = {Pos2, [Rev2, Rev1]}
+    },
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+% A replicated_changes write installs the supplied rev path verbatim; the
+% doc then reads back exactly as written (same revs, same body).
+update_doc_replicated({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [fabric2_util:uuid(), fabric2_util:uuid()]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc, [replicated_changes]),
+    ?assertEqual({ok, Doc}, fabric2_db:open_doc(Db, Doc#doc.id)).
+
+
+% Replicating a lower-sorting sibling branch (Rev2 < Rev3 by the sort
+% above) adds a conflict without changing the winner: open_doc still
+% returns Doc1 after Doc2 is written.
+update_doc_replicated_add_conflict({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+% Mirror of the add_conflict case: replicating a higher-sorting sibling
+% branch (Rev3 > Rev2) makes the new branch the winner, so open_doc
+% returns Doc2 after the second write.
+update_doc_replicated_changes_winner({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc2#doc.id)).
+
+
+% Replicating a path that extends an existing branch (4,[Rev4,Rev3,Rev2]
+% on top of 2,[Rev2,Rev1]) stitches the histories together, so the stored
+% rev path becomes {4, [Rev4, Rev3, Rev2, Rev1]}.
+update_doc_replicated_extension({Db, _}) ->
+    % No sort necessary and avoided on purpose to
+    % demonstrate that this is not sort dependent
+    Rev1 = fabric2_util:uuid(),
+    Rev2 = fabric2_util:uuid(),
+    Rev3 = fabric2_util:uuid(),
+    Rev4 = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {4, [Rev4, Rev3, Rev2]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {4, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    {ok, Doc3} = fabric2_db:open_doc(Db, Doc2#doc.id),
+    ?assertEqual({4, [Rev4, Rev3, Rev2, Rev1]}, Doc3#doc.revs),
+    ?assertEqual(Doc2#doc{revs = undefined}, Doc3#doc{revs = undefined}).
+
+
+% Re-replicating an already-known rev is a no-op: update_docs returns
+% {ok, []} (no new edits) and the stored doc is unchanged.
+update_doc_replicate_existing_rev({Db, _}) ->
+    Rev1 = fabric2_util:uuid(),
+    Rev2 = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    {ok, []} = fabric2_db:update_docs(Db, [Doc1], [replicated_changes]),
+    ?assertEqual({ok, Doc1}, fabric2_db:open_doc(Db, Doc1#doc.id)).
+
+
+% With two conflict branches in place (Rev3 branch wins since Rev3 > Rev2),
+% an interactive update targeting the winning branch extends that branch
+% and remains the winner.
+update_winning_conflict_branch({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Update the winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"baz">>, 2}]}
+    },
+    {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    % Assert we've got the correct winner
+    ?assertEqual({3, [Rev4, Rev3, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+% Updating the losing branch (Rev2 < Rev3) extends it to depth 3 which
+% then beats the depth-2 Rev3 branch, so the updated branch becomes the
+% new winner.
+update_non_winning_conflict_branch({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Update the non winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"baz">>, 2}]}
+    },
+    {ok, {3, Rev4}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    % Assert we've got the correct winner
+    ?assertEqual({3, [Rev4, Rev2, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+% Deleting a doc is just an update with deleted = true; the tombstone is
+% readable via open_doc with the [deleted] option.
+delete_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc2#doc{revs = {Pos2, [Rev2, Rev1]}},
+    ?assertEqual({ok, Doc3}, fabric2_db:open_doc(Db, Doc2#doc.id, [deleted])).
+
+
+% Deleting the winning branch of a conflicted doc promotes the surviving
+% (previously losing) branch, so open_doc now returns Doc2.
+delete_changes_winner({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+    % Delete the winning branch
+    Doc3 = Doc1#doc{
+        revs = {2, [Rev3, Rev1]},
+        deleted = true,
+        body = {[]}
+    },
+    {ok, {3, _}} = fabric2_db:update_doc(Db, Doc3),
+    ?assertEqual({ok, Doc2}, fabric2_db:open_doc(Db, Doc3#doc.id)).
+
+
+% Re-creating a deleted doc with revs = {0, []} succeeds and extends the
+% existing tombstoned branch, yielding rev path {3, [Rev3, Rev2, Rev1]}.
+recreate_doc_basic({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"state">>, 3}]}
+    },
+    {ok, {3, Rev3}} = fabric2_db:update_doc(Db, Doc3),
+    {ok, Doc4} = fabric2_db:open_doc(Db, Doc3#doc.id),
+    ?assertEqual({3, [Rev3, Rev2, Rev1]}, Doc4#doc.revs),
+    ?assertEqual(Doc3#doc{revs = undefined}, Doc4#doc{revs = undefined}).
+
+
+% Creating a brand new doc while claiming a prior rev must conflict.
+conflict_on_create_new_with_rev({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        revs = {1, [fabric2_util:uuid()]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+
+
+% Updating an existing (undeleted) doc without supplying its current rev
+% must conflict.
+conflict_on_update_with_no_rev({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {0, []},
+        body = {[{<<"state">>, 2}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc2)).
+
+
+% Creating a doc that is already marked deleted (and has no doc to
+% tombstone) must conflict.
+conflict_on_create_as_deleted({Db, _}) ->
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        deleted = true,
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc)).
+
+
+% Re-creating a deleted doc works (see recreate_doc_basic/1) but only as
+% an undeleted doc; recreating it as deleted again must conflict.
+conflict_on_recreate_as_deleted({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {0, []},
+        deleted = true,
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+
+
+% Extending a tombstone rev with deleted = false via a normal (rev-bearing)
+% update must conflict; recreation requires revs = {0, []} instead.
+conflict_on_extend_deleted({Db, _}) ->
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"state">>, 1}]}
+    },
+    {ok, {Pos1, Rev1}} = fabric2_db:update_doc(Db, Doc1),
+    Doc2 = Doc1#doc{
+        revs = {Pos1, [Rev1]},
+        deleted = true,
+        body = {[{<<"state">>, 2}]}
+    },
+    {ok, {Pos2, Rev2}} = fabric2_db:update_doc(Db, Doc2),
+    Doc3 = Doc1#doc{
+        revs = {Pos2, [Rev2]},
+        deleted = false,
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertThrow({error, conflict}, fabric2_db:update_doc(Db, Doc3)).
+
+
+% open_doc_revs/4 with explicit revs: known revs return {ok, Doc}, unknown
+% or ancestor-only revs return {{not_found, missing}, Rev}, and entirely
+% made-up revs are accepted and reported missing rather than erroring.
+open_doc_revs_basic({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], []),
+    ?assertEqual(Doc1, Doc3),
+
+    {ok, [{ok, Doc4}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev2}], []),
+    ?assertEqual(Doc2, Doc4),
+
+    % {1, Rev1} is only an ancestor (not a leaf), so it reports missing.
+    Revs = [{2, Rev3}, {2, Rev2}, {1, Rev1}],
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, Revs, []),
+    ?assert(length(Docs) == 3),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)),
+    ?assert(lists:member({{not_found, missing}, {1, Rev1}}, Docs)),
+
+    % Make sure crazy madeup revisions are accepted
+    MissingRevs = [{5, fabric2_util:uuid()}, {1, fabric2_util:uuid()}],
+    {ok, NFMissing} = fabric2_db:open_doc_revs(Db, DocId, MissingRevs, []),
+    ?assertEqual(2, length(NFMissing)),
+    lists:foreach(fun(MR) ->
+        ?assert(lists:member({{not_found, missing}, MR}, NFMissing))
+    end, MissingRevs).
+
+
+% open_doc_revs/4 with the atom 'all' returns every leaf revision of a
+% conflicted doc (both branches here).
+open_doc_revs_all({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, []),
+    ?assert(length(Docs) == 2),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+% With the [latest] option, asking for a leaf returns that leaf, while
+% asking for a shared ancestor ({1, Rev1}) returns the latest leaves of
+% every branch descending from it.
+open_doc_revs_latest({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    Opts = [latest],
+    {ok, [{ok, Doc3}]} = fabric2_db:open_doc_revs(Db, DocId, [{2, Rev3}], Opts),
+    ?assertEqual(Doc1, Doc3),
+
+    {ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, [{1, Rev1}], Opts),
+    ?assert(length(Docs) == 2),
+    ?assert(lists:member({ok, Doc1}, Docs)),
+    ?assert(lists:member({ok, Doc2}, Docs)).
+
+
+% get_missing_revs/2 over a conflicted doc: known revs yield no result,
+% unknown revs are reported as missing, and a missing rev deeper than the
+% stored tree also reports the stored leaves as possible ancestors.
+get_missing_revs_basic({Db, _}) ->
+    [Rev1, Rev2, Rev3] = lists:sort([
+            fabric2_util:uuid(),
+            fabric2_util:uuid(),
+            fabric2_util:uuid()
+        ]),
+    DocId = fabric2_util:uuid(),
+    Doc1 = #doc{
+        id = DocId,
+        revs = {2, [Rev3, Rev1]},
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc1, [replicated_changes]),
+    Doc2 = Doc1#doc{
+        revs = {2, [Rev2, Rev1]},
+        body = {[{<<"bar">>, <<"foo">>}]}
+    },
+    {ok, {2, _}} = fabric2_db:update_doc(Db, Doc2, [replicated_changes]),
+
+    % Check that we can find all revisions
+    AllRevs = [{1, Rev1}, {2, Rev2}, {2, Rev3}],
+    ?assertEqual(
+            {ok, []},
+            fabric2_db:get_missing_revs(Db, [{DocId, AllRevs}])
+        ),
+
+    % Check that a missing revision is found with no possible ancestors
+    MissingRev = {2, fabric2_util:uuid()},
+    ?assertEqual(
+            {ok, [{DocId, [MissingRev], []}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev]}])
+        ),
+
+    % Check that only a missing rev is returned
+    ?assertEqual(
+            {ok, [{DocId, [MissingRev], []}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingRev | AllRevs]}])
+        ),
+
+    % Check that we can find possible ancestors
+    MissingWithAncestors = {4, fabric2_util:uuid()},
+    PossibleAncestors = [{2, Rev2}, {2, Rev3}],
+    ?assertEqual(
+            {ok, [{DocId, [MissingWithAncestors], PossibleAncestors}]},
+            fabric2_db:get_missing_revs(Db, [{DocId, [MissingWithAncestors]}])
+        ).
+
+
+% For a doc id that does not exist, every requested rev comes back as
+% missing with no possible ancestors. Revs are passed in string form here
+% (rev_to_str), exercising the rev-parsing path of get_missing_revs/2.
+get_missing_revs_on_missing_doc({Db, _}) ->
+    Revs = lists:sort([
+            couch_doc:rev_to_str({1, fabric2_util:uuid()}),
+            couch_doc:rev_to_str({2, fabric2_util:uuid()}),
+            couch_doc:rev_to_str({800, fabric2_util:uuid()})
+        ]),
+    DocId = fabric2_util:uuid(),
+    {ok, Resp} = fabric2_db:get_missing_revs(Db, [{DocId, Revs}]),
+    ?assertMatch([{DocId, [_ | _], []}], Resp),
+    [{DocId, Missing, _}] = Resp,
+    MissingStrs = [couch_doc:rev_to_str(Rev) || Rev <- Missing],
+    ?assertEqual(Revs, lists:sort(MissingStrs)).
+
+
+% A _local doc id that was never written must report {not_found, missing}.
+open_missing_local_doc({Db, _}) ->
+    Result = fabric2_db:open_doc(Db, <<"_local/foo">>, []),
+    ?assertEqual({not_found, missing}, Result).
+
+
+% Local docs use simple integer revs: the first write yields {0, <<"1">>}
+% and the doc reads back with that rev.
+create_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc2).
+
+
+% Updating a local doc increments its integer rev: <<"1">> -> <<"2">>.
+update_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        body = {[{<<"whiz">>, <<"bang">>}]}
+    },
+    ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc2)),
+    {ok, Doc3} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+    ?assertEqual(Doc2#doc{revs = {0, [<<"2">>]}}, Doc3).
+
+
+% Deleting a local doc removes it entirely (rev resets to <<"0">> and a
+% subsequent open reports missing) — local docs keep no tombstones.
+delete_local_doc_basic({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        deleted = true,
+        body = {[]}
+    },
+    ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+    ?assertEqual(
+            {not_found, missing},
+            fabric2_db:open_doc(Db, LDocId)
+        ).
+
+
+% After deletion a local doc can be recreated from scratch; its rev
+% numbering restarts at <<"1">> as if it never existed.
+recreate_local_doc({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, []},
+        deleted = false,
+        body = {[{<<"ohai">>, <<"there">>}]}
+    },
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    Doc2 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        deleted = true,
+        body = {[]}
+    },
+    ?assertEqual({ok, {0, <<"0">>}}, fabric2_db:update_doc(Db, Doc2)),
+    ?assertEqual(
+            {not_found, missing},
+            fabric2_db:open_doc(Db, LDocId)
+        ),
+
+    ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc3} = fabric2_db:open_doc(Db, LDocId),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc3).
+
+
+% Local doc revs must parse as integers; non-numeric rev strings and
+% entirely malformed revs terms are both rejected.
+create_local_doc_bad_rev({Db, _}) ->
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, [<<"not a number">>]}
+    },
+    ?assertThrow(
+            {error, <<"Invalid rev format">>},
+            fabric2_db:update_doc(Db, Doc1)
+        ),
+
+    Doc2 = Doc1#doc{
+        revs = bad_bad_rev_roy_brown
+    },
+    ?assertThrow(
+            {error, <<"Invalid rev format">>},
+            fabric2_db:update_doc(Db, Doc2)
+        ).
+
+
+% Any numeric rev is accepted for a local doc — including revs that were
+% never written — and the stored rev is simply that number plus one.
+create_local_doc_random_rev({Db, _}) ->
+    % Local docs don't care what rev value is passed in,
+    % as long as it's a number.
+    UUID = fabric2_util:uuid(),
+    LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+    Doc1 = #doc{
+        id = LDocId,
+        revs = {0, [<<"42">>]},
+        body = {[{<<"state">>, 1}]}
+    },
+    ?assertEqual({ok, {0, <<"43">>}}, fabric2_db:update_doc(Db, Doc1)),
+    {ok, Doc2} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc1#doc{revs = {0, [<<"43">>]}}, Doc2),
+
+    Doc3 = Doc1#doc{
+        revs = {0, [<<"1234567890">>]},
+        body = {[{<<"state">>, 2}]}
+    },
+    ?assertEqual({ok, {0, <<"1234567891">>}}, fabric2_db:update_doc(Db, Doc3)),
+    {ok, Doc4} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc3#doc{revs = {0, [<<"1234567891">>]}}, Doc4),
+
+    % Even a rev lower than the stored one is accepted and incremented.
+    Doc5 = Doc1#doc{
+        revs = {0, [<<"1">>]},
+        body = {[{<<"state">>, 3}]}
+    },
+    ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc5)),
+    {ok, Doc6} = fabric2_db:open_doc(Db, LDocId, []),
+    ?assertEqual(Doc5#doc{revs = {0, [<<"2">>]}}, Doc6).
diff --git a/src/fabric/test/fabric2_doc_fold_tests.erl b/src/fabric/test/fabric2_doc_fold_tests.erl
new file mode 100644
index 0000000..caa5f92
--- /dev/null
+++ b/src/fabric/test/fabric2_doc_fold_tests.erl
@@ -0,0 +1,209 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_doc_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 50).
+
+
+% EUnit generator: all fold_docs cases share one database pre-populated
+% with ?DOC_COUNT docs by setup/0.
+doc_fold_test_() ->
+    {
+        "Test document fold operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun fold_docs_basic/1,
+                fun fold_docs_rev/1,
+                fun fold_docs_with_start_key/1,
+                fun fold_docs_with_end_key/1,
+                fun fold_docs_with_both_keys_the_same/1,
+                fun fold_docs_with_different_keys/1
+            ]}
+        }
+    }.
+
+
+% Create a database and write ?DOC_COUNT docs. Returns {Db, DocIdRevs, Ctx}
+% where DocIdRevs is the sorted list of {DocId, RevStr} — the same order
+% a forward fold is expected to produce.
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    DocIdRevs = lists:map(fun(Val) ->
+        DocId = fabric2_util:uuid(),
+        Doc = #doc{
+            id = DocId,
+            body = {[{<<"value">>, Val}]}
+        },
+        {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
+        {DocId, couch_doc:rev_to_str(Rev)}
+    end, lists:seq(1, ?DOC_COUNT)),
+    {Db, lists:sort(DocIdRevs), Ctx}.
+
+
+% Delete the test database and shut down the apps started in setup/0.
+cleanup({Db, _DocIdRevs, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+% A plain forward fold visits every doc in id order. fold_fun/2 prepends
+% rows, hence the lists:reverse before comparing.
+fold_docs_basic({Db, DocIdRevs, _}) ->
+    {ok, {?DOC_COUNT, Rows}} = fabric2_db:fold_docs(Db, fun fold_fun/2, []),
+    ?assertEqual(DocIdRevs, lists:reverse(Rows)).
+
+
+% A reverse-direction fold produces rows in reverse id order, which after
+% fold_fun's prepending equals the sorted list directly.
+fold_docs_rev({Db, DocIdRevs, _}) ->
+    Opts = [{dir, rev}],
+    {ok, {?DOC_COUNT, Rows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocIdRevs, Rows).
+
+
+% start_key is inclusive: starting at each doc id in turn must return that
+% doc and everything after it. Recurses over successive tails until a
+% single row remains; total count stays ?DOC_COUNT throughout.
+fold_docs_with_start_key({Db, DocIdRevs, _}) ->
+    {StartKey, _} = hd(DocIdRevs),
+    Opts = [{start_key, StartKey}],
+    {ok, {?DOC_COUNT, Rows}}
+            = fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(DocIdRevs, lists:reverse(Rows)),
+    if length(DocIdRevs) == 1 -> ok; true ->
+        fold_docs_with_start_key({Db, tl(DocIdRevs), nil})
+    end.
+
+
+% end_key is inclusive: ending at each doc id in turn must return that doc
+% and everything before it. Recurses over successively shorter prefixes.
+fold_docs_with_end_key({Db, DocIdRevs, _}) ->
+    RevDocIdRevs = lists:reverse(DocIdRevs),
+    {EndKey, _} = hd(RevDocIdRevs),
+    Opts = [{end_key, EndKey}],
+    {ok, {?DOC_COUNT, Rows}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts),
+    ?assertEqual(RevDocIdRevs, Rows),
+    if length(DocIdRevs) == 1 -> ok; true ->
+        fold_docs_with_end_key({Db, lists:reverse(tl(RevDocIdRevs)), nil})
+    end.
+
+
+% start_key == end_key must select exactly that one row, in every
+% direction/inclusiveness combination (see check_all_combos/4).
+fold_docs_with_both_keys_the_same({Db, DocIdRevs, _}) ->
+    lists:foreach(fun({DocId, _} = Row) ->
+        check_all_combos(Db, DocId, DocId, [Row])
+    end, DocIdRevs).
+
+
+% Property-style check: 500 random start/end key ranges, each verified in
+% every direction/inclusiveness combination.
+fold_docs_with_different_keys({Db, DocIdRevs, _}) ->
+    lists:foreach(fun(_) ->
+        {StartKey, EndKey, Rows} = pick_range(DocIdRevs),
+        check_all_combos(Db, StartKey, EndKey, Rows)
+    end, lists:seq(1, 500)).
+
+
+% Fold the [StartKey, EndKey] range four ways — fwd/rev crossed with
+% inclusive/exclusive end — and check each against the expected Rows
+% (given in forward order, StartKey/EndKey inclusive). An exclusive end
+% drops the last row (fwd) or first row (rev) only when that bound is an
+% actual key rather than undefined.
+check_all_combos(Db, StartKey, EndKey, Rows) ->
+    Opts1 = make_opts(fwd, StartKey, EndKey, true),
+    {ok, {?DOC_COUNT, Rows1}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts1),
+    ?assertEqual(lists:reverse(Rows), Rows1),
+
+    Opts2 = make_opts(fwd, StartKey, EndKey, false),
+    {ok, {?DOC_COUNT, Rows2}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts2),
+    Expect2 = if EndKey == undefined -> lists:reverse(Rows); true ->
+        lists:reverse(all_but_last(Rows))
+    end,
+    ?assertEqual(Expect2, Rows2),
+
+    Opts3 = make_opts(rev, StartKey, EndKey, true),
+    {ok, {?DOC_COUNT, Rows3}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts3),
+    ?assertEqual(Rows, Rows3),
+
+    Opts4 = make_opts(rev, StartKey, EndKey, false),
+    {ok, {?DOC_COUNT, Rows4}} =
+            fabric2_db:fold_docs(Db, fun fold_fun/2, [], Opts4),
+    Expect4 = if StartKey == undefined -> Rows; true ->
+        tl(Rows)
+    end,
+    ?assertEqual(Expect4, Rows4).
+
+
+
+% Build a fold option list. For fwd, {dir, fwd} is included only half the
+% time (it is the default, so both forms must behave identically); an
+% undefined key contributes no option; InclusiveEnd picks end_key vs
+% end_key_gt. For rev, the start/end keys are swapped and {dir, fwd} is
+% replaced by {dir, rev}. Note ++/-- are right-associative, so the last
+% expression parses as [{dir, rev}] ++ (BaseOpts -- [{dir, fwd}]).
+make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
+    DirOpts = case rand:uniform() =< 0.50 of
+        true -> [{dir, fwd}];
+        false -> []
+    end,
+    StartOpts = case StartKey of
+        undefined -> [];
+        <<_/binary>> -> [{start_key, StartKey}]
+    end,
+    EndOpts = case EndKey of
+        undefined -> [];
+        <<_/binary>> when InclusiveEnd -> [{end_key, EndKey}];
+        <<_/binary>> -> [{end_key_gt, EndKey}]
+    end,
+    DirOpts ++ StartOpts ++ EndOpts;
+make_opts(rev, StartKey, EndKey, InclusiveEnd) ->
+    BaseOpts = make_opts(fwd, EndKey, StartKey, InclusiveEnd),
+    [{dir, rev}] ++ BaseOpts -- [{dir, fwd}].
+
+
+% Drop the final element of a list; empty and single-element lists
+% both yield [].
+all_but_last([]) ->
+    [];
+all_but_last(Rows) ->
+    lists:droplast(Rows).
+
+
+% Pick a random {StartKey, EndKey, RowsInRange} triple from the sorted
+% row list; either bound may be undefined (open-ended).
+pick_range(DocIdRevs) ->
+    {StartKey, StartRow, RestRows} = pick_start_key(DocIdRevs),
+    {EndKey, EndRow, RowsBetween} = pick_end_key(RestRows),
+    {StartKey, EndKey, StartRow ++ RowsBetween ++ EndRow}.
+
+
+% 10% of the time: no start bound. Otherwise pick a random row as the
+% start key; returns {Key, [Row], RowsAfterIt}.
+pick_start_key(Rows) ->
+    case rand:uniform() =< 0.1 of
+        true ->
+            {undefined, [], Rows};
+        false ->
+            Idx = rand:uniform(length(Rows)),
+            {DocId, _} = Row = lists:nth(Idx, Rows),
+            {DocId, [Row], lists:nthtail(Idx, Rows)}
+    end.
+
+
+% 10% of the time (or when nothing is left): no end bound. Otherwise pick
+% a random row as the end key; returns {Key, [Row], RowsBeforeIt}.
+pick_end_key([]) ->
+    {undefined, [], []};
+
+pick_end_key(Rows) ->
+    case rand:uniform() =< 0.1 of
+        true ->
+            {undefined, [], Rows};
+        false ->
+            Idx = rand:uniform(length(Rows)),
+            {DocId, _} = Row = lists:nth(Idx, Rows),
+            Tail = lists:nthtail(Idx, Rows),
+            {DocId, [Row], Rows -- [Row | Tail]}
+    end.
+
+
+% fold_docs callback: accumulates {Total, [{Id, Rev} | ...]} with rows in
+% reverse visit order. The double bind of RowId is deliberate — it asserts
+% that each row's id equals its key.
+fold_fun({meta, Meta}, _Acc) ->
+    Total = fabric2_util:get_value(total, Meta),
+    {ok, {Total, []}};
+fold_fun({row, Row}, {Total, Rows}) ->
+    RowId = fabric2_util:get_value(id, Row),
+    RowId = fabric2_util:get_value(key, Row),
+    RowRev = fabric2_util:get_value(value, Row),
+    {ok, {Total, [{RowId, RowRev} | Rows]}};
+fold_fun(complete, Acc) ->
+    {ok, Acc}.
diff --git a/src/fabric/test/fabric2_fdb_tx_retry_tests.erl b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
new file mode 100644
index 0000000..c924ce5
--- /dev/null
+++ b/src/fabric/test/fabric2_fdb_tx_retry_tests.erl
@@ -0,0 +1,178 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb_tx_retry_tests).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+meck_setup() ->
+    meck:new(erlfdb),
+    meck:new(fabric2_txids),
+    EnvSt = case application:get_env(fabric, db) of
+        {ok, Db} -> {ok, Db};
+        undefined -> undefined
+    end,
+    application:set_env(fabric, db, not_a_real_db),
+    EnvSt.
+
+
+meck_cleanup(EnvSt) ->
+    case EnvSt of
+        {ok, Db} -> application:set_env(fabric, db, Db);
+        undefined -> application:unset_env(fabric, db)
+    end,
+    meck:unload().
+
+
+retry_test_() ->
+    {
+        foreach,
+        fun meck_setup/0,
+        fun meck_cleanup/1,
+        [
+            ?TDEF(read_only_no_retry),
+            ?TDEF(read_only_commit_unknown_result),
+            ?TDEF(run_on_first_try),
+            ?TDEF(retry_when_commit_conflict),
+            ?TDEF(retry_when_txid_not_found),
+            ?TDEF(no_retry_when_txid_found)
+        ]
+    }.
+
+
+read_only_no_retry() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 0 end),
+    meck:expect(erlfdb, get, fun(_, _) -> foo end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+    meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(foo, erlfdb:get(Tx, bar)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+read_only_commit_unknown_result() ->
+    % Not 100% certain that this would ever actually
+    % happen in the wild but might as well test that
+    % we don't blow up if it does.
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, _) -> foo end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> true end),
+    meck:expect(fabric2_txids, remove, fun(undefined) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(foo, erlfdb:get(Tx, bar)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+run_on_first_try() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> undefined end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, bang)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_commit_conflict() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1020 end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(fabric2_txids, create, fun(_, _) -> <<"a txid">> end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+        did_run
+    end),
+
+    ?assertEqual(did_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+retry_when_txid_not_found() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+    meck:expect(erlfdb, wait, fun(future) -> not_found end),
+    meck:expect(erlfdb, clear, fun(_, _) -> ok end),
+    meck:expect(erlfdb, is_read_only, fun(_) -> false end),
+    meck:expect(erlfdb, set, fun(_, <<"a txid">>, <<>>) -> ok end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    put('$fabric_tx_id', <<"a txid">>),
+    put('$fabric_tx_result', not_the_correct_result),
+
+    Result = fabric2_fdb:transactional(fun(Tx) ->
+        ?assertEqual(ok, erlfdb:clear(Tx, <<"foo">>)),
+        yay_not_skipped
+    end),
+
+    ?assertEqual(yay_not_skipped, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
+
+
+no_retry_when_txid_found() ->
+    meck:expect(erlfdb, transactional, fun(_Db, UserFun) ->
+        UserFun(not_a_real_transaction)
+    end),
+    meck:expect(erlfdb, get_last_error, fun() -> 1021 end),
+    meck:expect(erlfdb, get, fun(_, <<"a txid">>) -> future end),
+    meck:expect(erlfdb, wait, fun(future) -> <<>> end),
+    meck:expect(fabric2_txids, remove, fun(<<"a txid">>) -> ok end),
+
+    put('$fabric_tx_id', <<"a txid">>),
+    put('$fabric_tx_result', did_not_run),
+
+    Result = fabric2_fdb:transactional(fun(_Tx) ->
+        ?assert(false),
+        did_run
+    end),
+
+    ?assertEqual(did_not_run, Result),
+    ?assert(meck:validate([erlfdb, fabric2_txids])).
\ No newline at end of file
diff --git a/src/fabric/test/fabric2_trace_db_create_tests.erl b/src/fabric/test/fabric2_trace_db_create_tests.erl
new file mode 100644
index 0000000..09cc863
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_create_tests.erl
@@ -0,0 +1,46 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            [
+                fun create_db/0
+            ]
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    test_util:start_couch([fabric]).
+
+
+cleanup(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+
+create_db() ->
+    put(erlfdb_trace, <<"create db">>),
+    {ok, _Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_db_delete_tests.erl b/src/fabric/test/fabric2_trace_db_delete_tests.erl
new file mode 100644
index 0000000..ddbb2c8
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_delete_tests.erl
@@ -0,0 +1,49 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_delete_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun delete_db/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({_Db, Ctx}) ->
+    test_util:stop_couch(Ctx).
+
+
+delete_db({Db, _}) ->
+    put(erlfdb_trace, <<"delete db">>),
+    fabric2_server:remove(fabric2_db:name(Db)),
+    ok = fabric2_db:delete(fabric2_db:name(Db), []).
diff --git a/src/fabric/test/fabric2_trace_db_open_tests.erl b/src/fabric/test/fabric2_trace_db_open_tests.erl
new file mode 100644
index 0000000..71e3301
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_db_open_tests.erl
@@ -0,0 +1,50 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_db_open_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+trace_test_() ->
+    {
+        "Trace operation",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun open_db/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    put(erlfdb_trace, "starting fabric"),
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+open_db({Db, _}) ->
+    put(erlfdb_trace, <<"open db">>),
+    fabric2_server:remove(fabric2_db:name(Db)),
+    {ok, _Db} = fabric2_db:open(fabric2_db:name(Db), [{user_ctx, ?ADMIN_USER}]).
diff --git a/src/fabric/test/fabric2_trace_doc_create_tests.erl b/src/fabric/test/fabric2_trace_doc_create_tests.erl
new file mode 100644
index 0000000..1e0b47c
--- /dev/null
+++ b/src/fabric/test/fabric2_trace_doc_create_tests.erl
@@ -0,0 +1,86 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_trace_doc_create_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+doc_crud_test_() ->
+    {
+        "Test document CRUD operations",
+        {
+            setup,
+            fun setup/0,
+            fun cleanup/1,
+            {with, [
+                fun create_new_doc/1,
+                fun create_two_docs/1,
+                fun create_50_docs/1
+            ]}
+        }
+    }.
+
+
+setup() ->
+    Ctx = test_util:start_couch([fabric]),
+    {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+    {Db, Ctx}.
+
+
+cleanup({Db, Ctx}) ->
+    ok = fabric2_db:delete(fabric2_db:name(Db), []),
+    test_util:stop_couch(Ctx).
+
+
+create_new_doc({Db, _}) ->
+    put(erlfdb_trace, <<"one doc">>),
+    Doc = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"foo">>, <<"bar">>}]}
+    },
+    {ok, _} = fabric2_db:update_doc(Db, Doc).
+
+
+create_two_docs({Db, _}) ->
+    put(erlfdb_trace, <<"two docs">>),
+    Doc1 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"bam">>, <<"baz">>}]}
+    },
+    Doc2 = #doc{
+        id = fabric2_util:uuid(),
+        body = {[{<<"bang">>, <<"bargle">>}]}
+    },
+    {ok, _} = fabric2_db:update_docs(Db, [Doc1, Doc2]).
+
+
+create_50_docs({Db, _}) ->
+    lists:foreach(fun(_) ->
+        spawn_monitor(fun() ->
+            Name = io_lib:format("50 docs : ~w", [self()]),
+            put(erlfdb_trace, iolist_to_binary(Name)),
+            Docs = lists:map(fun(Val) ->
+                #doc{
+                    id = fabric2_util:uuid(),
+                    body = {[{<<"value">>, Val}]}
+                }
+            end, lists:seq(1, 50)),
+            {ok, _} = fabric2_db:update_docs(Db, Docs)
+        end)
+    end, lists:seq(1, 5)),
+    lists:foreach(fun(_) ->
+        receive {'DOWN', _, _, _, _} -> ok end
+    end, lists:seq(1, 5)).


[couchdb] 03/06: Initial fabric2 implementation on FoundationDB

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit ee2e4c8cd70f833f04c4504f6664e241fad5061f
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:29:33 2019 -0500

    Initial fabric2 implementation on FoundationDB
    
    This provides a base implementation of a fabric API backed by
    FoundationDB. While a lot of functionality is provided there are a
    number of places that still require work. An incomplete list includes:
    
      1. Document bodies are currently a single key/value
      2. Attachments are stored as a range of key/value pairs
      3. There is no support for indexing
      4. Request size limits are not enforced directly
      5. Auth is still backed by a legacy CouchDB database
      6. No support for before_doc_update/after_doc_read
      7. Various implementation shortcuts need to be expanded for full API
         support.
---
 FDB_NOTES.md                                       |   57 +
 src/couch/src/couch_att.erl                        |  661 ++++------
 src/couch/src/couch_doc.erl                        |   11 +
 src/fabric/src/fabric.app.src                      |    8 +-
 src/fabric/src/fabric2.hrl                         |   66 +
 src/fabric/src/{fabric.app.src => fabric2_app.erl} |   35 +-
 src/fabric/src/fabric2_db.erl                      | 1299 ++++++++++++++++++++
 src/fabric/src/fabric2_events.erl                  |   84 ++
 src/fabric/src/fabric2_fdb.erl                     | 1187 ++++++++++++++++++
 src/fabric/src/fabric2_server.erl                  |  104 ++
 src/fabric/src/fabric2_sup.erl                     |   47 +
 src/fabric/src/fabric2_txids.erl                   |  144 +++
 src/fabric/src/fabric2_util.erl                    |  203 +++
 13 files changed, 3490 insertions(+), 416 deletions(-)

diff --git a/FDB_NOTES.md b/FDB_NOTES.md
new file mode 100644
index 0000000..c0cdc8c
--- /dev/null
+++ b/FDB_NOTES.md
@@ -0,0 +1,57 @@
+Things of Note
+===
+
+
+1. If a replication sends us two revisions A and B where one is an
+   ancestor of the other, we likely have divergent behavior. However,
+   this should never happen, in theory.
+
+2. Multiple updates to the same document in a _bulk_docs (or if they
+   just happen to be in the same update batch in non-fdb CouchDB)
+   we likely have subtly different behavior.
+
+3. I'm relying on repeated reads in an fdb transaction to be "cheap"
+   in that the reads would be cached in the fdb_transaction object.
+   This needs to be checked for certainty but that appeared to
+   be how things behaved in testing.
+
+4. When attempting to create a doc from scratch in an interactive_edit
+   update, with revisions specified *and* attachment stubs, the reported
+   error is now a conflict. Previously the missing_stubs error was
+   raised earlier.
+
+5. There may be a difference in behavior if a) there are no VDU functions
+   set on a db and b) there are no design documents in a batch. This is because in
+   this situation we don't run the prep_and_validate code on pre-fdb
+   CouchDB. The new code always checks stubs before merging revision trees.
+   I'm sure the old way would fail somehow, but it would fail further on
+   which means we may have failed with a different reason (conflict, etc)
+   before we got to the next place we check for missing stubs.
+
+6. For multi-doc updates we'll need to investigate user versions on
+   versionstamps within a transaction. Also this likely prevents the
+   ability to have multiple updates to the same doc in a single
+   _bulk_docs transaction
+
+7. Document body storage needs to be implemented beyond the single
+   key/value approach.
+
+8. We'll want to look at how we currently apply open options to individual
+    elements of an open_revs call. Might turn out that we have to grab a
+    full FDI even if we could look up a rev directly. (i.e., revs_info
+    would require us having the entire FDI, however it'd be wasteful to return
+    all of that in an open_revs call, but bug compatibility ftw!)
+
+9. Is it possible that a server_admin can delete a db without being able
+    to open it? If so that's probably changed behavior.
+
+10. Getting the doc count for _all_docs on large active databases might be
+    a problem. If we allow range requests up to 5s, and we continue to return
+    the doc count total, we may have to play games with snapshot reads on
+    the doc count key or else it'll invalidate any _all_docs range requests.
+
+11. Revision infos need to track their size: if we want to maintain a database
+    size counter we'll want to store the size of a given doc body for each
+    revision so that we don't have to read the old body when updating the tree.
+
+12. Update sequences do not yet include an incarnation value.
\ No newline at end of file
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index a24de21..0dc5fa5 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -29,7 +29,7 @@
 -export([
     size_info/1,
     to_disk_term/1,
-    from_disk_term/2
+    from_disk_term/3
 ]).
 
 -export([
@@ -38,7 +38,7 @@
 ]).
 
 -export([
-    flush/2,
+    flush/3,
     foldl/3,
     range_foldl/5,
     foldl_decode/3,
@@ -46,11 +46,6 @@
 ]).
 
 -export([
-    upgrade/1,
-    downgrade/1
-]).
-
--export([
     max_attachment_size/0,
     validate_attachment_size/3
 ]).
@@ -58,137 +53,61 @@
 -compile(nowarn_deprecated_type).
 -export_type([att/0]).
 
--include_lib("couch/include/couch_db.hrl").
-
-
-%% Legacy attachment record. This is going to be phased out by the new proplist
-%% based structure. It's needed for now to allow code to perform lazy upgrades
-%% while the patch is rolled out to the cluster. Attachments passed as records
-%% will remain so until they are required to be represented as property lists.
-%% Once this has been widely deployed, this record will be removed entirely and
-%% property lists will be the main format.
--record(att, {
-    name :: binary(),
-    type :: binary(),
-    att_len :: non_neg_integer(),
-
-    %% length of the attachment in its identity form
-    %% (that is, without a content encoding applied to it)
-    %% differs from att_len when encoding /= identity
-    disk_len :: non_neg_integer(),
-
-    md5 = <<>> :: binary(),
-    revpos = 0 :: non_neg_integer(),
-    data :: stub | follows | binary() | {any(), any()} |
-            {follows, pid(), reference()} | fun(() -> binary()),
-
-    %% Encoding of the attachment
-    %% currently supported values are:
-    %%     identity, gzip
-    %% additional values to support in the future:
-    %%     deflate, compress
-    encoding = identity :: identity | gzip
-}).
-
-
-%% Extensible Attachment Type
-%%
-%% The following types describe the known properties for attachment fields
-%% encoded as property lists to allow easier upgrades. Values not in this list
-%% should be accepted at runtime but should be treated as opaque data as might
-%% be used by upgraded code. If you plan on operating on new data, please add
-%% an entry here as documentation.
-
-
-%% The name of the attachment is also used as the mime-part name for file
-%% downloads. These must be unique per document.
--type name_prop() :: {name, binary()}.
-
-
-%% The mime type of the attachment. This does affect compression of certain
-%% attachments if the type is found to be configured as a compressable type.
-%% This is commonly reserved for text/* types but could include other custom
-%% cases as well. See definition and use of couch_util:compressable_att_type/1.
--type type_prop() :: {type, binary()}.
-
-
-%% The attachment length is similar to disk-length but ignores additional
-%% encoding that may have occurred.
--type att_len_prop() :: {att_len, non_neg_integer()}.
-
-
-%% The size of the attachment as stored in a disk stream.
--type disk_len_prop() :: {disk_len, non_neg_integer()}.
-
-
-%% This is a digest of the original attachment data as uploaded by the client.
-%% it's useful for checking validity of contents against other attachment data
-%% as well as quick digest computation of the enclosing document.
--type md5_prop() :: {md5, binary()}.
-
 
--type revpos_prop() :: {revpos, 0}.
+-include_lib("couch/include/couch_db.hrl").
 
 
-%% This field is currently overloaded with just about everything. The
-%% {any(), any()} type is just there until I have time to check the actual
-%% values expected. Over time this should be split into more than one property
-%% to allow simpler handling.
--type data_prop() :: {
-    data, stub | follows | binary() | {any(), any()} |
-    {follows, pid(), reference()} | fun(() -> binary())
-}.
+-define(CURRENT_ATT_FORMAT, 0).
 
 
-%% We will occasionally compress our data. See type_prop() for more information
-%% on when this happens.
--type encoding_prop() :: {encoding, identity | gzip}.
+-type prop_name() ::
+    name |
+    type |
+    att_len |
+    disk_len |
+    md5 |
+    revpos |
+    data |
+    encoding.
 
 
--type attachment() :: [
-    name_prop() | type_prop() |
-    att_len_prop() | disk_len_prop() |
-    md5_prop() | revpos_prop() |
-    data_prop() | encoding_prop()
-].
+-type data_prop_type() ::
+    {loc, #{}, binary(), binary()} |
+    stub |
+    follows |
+    binary() |
+    {follows, pid(), reference()} |
+    fun(() -> binary()).
 
--type disk_att_v1() :: {
-    Name :: binary(),
-    Type :: binary(),
-    Sp :: any(),
-    AttLen :: non_neg_integer(),
-    RevPos :: non_neg_integer(),
-    Md5 :: binary()
-}.
 
--type disk_att_v2() :: {
-    Name :: binary(),
-    Type :: binary(),
-    Sp :: any(),
-    AttLen :: non_neg_integer(),
-    DiskLen :: non_neg_integer(),
-    RevPos :: non_neg_integer(),
-    Md5 :: binary(),
-    Enc :: identity | gzip
+-type att() :: #{
+    name := binary(),
+    type := binary(),
+    att_len := non_neg_integer() | undefined,
+    disk_len := non_neg_integer() | undefined,
+    md5 := binary() | undefined,
+    revpos := non_neg_integer(),
+    data := data_prop_type(),
+    encoding := identity | gzip | undefined,
+    headers := [{binary(), binary()}] | undefined
 }.
 
--type disk_att_v3() :: {Base :: tuple(), Extended :: list()}.
-
--type disk_att() :: disk_att_v1() | disk_att_v2() | disk_att_v3().
-
--type att() :: #att{} | attachment() | disk_att().
 
 new() ->
-    %% We construct a record by default for compatability. This will be
-    %% upgraded on demand. A subtle effect this has on all attachments
-    %% constructed via new is that it will pick up the proper defaults
-    %% from the #att record definition given above. Newer properties do
-    %% not support special default values and will all be treated as
-    %% undefined.
-    #att{}.
+    #{
+        name => <<>>,
+        type => <<>>,
+        att_len => undefined,
+        disk_len => undefined,
+        md5 => undefined,
+        revpos => 0,
+        data => undefined,
+        encoding => undefined,
+        headers => undefined
+    }.
 
 
--spec new([{atom(), any()}]) -> att().
+-spec new([{prop_name(), any()}]) -> att().
 new(Props) ->
     store(Props, new()).
 
@@ -197,71 +116,28 @@ new(Props) ->
            (atom(), att()) -> any().
 fetch(Fields, Att) when is_list(Fields) ->
     [fetch(Field, Att) || Field <- Fields];
-fetch(Field, Att) when is_list(Att) ->
-    case lists:keyfind(Field, 1, Att) of
-        {Field, Value} -> Value;
-        false -> undefined
-    end;
-fetch(name, #att{name = Name}) ->
-    Name;
-fetch(type, #att{type = Type}) ->
-    Type;
-fetch(att_len, #att{att_len = AttLen}) ->
-    AttLen;
-fetch(disk_len, #att{disk_len = DiskLen}) ->
-    DiskLen;
-fetch(md5, #att{md5 = Digest}) ->
-    Digest;
-fetch(revpos, #att{revpos = RevPos}) ->
-    RevPos;
-fetch(data, #att{data = Data}) ->
-    Data;
-fetch(encoding, #att{encoding = Encoding}) ->
-    Encoding;
-fetch(_, _) ->
-    undefined.
+fetch(Field, Att) ->
+    maps:get(Field, Att).
 
 
 -spec store([{atom(), any()}], att()) -> att().
 store(Props, Att0) ->
     lists:foldl(fun({Field, Value}, Att) ->
-        store(Field, Value, Att)
+        maps:update(Field, Value, Att)
     end, Att0, Props).
 
 
--spec store(atom(), any(), att()) -> att().
-store(Field, undefined, Att) when is_list(Att) ->
-    lists:keydelete(Field, 1, Att);
-store(Field, Value, Att) when is_list(Att) ->
-    lists:keystore(Field, 1, Att, {Field, Value});
-store(name, Name, Att) ->
-    Att#att{name = Name};
-store(type, Type, Att) ->
-    Att#att{type = Type};
-store(att_len, AttLen, Att) ->
-    Att#att{att_len = AttLen};
-store(disk_len, DiskLen, Att) ->
-    Att#att{disk_len = DiskLen};
-store(md5, Digest, Att) ->
-    Att#att{md5 = Digest};
-store(revpos, RevPos, Att) ->
-    Att#att{revpos = RevPos};
-store(data, Data, Att) ->
-    Att#att{data = Data};
-store(encoding, Encoding, Att) ->
-    Att#att{encoding = Encoding};
 store(Field, Value, Att) ->
-    store(Field, Value, upgrade(Att)).
+    maps:update(Field, Value, Att).
 
 
 -spec transform(atom(), fun(), att()) -> att().
 transform(Field, Fun, Att) ->
-    NewValue = Fun(fetch(Field, Att)),
-    store(Field, NewValue, Att).
+    maps:update_with(Field, Fun, Att).
 
 
-is_stub(Att) ->
-    stub == fetch(data, Att).
+is_stub(#{data := stub}) -> true;
+is_stub(#{}) -> false.
 
 
 %% merge_stubs takes all stub attachments and replaces them with on disk
@@ -275,8 +151,7 @@ merge_stubs(MemAtts, DiskAtts) ->
     merge_stubs(MemAtts, OnDisk, []).
 
 
-%% restore spec when R14 support is dropped
-%% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
+-spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
 merge_stubs([Att | Rest], OnDisk, Merged) ->
     case fetch(data, Att) of
         stub ->
@@ -308,14 +183,8 @@ size_info([]) ->
     {ok, []};
 size_info(Atts) ->
     Info = lists:map(fun(Att) ->
-        AttLen = fetch(att_len, Att),
-        case fetch(data, Att) of
-             {stream, StreamEngine} ->
-                 {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
-                 {SPos, AttLen};
-             {_, SPos} ->
-                 {SPos, AttLen}
-        end
+        [{loc, _Db, _DocId, AttId}, AttLen] = fetch([data, att_len], Att),
+        {AttId, AttLen}
     end, Atts),
     {ok, lists:usort(Info)}.
 
@@ -324,89 +193,44 @@ size_info(Atts) ->
 %% old format when possible. This should help make the attachment lazy upgrade
 %% as safe as possible, avoiding the need for complicated disk versioning
 %% schemes.
-to_disk_term(#att{} = Att) ->
-    {stream, StreamEngine} = fetch(data, Att),
-    {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
-    {
+to_disk_term(Att) ->
+    {loc, #{}, _DocId, AttId} = fetch(data, Att),
+    {?CURRENT_ATT_FORMAT, {
         fetch(name, Att),
         fetch(type, Att),
-        Sp,
+        AttId,
         fetch(att_len, Att),
         fetch(disk_len, Att),
         fetch(revpos, Att),
         fetch(md5, Att),
-        fetch(encoding, Att)
-    };
-to_disk_term(Att) ->
-    BaseProps = [name, type, data, att_len, disk_len, revpos, md5, encoding],
-    {Extended, Base} = lists:foldl(
-        fun
-            (data, {Props, Values}) ->
-                case lists:keytake(data, 1, Props) of
-                    {value, {_, {stream, StreamEngine}}, Other} ->
-                        {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
-                        {Other, [Sp | Values]};
-                    {value, {_, Value}, Other} ->
-                        {Other, [Value | Values]};
-                    false ->
-                        {Props, [undefined | Values]}
-                end;
-            (Key, {Props, Values}) ->
-                case lists:keytake(Key, 1, Props) of
-                    {value, {_, Value}, Other} -> {Other, [Value | Values]};
-                    false -> {Props, [undefined | Values]}
-                end
-        end,
-        {Att, []},
-        BaseProps
-    ),
-    {list_to_tuple(lists:reverse(Base)), Extended}.
-
-
-%% The new disk term format is a simple wrapper around the legacy format. Base
-%% properties will remain in a tuple while the new fields and possibly data from
-%% future extensions will be stored in a list of atom/value pairs. While this is
-%% slightly less efficient, future work should be able to make use of
-%% compression to remove these sorts of common bits (block level compression
-%% with something like a shared dictionary that is checkpointed every now and
-%% then).
-from_disk_term(StreamSrc, {Base, Extended})
-        when is_tuple(Base), is_list(Extended) ->
-    store(Extended, from_disk_term(StreamSrc, Base));
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=DiskLen,
-        md5=Md5,
-        revpos=RevPos,
-        data={stream, Stream},
-        encoding=upgrade_encoding(Enc)
-    };
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=AttLen,
-        md5=Md5,
-        revpos=RevPos,
-        data={stream, Stream}
-    };
-from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) ->
-    {ok, Stream} = open_stream(StreamSrc, Sp),
-    #att{
-        name=Name,
-        type=Type,
-        att_len=AttLen,
-        disk_len=AttLen,
-        md5= <<>>,
-        revpos=0,
-        data={stream, Stream}
-    }.
+        fetch(encoding, Att),
+        fetch(headers, Att)
+    }}.
+
+
+from_disk_term(#{} = Db, DocId, {?CURRENT_ATT_FORMAT, Props}) ->
+    {
+        Name,
+        Type,
+        AttId,
+        AttLen,
+        DiskLen,
+        RevPos,
+        Md5,
+        Encoding,
+        Headers
+    } = Props,
+    new([
+        {name, Name},
+        {type, Type},
+        {data, {loc, Db#{tx := undefined}, DocId, AttId}},
+        {att_len, AttLen},
+        {disk_len, DiskLen},
+        {revpos, RevPos},
+        {md5, Md5},
+        {encoding, Encoding},
+        {headers, Headers}
+    ]).
 
 
 %% from_json reads in embedded JSON attachments and creates usable attachment
@@ -433,8 +257,12 @@ stub_from_json(Att, Props) ->
     %% json object. See merge_stubs/3 for the stub check.
     RevPos = couch_util:get_value(<<"revpos">>, Props),
     store([
-        {md5, Digest}, {revpos, RevPos}, {data, stub}, {disk_len, DiskLen},
-        {att_len, EncodedLen}, {encoding, Encoding}
+        {data, stub},
+        {disk_len, DiskLen},
+        {att_len, EncodedLen},
+        {revpos, RevPos},
+        {md5, Digest},
+        {encoding, Encoding}
     ], Att).
 
 
@@ -443,8 +271,12 @@ follow_from_json(Att, Props) ->
     Digest = digest_from_json(Props),
     RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
     store([
-        {md5, Digest}, {revpos, RevPos}, {data, follows}, {disk_len, DiskLen},
-        {att_len, EncodedLen}, {encoding, Encoding}
+        {data, follows},
+        {disk_len, DiskLen},
+        {att_len, EncodedLen},
+        {revpos, RevPos},
+        {md5, Digest},
+        {encoding, Encoding}
     ], Att).
 
 
@@ -455,8 +287,10 @@ inline_from_json(Att, Props) ->
             Length = size(Data),
             RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
             store([
-                {data, Data}, {revpos, RevPos}, {disk_len, Length},
-                {att_len, Length}
+                {data, Data},
+                {disk_len, Length},
+                {att_len, Length},
+                {revpos, RevPos}
             ], Att)
     catch
         _:_ ->
@@ -466,7 +300,6 @@ inline_from_json(Att, Props) ->
     end.
 
 
-
 encoded_lengths_from_json(Props) ->
     Len = couch_util:get_value(<<"length">>, Props),
     case couch_util:get_value(<<"encoding">>, Props) of
@@ -488,9 +321,17 @@ digest_from_json(Props) ->
 
 
 to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
-    [Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch(
-        [name, data, disk_len, att_len, encoding, type, revpos, md5], Att
-    ),
+    #{
+        name := Name,
+        type := Type,
+        data := Data,
+        disk_len := DiskLen,
+        att_len := AttLen,
+        revpos := RevPos,
+        md5 := Md5,
+        encoding := Encoding,
+        headers := Headers
+    } = Att,
     Props = [
         {<<"content_type">>, Type},
         {<<"revpos">>, RevPos}
@@ -505,71 +346,74 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
         DataToFollow ->
             [{<<"length">>, DiskLen}, {<<"follows">>, true}];
         true ->
-            AttData = case Enc of
+            AttData = case Encoding of
                 gzip -> zlib:gunzip(to_binary(Att));
                 identity -> to_binary(Att)
             end,
             [{<<"data">>, base64:encode(AttData)}]
     end,
     EncodingProps = if
-        ShowEncoding andalso Enc /= identity ->
+        ShowEncoding andalso Encoding /= identity ->
             [
-                {<<"encoding">>, couch_util:to_binary(Enc)},
+                {<<"encoding">>, couch_util:to_binary(Encoding)},
                 {<<"encoded_length">>, AttLen}
             ];
         true ->
             []
     end,
-    HeadersProp = case fetch(headers, Att) of
+    HeadersProp = case Headers of
         undefined -> [];
         Headers -> [{<<"headers">>, Headers}]
     end,
     {Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
 
 
-flush(Db, Att) ->
-    flush_data(Db, fetch(data, Att), Att).
+flush(Db, DocId, Att1) ->
+    Att2 = read_data(fetch(data, Att1), Att1),
+    [
+        Data,
+        AttLen,
+        DiskLen,
+        ReqMd5,
+        Encoding
+    ] = fetch([data, att_len, disk_len, md5, encoding], Att2),
+
+    % Eventually, we'll check if we can compress this
+    % attachment here and do so if possible.
+
+    % If we were sent a gzip'ed attachment with no
+    % length data, we have to set it here.
+    Att3 = case AttLen of
+        undefined -> store(att_len, DiskLen, Att2);
+        _ -> Att2
+    end,
+
+    % If no encoding has been set, default to
+    % identity
+    Att4 = case Encoding of
+        undefined -> store(encoding, identity, Att3);
+        _ -> Att3
+    end,
+
+    case Data of
+        {loc, _, _, _} ->
+            % Already flushed
+            Att1;
+        _ when is_binary(Data) ->
+            IdentMd5 = get_identity_md5(Data, fetch(encoding, Att4)),
+            if ReqMd5 == undefined -> ok; true ->
+                couch_util:check_md5(IdentMd5, ReqMd5)
+            end,
+            Att5 = store(md5, IdentMd5, Att4),
+            fabric2_db:write_attachment(Db, DocId, Att5)
+    end.
 
 
-flush_data(Db, Data, Att) when is_binary(Data) ->
-    couch_db:with_stream(Db, Att, fun(OutputStream) ->
-        couch_stream:write(OutputStream, Data)
-    end);
-flush_data(Db, Fun, Att) when is_function(Fun) ->
-    AttName = fetch(name, Att),
-    MaxAttSize = max_attachment_size(),
-    case fetch(att_len, Att) of
-        undefined ->
-            couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                % Fun(MaxChunkSize, WriterFun) must call WriterFun
-                % once for each chunk of the attachment,
-                Fun(4096,
-                    % WriterFun({Length, Binary}, State)
-                    % WriterFun({0, _Footers}, State)
-                    % Called with Length == 0 on the last time.
-                    % WriterFun returns NewState.
-                    fun({0, Footers}, _Total) ->
-                        F = mochiweb_headers:from_binary(Footers),
-                        case mochiweb_headers:get_value("Content-MD5", F) of
-                        undefined ->
-                            ok;
-                        Md5 ->
-                            {md5, base64:decode(Md5)}
-                        end;
-                    ({Length, Chunk}, Total0) ->
-                        Total = Total0 + Length,
-                        validate_attachment_size(AttName, Total, MaxAttSize),
-                        couch_stream:write(OutputStream, Chunk),
-                        Total
-                    end, 0)
-            end);
-        AttLen ->
-            validate_attachment_size(AttName, AttLen, MaxAttSize),
-            couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                write_streamed_attachment(OutputStream, Fun, AttLen)
-            end)
-    end;
-flush_data(Db, {follows, Parser, Ref}, Att) ->
+read_data({loc, #{}, _DocId, _AttId}, Att) ->
+    % Attachment already written to fdb
+    Att;
+
+read_data({follows, Parser, Ref}, Att) ->
     ParserRef = erlang:monitor(process, Parser),
     Fun = fun() ->
         Parser ! {get_bytes, Ref, self()},
@@ -583,41 +427,72 @@ flush_data(Db, {follows, Parser, Ref}, Att) ->
         end
     end,
     try
-        flush_data(Db, Fun, store(data, Fun, Att))
+        read_data(Fun, store(data, Fun, Att))
     after
         erlang:demonitor(ParserRef, [flush])
     end;
-flush_data(Db, {stream, StreamEngine}, Att) ->
-    case couch_db:is_active_stream(Db, StreamEngine) of
-        true ->
-            % Already written
-            Att;
-        false ->
-            NewAtt = couch_db:with_stream(Db, Att, fun(OutputStream) ->
-                couch_stream:copy(StreamEngine, OutputStream)
-            end),
-            InMd5 = fetch(md5, Att),
-            OutMd5 = fetch(md5, NewAtt),
-            couch_util:check_md5(OutMd5, InMd5),
-            NewAtt
+
+read_data(Data, Att) when is_binary(Data) ->
+    Att;
+
+read_data(Fun, Att) when is_function(Fun) ->
+    [AttName, AttLen, InMd5] = fetch([name, att_len, md5], Att),
+    MaxAttSize = max_attachment_size(),
+    case AttLen of
+        undefined ->
+            % Fun(MaxChunkSize, WriterFun) must call WriterFun
+            % once for each chunk of the attachment.
+            WriterFun = fun
+                ({0, Footers}, {Len, Acc}) ->
+                    F = mochiweb_headers:from_binary(Footers),
+                    Md5 = case mochiweb_headers:get_value("Content-MD5", F) of
+                        undefined -> undefined;
+                        Value -> base64:decode(Value)
+                    end,
+                    Props0 = [
+                        {data, iolist_to_binary(lists:reverse(Acc))},
+                        {disk_len, Len}
+                    ],
+                    Props1 = if InMd5 /= md5_in_footer -> Props0; true ->
+                        [{md5, Md5} | Props0]
+                    end,
+                    store(Props1, Att);
+                ({ChunkLen, Chunk}, {Len, Acc}) ->
+                    NewLen = Len + ChunkLen,
+                    validate_attachment_size(AttName, NewLen, MaxAttSize),
+                    {NewLen, [Chunk | Acc]}
+            end,
+            Fun(8192, WriterFun, {0, []});
+        AttLen ->
+            validate_attachment_size(AttName, AttLen, MaxAttSize),
+            read_streamed_attachment(Att, Fun, AttLen, [])
     end.
 
 
-write_streamed_attachment(_Stream, _F, 0) ->
-    ok;
-write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 ->
+read_streamed_attachment(Att, _F, 0, Acc) ->
+    Bin = iolist_to_binary(lists:reverse(Acc)),
+    store([
+        {data, Bin},
+        {disk_len, size(Bin)}
+    ], Att);
+
+read_streamed_attachment(_Att, _F, LenLeft, _Acc) when LenLeft < 0 ->
     throw({bad_request, <<"attachment longer than expected">>});
-write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
-    Bin = try read_next_chunk(F, LenLeft)
+
+read_streamed_attachment(Att, F, LenLeft, Acc) when LenLeft > 0 ->
+    Bin = try
+        read_next_chunk(F, LenLeft)
     catch
         {mp_parser_died, normal} ->
             throw({bad_request, <<"attachment shorter than expected">>})
     end,
-    ok = couch_stream:write(Stream, Bin),
-    write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)).
+    Size = iolist_size(Bin),
+    read_streamed_attachment(Att, F, LenLeft - Size, [Bin | Acc]).
+
 
 read_next_chunk(F, _) when is_function(F, 0) ->
     F();
+
 read_next_chunk(F, LenLeft) when is_function(F, 1) ->
     F(lists:min([LenLeft, 16#2000])).
 
@@ -626,14 +501,17 @@ foldl(Att, Fun, Acc) ->
     foldl(fetch(data, Att), Att, Fun, Acc).
 
 
+foldl({loc, Db, DocId, AttId}, _Att, Fun, Acc) ->
+    Bin = fabric2_db:read_attachment(Db#{tx := undefined}, DocId, AttId),
+    Fun(Bin, Acc);
+
 foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
     Fun(Bin, Acc);
-foldl({stream, StreamEngine}, Att, Fun, Acc) ->
-    Md5 = fetch(md5, Att),
-    couch_stream:foldl(StreamEngine, Md5, Fun, Acc);
+
 foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
     Len = fetch(att_len, Att),
     fold_streamed_data(DataFun, Len, Fun, Acc);
+
 foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
     ParserRef = erlang:monitor(process, Parser),
     DataFun = fun() ->
@@ -654,19 +532,26 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
     end.
 
 
+range_foldl(Bin1, From, To, Fun, Acc) when is_binary(Bin1) ->
+    ReadLen = To - From,
+    Bin2 = case Bin1 of
+        _ when size(Bin1) < From -> <<>>;
+        <<_:From/binary, B2>> -> B2
+    end,
+    Bin3 = case Bin2 of
+        _ when size(Bin2) < ReadLen -> Bin2;
+        <<B3:ReadLen/binary, _/binary>> -> B3
+    end,
+    Fun(Bin3, Acc);
+
 range_foldl(Att, From, To, Fun, Acc) ->
-    {stream, StreamEngine} = fetch(data, Att),
-    couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
+    {loc, Db, DocId, AttId} = fetch(data, Att),
+    Bin = fabric2_db:read_attachment(Db, DocId, AttId),
+    range_foldl(Bin, From, To, Fun, Acc).
 
 
-foldl_decode(Att, Fun, Acc) ->
-    case fetch([data, encoding], Att) of
-        [{stream, StreamEngine}, Enc] ->
-            couch_stream:foldl_decode(
-                    StreamEngine, fetch(md5, Att), Enc, Fun, Acc);
-        [Fun2, identity] ->
-            fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
-    end.
+foldl_decode(_Att, _Fun, _Acc) ->
+    erlang:error(not_supported).
 
 
 to_binary(Att) ->
@@ -677,10 +562,8 @@ to_binary(Bin, _Att) when is_binary(Bin) ->
     Bin;
 to_binary(Iolist, _Att) when is_list(Iolist) ->
     iolist_to_binary(Iolist);
-to_binary({stream, _StreamEngine}, Att) ->
-    iolist_to_binary(
-        lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, []))
-    );
+to_binary({loc, Db, DocId, AttId}, _Att) ->
+    fabric2_db:read_attachmet(Db, DocId, AttId);
 to_binary(DataFun, Att) when is_function(DataFun)->
     Len = fetch(att_len, Att),
     iolist_to_binary(
@@ -695,46 +578,22 @@ to_binary(DataFun, Att) when is_function(DataFun)->
 
 fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
     Acc;
+
 fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
     Bin = RcvFun(),
     ResultAcc = Fun(Bin, Acc),
     fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
 
 
-%% Upgrade an attachment record to a property list on demand. This is a one-way
-%% operation as downgrading potentially truncates fields with important data.
--spec upgrade(#att{}) -> attachment().
-upgrade(#att{} = Att) ->
-    Map = lists:zip(
-        record_info(fields, att),
-        lists:seq(2, record_info(size, att))
-    ),
-    %% Don't store undefined elements since that is default
-    [{F, element(I, Att)} || {F, I} <- Map, element(I, Att) /= undefined];
-upgrade(Att) ->
-    Att.
-
-
-%% Downgrade is exposed for interactive convenience. In practice, unless done
-%% manually, upgrades are always one-way.
-downgrade(#att{} = Att) ->
-    Att;
-downgrade(Att) ->
-    #att{
-        name = fetch(name, Att),
-        type = fetch(type, Att),
-        att_len = fetch(att_len, Att),
-        disk_len = fetch(disk_len, Att),
-        md5 = fetch(md5, Att),
-        revpos = fetch(revpos, Att),
-        data = fetch(data, Att),
-        encoding = fetch(encoding, Att)
-    }.
-
-
-upgrade_encoding(true) -> gzip;
-upgrade_encoding(false) -> identity;
-upgrade_encoding(Encoding) -> Encoding.
+get_identity_md5(Bin, gzip) ->
+    Z = zlib:open(),
+    ok = zlib:inflateInit(Z, 16 + 15),
+    Inflated = zlib:inflate(Z, Bin),
+    ok = zlib:inflateEnd(Z),
+    ok = zlib:close(Z),
+    couch_hash:md5_hash(Inflated);
+get_identity_md5(Bin, _) ->
+    couch_hash:md5_hash(Bin).
 
 
 max_attachment_size() ->
@@ -753,18 +612,22 @@ validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
     ok.
 
 
-open_stream(StreamSrc, Data) ->
-    case couch_db:is_db(StreamSrc) of
-        true ->
-            couch_db:open_read_stream(StreamSrc, Data);
-        false ->
-            case is_function(StreamSrc, 1) of
-                true ->
-                    StreamSrc(Data);
-                false ->
-                    erlang:error({invalid_stream_source, StreamSrc})
-            end
-    end.
+%% is_compressible(Type) when is_binary(Type) ->
+%%     is_compressible(binary_to_list(Type));
+%% is_compressible(Type) ->
+%%     TypeExpList = re:split(
+%%         config:get("attachments", "compressible_types", ""),
+%%         "\\s*,\\s*",
+%%         [{return, list}]
+%%     ),
+%%     lists:any(
+%%         fun(TypeExp) ->
+%%             Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+%%                 "(?:\\s*;.*?)?\\s*", $$],
+%%             re:run(Type, Regexp, [caseless]) =/= nomatch
+%%         end,
+%%         [T || T <- TypeExpList, T /= []]
+%%     ).
 
 
 -ifdef(TEST).
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 4a49372..d33325e 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -374,6 +374,17 @@ rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) ->
         body_sp = undefined,
         seq = undefined,
         rev = {Pos, RevId}
+    };
+rev_info({#{} = RevInfo, {Pos, [RevId | _]}}) ->
+    #{
+        deleted := Deleted,
+        sequence := Sequence
+    } = RevInfo,
+    #rev_info{
+        deleted = Deleted,
+        body_sp = undefined,
+        seq = Sequence,
+        rev = {Pos, RevId}
     }.
 
 is_deleted(#full_doc_info{rev_tree=Tree}) ->
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
index d7686ca..20fbb1e 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric.app.src
@@ -13,7 +13,10 @@
 {application, fabric, [
     {description, "Routing and proxying layer for CouchDB cluster"},
     {vsn, git},
-    {registered, []},
+    {mod, {fabric2_app, []}},
+    {registered, [
+        fabric_server
+    ]},
     {applications, [
         kernel,
         stdlib,
@@ -22,6 +25,7 @@
         rexi,
         mem3,
         couch_log,
-        couch_stats
+        couch_stats,
+        erlfdb
     ]}
 ]}.
diff --git a/src/fabric/src/fabric2.hrl b/src/fabric/src/fabric2.hrl
new file mode 100644
index 0000000..de1d3d1
--- /dev/null
+++ b/src/fabric/src/fabric2.hrl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+-define(uint2bin(I), binary:encode_unsigned(I, little)).
+-define(bin2uint(I), binary:decode_unsigned(I, little)).
+
+% This will eventually be the `\xFFmetadataVersion` key that is
+% currently only available in FoundationDB master.
+%
+%  https://forums.foundationdb.org/t/a-new-tool-for-managing-layer-metadata/1191
+%
+% Until then we'll fake the same behavior using a randomish
+% key for tracking metadata changse. Once we get to the
+% new feature this will be more performant by updating
+% this define.
+-define(METADATA_VERSION_KEY, <<"$metadata_version_key$">>).
+
+
+% Prefix Definitions
+
+% Layer Level: (LayerPrefix, X, ...)
+
+-define(CLUSTER_CONFIG, 0).
+-define(ALL_DBS, 1).
+-define(DBS, 15).
+-define(TX_IDS, 255).
+
+% Database Level: (LayerPrefix, ?DBS, DbPrefix, X, ...)
+
+-define(DB_VERSION, 0).
+-define(DB_CONFIG, 16).
+-define(DB_STATS, 17).
+-define(DB_ALL_DOCS, 18).
+-define(DB_CHANGES, 19).
+-define(DB_REVS, 20).
+-define(DB_DOCS, 21).
+-define(DB_LOCAL_DOCS, 22).
+-define(DB_ATTS, 23).
+
+
+% Versions
+
+-define(CURR_REV_FORMAT, 0).
+
+
+% Misc constants
+
+-define(PDICT_DB_KEY, '$fabric_db_handle').
+-define(PDICT_LAYER_CACHE, '$fabric_layer_id').
+-define(PDICT_CHECKED_DB_IS_CURRENT, '$fabric_checked_db_is_current').
+-define(PDICT_TX_ID_KEY, '$fabric_tx_id').
+-define(PDICT_TX_RES_KEY, '$fabric_tx_result').
+-define(COMMIT_UNKNOWN_RESULT, 1021).
+
+
+-define(ATTACHMENT_CHUNK_SIZE, 100000).
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric2_app.erl
similarity index 64%
copy from src/fabric/src/fabric.app.src
copy to src/fabric/src/fabric2_app.erl
index d7686ca..da95acb 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric2_app.erl
@@ -10,18 +10,23 @@
 % License for the specific language governing permissions and limitations under
 % the License.
 
-{application, fabric, [
-    {description, "Routing and proxying layer for CouchDB cluster"},
-    {vsn, git},
-    {registered, []},
-    {applications, [
-        kernel,
-        stdlib,
-        config,
-        couch,
-        rexi,
-        mem3,
-        couch_log,
-        couch_stats
-    ]}
-]}.
+-module(fabric2_app).
+-behaviour(application).
+
+
+-export([
+    start/2,
+    stop/1
+]).
+
+
+start(_Type, StartArgs) ->
+    fabric2_sup:start_link(StartArgs).
+
+
+stop(_State) ->
+    case application:get_env(erlfdb, test_cluster_pid) of
+        {ok, Pid} -> Pid ! close;
+        _ -> ok
+    end,
+    ok.
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
new file mode 100644
index 0000000..02a18fa
--- /dev/null
+++ b/src/fabric/src/fabric2_db.erl
@@ -0,0 +1,1299 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_db).
+
+
+-export([
+    create/2,
+    open/2,
+    delete/2,
+
+    list_dbs/0,
+    list_dbs/1,
+
+    is_admin/1,
+    check_is_admin/1,
+    check_is_member/1,
+
+    name/1,
+    get_after_doc_read_fun/1,
+    get_before_doc_update_fun/1,
+    get_committed_update_seq/1,
+    get_compacted_seq/1,
+    get_compactor_pid/1,
+    get_db_info/1,
+    %% get_partition_info/2,
+    get_del_doc_count/1,
+    get_doc_count/1,
+    get_doc_count/2,
+    %% get_epochs/1,
+    %% get_filepath/1,
+    get_instance_start_time/1,
+    get_pid/1,
+    get_revs_limit/1,
+    get_security/1,
+    get_update_seq/1,
+    get_user_ctx/1,
+    get_uuid/1,
+    %% get_purge_seq/1,
+    %% get_oldest_purge_seq/1,
+    %% get_purge_infos_limit/1,
+
+    is_clustered/1,
+    is_db/1,
+    is_partitioned/1,
+    is_system_db/1,
+    is_system_db_name/1,
+
+    set_revs_limit/2,
+    %% set_purge_infos_limit/2,
+    set_security/2,
+    set_user_ctx/2,
+
+    ensure_full_commit/1,
+    ensure_full_commit/2,
+
+    %% load_validation_funs/1,
+    %% reload_validation_funs/1,
+
+    open_doc/2,
+    open_doc/3,
+    open_doc_revs/4,
+    %% open_doc_int/3,
+    get_doc_info/2,
+    get_full_doc_info/2,
+    get_full_doc_infos/2,
+    get_missing_revs/2,
+    %% get_design_doc/2,
+    %% get_design_docs/1,
+    %% get_design_doc_count/1,
+    %% get_purge_infos/2,
+
+    %% get_minimum_purge_seq/1,
+    %% purge_client_exists/3,
+
+    %% validate_docid/2,
+    %% doc_from_json_obj_validate/2,
+
+    update_doc/2,
+    update_doc/3,
+    update_docs/2,
+    update_docs/3,
+    %% delete_doc/3,
+
+    %% purge_docs/2,
+    %% purge_docs/3,
+
+    read_attachment/3,
+    write_attachment/3,
+
+    fold_docs/3,
+    fold_docs/4,
+    %% fold_local_docs/4,
+    %% fold_design_docs/4,
+    fold_changes/4,
+    fold_changes/5,
+    %% count_changes_since/2,
+    %% fold_purge_infos/4,
+    %% fold_purge_infos/5,
+
+    %% calculate_start_seq/3,
+    %% owner_of/2,
+
+    %% start_compact/1,
+    %% cancel_compact/1,
+    %% wait_for_compaction/1,
+    %% wait_for_compaction/2,
+
+    %% dbname_suffix/1,
+    %% normalize_dbname/1,
+    %% validate_dbname/1,
+
+    %% make_doc/5,
+    new_revid/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+-define(DBNAME_REGEX,
+    "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
+    "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
+).
+
+
+-define(RETURN(Term), throw({?MODULE, Term})).
+
+
+create(DbName, Options) ->
+    Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+        case fabric2_fdb:exists(TxDb) of
+            true ->
+                {error, file_exists};
+            false ->
+                fabric2_fdb:create(TxDb, Options)
+        end
+    end),
+    % We cache outside of the transaction so that we're sure
+    % that the transaction was committed.
+    case Result of
+        #{} = Db ->
+            ok = fabric2_server:store(Db),
+            {ok, Db#{tx := undefined}};
+        Error ->
+            Error
+    end.
+
+
+open(DbName, Options) ->
+    case fabric2_server:fetch(DbName) of
+        #{} = Db ->
+            {ok, maybe_set_user_ctx(Db, Options)};
+        undefined ->
+            Result = fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+                fabric2_fdb:open(TxDb, Options)
+            end),
+            % Cache outside the transaction retry loop
+            case Result of
+                #{} = Db ->
+                    ok = fabric2_server:store(Db),
+                    {ok, Db#{tx := undefined}};
+                Error ->
+                    Error
+            end
+    end.
+
+
+delete(DbName, Options) ->
+    % This will throw if the db does not exist
+    {ok, Db} = open(DbName, Options),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:delete(TxDb)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:remove(DbName)
+    end.
+
+
+list_dbs() ->
+    list_dbs([]).
+
+
+list_dbs(Options) ->
+    fabric2_fdb:transactional(fun(Tx) ->
+        fabric2_fdb:list_dbs(Tx, Options)
+    end).
+
+
+is_admin(Db) ->
+    % TODO: Need to re-consider couch_db_plugin:check_is_admin/1
+    {SecProps} = get_security(Db),
+    UserCtx = get_user_ctx(Db),
+    {Admins} = get_admins(SecProps),
+    is_authorized(Admins, UserCtx).
+
+
+check_is_admin(Db) ->
+    case is_admin(Db) of
+        true ->
+            ok;
+        false ->
+            UserCtx = get_user_ctx(Db),
+            Reason = <<"You are not a db or server admin.">>,
+            throw_security_error(UserCtx, Reason)
+    end.
+
+
+check_is_member(Db) ->
+    case is_member(Db) of
+        true ->
+            ok;
+        false ->
+            UserCtx = get_user_ctx(Db),
+            throw_security_error(UserCtx)
+    end.
+
+
+name(#{name := DbName}) ->
+    DbName.
+
+
+get_after_doc_read_fun(#{after_doc_read := AfterDocRead}) ->
+    AfterDocRead.
+
+
+get_before_doc_update_fun(#{before_doc_update := BeforeDocUpdate}) ->
+    BeforeDocUpdate.
+
+get_committed_update_seq(#{} = Db) ->
+    get_update_seq(Db).
+
+
+get_compacted_seq(#{} = Db) ->
+    get_update_seq(Db).
+
+
+get_compactor_pid(#{} = _Db) ->
+    nil.
+
+
+get_db_info(#{} = Db) ->
+    DbProps = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_info(TxDb)
+    end),
+
+    BaseProps = [
+        {cluster, {[{n, 0}, {q, 0}, {r, 0}, {w, 0}]}},
+        {compact_running, false},
+        {data_size, 0},
+        {db_name, name(Db)},
+        {disk_format_version, 0},
+        {disk_size, 0},
+        {instance_start_time, <<"0">>},
+        {purge_seq, 0}
+    ],
+
+    {ok, lists:foldl(fun({Key, Val}, Acc) ->
+        lists:keystore(Key, 1, Acc, {Key, Val})
+    end, BaseProps, DbProps)}.
+
+
+get_del_doc_count(#{} = Db) ->
+    get_doc_count(Db, <<"doc_del_count">>).
+
+
+get_doc_count(Db) ->
+    get_doc_count(Db, <<"doc_count">>).
+
+
+get_doc_count(Db, <<"_all_docs">>) ->
+    get_doc_count(Db, <<"doc_count">>);
+
+get_doc_count(DbName, <<"_design">>) ->
+    get_doc_count(DbName, <<"doc_design_count">>);
+
+get_doc_count(DbName, <<"_local">>) ->
+    get_doc_count(DbName, <<"doc_local_count">>);
+
+get_doc_count(Db, Key) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_stat(TxDb, Key)
+    end).
+
+
+get_instance_start_time(#{}) ->
+    0.
+
+
+get_pid(#{}) ->
+    nil.
+
+
+get_revs_limit(#{revs_limit := RevsLimit}) ->
+    RevsLimit.
+
+
+get_security(#{security_doc := SecurityDoc}) ->
+    SecurityDoc.
+
+
+get_update_seq(#{} = Db) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_last_change(TxDb)
+    end).
+
+
+get_user_ctx(#{user_ctx := UserCtx}) ->
+    UserCtx.
+
+
+get_uuid(#{uuid := UUID}) ->
+    UUID.
+
+
+is_clustered(#{}) ->
+    false.
+
+
+is_db(#{name := _}) ->
+    true;
+is_db(_) ->
+    false.
+
+
+is_partitioned(#{}) ->
+    false.
+
+
+is_system_db(#{name := DbName}) ->
+    is_system_db_name(DbName).
+
+
+is_system_db_name(DbName) when is_list(DbName) ->
+    is_system_db_name(?l2b(DbName));
+is_system_db_name(DbName) when is_binary(DbName) ->
+    Suffix = filename:basename(DbName),
+    case {filename:dirname(DbName), lists:member(Suffix, ?SYSTEM_DATABASES)} of
+        {<<".">>, Result} -> Result;
+        {_Prefix, false} -> false;
+        {Prefix, true} ->
+            ReOpts =  [{capture,none}, dollar_endonly],
+            re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
+    end.
+
+
+set_revs_limit(#{} = Db, RevsLimit) ->
+    check_is_admin(Db),
+    RevsLimBin = ?uint2bin(RevsLimit),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:set_config(TxDb, <<"revs_limit">>, RevsLimBin)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:store(Db#{revs_limit := RevsLimit})
+    end.
+
+
+set_security(#{} = Db, Security) ->
+    check_is_admin(Db),
+    ok = fabric2_util:validate_security_object(Security),
+    SecBin = ?JSON_ENCODE(Security),
+    Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:set_config(TxDb, <<"security_doc">>, SecBin)
+    end),
+    if Resp /= ok -> Resp; true ->
+        fabric2_server:store(Db#{security_doc := Security})
+    end.
+
+
+set_user_ctx(#{} = Db, UserCtx) ->
+    Db#{user_ctx := UserCtx}.
+
+
+ensure_full_commit(#{}) ->
+    {ok, 0}.
+
+
+ensure_full_commit(#{}, _Timeout) ->
+    {ok, 0}.
+
+
+open_doc(#{} = Db, DocId) ->
+    open_doc(Db, DocId, []).
+
+
+open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        case fabric2_fdb:get_local_doc(TxDb, DocId) of
+            #doc{} = Doc -> {ok, Doc};
+            Else -> Else
+        end
+    end);
+
+open_doc(#{} = Db, DocId, Options) ->
+    NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
+    NeedsTree = (Options -- NeedsTreeOpts /= Options),
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        Revs = case NeedsTree of
+            true -> fabric2_fdb:get_all_revs(TxDb, DocId);
+            false -> fabric2_fdb:get_winning_revs(TxDb, DocId, 1)
+        end,
+        if Revs == [] -> {not_found, missing}; true ->
+            #{winner := true} = RI = lists:last(Revs),
+            case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
+                #doc{} = Doc ->
+                    apply_open_doc_opts(Doc, Revs, Options);
+                Else ->
+                    Else
+            end
+        end
+    end).
+
+
+open_doc_revs(Db, DocId, Revs, Options) ->
+    Latest = lists:member(latest, Options),
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        AllRevInfos = fabric2_fdb:get_all_revs(TxDb, DocId),
+        RevTree = lists:foldl(fun(RI, TreeAcc) ->
+            RIPath = fabric2_util:revinfo_to_path(RI),
+            {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+            Merged
+        end, [], AllRevInfos),
+        {Found, Missing} = case Revs of
+            all ->
+                {couch_key_tree:get_all_leafs(RevTree), []};
+            _ when Latest ->
+                couch_key_tree:get_key_leafs(RevTree, Revs);
+            _ ->
+                couch_key_tree:get(RevTree, Revs)
+        end,
+        Docs = lists:map(fun({Value, {Pos, [Rev | RevPath]}}) ->
+            case Value of
+                ?REV_MISSING ->
+                    % We have the rev in our list but know nothing about it
+                    {{not_found, missing}, {Pos, Rev}};
+                _ ->
+                    RevInfo = #{
+                        rev_id => {Pos, Rev},
+                        rev_path => RevPath
+                    },
+                    case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
+                        #doc{} = Doc -> {ok, Doc};
+                        Else -> {Else, {Pos, Rev}}
+                    end
+            end
+        end, Found),
+        MissingDocs = [{{not_found, missing}, MRev} || MRev <- Missing],
+        {ok, Docs ++ MissingDocs}
+    end).
+
+
+%% Return a #doc_info{} for DocId, or not_found.
+get_doc_info(Db, DocId) ->
+    case get_full_doc_info(Db, DocId) of
+        not_found -> not_found;
+        FDI -> couch_doc:to_doc_info(FDI)
+    end.
+
+
+%% Build a legacy #full_doc_info{} record from the revinfo maps stored
+%% in FoundationDB. Returns not_found when no revisions exist.
+get_full_doc_info(Db, DocId) ->
+    RevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:get_all_revs(TxDb, DocId)
+    end),
+    if RevInfos == [] -> not_found; true ->
+        % The winner is sorted last; assert it is flagged as such
+        #{winner := true} = Winner = lists:last(RevInfos),
+        RevTree = lists:foldl(fun(RI, TreeAcc) ->
+            RIPath = fabric2_util:revinfo_to_path(RI),
+            {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+            Merged
+        end, [], RevInfos),
+        #full_doc_info{
+            id = DocId,
+            update_seq = fabric2_fdb:vs_to_seq(maps:get(sequence, Winner)),
+            deleted = maps:get(deleted, Winner),
+            rev_tree = RevTree
+        }
+    end.
+
+
+%% Fetch full doc infos for a list of ids in one transaction.
+get_full_doc_infos(Db, DocIds) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        lists:map(fun(DocId) ->
+            get_full_doc_info(TxDb, DocId)
+        end, DocIds)
+    end).
+
+
+%% For each {Id, Revs} pair, report which of Revs are unknown locally
+%% along with possible ancestor revisions the client could use to
+%% shorten a replication. Returns {ok, [{Id, Missing, PossibleAncestors}]}
+%% with entries omitted when nothing is missing.
+get_missing_revs(Db, JsonIdRevs) ->
+    IdRevs = [idrevs(IdR) || IdR <- JsonIdRevs],
+    % Read all revinfos for each distinct id in one transaction
+    AllRevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        lists:foldl(fun({Id, _Revs}, Acc) ->
+            case maps:is_key(Id, Acc) of
+                true ->
+                    Acc;
+                false ->
+                    RevInfos = fabric2_fdb:get_all_revs(TxDb, Id),
+                    Acc#{Id => RevInfos}
+            end
+        end, #{}, IdRevs)
+    end),
+    AllMissing = lists:flatmap(fun({Id, Revs}) ->
+        #{Id := RevInfos} = AllRevInfos,
+        Missing = try
+            % Filter the requested revs against each known revinfo;
+            % bail out early once every requested rev has been found
+            lists:foldl(fun(RevInfo, RevAcc) ->
+                if RevAcc /= [] -> ok; true ->
+                    throw(all_found)
+                end,
+                filter_found_revs(RevInfo, RevAcc)
+            end, Revs, RevInfos)
+        catch throw:all_found ->
+            []
+        end,
+        if Missing == [] -> []; true ->
+            PossibleAncestors = find_possible_ancestors(RevInfos, Missing),
+            [{Id, Missing, PossibleAncestors}]
+        end
+    end, IdRevs),
+    {ok, AllMissing}.
+
+
+%% Update a single document with default options.
+update_doc(Db, Doc) ->
+    update_doc(Db, Doc, []).
+
+
+%% Update a single document. Success returns {ok, NewRev}; any error
+%% response from update_docs/3 is re-thrown to the caller.
+update_doc(Db, Doc, Options) ->
+    case update_docs(Db, [Doc], Options) of
+        {ok, [{ok, NewRev}]} ->
+            {ok, NewRev};
+        {ok, [{{_Id, _Rev}, Error}]} ->
+            throw(Error);
+        {error, [{{_Id, _Rev}, Error}]} ->
+            throw(Error);
+        {error, [Error]} ->
+            throw(Error);
+        {ok, []} ->
+            % replication success
+            {Pos, [RevId | _]} = Doc#doc.revs,
+            {ok, {Pos, RevId}}
+    end.
+
+
+%% Update a batch of documents with default options.
+update_docs(Db, Docs) ->
+    update_docs(Db, Docs, []).
+
+
+%% Update a batch of documents.
+%%
+%% Interactive updates run in a single transaction; replicated_changes
+%% updates run one transaction per doc. Per-doc error tuples of the
+%% form {#doc{}, Error} are normalized to {{Id, RevId}, Error}.
+update_docs(Db, Docs, Options) ->
+    Resps0 = case lists:member(replicated_changes, Options) of
+        false ->
+            fabric2_fdb:transactional(Db, fun(TxDb) ->
+                update_docs_interactive(TxDb, Docs, Options)
+            end);
+        true ->
+            lists:map(fun(Doc) ->
+                fabric2_fdb:transactional(Db, fun(TxDb) ->
+                    update_doc_int(TxDb, Doc, Options)
+                end)
+            end, Docs)
+    end,
+    % Convert errors
+    Resps1 = lists:map(fun(Resp) ->
+        case Resp of
+            {#doc{} = Doc, Error} ->
+                #doc{
+                    id = DocId,
+                    revs = Revs
+                } = Doc,
+                RevId = case Revs of
+                    {RevPos, [Rev | _]} -> {RevPos, Rev};
+                    {0, []} -> {0, <<>>}
+                end,
+                {{DocId, RevId}, Error};
+            Else ->
+                Else
+        end
+    end, Resps0),
+    case lists:member(replicated_changes, Options) of
+        true ->
+            % {ok, []} means "already known", which replication ignores
+            {ok, [R || R <- Resps1, R /= {ok, []}]};
+        false ->
+            % Overall status is error if any single update failed
+            Status = lists:foldl(fun(Resp, Acc) ->
+                case Resp of
+                    {ok, _} -> Acc;
+                    _ -> error
+                end
+            end, ok, Resps1),
+            {Status, Resps1}
+    end.
+
+
+%% Read a stored attachment body by its id within a transaction.
+read_attachment(Db, DocId, AttId) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:read_attachment(TxDb, DocId, AttId)
+    end).
+
+
+%% Write an attachment's data to FoundationDB and return the Att
+%% record with its data replaced by a {loc, ...} reference.
+write_attachment(Db, DocId, Att) ->
+    Data = couch_att:fetch(data, Att),
+    {ok, AttId} = fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:write_attachment(TxDb, DocId, Data)
+    end),
+    couch_att:store(data, {loc, Db, DocId, AttId}, Att).
+
+
+%% Fold over all docs with default options.
+fold_docs(Db, UserFun, UserAcc) ->
+    fold_docs(Db, UserFun, UserAcc, []).
+
+
+%% Fold UserFun over all docs inside a single transaction.
+fold_docs(Db, UserFun, UserAcc, Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:fold_docs(TxDb, UserFun, UserAcc, Options)
+    end).
+
+
+%% Fold over the changes feed with default options.
+fold_changes(Db, SinceSeq, UserFun, UserAcc) ->
+    fold_changes(Db, SinceSeq, UserFun, UserAcc, []).
+
+
+%% Fold UserFun over changes since SinceSeq inside a single transaction.
+fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
+    fabric2_fdb:transactional(Db, fun(TxDb) ->
+        fabric2_fdb:fold_changes(TxDb, SinceSeq, UserFun, UserAcc, Options)
+    end).
+
+
+%% Compute the next revision id for Doc and prepend it to the rev path,
+%% returning the doc with revs = {OldStart + 1, [NewRev | OldRevs]}.
+%% The rev hash is an md5 over the deleted flag, previous rev, body and
+%% attachment digests, matching classic CouchDB revid generation.
+new_revid(Doc) ->
+    #doc{
+        body = Body,
+        revs = {OldStart, OldRevs},
+        atts = Atts,
+        deleted = Deleted
+    } = Doc,
+
+    % Collect {Name, Type, Md5} for every attachment that has a digest
+    DigestedAtts = lists:foldl(fun(Att, Acc) ->
+        [N, T, M] = couch_att:fetch([name, type, md5], Att),
+        case M == <<>> of
+            true -> Acc;
+            false -> [{N, T, M} | Acc]
+        end
+    end, [], Atts),
+
+    Rev = case DigestedAtts of
+        Atts2 when length(Atts) =/= length(Atts2) ->
+            % We must have old style non-md5 attachments
+            list_to_binary(integer_to_list(couch_util:rand32()));
+        Atts2 ->
+            OldRev = case OldRevs of [] -> 0; [OldRev0 | _] -> OldRev0 end,
+            SigTerm = [Deleted, OldStart, OldRev, Body, Atts2],
+            couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]))
+    end,
+
+    Doc#doc{revs = {OldStart + 1, [Rev | OldRevs]}}.
+
+
+%% Apply a #user_ctx{} from Options onto Db when present; otherwise
+%% return Db unchanged.
+maybe_set_user_ctx(Db, Options) ->
+    case fabric2_util:get_value(user_ctx, Options) of
+        #user_ctx{} = UserCtx ->
+            set_user_ctx(Db, UserCtx);
+        undefined ->
+            Db
+    end.
+
+
+%% True if the current user context may read Db: admins always, anyone
+%% for a public db, otherwise only users matching the members section
+%% of the security doc.
+is_member(Db) ->
+    {SecProps} = get_security(Db),
+    case is_admin(Db) of
+        true ->
+            true;
+        false ->
+            case is_public_db(SecProps) of
+                true ->
+                    true;
+                false ->
+                    {Members} = get_members(SecProps),
+                    UserCtx = get_user_ctx(Db),
+                    is_authorized(Members, UserCtx)
+            end
+    end.
+
+
+%% True if UserCtx matches Group by role (including the implicit
+%% "_admin" role) or by name.
+is_authorized(Group, UserCtx) ->
+    #user_ctx{
+        name = UserName,
+        roles = UserRoles
+    } = UserCtx,
+    Names = fabric2_util:get_value(<<"names">>, Group, []),
+    Roles = fabric2_util:get_value(<<"roles">>, Group, []),
+    case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
+        true ->
+            true;
+        false ->
+            check_security(names, UserName, Names)
+    end.
+
+
+%% Role check: true when the user's roles intersect the allowed roles.
+check_security(roles, [], _) ->
+    false;
+check_security(roles, UserRoles, Roles) ->
+    UserRolesSet = ordsets:from_list(UserRoles),
+    RolesSet = ordsets:from_list(Roles),
+    not ordsets:is_disjoint(UserRolesSet, RolesSet);
+% Name check: anonymous (null) users and empty name lists never match.
+check_security(names, _, []) ->
+    false;
+check_security(names, null, _) ->
+    false;
+check_security(names, UserName, Names) ->
+    lists:member(UserName, Names).
+
+
+%% Throw the appropriate security error for UserCtx; anonymous users
+%% get a "not authorized" message, named users a "not allowed" one.
+throw_security_error(#user_ctx{name = null} = UserCtx) ->
+    Reason = <<"You are not authorized to access this db.">>,
+    throw_security_error(UserCtx, Reason);
+throw_security_error(#user_ctx{name = _} = UserCtx) ->
+    Reason = <<"You are not allowed to access this db.">>,
+    throw_security_error(UserCtx, Reason).
+
+
+throw_security_error(#user_ctx{} = UserCtx, Reason) ->
+    Error = security_error_type(UserCtx),
+    throw({Error, Reason}).
+
+
+%% Anonymous users get 401 unauthorized, named users 403 forbidden.
+security_error_type(#user_ctx{name = null}) ->
+    unauthorized;
+security_error_type(#user_ctx{name = _}) ->
+    forbidden.
+
+
+%% A db is public when its members section lists no names and no roles.
+is_public_db(SecProps) ->
+    {Members} = get_members(SecProps),
+    Names = fabric2_util:get_value(<<"names">>, Members, []),
+    Roles = fabric2_util:get_value(<<"roles">>, Members, []),
+    Names =:= [] andalso Roles =:= [].
+
+
+%% Admins section of the security doc, defaulting to empty.
+get_admins(SecProps) ->
+    fabric2_util:get_value(<<"admins">>, SecProps, {[]}).
+
+
+%% Members section of the security doc, defaulting to empty.
+get_members(SecProps) ->
+    % we fallback to readers here for backwards compatibility
+    case fabric2_util:get_value(<<"members">>, SecProps) of
+        undefined ->
+            fabric2_util:get_value(<<"readers">>, SecProps, {[]});
+        Members ->
+            Members
+    end.
+
+
+%% Decorate an opened Doc's meta field according to the open options
+%% (revs_info, conflicts, deleted_conflicts, local_seq). Revs is the
+%% full list of revinfo maps for the document. Deleted docs are
+%% reported as {not_found, deleted} unless the `deleted` option is set.
+apply_open_doc_opts(Doc, Revs, Options) ->
+    IncludeRevsInfo = lists:member(revs_info, Options),
+    IncludeConflicts = lists:member(conflicts, Options),
+    IncludeDelConflicts = lists:member(deleted_conflicts, Options),
+    IncludeLocalSeq = lists:member(local_seq, Options),
+    ReturnDeleted = lists:member(deleted, Options),
+
+    % This revs_info becomes fairly useless now that we're
+    % not keeping old document bodies around...
+    Meta1 = if not IncludeRevsInfo -> []; true ->
+        {Pos, [Rev | RevPath]} = Doc#doc.revs,
+        RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
+        [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
+    end,
+
+    % Live conflicts: non-winning, non-deleted leaf revisions
+    Meta2 = if not IncludeConflicts -> []; true ->
+        Conflicts = [RI || RI = #{winner := false, deleted := false} <- Revs],
+        if Conflicts == [] -> []; true ->
+            ConflictRevs = [maps:get(rev_id, RI) || RI <- Conflicts],
+            [{conflicts, ConflictRevs}]
+        end
+    end,
+
+    % Deleted conflicts: non-winning, deleted leaf revisions
+    Meta3 = if not IncludeDelConflicts -> []; true ->
+        DelConflicts = [RI || RI = #{winner := false, deleted := true} <- Revs],
+        if DelConflicts == [] -> []; true ->
+            DelConflictRevs = [maps:get(rev_id, RI) || RI <- DelConflicts],
+            [{deleted_conflicts, DelConflictRevs}]
+        end
+    end,
+
+    % The winner (sorted last in Revs) carries the doc's update sequence
+    Meta4 = if not IncludeLocalSeq -> []; true ->
+        #{winner := true, sequence := SeqVS} = lists:last(Revs),
+        [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
+    end,
+
+    case Doc#doc.deleted and not ReturnDeleted of
+        true ->
+            {not_found, deleted};
+        false ->
+            {ok, Doc#doc{
+                meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4
+            }}
+    end.
+
+
+%% Remove from Revs any rev id that lies on the revision path described
+%% by RevInfo (i.e. revs we already know about), keeping the rest.
+filter_found_revs(RevInfo, Revs) ->
+    #{
+        rev_id := {Pos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    FullRevPath = [Rev | RevPath],
+    lists:flatmap(fun({FindPos, FindRev} = RevIdToFind) ->
+        if FindPos > Pos -> [RevIdToFind]; true ->
+            % Add 1 because lists:nth is 1 based
+            Idx = Pos - FindPos + 1,
+            case Idx > length(FullRevPath) of
+                true ->
+                    % Requested rev is older than our stored path
+                    [RevIdToFind];
+                false ->
+                    case lists:nth(Idx, FullRevPath) == FindRev of
+                        true -> [];
+                        false -> [RevIdToFind]
+                    end
+            end
+        end
+    end, Revs).
+
+
+find_possible_ancestors(RevInfos, MissingRevs) ->
+    % Find any revinfos that are possible ancestors
+    % of the missing revs. A possible ancestor is
+    % any rev that has a start position less than
+    % any missing revision. Stated alternatively,
+    % find any revinfo that could theoretically
+    % extended to be one or more of the missing
+    % revisions.
+    %
+    % Since we are looking at any missing revision
+    % we can just compare against the maximum missing
+    % start position.
+    MaxMissingPos = case MissingRevs of
+        [] -> 0;
+        [_ | _] -> lists:max([Start || {Start, _Rev} <- MissingRevs])
+    end,
+    lists:flatmap(fun(RevInfo) ->
+        #{rev_id := {RevPos, _} = RevId} = RevInfo,
+        case RevPos < MaxMissingPos of
+            true -> [RevId];
+            false -> []
+        end
+    end, RevInfos).
+
+
+%% Dispatch a single doc update to the local-doc, replicated or
+%% interactive code path. The ?RETURN macro used by those paths throws
+%% {?MODULE, Return}, which is converted back to a plain return here.
+update_doc_int(#{} = Db, #doc{} = Doc, Options) ->
+    IsLocal = case Doc#doc.id of
+        <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+    IsReplicated = lists:member(replicated_changes, Options),
+    try
+        case {IsLocal, IsReplicated} of
+            {false, false} -> update_doc_interactive(Db, Doc, Options);
+            {false, true} -> update_doc_replicated(Db, Doc, Options);
+            {true, _} -> update_local_doc(Db, Doc, Options)
+        end
+    catch throw:{?MODULE, Return} ->
+        Return
+    end.
+
+
+%% Interactive batch update: tag each doc with a unique ref, issue all
+%% winning-rev reads up front as futures, then update docs in order.
+update_docs_interactive(Db, Docs0, Options) ->
+    Docs = tag_docs(Docs0),
+    Futures = get_winning_rev_futures(Db, Docs),
+    {Result, _} = lists:mapfoldl(fun(Doc, SeenIds) ->
+        try
+            update_docs_interactive(Db, Doc, Options, Futures, SeenIds)
+        catch throw:{?MODULE, Return} ->
+            {Return, SeenIds}
+        end
+    end, [], Docs),
+    Result.
+
+
+%% Local docs bypass the rev-tree machinery entirely.
+update_docs_interactive(Db, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
+        Options, _Futures, SeenIds) ->
+    {update_local_doc(Db, Doc, Options), SeenIds};
+
+%% A second update to the same id within one batch is a conflict.
+update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
+    case lists:member(Doc#doc.id, SeenIds) of
+        true ->
+            {{error, conflict}, SeenIds};
+        false ->
+            Future = maps:get(doc_tag(Doc), Futures),
+            case update_doc_interactive(Db, Doc, Future, Options) of
+                {ok, _} = Resp ->
+                    {Resp, [Doc#doc.id | SeenIds]};
+                _ = Resp ->
+                    {Resp, SeenIds}
+            end
+    end.
+
+
+update_doc_interactive(Db, Doc0, Options) ->
+    % Get the current winning revision. This is needed
+    % regardless of which branch we're updating. The extra
+    % revision we're grabbing is an optimization to
+    % save us a round trip if we end up deleting
+    % the winning revision branch.
+    NumRevs = if Doc0#doc.deleted -> 2; true -> 1 end,
+    Future = fabric2_fdb:get_winning_revs_future(Db, Doc0#doc.id, NumRevs),
+    update_doc_interactive(Db, Doc0, Future, Options).
+
+
+%% Core interactive update. Waits on the winning-rev future, validates
+%% the caller-supplied rev against the current state, computes the new
+%% revision and writes the doc plus any winner/non-winner revinfo
+%% bookkeeping. Conflicts are signalled via ?RETURN({error, conflict}),
+%% which throws out to update_doc_int / update_docs_interactive.
+update_doc_interactive(Db, Doc0, Future, _Options) ->
+    RevInfos = fabric2_fdb:get_winning_revs_wait(Db, Future),
+    {Winner, SecondPlace} = case RevInfos of
+        [] -> {not_found, not_found};
+        [WRI] -> {WRI, not_found};
+        [WRI, SPRI] -> {WRI, SPRI}
+    end,
+    WinnerRevId = case Winner of
+        not_found ->
+            {0, <<>>};
+        _ ->
+            case maps:get(deleted, Winner) of
+                true -> {0, <<>>};
+                false -> maps:get(rev_id, Winner)
+            end
+    end,
+
+    % Check that a revision was specified if required
+    Doc0RevId = doc_to_revid(Doc0),
+    if Doc0RevId /= {0, <<>>} orelse WinnerRevId == {0, <<>>} -> ok; true ->
+        ?RETURN({error, conflict})
+    end,
+
+    % Check that we're not trying to create a deleted doc
+    if Doc0RevId /= {0, <<>>} orelse not Doc0#doc.deleted -> ok; true ->
+        ?RETURN({error, conflict})
+    end,
+
+    % Get the target revision to update
+    Target = case Doc0RevId == WinnerRevId of
+        true ->
+            Winner;
+        false ->
+            case fabric2_fdb:get_non_deleted_rev(Db, Doc0#doc.id, Doc0RevId) of
+                #{deleted := false} = Target0 ->
+                    Target0;
+                not_found ->
+                    % Either a missing revision or a deleted
+                    % revision. Either way a conflict. Note
+                    % that we get not_found for a deleted revision
+                    % because we only check for the non-deleted
+                    % key in fdb
+                    ?RETURN({error, conflict})
+            end
+    end,
+
+    % When recreating a deleted document we want to extend
+    % the winning revision branch rather than create a
+    % new branch. If we did not do this we could be
+    % recreating into a state that previously existed.
+    Doc1 = case Winner of
+        #{deleted := true} when not Doc0#doc.deleted ->
+            {WinnerRevPos, WinnerRev} = maps:get(rev_id, Winner),
+            WinnerRevPath = maps:get(rev_path, Winner),
+            Doc0#doc{revs = {WinnerRevPos, [WinnerRev | WinnerRevPath]}};
+        _ ->
+            Doc0
+    end,
+
+    % Validate the doc update and create the
+    % new revinfo map
+    Doc2 = prep_and_validate(Db, Doc1, Target),
+    #doc{
+        deleted = NewDeleted,
+        revs = {NewRevPos, [NewRev | NewRevPath]}
+    } = Doc3 = new_revid(Doc2),
+
+    Doc4 = update_attachment_revpos(Doc3),
+
+    % winner/sequence/branch_count are filled in below / by the fdb layer
+    NewRevInfo = #{
+        winner => undefined,
+        deleted => NewDeleted,
+        rev_id => {NewRevPos, NewRev},
+        rev_path => NewRevPath,
+        sequence => undefined,
+        branch_count => undefined
+    },
+
+    % Gather the list of possible winning revisions
+    Possible = case Target == Winner of
+        true when not Doc4#doc.deleted ->
+            [NewRevInfo];
+        true when Doc4#doc.deleted ->
+            % Deleting the winner may promote the second-place branch
+            case SecondPlace of
+                #{} -> [NewRevInfo, SecondPlace];
+                not_found -> [NewRevInfo]
+            end;
+        false ->
+            [NewRevInfo, Winner]
+    end,
+
+    % Sort the rev infos such that the winner is first
+    {NewWinner0, NonWinner} = case fabric2_util:sort_revinfos(Possible) of
+        [W] -> {W, not_found};
+        [W, NW] -> {W, NW}
+    end,
+
+    BranchCount = case Winner of
+        not_found -> 1;
+        #{branch_count := BC} -> BC
+    end,
+    NewWinner = NewWinner0#{branch_count := BranchCount},
+    ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+    ToRemove = if Target == not_found -> []; true -> [Target] end,
+
+    ok = fabric2_fdb:write_doc(
+            Db,
+            Doc4,
+            NewWinner,
+            Winner,
+            ToUpdate,
+            ToRemove
+        ),
+
+    {ok, {NewRevPos, NewRev}}.
+
+
+%% Apply a replicated (new_edits=false) update. The incoming doc's rev
+%% path is merged into the existing rev tree; already-known revisions
+%% short-circuit with {ok, []}. Returns {ok, []} on success to match
+%% the replicated_changes contract in update_docs/3.
+update_doc_replicated(Db, Doc0, _Options) ->
+    #doc{
+        id = DocId,
+        deleted = Deleted,
+        revs = {RevPos, [Rev | RevPath]}
+    } = Doc0,
+
+    DocRevInfo0 = #{
+        winner => undefined,
+        deleted => Deleted,
+        rev_id => {RevPos, Rev},
+        rev_path => RevPath,
+        sequence => undefined,
+        branch_count => undefined
+    },
+
+    AllRevInfos = fabric2_fdb:get_all_revs(Db, DocId),
+
+    % Rebuild the rev tree from stored revinfos
+    RevTree = lists:foldl(fun(RI, TreeAcc) ->
+        RIPath = fabric2_util:revinfo_to_path(RI),
+        {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+        Merged
+    end, [], AllRevInfos),
+
+    DocRevPath = fabric2_util:revinfo_to_path(DocRevInfo0),
+    {NewTree, Status} = couch_key_tree:merge(RevTree, DocRevPath),
+    if Status /= internal_node -> ok; true ->
+        % We already know this revision so nothing
+        % left to do.
+        ?RETURN({ok, []})
+    end,
+
+    % It's possible to have a replication with fewer than $revs_limit
+    % revisions which extends an existing branch. To avoid
+    % losing revision history we extract the new node from the
+    % tree and use the combined path after stemming.
+    {[{_, {RevPos, UnstemmedRevs}}], []}
+            = couch_key_tree:get(NewTree, [{RevPos, Rev}]),
+    RevsLimit = fabric2_db:get_revs_limit(Db),
+    Doc1 = Doc0#doc{
+        revs = {RevPos, lists:sublist(UnstemmedRevs, RevsLimit)}
+    },
+    {RevPos, [Rev | NewRevPath]} = Doc1#doc.revs,
+    DocRevInfo1 = DocRevInfo0#{rev_path := NewRevPath},
+
+    % Find any previous revision we knew about for
+    % validation and attachment handling.
+    AllLeafsFull = couch_key_tree:get_all_leafs_full(NewTree),
+    LeafPath = get_leaf_path(RevPos, Rev, AllLeafsFull),
+    PrevRevInfo = find_prev_revinfo(RevPos, LeafPath),
+    Doc2 = prep_and_validate(Db, Doc1, PrevRevInfo),
+
+    % Possible winners are the previous winner and
+    % the new DocRevInfo
+    Winner = case fabric2_util:sort_revinfos(AllRevInfos) of
+        [#{winner := true} = WRI | _] -> WRI;
+        [] -> not_found
+    end,
+    {NewWinner0, NonWinner} = case Winner == PrevRevInfo of
+        true ->
+            {DocRevInfo1, not_found};
+        false ->
+            [W, NW] = fabric2_util:sort_revinfos([Winner, DocRevInfo1]),
+            {W, NW}
+    end,
+
+    NewWinner = NewWinner0#{branch_count := length(AllLeafsFull)},
+    ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
+    ToRemove = if PrevRevInfo == not_found -> []; true -> [PrevRevInfo] end,
+
+    ok = fabric2_fdb:write_doc(
+            Db,
+            Doc2,
+            NewWinner,
+            Winner,
+            ToUpdate,
+            ToRemove
+        ),
+
+    {ok, []}.
+
+
+%% Write a _local doc. Local doc revs are plain integers stored as
+%% binaries; the rev is bumped here and the doc written directly.
+update_local_doc(Db, Doc0, _Options) ->
+    Doc1 = case increment_local_doc_rev(Doc0) of
+        {ok, Updated} -> Updated;
+        {error, _} = Error -> ?RETURN(Error)
+    end,
+
+    ok = fabric2_fdb:write_local_doc(Db, Doc1),
+
+    #doc{revs = {0, [Rev]}} = Doc1,
+    {ok, {0, integer_to_binary(Rev)}}.
+
+
+%% Stamp the doc's RevPos onto every attachment that will be written
+%% with this update; attachments already on disk keep their revpos.
+update_attachment_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) ->
+    Atts = lists:map(fun(Att) ->
+        case couch_att:fetch(data, Att) of
+            {loc, _Db, _DocId, _AttId} ->
+                % Attachment was already on disk
+                Att;
+            _ ->
+                % We will write this attachment with this update
+                % so mark it with the RevPos that will be written
+                couch_att:store(revpos, RevPos, Att)
+        end
+    end, Atts0),
+    Doc#doc{atts = Atts}.
+
+
+%% Kick off winning-rev reads for every non-local doc in the batch,
+%% returning a map of doc tag (see tag_docs/1) to erlfdb future.
+get_winning_rev_futures(Db, Docs) ->
+    lists:foldl(fun(Doc, Acc) ->
+        #doc{
+            id = DocId,
+            deleted = Deleted
+        } = Doc,
+        IsLocal = case DocId of
+            <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+            _ -> false
+        end,
+        if IsLocal -> Acc; true ->
+            % Deletions fetch a second rev in case the winner changes
+            NumRevs = if Deleted -> 2; true -> 1 end,
+            Future = fabric2_fdb:get_winning_revs_future(Db, DocId, NumRevs),
+            DocTag = doc_tag(Doc),
+            Acc#{DocTag => Future}
+        end
+    end, #{}, Docs).
+
+
+%% Prepare NewDoc for writing: merge attachment stubs with the previous
+%% revision's body when needed, reject duplicate attachment names and
+%% run any validate_doc_update functions. The previous body is only
+%% fetched when stubs or VDUs actually require it.
+prep_and_validate(Db, NewDoc, PrevRevInfo) ->
+    HasStubs = couch_doc:has_stubs(NewDoc),
+    HasVDUs = [] /= maps:get(validate_doc_update_funs, Db),
+    IsDDoc = case NewDoc#doc.id of
+        <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+
+    PrevDoc = case HasStubs orelse (HasVDUs and not IsDDoc) of
+        true when PrevRevInfo /= not_found ->
+            case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
+                #doc{} = PDoc -> PDoc;
+                {not_found, _} -> nil
+            end;
+        _ ->
+            nil
+    end,
+
+    MergedDoc = if not HasStubs -> NewDoc; true ->
+        % This will throw an error if we have any
+        % attachment stubs missing data
+        couch_doc:merge_stubs(NewDoc, PrevDoc)
+    end,
+    check_duplicate_attachments(MergedDoc),
+    validate_doc_update(Db, MergedDoc, PrevDoc),
+    MergedDoc.
+
+
+%% Design docs require admin rights and ddoc validation; regular docs
+%% are run through the db's validate_doc_update functions (timed under
+%% the vdu_process_time histogram). Failures exit via ?RETURN.
+validate_doc_update(Db, #doc{id = <<"_design/", _/binary>>} = Doc, _) ->
+    case catch check_is_admin(Db) of
+        ok -> validate_ddoc(Db, Doc);
+        Error -> ?RETURN({Doc, Error})
+    end;
+validate_doc_update(Db, Doc, PrevDoc) ->
+    #{
+        security_doc := Security,
+        validate_doc_update_funs := VDUs
+    } = Db,
+    Fun = fun() ->
+        JsonCtx = fabric2_util:user_ctx_to_json(Db),
+        lists:map(fun(VDU) ->
+            try
+                case VDU(Doc, PrevDoc, JsonCtx, Security) of
+                    ok -> ok;
+                    Error1 -> throw(Error1)
+                end
+            catch throw:Error2 ->
+                ?RETURN({Doc, Error2})
+            end
+        end, VDUs)
+    end,
+    Stat = [couchdb, query_server, vdu_process_time],
+    if VDUs == [] -> ok; true ->
+        couch_stats:update_histogram(Stat, Fun)
+    end.
+
+
+%% Compile-check a design doc, translating index-server failures into
+%% bad_request errors for the client.
+validate_ddoc(Db, DDoc) ->
+    try
+        ok = couch_index_server:validate(Db, couch_doc:with_ejson_body(DDoc))
+    catch
+        throw:{invalid_design_doc, Reason} ->
+            throw({bad_request, invalid_design_doc, Reason});
+        throw:{compilation_error, Reason} ->
+            throw({bad_request, compilation_error, Reason});
+        throw:Error ->
+            ?RETURN({DDoc, Error})
+    end.
+
+
+%% Throw bad_request if two attachments share a name.
+check_duplicate_attachments(#doc{atts = Atts}) ->
+    lists:foldl(fun(Att, Names) ->
+        Name = couch_att:fetch(name, Att),
+        case ordsets:is_element(Name, Names) of
+            true -> throw({bad_request, <<"Duplicate attachments">>});
+            false -> ordsets:add_element(Name, Names)
+        end
+    end, ordsets:new(), Atts).
+
+
+%% Find the path below the leaf {Pos, Rev} in the full-leaf list
+%% produced by couch_key_tree:get_all_leafs_full/1.
+get_leaf_path(Pos, Rev, [{Pos, [{Rev, _RevInfo} | LeafPath]} | _]) ->
+    LeafPath;
+get_leaf_path(Pos, Rev, [_WrongLeaf | RestLeafs]) ->
+    get_leaf_path(Pos, Rev, RestLeafs).
+
+
+%% Walk down a leaf path to the closest ancestor we have a revinfo
+%% map for, skipping ?REV_MISSING placeholders.
+find_prev_revinfo(_Pos, []) ->
+    not_found;
+find_prev_revinfo(Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
+    find_prev_revinfo(Pos - 1, RestPath);
+find_prev_revinfo(_Pos, [{_Rev, #{} = RevInfo} | _]) ->
+    RevInfo.
+
+
+%% Bump a _local doc's integer revision. Deletions reset to 0, new
+%% docs start at 1, otherwise parse-and-increment; a non-integer rev
+%% yields {error, <<"Invalid rev format">>}.
+increment_local_doc_rev(#doc{deleted = true} = Doc) ->
+    {ok, Doc#doc{revs = {0, [0]}}};
+increment_local_doc_rev(#doc{revs = {0, []}} = Doc) ->
+    {ok, Doc#doc{revs = {0, [1]}}};
+increment_local_doc_rev(#doc{revs = {0, [RevStr | _]}} = Doc) ->
+    try
+        PrevRev = binary_to_integer(RevStr),
+        {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
+    catch error:badarg ->
+        {error, <<"Invalid rev format">>}
+    end;
+increment_local_doc_rev(#doc{}) ->
+    {error, <<"Invalid rev format">>}.
+
+
+%% Extract a doc's current rev id, using {0, <<>>} for new docs.
+doc_to_revid(#doc{revs = Revs}) ->
+    case Revs of
+        {0, []} -> {0, <<>>};
+        {RevPos, [Rev | _]} -> {RevPos, Rev}
+    end.
+
+
+%% Prepend a unique {ref, make_ref()} to each doc's meta so batch
+%% updates can correlate docs with their rev-read futures.
+tag_docs([]) ->
+    [];
+tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+    NewDoc = Doc#doc{
+        meta = [{ref, make_ref()} | Meta]
+    },
+    [NewDoc | tag_docs(Rest)].
+
+
+%% Retrieve the tag added by tag_docs/1.
+doc_tag(#doc{meta = Meta}) ->
+    fabric2_util:get_value(ref, Meta).
+
+
+%% Normalize an {Id, Revs} pair to binary id and parsed rev tuples.
+idrevs({Id, Revs}) when is_list(Revs) ->
+    {docid(Id), [rev(R) || R <- Revs]}.
+
+
+%% Coerce a doc id to binary.
+docid(DocId) when is_list(DocId) ->
+    list_to_binary(DocId);
+docid(DocId) ->
+    DocId.
+
+
+%% Accept either a string/binary rev ("N-hash") or an already-parsed
+%% {Pos, Hash} tuple.
+rev(Rev) when is_list(Rev); is_binary(Rev) ->
+    couch_doc:parse_rev(Rev);
+rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
+    Rev.
+
diff --git a/src/fabric/src/fabric2_events.erl b/src/fabric/src/fabric2_events.erl
new file mode 100644
index 0000000..a571714
--- /dev/null
+++ b/src/fabric/src/fabric2_events.erl
@@ -0,0 +1,84 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_events).
+
+
+-export([
+    link_listener/4,
+    stop_listener/1
+]).
+
+-export([
+    init/5,
+    poll/5
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+%% Spawn a linked poller that invokes Mod:Fun(DbName, Event, St) on db
+%% updates/deletion. Blocks until the child has read the initial
+%% update sequence so no change before link_listener returns is missed.
+link_listener(Mod, Fun, St, Options) ->
+    DbName = fabric2_util:get_value(dbname, Options),
+    Pid = spawn_link(?MODULE, init, [self(), DbName, Mod, Fun, St]),
+    receive
+        {Pid, initialized} -> ok
+    end,
+    {ok, Pid}.
+
+
+%% Ask a listener to stop; the poll loop consumes this message.
+stop_listener(Pid) ->
+    Pid ! stop_listening.
+
+
+%% Listener entry point: capture the starting seq, monitor the parent
+%% (so the loop exits when the parent dies) and start polling.
+init(Parent, DbName, Mod, Fun, St) ->
+    {ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
+    Since = fabric2_db:get_update_seq(Db),
+    % NOTE(review): "XKCD" lines below are leftover debug tracing logged
+    % at error level; they should be removed or downgraded before merge.
+    couch_log:error("XKCD: START LISTENER: ~s : ~p for ~p", [DbName, Since, Parent]),
+    erlang:monitor(process, Parent),
+    Parent ! {self(), initialized},
+    poll(DbName, Since, Mod, Fun, St),
+    couch_log:error("XKCD: STOP LISTENER for ~p", [Parent]).
+
+
+%% Poll loop: once per second compare the db's update seq against the
+%% last seen value and call Mod:Fun(DbName, updated, St) on change, or
+%% Mod:Fun(DbName, deleted, St) if the db disappeared. Stops on a
+%% stop_listening message, parent 'DOWN', or a {stop, _} callback reply.
+poll(DbName, Since, Mod, Fun, St) ->
+    {Resp, NewSince} = try
+        case fabric2_db:open(DbName, [?ADMIN_CTX]) of
+            {ok, Db} ->
+                case fabric2_db:get_update_seq(Db) of
+                    Since ->
+                        % NOTE(review): leftover debug tracing at error level
+                        couch_log:error("XKCD: NO UPDATE: ~s :: ~p", [DbName, Since]),
+                        {{ok, St}, Since};
+                    Other ->
+                        couch_log:error("XKCD: UPDATED: ~s :: ~p -> ~p", [DbName, Since, Other]),
+                        {Mod:Fun(DbName, updated, St), Other}
+                end;
+            Error ->
+                exit(Error)
+        end
+    catch error:database_does_not_exist ->
+        % NOTE(review): this arm returns the callback result, not a
+        % {Resp, NewSince} pair like the try body — confirm intended.
+        Mod:Fun(DbName, deleted, St)
+    end,
+    receive
+        stop_listening ->
+            ok;
+        {'DOWN', _, _, _, _} ->
+            ok
+    after 0 ->
+        case Resp of
+            {ok, NewSt} ->
+                timer:sleep(1000),
+                ?MODULE:poll(DbName, NewSince, Mod, Fun, NewSt);
+            {stop, _} ->
+                ok
+        end
+    end.
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
new file mode 100644
index 0000000..0a4f298
--- /dev/null
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -0,0 +1,1187 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_fdb).
+
+
+-export([
+    transactional/1,
+    transactional/3,
+    transactional/2,
+
+    create/2,
+    open/2,
+    reopen/1,
+    delete/1,
+    exists/1,
+
+    list_dbs/2,
+
+    get_info/1,
+    get_config/1,
+    set_config/3,
+
+    get_stat/2,
+    incr_stat/3,
+
+    get_all_revs/2,
+    get_winning_revs/3,
+    get_winning_revs_future/3,
+    get_winning_revs_wait/2,
+    get_non_deleted_rev/3,
+
+    get_doc_body/3,
+    get_doc_body_future/3,
+    get_doc_body_wait/4,
+    get_local_doc/2,
+
+    write_doc/6,
+    write_local_doc/2,
+
+    read_attachment/3,
+    write_attachment/3,
+
+    fold_docs/4,
+    fold_changes/5,
+    get_last_change/1,
+
+    vs_to_seq/1,
+
+    debug_cluster/0,
+    debug_cluster/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("fabric2.hrl").
+
+
+transactional(Fun) ->
+    do_transaction(Fun, undefined).
+
+
+transactional(DbName, Options, Fun) when is_binary(DbName) ->
+    transactional(fun(Tx) ->
+        Fun(init_db(Tx, DbName, Options))
+    end).
+
+
+transactional(#{tx := undefined} = Db, Fun) ->
+    #{layer_prefix := LayerPrefix} = Db,
+    do_transaction(fun(Tx) ->
+        Fun(Db#{tx => Tx})
+    end, LayerPrefix);
+
+transactional(#{tx := {erlfdb_transaction, _}} = Db, Fun) ->
+    Fun(Db).
+
+
+% Run Fun inside an erlfdb transaction against the cached cluster
+% handle. If the erlfdb_trace process-dict key holds a binary name,
+% per-transaction logging is enabled. Retries are made idempotent:
+% when the previous attempt ended in commit_unknown_result and our
+% transaction id already exists in the layer keyspace (see
+% is_transaction_applied/1), the commit did apply, so the memoized
+% result is returned instead of re-executing Fun. Transaction-local
+% process-dict state is always cleared afterwards.
+do_transaction(Fun, LayerPrefix) when is_function(Fun, 1) ->
+    Db = get_db_handle(),
+    try
+        erlfdb:transactional(Db, fun(Tx) ->
+            case get(erlfdb_trace) of
+                Name when is_binary(Name) ->
+                    erlfdb:set_option(Tx, transaction_logging_enable, Name);
+                _ ->
+                    ok
+            end,
+            case is_transaction_applied(Tx) of
+                true ->
+                    get_previous_transaction_result();
+                false ->
+                    execute_transaction(Tx, Fun, LayerPrefix)
+            end
+        end)
+    after
+        clear_transaction()
+    end.
+
+
+% Create the database: register DbName in the ?ALL_DBS keyspace,
+% initialize the per-db version, config and stats keys, and return a
+% fully populated db handle map. Must run inside a transaction.
+create(#{} = Db0, Options) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = Db = ensure_current(Db0, false),
+
+    % Eventually DbPrefix will be HCA allocated. For now
+    % we're just using the DbName so that debugging is easier.
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    DbPrefix = erlfdb_tuple:pack({?DBS, DbName}, LayerPrefix),
+    erlfdb:set(Tx, DbKey, DbPrefix),
+
+    % This key is responsible for telling us when something in
+    % the database cache (i.e., fabric2_server's ets table) has
+    % changed and requires re-loading. This currently includes
+    % revs_limit and validate_doc_update functions. There's
+    % no order to versioning here. Its just a value that changes
+    % that is used in the ensure_current check.
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = fabric2_util:uuid(),
+    erlfdb:set(Tx, DbVersionKey, DbVersion),
+
+    UUID = fabric2_util:uuid(),
+
+    % Initial persisted config and stats. Stats are stored as little
+    % unsigned ints because they are updated with atomic adds.
+    Defaults = [
+        {?DB_CONFIG, <<"uuid">>, UUID},
+        {?DB_CONFIG, <<"revs_limit">>, ?uint2bin(1000)},
+        {?DB_CONFIG, <<"security_doc">>, <<"{}">>},
+        {?DB_STATS, <<"doc_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_del_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_design_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"doc_local_count">>, ?uint2bin(0)},
+        {?DB_STATS, <<"size">>, ?uint2bin(2)}
+    ],
+    lists:foreach(fun({P, K, V}) ->
+        Key = erlfdb_tuple:pack({P, K}, DbPrefix),
+        erlfdb:set(Tx, Key, V)
+    end, Defaults),
+
+    UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+
+    % Return the in-memory handle matching what was just persisted.
+    Db#{
+        uuid => UUID,
+        db_prefix => DbPrefix,
+        db_version => DbVersion,
+
+        revs_limit => 1000,
+        security_doc => {[]},
+        user_ctx => UserCtx,
+
+        validate_doc_update_funs => [],
+        before_doc_update => undefined,
+        after_doc_read => undefined,
+        % All other db things as we add features,
+
+        db_options => Options
+    }.
+
+
+% Open an existing database: resolve its prefix from the ?ALL_DBS
+% keyspace (raising database_does_not_exist when absent), read the
+% per-db version, overlay the persisted config (uuid, revs_limit,
+% security_doc) on top of defaults, and load the design docs'
+% validate_doc_update funs. Must run inside a transaction.
+open(#{} = Db0, Options) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = Db1 = ensure_current(Db0, false),
+
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    DbPrefix = case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+        Bin when is_binary(Bin) -> Bin;
+        not_found -> erlang:error(database_does_not_exist)
+    end,
+
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = erlfdb:wait(erlfdb:get(Tx, DbVersionKey)),
+
+    UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
+
+    Db2 = Db1#{
+        db_prefix => DbPrefix,
+        db_version => DbVersion,
+
+        revs_limit => 1000,
+        security_doc => {[]},
+        user_ctx => UserCtx,
+
+        % Place holders until we implement these
+        % bits.
+        validate_doc_update_funs => [],
+        before_doc_update => undefined,
+        after_doc_read => undefined,
+
+        db_options => Options
+    },
+
+    % Overlay the persisted ?DB_CONFIG values onto the defaults above.
+    % An unknown config key crashes here (case_clause) by design.
+    Db3 = lists:foldl(fun({Key, Val}, DbAcc) ->
+        case Key of
+            <<"uuid">> ->
+                DbAcc#{uuid => Val};
+            <<"revs_limit">> ->
+                DbAcc#{revs_limit => ?bin2uint(Val)};
+            <<"security_doc">> ->
+                DbAcc#{security_doc => ?JSON_DECODE(Val)}
+        end
+    end, Db2, get_config(Db2)),
+
+    load_validate_doc_funs(Db3).
+
+
+reopen(#{} = OldDb) ->
+    require_transaction(OldDb),
+    #{
+        tx := Tx,
+        name := DbName,
+        db_options := Options
+    } = OldDb,
+    open(init_db(Tx, DbName, Options), Options).
+
+
+delete(#{} = Db) ->
+    #{
+        name := DbName,
+        tx := Tx,
+        layer_prefix := LayerPrefix,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    erlfdb:clear(Tx, DbKey),
+    erlfdb:clear_range_startswith(Tx, DbPrefix),
+    bump_metadata_version(Tx),
+    ok.
+
+
+% Check whether DbName exists by probing its registration key in the
+% ?ALL_DBS keyspace. Works on a handle that has not been fully opened,
+% so the per-db version check is skipped (ensure_current/2 with false).
+exists(#{name := DbName} = Db) when is_binary(DbName) ->
+    #{
+        tx := Tx,
+        layer_prefix := LayerPrefix
+    } = ensure_current(Db, false),
+
+    AllDbsKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+    case erlfdb:wait(erlfdb:get(Tx, AllDbsKey)) of
+        not_found -> false;
+        Prefix when is_binary(Prefix) -> true
+    end.
+
+
+list_dbs(Tx, _Options) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    LayerPrefix = erlfdb_directory:get_name(CouchDB),
+    {Start, End} = erlfdb_tuple:range({?ALL_DBS}, LayerPrefix),
+    Future = erlfdb:get_range(Tx, Start, End),
+    lists:map(fun({K, _V}) ->
+        {?ALL_DBS, DbName} = erlfdb_tuple:unpack(K, LayerPrefix),
+        DbName
+    end, erlfdb:wait(Future)).
+
+
+get_info(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {CStart, CEnd} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+    ChangesFuture = erlfdb:get_range(Tx, CStart, CEnd, [
+        {streaming_mode, exact},
+        {limit, 1},
+        {reverse, true}
+    ]),
+
+    StatsPrefix = erlfdb_tuple:pack({?DB_STATS}, DbPrefix),
+    MetaFuture = erlfdb:get_range_startswith(Tx, StatsPrefix),
+
+    RawSeq = case erlfdb:wait(ChangesFuture) of
+        [] ->
+            vs_to_seq(fabric2_util:seq_zero_vs());
+        [{SeqKey, _}] ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(SeqKey, DbPrefix),
+            vs_to_seq(SeqVS)
+    end,
+    CProp = {update_seq, RawSeq},
+
+    MProps = lists:flatmap(fun({K, V}) ->
+        case erlfdb_tuple:unpack(K, DbPrefix) of
+            {?DB_STATS, <<"doc_count">>} ->
+                [{doc_count, ?bin2uint(V)}];
+            {?DB_STATS, <<"doc_del_count">>} ->
+                [{doc_del_count, ?bin2uint(V)}];
+            {?DB_STATS, <<"size">>} ->
+                Val = ?bin2uint(V),
+                [
+                    {other, {[{data_size, Val}]}},
+                    {sizes, {[
+                        {active, 0},
+                        {external, Val},
+                        {file, 0}
+                    ]}}
+                ];
+            {?DB_STATS, _} ->
+                []
+        end
+    end, erlfdb:wait(MetaFuture)),
+
+    [CProp | MProps].
+
+
+get_config(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db),
+
+    {Start, End} = erlfdb_tuple:range({?DB_CONFIG}, DbPrefix),
+    Future = erlfdb:get_range(Tx, Start, End),
+
+    lists:map(fun({K, V}) ->
+        {?DB_CONFIG, Key} = erlfdb_tuple:unpack(K, DbPrefix),
+        {Key, V}
+    end, erlfdb:wait(Future)).
+
+
+set_config(#{} = Db, ConfigKey, ConfigVal) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_CONFIG, ConfigKey}, DbPrefix),
+    erlfdb:set(Tx, Key, ConfigVal),
+    bump_metadata_version(Tx).
+
+
+get_stat(#{} = Db, StatKey) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+
+    % Might need to figure out some sort of type
+    % system here. Uints are because stats are all
+    % atomic op adds for the moment.
+    ?bin2uint(erlfdb:wait(erlfdb:get(Tx, Key))).
+
+
+incr_stat(_Db, _StatKey, 0) ->
+    ok;
+
+incr_stat(#{} = Db, StatKey, Increment) when is_integer(Increment) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
+    erlfdb:add(Tx, Key, Increment).
+
+
+get_all_revs(#{} = Db, DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    Prefix = erlfdb_tuple:pack({?DB_REVS, DocId}, DbPrefix),
+    Options = [{streaming_mode, want_all}],
+    Future = erlfdb:get_range_startswith(Tx, Prefix, Options),
+    lists:map(fun({K, V}) ->
+        Key = erlfdb_tuple:unpack(K, DbPrefix),
+        Val = erlfdb_tuple:unpack(V),
+        fdb_to_revinfo(Key, Val)
+    end, erlfdb:wait(Future)).
+
+
+% Synchronous wrapper: issue the winning-revs range read and block on
+% the result in a single call.
+get_winning_revs(Db, DocId, NumRevs) ->
+    get_winning_revs_wait(Db, get_winning_revs_future(Db, DocId, NumRevs)).
+
+
+get_winning_revs_future(#{} = Db, DocId, NumRevs) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {StartKey, EndKey} = erlfdb_tuple:range({?DB_REVS, DocId}, DbPrefix),
+    Options = [{reverse, true}, {limit, NumRevs}],
+    erlfdb:get_range_raw(Tx, StartKey, EndKey, Options).
+
+
+get_winning_revs_wait(#{} = Db, Future) ->
+    #{
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+    {Rows, _, _} = erlfdb:wait(Future),
+    lists:map(fun({K, V}) ->
+        Key = erlfdb_tuple:unpack(K, DbPrefix),
+        Val = erlfdb_tuple:unpack(V),
+        fdb_to_revinfo(Key, Val)
+    end, Rows).
+
+
+get_non_deleted_rev(#{} = Db, DocId, RevId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {RevPos, Rev} = RevId,
+
+    BaseKey = {?DB_REVS, DocId, true, RevPos, Rev},
+    Key = erlfdb_tuple:pack(BaseKey, DbPrefix),
+    case erlfdb:wait(erlfdb:get(Tx, Key)) of
+        not_found ->
+            not_found;
+        Val ->
+            fdb_to_revinfo(BaseKey, erlfdb_tuple:unpack(Val))
+    end.
+
+
+get_doc_body(Db, DocId, RevInfo) ->
+    Future = get_doc_body_future(Db, DocId, RevInfo),
+    get_doc_body_wait(Db, DocId, RevInfo, Future).
+
+
+get_doc_body_future(#{} = Db, DocId, RevInfo) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    #{
+        rev_id := {RevPos, Rev}
+    } = RevInfo,
+
+    Key = erlfdb_tuple:pack({?DB_DOCS, DocId, RevPos, Rev}, DbPrefix),
+    erlfdb:get(Tx, Key).
+
+
+get_doc_body_wait(#{} = Db0, DocId, RevInfo, Future) ->
+    Db = ensure_current(Db0),
+
+    #{
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+
+    Val = erlfdb:wait(Future),
+    fdb_to_doc(Db, DocId, RevPos, [Rev | RevPath], Val).
+
+
+get_local_doc(#{} = Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db0),
+
+    Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, DocId}, DbPrefix),
+    Val = erlfdb:wait(erlfdb:get(Tx, Key)),
+    fdb_to_local_doc(Db, DocId, Val).
+
+
+% Persist one document update whose revision tree delta has already
+% been computed by the caller. NewWinner0/OldWinner/ToUpdate/ToRemove
+% describe that delta; this function writes the ?DB_REVS rows, keeps
+% the ?DB_ALL_DOCS and ?DB_CHANGES indexes in sync, stores the doc
+% body, bumps the db version for design docs and maintains the doc
+% count stats. Must run inside a transaction.
+write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db = ensure_current(Db0),
+
+    #doc{
+        id = DocId,
+        deleted = Deleted
+    } = Doc,
+
+    % Revision tree
+
+    NewWinner = NewWinner0#{winner := true},
+    NewRevId = maps:get(rev_id, NewWinner),
+
+    % The winner row carries a versionstamped sequence (WinnerVS) that
+    % is reused below as the new ?DB_CHANGES key.
+    {WKey, WVal, WinnerVS} = revinfo_to_fdb(Tx, DbPrefix, DocId, NewWinner),
+    ok = erlfdb:set_versionstamped_value(Tx, WKey, WVal),
+
+    lists:foreach(fun(RI0) ->
+        RI = RI0#{winner := false},
+        {K, V, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+        ok = erlfdb:set(Tx, K, V)
+    end, ToUpdate),
+
+    lists:foreach(fun(RI0) ->
+        RI = RI0#{winner := false},
+        {K, _, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+        ok = erlfdb:clear(Tx, K)
+    end, ToRemove),
+
+    % _all_docs
+
+    % Classify the update by comparing the old and new winners'
+    % deleted flags.
+    UpdateStatus = case {OldWinner, NewWinner} of
+        {not_found, #{deleted := false}} ->
+            created;
+        {#{deleted := true}, #{deleted := false}} ->
+            recreated;
+        {#{deleted := false}, #{deleted := false}} ->
+            updated;
+        {#{deleted := false}, #{deleted := true}} ->
+            deleted
+    end,
+
+    case UpdateStatus of
+        Status when Status == created orelse Status == recreated ->
+            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+            ADVal = erlfdb_tuple:pack(NewRevId),
+            ok = erlfdb:set(Tx, ADKey, ADVal);
+        deleted ->
+            ADKey = erlfdb_tuple:pack({?DB_ALL_DOCS, DocId}, DbPrefix),
+            ok = erlfdb:clear(Tx, ADKey);
+        updated ->
+            ok
+    end,
+
+    % _changes
+
+    % Each document owns exactly one ?DB_CHANGES row: clear the old
+    % winner's sequence before writing the new versionstamped one.
+    if OldWinner == not_found -> ok; true ->
+        OldSeq = maps:get(sequence, OldWinner),
+        OldSeqKey = erlfdb_tuple:pack({?DB_CHANGES, OldSeq}, DbPrefix),
+        erlfdb:clear(Tx, OldSeqKey)
+    end,
+
+    NewSeqKey = erlfdb_tuple:pack_vs({?DB_CHANGES, WinnerVS}, DbPrefix),
+    NewSeqVal = erlfdb_tuple:pack({DocId, Deleted, NewRevId}),
+    erlfdb:set_versionstamped_key(Tx, NewSeqKey, NewSeqVal),
+
+    % And all the rest...
+
+    ok = write_doc_body(Db, Doc),
+
+    IsDDoc = case Doc#doc.id of
+        <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+        _ -> false
+    end,
+
+    % Design doc writes invalidate cached handles since they may carry
+    % new validate_doc_update functions.
+    if not IsDDoc -> ok; true ->
+        bump_db_version(Db)
+    end,
+
+    case UpdateStatus of
+        created ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, 1)
+            end,
+            incr_stat(Db, <<"doc_count">>, 1);
+        recreated ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, 1)
+            end,
+            incr_stat(Db, <<"doc_count">>, 1),
+            incr_stat(Db, <<"doc_del_count">>, -1);
+        deleted ->
+            if not IsDDoc -> ok; true ->
+                incr_stat(Db, <<"doc_design_count">>, -1)
+            end,
+            incr_stat(Db, <<"doc_count">>, -1),
+            incr_stat(Db, <<"doc_del_count">>, 1);
+        updated ->
+            ok
+    end,
+
+    ok.
+
+
+write_local_doc(#{} = Db0, Doc) ->
+    #{
+        tx := Tx
+    } = Db = ensure_current(Db0),
+
+    {LDocKey, LDocVal} = local_doc_to_fdb(Db, Doc),
+
+    WasDeleted = case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
+        <<_/binary>> -> false;
+        not_found -> true
+    end,
+
+    case Doc#doc.deleted of
+        true -> erlfdb:clear(Tx, LDocKey);
+        false -> erlfdb:set(Tx, LDocKey, LDocVal)
+    end,
+
+    case {WasDeleted, Doc#doc.deleted} of
+        {true, false} ->
+            incr_stat(Db, <<"doc_local_count">>, 1);
+        {false, true} ->
+            incr_stat(Db, <<"doc_local_count">>, -1);
+        _ ->
+            ok
+    end,
+
+    ok.
+
+
+% Read all stored chunks of an attachment and concatenate them into a
+% single binary (chunks were written in ChunkId order by
+% write_attachment/3, and range reads return keys in order).
+%
+% NOTE(review): a range read returns a (possibly empty) list of
+% key/value pairs, not the atom not_found, so the not_found clause
+% below looks unreachable and a missing attachment would come back as
+% <<>> via the KVs clause — confirm against the erlfdb API.
+read_attachment(#{} = Db, DocId, AttId) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+    case erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)) of
+        not_found ->
+            throw({not_found, missing});
+        KVs ->
+            Vs = [V || {_K, V} <- KVs],
+            iolist_to_binary(Vs)
+    end.
+
+
+write_attachment(#{} = Db, DocId, Data) when is_binary(Data) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    AttId = fabric2_util:uuid(),
+    Chunks = chunkify_attachment(Data),
+
+    lists:foldl(fun(Chunk, ChunkId) ->
+        AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix),
+        ok = erlfdb:set(Tx, AttKey, Chunk),
+        ChunkId + 1
+    end, 0, Chunks),
+    {ok, AttId}.
+
+
+fold_docs(#{} = Db, UserFun, UserAcc0, Options) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {Reverse, Start, End} = get_dir_and_bounds(DbPrefix, Options),
+
+    DocCountKey = erlfdb_tuple:pack({?DB_STATS, <<"doc_count">>}, DbPrefix),
+    DocCountBin = erlfdb:wait(erlfdb:get(Tx, DocCountKey)),
+
+    try
+        UserAcc1 = maybe_stop(UserFun({meta, [
+            {total, ?bin2uint(DocCountBin)},
+            {offset, null}
+        ]}, UserAcc0)),
+
+        UserAcc2 = erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
+            {?DB_ALL_DOCS, DocId} = erlfdb_tuple:unpack(K, DbPrefix),
+            RevId = erlfdb_tuple:unpack(V),
+            maybe_stop(UserFun({row, [
+                {id, DocId},
+                {key, DocId},
+                {value, couch_doc:rev_to_str(RevId)}
+            ]}, UserAccIn))
+        end, UserAcc1, [{reverse, Reverse}] ++ Options),
+
+        {ok, maybe_stop(UserFun(complete, UserAcc2))}
+    catch throw:{stop, FinalUserAcc} ->
+        {ok, FinalUserAcc}
+    end.
+
+
+% Fold over the ?DB_CHANGES keyspace starting at SinceSeq0, feeding
+% #{id, sequence, rev_id, deleted} maps to UserFun. The since sequence
+% itself is excluded (first_greater_than on the boundary nearest it),
+% matching CouchDB changes-feed semantics. Direction is taken from the
+% dir option (fwd/rev); a {stop, Acc} return from UserFun aborts the
+% fold early via maybe_stop/1's throw.
+fold_changes(#{} = Db, SinceSeq0, UserFun, UserAcc0, Options) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    SinceSeq1 = get_since_seq(SinceSeq0),
+
+    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
+        fwd -> false;
+        rev -> true
+    end,
+
+    % Forward folds run from the since seq to the max versionstamp;
+    % reverse folds run from zero back down to the since seq.
+    {Start0, End0} = case Reverse of
+        false -> {SinceSeq1, fabric2_util:seq_max_vs()};
+        true -> {fabric2_util:seq_zero_vs(), SinceSeq1}
+    end,
+
+    Start1 = erlfdb_tuple:pack({?DB_CHANGES, Start0}, DbPrefix),
+    End1 = erlfdb_tuple:pack({?DB_CHANGES, End0}, DbPrefix),
+
+    % Exclude the since seq itself from the results in each direction.
+    {Start, End} = case Reverse of
+        false -> {erlfdb_key:first_greater_than(Start1), End1};
+        true -> {Start1, erlfdb_key:first_greater_than(End1)}
+    end,
+
+    try
+        {ok, erlfdb:fold_range(Tx, Start, End, fun({K, V}, UserAccIn) ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
+            {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
+
+            Change = #{
+                id => DocId,
+                sequence => vs_to_seq(SeqVS),
+                rev_id => RevId,
+                deleted => Deleted
+            },
+
+            maybe_stop(UserFun(Change, UserAccIn))
+        end, UserAcc0, [{reverse, Reverse}] ++ Options)}
+    catch throw:{stop, FinalUserAcc} ->
+        {ok, FinalUserAcc}
+    end.
+
+
+% Return the most recent change sequence in the database as a hex seq
+% string, or the zero sequence when the changes keyspace is empty.
+%
+% NOTE(review): unlike get_info/1, the get_range result here is not
+% passed through erlfdb:wait/1 before being matched against a list —
+% confirm that erlfdb:get_range/4 returns results directly rather
+% than a future.
+get_last_change(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = ensure_current(Db),
+
+    {Start, End} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
+    Options = [{limit, 1}, {reverse, true}],
+    case erlfdb:get_range(Tx, Start, End, Options) of
+        [] ->
+            vs_to_seq(fabric2_util:seq_zero_vs());
+        [{K, _V}] ->
+            {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(K, DbPrefix),
+            vs_to_seq(SeqVS)
+    end.
+
+
+% Unwrap a fold callback result: continue with the accumulator on
+% {ok, _}, or abort the enclosing fold by throwing {stop, _} (caught
+% by fold_docs/4 and fold_changes/5).
+maybe_stop({stop, FinalAcc}) ->
+    throw({stop, FinalAcc});
+maybe_stop({ok, NextAcc}) ->
+    NextAcc.
+
+
+% Convert a versionstamp into the hex string exposed as an update
+% sequence. Packing a versionstamp tuple yields a one-byte type tag
+% (51, i.e. 16#33 — presumably the fdb tuple code for versionstamps;
+% see the matching tag in get_since_seq/1) followed by the 12-byte
+% stamp; the tag is stripped and the rest hex-encoded. Inverse of
+% get_since_seq/1 for 24-character sequences.
+vs_to_seq(VS) ->
+    <<51:8, SeqBin:12/binary>> = erlfdb_tuple:pack({VS}),
+    fabric2_util:to_hex(SeqBin).
+
+
+debug_cluster() ->
+    debug_cluster(<<>>, <<16#FE, 16#FF, 16#FF>>).
+
+
+debug_cluster(Start, End) ->
+    transactional(fun(Tx) ->
+        lists:foreach(fun({Key, Val}) ->
+            io:format("~s => ~s~n", [
+                    string:pad(erlfdb_util:repr(Key), 60),
+                    erlfdb_util:repr(Val)
+                ])
+        end, erlfdb:get_range(Tx, Start, End))
+    end).
+
+
+init_db(Tx, DbName, Options) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
+    #{
+        name => DbName,
+        tx => Tx,
+        layer_prefix => Prefix,
+        md_version => Version,
+
+        db_options => Options
+    }.
+
+
+load_validate_doc_funs(#{} = Db) ->
+    FoldFun = fun
+        ({row, Row}, Acc) ->
+            DDocInfo = #{id => fabric2_util:get_value(id, Row)},
+            {ok, [DDocInfo | Acc]};
+        (_, Acc) ->
+            {ok, Acc}
+    end,
+
+    Options = [
+        {start_key, <<"_design/">>},
+        {end_key, <<"_design0">>}
+    ],
+
+    {ok, Infos1} = fold_docs(Db, FoldFun, [], Options),
+
+    Infos2 = lists:map(fun(Info) ->
+        #{
+            id := DDocId = <<"_design/", _/binary>>
+        } = Info,
+        Info#{
+            rev_info => get_winning_revs_future(Db, DDocId, 1)
+        }
+    end, Infos1),
+
+    Infos3 = lists:flatmap(fun(Info) ->
+        #{
+            id := DDocId,
+            rev_info := RevInfoFuture
+        } = Info,
+        [RevInfo] = get_winning_revs_wait(Db, RevInfoFuture),
+        #{deleted := Deleted} = RevInfo,
+        if Deleted -> []; true ->
+            [Info#{
+                rev_info := RevInfo,
+                body => get_doc_body_future(Db, DDocId, RevInfo)
+            }]
+        end
+    end, Infos2),
+
+    VDUs = lists:flatmap(fun(Info) ->
+        #{
+            id := DDocId,
+            rev_info := RevInfo,
+            body := BodyFuture
+        } = Info,
+        #doc{} = Doc = get_doc_body_wait(Db, DDocId, RevInfo, BodyFuture),
+        case couch_doc:get_validate_doc_fun(Doc) of
+            nil -> [];
+            Fun -> [Fun]
+        end
+    end, Infos3),
+
+    Db#{
+        validate_doc_update_funs := VDUs
+    }.
+
+
+% Bump the cluster-wide metadata version key so that every cached db
+% handle fails its md_version comparison in ensure_current/2 and gets
+% reopened. Called after db create/delete and config changes.
+bump_metadata_version(Tx) ->
+    % The 14 zero bytes is pulled from the PR for adding the
+    % metadata version key. Not sure why 14 bytes when version
+    % stamps are only 80, but whatever for now.
+    erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>).
+
+
+bump_db_version(#{} = Db) ->
+    #{
+        tx := Tx,
+        db_prefix := DbPrefix
+    } = Db,
+
+    DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+    DbVersion = fabric2_util:uuid(),
+    ok = erlfdb:set(Tx, DbVersionKey, DbVersion).
+
+
+write_doc_body(#{} = Db0, #doc{} = Doc) ->
+    #{
+        tx := Tx
+    } = Db = ensure_current(Db0),
+
+    {NewDocKey, NewDocVal} = doc_to_fdb(Db, Doc),
+    erlfdb:set(Tx, NewDocKey, NewDocVal).
+
+
+revinfo_to_fdb(Tx, DbPrefix, DocId, #{winner := true} = RevId) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath,
+        branch_count := BranchCount
+    } = RevId,
+    VS = new_versionstamp(Tx),
+    Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+    Val = {?CURR_REV_FORMAT, VS, BranchCount, list_to_tuple(RevPath)},
+    KBin = erlfdb_tuple:pack(Key, DbPrefix),
+    VBin = erlfdb_tuple:pack_vs(Val),
+    {KBin, VBin, VS};
+
+revinfo_to_fdb(_Tx, DbPrefix, DocId, #{} = RevId) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevId,
+    Key = {?DB_REVS, DocId, not Deleted, RevPos, Rev},
+    Val = {?CURR_REV_FORMAT, list_to_tuple(RevPath)},
+    KBin = erlfdb_tuple:pack(Key, DbPrefix),
+    VBin = erlfdb_tuple:pack(Val),
+    {KBin, VBin, undefined}.
+
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
+    {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+    {_RevFormat, Sequence, BranchCount, RevPath} = Val,
+    #{
+        winner => true,
+        deleted => not NotDeleted,
+        rev_id => {RevPos, Rev},
+        rev_path => tuple_to_list(RevPath),
+        sequence => Sequence,
+        branch_count => BranchCount
+    };
+
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _} = Val)  ->
+    {?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
+    {_RevFormat, RevPath} = Val,
+    #{
+        winner => false,
+        deleted => not NotDeleted,
+        rev_id => {RevPos, Rev},
+        rev_path => tuple_to_list(RevPath),
+        sequence => undefined,
+        branch_count => undefined
+    }.
+
+
+doc_to_fdb(Db, #doc{} = Doc) ->
+    #{
+        db_prefix := DbPrefix
+    } = Db,
+
+    #doc{
+        id = Id,
+        revs = {Start, [Rev | _]},
+        body = Body,
+        atts = Atts,
+        deleted = Deleted
+    } = doc_flush_atts(Db, Doc),
+
+    Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev}, DbPrefix),
+    Val = {Body, Atts, Deleted},
+    {Key, term_to_binary(Val, [{minor_version, 1}])}.
+
+
+fdb_to_doc(_Db, DocId, Pos, Path, Bin) when is_binary(Bin) ->
+    {Body, Atts, Deleted} = binary_to_term(Bin, [safe]),
+    #doc{
+        id = DocId,
+        revs = {Pos, Path},
+        body = Body,
+        atts = Atts,
+        deleted = Deleted
+    };
+fdb_to_doc(_Db, _DocId, _Pos, _Path, not_found) ->
+    {not_found, missing}.
+
+
+local_doc_to_fdb(Db, #doc{} = Doc) ->
+    #{
+        db_prefix := DbPrefix
+    } = Db,
+
+    #doc{
+        id = Id,
+        revs = {0, [Rev]},
+        body = Body
+    } = Doc,
+
+    StoreRev = case Rev of
+        _ when is_integer(Rev) -> integer_to_binary(Rev);
+        _ when is_binary(Rev) -> Rev
+    end,
+
+    Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+    Val = {StoreRev, Body},
+    {Key, term_to_binary(Val, [{minor_version, 1}])}.
+
+
+fdb_to_local_doc(_Db, DocId, Bin) when is_binary(Bin) ->
+    {Rev, Body} = binary_to_term(Bin, [safe]),
+    #doc{
+        id = DocId,
+        revs = {0, [Rev]},
+        deleted = false,
+        body = Body
+    };
+fdb_to_local_doc(_Db, _DocId, not_found) ->
+    {not_found, missing}.
+
+
+doc_flush_atts(Db, Doc) ->
+    Atts = lists:map(fun(Att) ->
+        couch_att:flush(Db, Doc#doc.id, Att)
+    end, Doc#doc.atts),
+    Doc#doc{atts = Atts}.
+
+
+% Split an attachment binary into chunks of at most
+% ?ATTACHMENT_CHUNK_SIZE bytes, each stored under its own key by
+% write_attachment/3. An empty binary yields no chunks.
+chunkify_attachment(<<>>) ->
+    [];
+chunkify_attachment(<<Head:?ATTACHMENT_CHUNK_SIZE/binary, Rest/binary>>) ->
+    [Head | chunkify_attachment(Rest)];
+chunkify_attachment(Data) when byte_size(Data) < ?ATTACHMENT_CHUNK_SIZE ->
+    % Final short chunk. byte_size/1 replaces the generic size/1 BIF.
+    [Data].
+
+
+% Translate CouchDB _all_docs fold options (dir, start_key, end_key,
+% end_key_gt) into a fold direction flag plus packed FDB start/end
+% keys, accounting for CouchDB's key-swapping on reverse folds and its
+% inclusive-end default.
+get_dir_and_bounds(DbPrefix, Options) ->
+    Reverse = case fabric2_util:get_value(dir, Options, fwd) of
+        fwd -> false;
+        rev -> true
+    end,
+    StartKey0 = fabric2_util:get_value(start_key, Options),
+    EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
+    EndKey0 = fabric2_util:get_value(end_key, Options, EndKeyGt),
+    InclusiveEnd = EndKeyGt == undefined,
+
+    % CouchDB swaps the key meanings based on the direction
+    % of the fold. FoundationDB does not so we have to
+    % swap back here.
+    {StartKey1, EndKey1} = case Reverse of
+        false -> {StartKey0, EndKey0};
+        true -> {EndKey0, StartKey0}
+    end,
+
+    % Set the maximum bounds for the start and endkey
+    StartKey2 = case StartKey1 of
+        undefined -> {?DB_ALL_DOCS};
+        SK2 when is_binary(SK2) -> {?DB_ALL_DOCS, SK2}
+    end,
+
+    EndKey2 = case EndKey1 of
+        undefined -> {?DB_ALL_DOCS, <<16#FF>>};
+        EK2 when is_binary(EK2) -> {?DB_ALL_DOCS, EK2}
+    end,
+
+    StartKey3 = erlfdb_tuple:pack(StartKey2, DbPrefix),
+    EndKey3 = erlfdb_tuple:pack(EndKey2, DbPrefix),
+
+    % FoundationDB ranges are applied as SK <= key < EK
+    % By default, CouchDB is SK <= key <= EK with the
+    % optional inclusive_end=false option changing that
+    % to SK <= key < EK. Also, remember that CouchDB
+    % swaps the meaning of SK and EK based on direction.
+    %
+    % Thus we have this wonderful bit of logic to account
+    % for all of those combinations.
+
+    StartKey4 = case {Reverse, InclusiveEnd} of
+        {true, false} ->
+            erlfdb_key:first_greater_than(StartKey3);
+        _ ->
+            StartKey3
+    end,
+
+    EndKey4 = case {Reverse, InclusiveEnd} of
+        {false, true} when EndKey0 /= undefined ->
+            erlfdb_key:first_greater_than(EndKey3);
+        {true, _} ->
+            erlfdb_key:first_greater_than(EndKey3);
+        _ ->
+            EndKey3
+    end,
+
+    {Reverse, StartKey4, EndKey4}.
+
+
+% Normalize a since= parameter into a versionstamp: zero-ish values
+% (<<>>, <<"0">>, 0) map to the zero versionstamp, now maps to the max
+% versionstamp, and a 24-char hex string is decoded by re-attaching
+% the versionstamp type tag (51, see vs_to_seq/1) and unpacking it.
+% Lists are converted to binaries first; anything else is rejected
+% with invalid_since_seq.
+get_since_seq(Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0->
+    fabric2_util:seq_zero_vs();
+
+get_since_seq(Seq) when Seq == now; Seq == <<"now">> ->
+    fabric2_util:seq_max_vs();
+
+get_since_seq(Seq) when is_binary(Seq), size(Seq) == 24 ->
+    Seq1 = fabric2_util:from_hex(Seq),
+    Seq2 = <<51:8, Seq1/binary>>,
+    {SeqVS} = erlfdb_tuple:unpack(Seq2),
+    SeqVS;
+
+get_since_seq(List) when is_list(List) ->
+    get_since_seq(list_to_binary(List));
+
+get_since_seq(Seq) ->
+    erlang:error({invalid_since_seq, Seq}).
+
+
+% Return the erlfdb database handle, lazily copying it from the fabric
+% application environment into this process's dictionary so repeated
+% transactions avoid the application env lookup.
+get_db_handle() ->
+    case get(?PDICT_DB_KEY) of
+        undefined ->
+            {ok, Db} = application:get_env(fabric, db),
+            put(?PDICT_DB_KEY, Db),
+            Db;
+        Db ->
+            Db
+    end.
+
+
+% Assert that the db handle carries a live erlfdb transaction; raise
+% transaction_required otherwise.
+require_transaction(#{} = Db) ->
+    case Db of
+        #{tx := {erlfdb_transaction, _}} -> ok;
+        _ -> erlang:error(transaction_required)
+    end.
+
+
+ensure_current(Db) ->
+    ensure_current(Db, true).
+
+
+% Validate that a db handle is still current before it is used inside
+% a transaction. Two staleness checks are performed:
+%
+%   1. The global metadata version key: if it changed, some database
+%      was created/deleted or had its config altered, so the handle is
+%      rebuilt via reopen/1.
+%   2. The per-database version key (skipped when CheckDbVersion is
+%      false, and done at most once per transaction via the pdict
+%      flag): if it changed, the cached copy in fabric2_server is
+%      evicted and the handle is rebuilt.
+%
+% Fix: the original code discarded the result of the first check's
+% case expression, so a reopened handle was thrown away and the stale
+% Db kept being used; the fresh handle is now threaded through as Db1.
+ensure_current(#{} = Db0, CheckDbVersion) ->
+    require_transaction(Db0),
+
+    #{
+        tx := Tx,
+        md_version := MetaDataVersion
+    } = Db0,
+
+    Db1 = case erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)) of
+        MetaDataVersion -> Db0;
+        _NewVersion -> reopen(Db0)
+    end,
+
+    AlreadyChecked = get(?PDICT_CHECKED_DB_IS_CURRENT),
+    if not CheckDbVersion orelse AlreadyChecked == true -> Db1; true ->
+        #{
+            db_prefix := DbPrefix,
+            db_version := DbVersion
+        } = Db1,
+
+        DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+
+        case erlfdb:wait(erlfdb:get(Tx, DbVersionKey)) of
+            DbVersion ->
+                put(?PDICT_CHECKED_DB_IS_CURRENT, true),
+                Db1;
+            _NewDBVersion ->
+                fabric2_server:remove(maps:get(name, Db1)),
+                reopen(Db1)
+        end
+    end.
+
+
+is_transaction_applied(Tx) ->
+    is_commit_unknown_result()
+        andalso has_transaction_id()
+        andalso transaction_id_exists(Tx).
+
+
+get_previous_transaction_result() ->
+    get(?PDICT_TX_RES_KEY).
+
+
+execute_transaction(Tx, Fun, LayerPrefix) ->
+    put(?PDICT_CHECKED_DB_IS_CURRENT, false),
+    Result = Fun(Tx),
+    case erlfdb:is_read_only(Tx) of
+        true ->
+            ok;
+        false ->
+            erlfdb:set(Tx, get_transaction_id(Tx, LayerPrefix), <<>>),
+            put(?PDICT_TX_RES_KEY, Result)
+    end,
+    Result.
+
+
+clear_transaction() ->
+    fabric2_txids:remove(get(?PDICT_TX_ID_KEY)),
+    erase(?PDICT_CHECKED_DB_IS_CURRENT),
+    erase(?PDICT_TX_ID_KEY),
+    erase(?PDICT_TX_RES_KEY).
+
+
+is_commit_unknown_result() ->
+    erlfdb:get_last_error() == ?COMMIT_UNKNOWN_RESULT.
+
+
+has_transaction_id() ->
+    is_binary(get(?PDICT_TX_ID_KEY)).
+
+
+transaction_id_exists(Tx) ->
+    erlfdb:wait(erlfdb:get(Tx, get(?PDICT_TX_ID_KEY))) == <<>>.
+
+
+get_transaction_id(Tx, LayerPrefix) ->
+    case get(?PDICT_TX_ID_KEY) of
+        undefined ->
+            TxId = fabric2_txids:create(Tx, LayerPrefix),
+            put(?PDICT_TX_ID_KEY, TxId),
+            TxId;
+        TxId when is_binary(TxId) ->
+            TxId
+    end.
+
+
+% Build an incomplete versionstamp for this transaction. The all-ones
+% transaction version marks it as incomplete (to be filled in at
+% commit when packed with pack_vs and written via the
+% set_versionstamped_* mutations), while the per-transaction id from
+% get_next_tx_id/1 keeps multiple versionstamped writes within one
+% transaction ordered and distinct.
+new_versionstamp(Tx) ->
+    TxId = erlfdb:get_next_tx_id(Tx),
+    {versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF, TxId}.
+
diff --git a/src/fabric/src/fabric2_server.erl b/src/fabric/src/fabric2_server.erl
new file mode 100644
index 0000000..5b826cd
--- /dev/null
+++ b/src/fabric/src/fabric2_server.erl
@@ -0,0 +1,104 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_server).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    fetch/1,
+    store/1,
+    remove/1
+]).
+
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(CLUSTER_FILE, "/usr/local/etc/foundationdb/fdb.cluster").
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+% Look up a cached db handle (a map) by name; undefined on a cache miss.
+fetch(DbName) when is_binary(DbName) ->
+    case ets:lookup(?MODULE, DbName) of
+        [{DbName, #{} = Db}] -> Db;
+        [] -> undefined
+    end.
+
+
+% Cache a db handle, first scrubbing per-request state: the transaction
+% reference and the caller's user context must not be shared across users.
+store(#{name := DbName} = Db0) when is_binary(DbName) ->
+    Db1 = Db0#{
+        tx := undefined,
+        user_ctx := #user_ctx{}
+    },
+    true = ets:insert(?MODULE, {DbName, Db1}),
+    ok.
+
+
+% Evict a db handle from the cache (e.g., when its db_version changed).
+remove(DbName) when is_binary(DbName) ->
+    true = ets:delete(?MODULE, DbName),
+    ok.
+
+
+% Create the public db-handle cache table (reads and writes come from
+% caller processes, hence public + both concurrency options) and open the
+% global FDB handle, published via the fabric application env under 'db'.
+init(_) ->
+    ets:new(?MODULE, [
+            public,
+            named_table,
+            {read_concurrency, true},
+            {write_concurrency, true}
+        ]),
+
+    Db = case application:get_env(fabric, eunit_run) of
+        {ok, true} ->
+            % eunit runs get a fresh throwaway test database
+            erlfdb_util:get_test_db([empty]);
+        undefined ->
+            ClusterStr = config:get("erlfdb", "cluster_file", ?CLUSTER_FILE),
+            erlfdb:open(iolist_to_binary(ClusterStr))
+    end,
+    application:set_env(fabric, db, Db),
+
+    {ok, nil}.
+
+
+terminate(_, _St) ->
+    ok.
+
+
+% This server exists only to own the ETS table and open the db handle; no
+% messages are expected, so any call/cast/info stops it with a bad_* reason.
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+handle_cast(Msg, St) ->
+    {stop, {bad_cast, Msg}, St}.
+
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
diff --git a/src/fabric/src/fabric2_sup.erl b/src/fabric/src/fabric2_sup.erl
new file mode 100644
index 0000000..73c6c1f
--- /dev/null
+++ b/src/fabric/src/fabric2_sup.erl
@@ -0,0 +1,47 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_sup).
+-behaviour(supervisor).
+-vsn(1).
+
+
+-export([
+    start_link/1
+]).
+
+-export([
+    init/1
+]).
+
+
+start_link(Args) ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
+
+
+% one_for_one with a tight restart budget: at most 1 restart per 5 seconds
+% before the supervisor itself gives up. Children rely on the default
+% restart/shutdown/type values (permanent workers).
+init([]) ->
+    Flags = #{
+        strategy => one_for_one,
+        intensity => 1,
+        period => 5
+    },
+    Children = [
+        #{
+            id => fabric2_server,
+            start => {fabric2_server, start_link, []}
+        },
+        #{
+            id => fabric2_txids,
+            start => {fabric2_txids, start_link, []}
+        }
+    ],
+    {ok, {Flags, Children}}.
diff --git a/src/fabric/src/fabric2_txids.erl b/src/fabric/src/fabric2_txids.erl
new file mode 100644
index 0000000..bbb8bdf
--- /dev/null
+++ b/src/fabric/src/fabric2_txids.erl
@@ -0,0 +1,144 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_txids).
+-behaviour(gen_server).
+-vsn(1).
+
+
+-export([
+    start_link/0,
+    create/2,
+    remove/1
+]).
+
+
+-export([
+    init/1,
+    terminate/2,
+    handle_call/3,
+    handle_cast/2,
+    handle_info/2,
+    code_change/3
+]).
+
+
+-include("fabric2.hrl").
+
+
+-define(ONE_HOUR, 3600000000).
+-define(MAX_TX_IDS, 1000).
+
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+% Build a fresh transaction-id key. With no layer prefix, resolve the
+% \"couchdb\" directory first and retry with its prefix.
+create(Tx, undefined) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    create(Tx, Prefix);
+
+create(_Tx, LayerPrefix) ->
+    % Timestamp-ordered key so sweep/2 can range-delete everything older
+    % than a cutoff; the uuid avoids collisions within a microsecond.
+    {Mega, Secs, Micro} = os:timestamp(),
+    Key = {?TX_IDS, Mega, Secs, Micro, fabric2_util:uuid()},
+    erlfdb_tuple:pack(Key, LayerPrefix).
+
+
+% Queue a used transaction id for (batched) deletion; no-op when the
+% transaction never allocated one.
+remove(TxId) when is_binary(TxId) ->
+    gen_server:cast(?MODULE, {remove, TxId});
+
+remove(undefined) ->
+    ok.
+
+
+
+% State: the time of the last full sweep plus the ids queued for deletion.
+init(_) ->
+    {ok, #{
+        last_sweep => os:timestamp(),
+        txids => []
+    }}.
+
+
+% Best-effort cleanup on shutdown: clear any transaction-id keys still
+% queued for deletion so they don't linger in FDB until the next sweep.
+terminate(_, #{txids := TxIds}) ->
+    if TxIds == [] -> ok; true ->
+        fabric2_fdb:transactional(fun(Tx) ->
+            % BUG FIX: lists:foreach/2 needs the list argument; the
+            % original called a non-existent lists:foreach/1 and would
+            % crash with undef whenever TxIds was non-empty.
+            lists:foreach(fun(TxId) ->
+                erlfdb:clear(Tx, TxId)
+            end, TxIds)
+        end)
+    end,
+    ok.
+
+
+% No synchronous API is exposed; any call is a programming error.
+handle_call(Msg, _From, St) ->
+    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
+
+
+% Accumulate ids to delete; flush when either an hour has passed since the
+% last sweep or the batch reaches ?MAX_TX_IDS entries.
+handle_cast({remove, TxId}, St) ->
+    #{
+        last_sweep := LastSweep,
+        txids := TxIds
+    } = St,
+
+    NewTxIds = [TxId | TxIds],
+    NewSt = St#{txids := NewTxIds},
+
+    % now_diff/2 is in microseconds; ?ONE_HOUR is 3600000000 us.
+    NeedsSweep = timer:now_diff(os:timestamp(), LastSweep) > ?ONE_HOUR,
+
+    case NeedsSweep orelse length(NewTxIds) >= ?MAX_TX_IDS of
+        true ->
+            {noreply, clean(NewSt, NeedsSweep)};
+        false ->
+            {noreply, NewSt}
+    end.
+
+
+handle_info(Msg, St) ->
+    {stop, {bad_info, Msg}, St}.
+
+
+code_change(_OldVsn, St, _Extra) ->
+    {ok, St}.
+
+
+% Delete the queued transaction-id keys in one FDB transaction and, when a
+% sweep is due, also range-delete every id older than the previous sweep.
+% Returns the new state with the queue emptied.
+clean(St, NeedsSweep) ->
+    #{
+        last_sweep := LastSweep,
+        txids := TxIds
+    } = St,
+    fabric2_fdb:transactional(fun(Tx) ->
+        lists:foreach(fun(TxId) ->
+            erlfdb:clear(Tx, TxId)
+        end, TxIds),
+        case NeedsSweep of
+            true ->
+                sweep(Tx, LastSweep),
+                St#{
+                    last_sweep := os:timestamp(),
+                    txids := []
+                };
+            false ->
+                St#{txids := []}
+        end
+    end).
+
+
+% Clear every transaction-id key with a timestamp earlier than the last
+% sweep. Keys are timestamp-ordered (see create/2), so this is one range
+% delete under the couchdb directory prefix. The option asks erlfdb not to
+% attach a write conflict range to the next write, so the bulk clear does
+% not conflict with concurrent transactions.
+sweep(Tx, {Mega, Secs, Micro}) ->
+    Root = erlfdb_directory:root(),
+    CouchDB = erlfdb_directory:create_or_open(Tx, Root, [<<"couchdb">>]),
+    Prefix = erlfdb_directory:get_name(CouchDB),
+    StartKey = erlfdb_tuple:pack({?TX_IDS}, Prefix),
+    EndKey = erlfdb_tuple:pack({?TX_IDS, Mega, Secs, Micro}, Prefix),
+    erlfdb:set_option(Tx, next_write_no_write_conflict_range),
+    erlfdb:clear_range(Tx, StartKey, EndKey).
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
new file mode 100644
index 0000000..6e2df67
--- /dev/null
+++ b/src/fabric/src/fabric2_util.erl
@@ -0,0 +1,203 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_util).
+
+
+-export([
+    revinfo_to_path/1,
+    sort_revinfos/1,
+
+    seq_zero_vs/0,
+    seq_max_vs/0,
+
+    user_ctx_to_json/1,
+
+    validate_security_object/1,
+
+    get_value/2,
+    get_value/3,
+    to_hex/1,
+    from_hex/1,
+    uuid/0
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+% Convert a rev-info map into a couch-style rev tree path: the full rev
+% history oldest-first, with only the leaf carrying RevInfo and every
+% ancestor marked ?REV_MISSING. Returns {StartPos, Path} where StartPos is
+% the position of the first (oldest) rev in the path.
+revinfo_to_path(RevInfo) ->
+    #{
+        rev_id := {RevPos, Rev},
+        rev_path := RevPath
+    } = RevInfo,
+    % rev_path appears to be stored newest-first; reverse and append the
+    % leaf rev to get oldest-first order -- TODO confirm against writers.
+    Revs = lists:reverse(RevPath, [Rev]),
+    Path = revinfo_to_path(RevInfo, Revs),
+    {RevPos - length(Revs) + 1, Path}.
+
+
+% Leaf: attach the full RevInfo, no children.
+revinfo_to_path(RevInfo, [Rev]) ->
+    {Rev, RevInfo, []};
+
+% Interior node: body is missing, single child chain continues below.
+revinfo_to_path(RevInfo, [Rev | Rest]) ->
+    {Rev, ?REV_MISSING, [revinfo_to_path(RevInfo, Rest)]}.
+
+
+% Sort rev infos in descending winner order: live (not deleted) revisions
+% before deleted ones, then higher rev positions, then higher rev hashes.
+sort_revinfos(RevInfos) ->
+    CmpFun = fun(A, B) -> rev_sort_key(A) > rev_sort_key(B) end,
+    lists:sort(CmpFun, RevInfos).
+
+
+% Sort key tuple; 'not Deleted' puts live revisions first under '>'
+% because true > false in Erlang's term order.
+rev_sort_key(#{} = RevInfo) ->
+    #{
+        deleted := Deleted,
+        rev_id := {RevPos, Rev}
+    } = RevInfo,
+    {not Deleted, RevPos, Rev}.
+
+
+% Smallest possible versionstamp; used as a lower sequence bound.
+seq_zero_vs() ->
+    {versionstamp, 0, 0, 0}.
+
+
+% Largest possible versionstamp (2^64-1, 2^16-1, 2^16-1); upper bound.
+seq_max_vs() ->
+    {versionstamp, 18446744073709551615, 65535, 65535}.
+
+
+% JSON (EJSON) view of the db's user context, including the db name.
+user_ctx_to_json(Db) ->
+    UserCtx = fabric2_db:get_user_ctx(Db),
+    {[
+        {<<"db">>, fabric2_db:name(Db)},
+        {<<"name">>, UserCtx#user_ctx.name},
+        {<<"roles">>, UserCtx#user_ctx.roles}
+    ]}.
+
+
+% Validate a _security object: both "admins" and "members" must be objects
+% whose "names" and "roles" are JSON lists of strings. Throws a descriptive
+% string on failure; returns ok otherwise.
+validate_security_object({SecProps}) ->
+    Admins = get_value(<<"admins">>, SecProps, {[]}),
+    ok = validate_names_and_roles(Admins),
+
+    % we fallback to readers here for backwards compatibility
+    Readers = get_value(<<"readers">>, SecProps, {[]}),
+    Members = get_value(<<"members">>, SecProps, Readers),
+    ok = validate_names_and_roles(Members).
+
+
+validate_names_and_roles({Props}) when is_list(Props) ->
+    validate_json_list_of_strings(<<"names">>, Props),
+    validate_json_list_of_strings(<<"roles">>, Props);
+validate_names_and_roles(_) ->
+    throw("admins or members must be a JSON list of strings").
+
+
+% A missing member defaults to [] (valid); any non-list or a list holding
+% a non-binary element is rejected.
+validate_json_list_of_strings(Member, Props) ->
+    case get_value(Member, Props, []) of
+        Values when is_list(Values) ->
+            NonBinary = lists:filter(fun(V) -> not is_binary(V) end, Values),
+            if NonBinary == [] -> ok; true ->
+                MemberStr = binary_to_list(Member),
+                throw(MemberStr ++ " must be a JSON list of strings")
+            end;
+        _ ->
+            MemberStr = binary_to_list(Member),
+            throw(MemberStr ++ " must be a JSON list of strings")
+    end.
+
+
+% proplist-style lookup: value of the first {Key, Value} tuple, or
+% undefined when absent.
+get_value(Key, List) ->
+    get_value(Key, List, undefined).
+
+
+% Same as get_value/2 but with an explicit default. Uses lists:keyfind/3,
+% the recommended replacement for the legacy lists:keysearch/3 (identical
+% semantics, no {value, _} wrapper).
+get_value(Key, List, Default) ->
+    case lists:keyfind(Key, 1, List) of
+        {Key, Value} ->
+            Value;
+        false ->
+            Default
+    end.
+
+
+% Encode a binary as a lowercase hex binary, e.g. <<255,0>> -> <<"ff00">>.
+to_hex(Bin) ->
+    list_to_binary(to_hex_int(Bin)).
+
+
+% Build the hex encoding as an iolist, two characters per input byte.
+to_hex_int(<<>>) ->
+    [];
+to_hex_int(<<Hi:4, Lo:4, Rest/binary>>) ->
+    % BUG FIX: recurse into to_hex_int/1, not to_hex/1. The original
+    % called to_hex/1, forcing a list_to_binary on every byte pair --
+    % accidental O(n^2) copying (output was equivalent since a binary is
+    % a valid iolist tail, but each step re-flattened the remainder).
+    [nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex_int(Rest)].
+
+
+% Map a 4-bit value (0..15) to its lowercase hex character; crashes on
+% anything out of range, matching the original's strictness.
+nibble_to_hex(I) when I >= 0, I =< 9 ->
+    $0 + I;
+nibble_to_hex(I) when I >= 10, I =< 15 ->
+    $a + I - 10.
+
+
+% Decode a hex binary (upper or lower case accepted) back to raw bytes.
+% Errors with {invalid_hex, _} on odd length or non-hex characters.
+from_hex(Bin) ->
+    iolist_to_binary(from_hex_int(Bin)).
+
+
+from_hex_int(<<>>) ->
+    [];
+% Consume two hex characters per output byte.
+from_hex_int(<<Hi:8, Lo:8, RestBinary/binary>>) ->
+    HiNib = hex_to_nibble(Hi),
+    LoNib = hex_to_nibble(Lo),
+    [<<HiNib:4, LoNib:4>> | from_hex_int(RestBinary)];
+% A single trailing character means odd-length input.
+from_hex_int(<<BadHex/binary>>) ->
+    erlang:error({invalid_hex, BadHex}).
+
+
+% Map a hex character (either case) to its 4-bit value; errors with
+% {invalid_hex, Char} on any other character, same as the original.
+hex_to_nibble(N) when N >= $0, N =< $9 ->
+    N - $0;
+hex_to_nibble(N) when N >= $a, N =< $f ->
+    N - $a + 10;
+hex_to_nibble(N) when N >= $A, N =< $F ->
+    N - $A + 10;
+hex_to_nibble(N) ->
+    erlang:error({invalid_hex, N}).
+
+
+% 128 random bits rendered as a 32-character lowercase hex binary.
+uuid() ->
+    to_hex(crypto:strong_rand_bytes(16)).


[couchdb] 02/06: Disable eunit test suite in fabric

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit d7b015c006eed95d3ad80a2c1daadd94503dfd2d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:09:50 2019 -0500

    Disable eunit test suite in fabric
    
    Most of these tests are for quorum and clustered response handling which
    will no longer exist with FoundationDB. Eventually we'll want to go
    through these and pick out anything that is still applicable and ensure
    that we re-add them to the new test suite.
---
 src/fabric/src/fabric.erl                  | 100 ++--
 src/fabric/src/fabric_db_create.erl        |  60 +-
 src/fabric/src/fabric_db_info.erl          |  62 +-
 src/fabric/src/fabric_doc_open.erl         | 821 ++++++++++++-------------
 src/fabric/src/fabric_doc_open_revs.erl    | 932 ++++++++++++++---------------
 src/fabric/src/fabric_doc_purge.erl        | 692 ++++++++++-----------
 src/fabric/src/fabric_doc_update.erl       | 282 ++++-----
 src/fabric/src/fabric_rpc.erl              |  38 +-
 src/fabric/src/fabric_streams.erl          | 157 ++---
 src/fabric/src/fabric_util.erl             |  48 +-
 src/fabric/src/fabric_view.erl             | 188 +++---
 src/fabric/src/fabric_view_changes.erl     | 362 +++++------
 src/fabric/test/fabric_rpc_purge_tests.erl | 307 ----------
 13 files changed, 1872 insertions(+), 2177 deletions(-)

diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 6d04184..a1f74a8 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -658,53 +658,53 @@ set_namespace(NS, #mrargs{extra = Extra} = Args) ->
     Args#mrargs{extra = [{namespace, NS} | Extra]}.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-update_doc_test_() ->
-    {
-        "Update doc tests", {
-            setup, fun setup/0, fun teardown/1,
-            fun(Ctx) -> [
-                should_throw_conflict(Ctx)
-            ] end
-        }
-    }.
-
-should_throw_conflict(Doc) ->
-    ?_test(begin
-        ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
-    end).
-
-
-setup() ->
-    Doc = #doc{
-        id = <<"test_doc">>,
-        revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
-            159,113>>]},
-        body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
-        atts = [], deleted = false, meta = []
-    },
-    ok = application:ensure_started(config),
-    ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
-    ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
-    ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
-    ok = meck:expect(rexi_utils, recv,
-        fun(_, _, _, _, _, _) ->
-            {ok, {error, [{Doc, conflict}]}}
-        end),
-    ok = meck:expect(couch_util, reorder_results,
-        fun(_, [{_, Res}]) ->
-            [Res]
-        end),
-    ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-    ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-    Doc.
-
-
-teardown(_) ->
-    meck:unload(),
-    ok = application:stop(config).
-
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% update_doc_test_() ->
+%%     {
+%%         "Update doc tests", {
+%%             setup, fun setup/0, fun teardown/1,
+%%             fun(Ctx) -> [
+%%                 should_throw_conflict(Ctx)
+%%             ] end
+%%         }
+%%     }.
+%%
+%% should_throw_conflict(Doc) ->
+%%     ?_test(begin
+%%         ?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
+%%     end).
+%%
+%%
+%% setup() ->
+%%     Doc = #doc{
+%%         id = <<"test_doc">>,
+%%         revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
+%%             159,113>>]},
+%%         body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
+%%         atts = [], deleted = false, meta = []
+%%     },
+%%     ok = application:ensure_started(config),
+%%     ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
+%%     ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
+%%     ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
+%%     ok = meck:expect(rexi_utils, recv,
+%%         fun(_, _, _, _, _, _) ->
+%%             {ok, {error, [{Doc, conflict}]}}
+%%         end),
+%%     ok = meck:expect(couch_util, reorder_results,
+%%         fun(_, [{_, Res}]) ->
+%%             [Res]
+%%         end),
+%%     ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%%     ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%%     Doc.
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload(),
+%%     ok = application:stop(config).
+%%
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index 2edc6dc..81f1ecb 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -185,33 +185,33 @@ make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
 
 db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-db_exists_for_existing_db_test() ->
-    start_meck_(),
-    Mock = fun(DbName) when is_binary(DbName) ->
-        [#shard{dbname = DbName, range = [0,100]}]
-    end,
-    ok = meck:expect(mem3, shards, Mock),
-    ?assertEqual(true, db_exists(<<"foobar">>)),
-    ?assertEqual(true, meck:validate(mem3)),
-    stop_meck_().
-
-db_exists_for_missing_db_test() ->
-    start_meck_(),
-    Mock = fun(DbName) ->
-        erlang:error(database_does_not_exist, DbName)
-    end,
-    ok = meck:expect(mem3, shards, Mock),
-    ?assertEqual(false, db_exists(<<"foobar">>)),
-    ?assertEqual(false, meck:validate(mem3)),
-    stop_meck_().
-
-start_meck_() ->
-    ok = meck:new(mem3).
-
-stop_meck_() ->
-    ok = meck:unload(mem3).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% db_exists_for_existing_db_test() ->
+%%     start_meck_(),
+%%     Mock = fun(DbName) when is_binary(DbName) ->
+%%         [#shard{dbname = DbName, range = [0,100]}]
+%%     end,
+%%     ok = meck:expect(mem3, shards, Mock),
+%%     ?assertEqual(true, db_exists(<<"foobar">>)),
+%%     ?assertEqual(true, meck:validate(mem3)),
+%%     stop_meck_().
+%%
+%% db_exists_for_missing_db_test() ->
+%%     start_meck_(),
+%%     Mock = fun(DbName) ->
+%%         erlang:error(database_does_not_exist, DbName)
+%%     end,
+%%     ok = meck:expect(mem3, shards, Mock),
+%%     ?assertEqual(false, db_exists(<<"foobar">>)),
+%%     ?assertEqual(false, meck:validate(mem3)),
+%%     stop_meck_().
+%%
+%% start_meck_() ->
+%%     ok = meck:new(mem3).
+%%
+%% stop_meck_() ->
+%%     ok = meck:unload(mem3).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
index bb7a353..8e4cd9e 100644
--- a/src/fabric/src/fabric_db_info.erl
+++ b/src/fabric/src/fabric_db_info.erl
@@ -155,34 +155,34 @@ get_cluster_info(Shards) ->
     {ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-get_cluster_info_test_() ->
-    {
-        setup,
-        fun setup/0,
-        fun get_cluster_info_test_generator/1
-    }.
-
-
-setup() ->
-    Quorums = [1, 2, 3],
-    Shards = [1, 3, 5, 8, 12, 24],
-    [{N, Q} || N <- Quorums, Q <- Shards].
-
-get_cluster_info_test_generator([]) ->
-    [];
-get_cluster_info_test_generator([{N, Q} | Rest]) ->
-    {generator,
-    fun() ->
-        Nodes = lists:seq(1, 8),
-        Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
-        {ok, Info} = get_cluster_info(Shards),
-        [
-            ?_assertEqual(N, couch_util:get_value(n, Info)),
-            ?_assertEqual(Q, couch_util:get_value(q, Info))
-        ] ++ get_cluster_info_test_generator(Rest)
-    end}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% get_cluster_info_test_() ->
+%%     {
+%%         setup,
+%%         fun setup/0,
+%%         fun get_cluster_info_test_generator/1
+%%     }.
+%%
+%%
+%% setup() ->
+%%     Quorums = [1, 2, 3],
+%%     Shards = [1, 3, 5, 8, 12, 24],
+%%     [{N, Q} || N <- Quorums, Q <- Shards].
+%%
+%% get_cluster_info_test_generator([]) ->
+%%     [];
+%% get_cluster_info_test_generator([{N, Q} | Rest]) ->
+%%     {generator,
+%%     fun() ->
+%%         Nodes = lists:seq(1, 8),
+%%         Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
+%%         {ok, Info} = get_cluster_info(Shards),
+%%         [
+%%             ?_assertEqual(N, couch_util:get_value(n, Info)),
+%%             ?_assertEqual(Q, couch_util:get_value(q, Info))
+%%         ] ++ get_cluster_info_test_generator(Rest)
+%%     end}.
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 743ad8c7..224800c 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -182,414 +182,415 @@ format_reply(Else, _) ->
     Else.
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
 
-
-setup() ->
-    meck:new([
-        couch_log,
-        couch_stats,
-        fabric,
-        fabric_util,
-        mem3,
-        rexi,
-        rexi_monitor
-    ], [passthrough]).
-
-
-teardown(_) ->
-    meck:unload().
-
-
-open_doc_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_is_r_met(),
-            t_handle_message_down(),
-            t_handle_message_exit(),
-            t_handle_message_reply(),
-            t_store_node_revs(),
-            t_read_repair(),
-            t_handle_response_quorum_met(),
-            t_get_doc_info()
-        ]
-    }.
-
-
-t_is_r_met() ->
-    ?_test(begin
-        Workers0 = [],
-        Workers1 = [nil],
-        Workers2 = [nil, nil],
-
-        SuccessCases = [
-            {{true, foo}, [fabric_util:kv(foo, 2)], 2},
-            {{true, foo}, [fabric_util:kv(foo, 3)], 2},
-            {{true, foo}, [fabric_util:kv(foo, 1)], 1},
-            {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
-            {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
-            {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
-        ],
-        lists:foreach(fun({Expect, Replies, Q}) ->
-            ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
-        end, SuccessCases),
-
-        WaitForMoreCases = [
-            {[fabric_util:kv(foo, 1)], 2},
-            {[fabric_util:kv(foo, 2)], 3},
-            {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
-        ],
-        lists:foreach(fun({Replies, Q}) ->
-            ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
-        end, WaitForMoreCases),
-
-        FailureCases = [
-            {Workers0, [fabric_util:kv(foo, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
-            {Workers1, [fabric_util:kv(foo, 2)], 3}
-        ],
-        lists:foreach(fun({Workers, Replies, Q}) ->
-            ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
-        end, FailureCases)
-    end).
-
-
-t_handle_message_down() ->
-    Node0 = 'foo@localhost',
-    Node1 = 'bar@localhost',
-    Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
-    Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
-    Workers0 = [#shard{node=Node0} || _ <- [a, b]],
-    Worker1 = #shard{node=Node1},
-    Workers1 = Workers0 ++ [Worker1],
-
-    ?_test(begin
-        % Stop when no more workers are left
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Down0, nil, #acc{workers=Workers0})
-        ),
-
-        % Continue when we have more workers
-        ?assertEqual(
-            {ok, #acc{workers=[Worker1]}},
-            handle_message(Down0, nil, #acc{workers=Workers1})
-        ),
-
-        % A second DOWN removes the remaining workers
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Down1, nil, #acc{workers=[Worker1]})
-        )
-    end).
-
-
-t_handle_message_exit() ->
-    Exit = {rexi_EXIT, nil},
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-
-    ?_test(begin
-        % Only removes the specified worker
-        ?assertEqual(
-            {ok, #acc{workers=[Worker1]}},
-            handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
-        ),
-
-        ?assertEqual(
-            {ok, #acc{workers=[Worker0]}},
-            handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
-        ),
-
-        % We bail if it was the last worker
-        ?assertEqual(
-            {stop, #acc{workers=[]}},
-            handle_message(Exit, Worker0, #acc{workers=[Worker0]})
-        )
-    end).
-
-
-t_handle_message_reply() ->
-    Worker0 = #shard{ref=erlang:make_ref()},
-    Worker1 = #shard{ref=erlang:make_ref()},
-    Worker2 = #shard{ref=erlang:make_ref()},
-    Workers = [Worker0, Worker1, Worker2],
-    Acc0 = #acc{workers=Workers, r=2, replies=[]},
-
-    ?_test(begin
-        meck:expect(rexi, kill_all, fun(_) -> ok end),
-
-        % Test that we continue when we haven't met R yet
-        ?assertMatch(
-            {ok, #acc{
-                workers=[Worker0, Worker1],
-                replies=[{foo, {foo, 1}}]
-            }},
-            handle_message(foo, Worker2, Acc0)
-        ),
-
-        ?assertMatch(
-            {ok, #acc{
-                workers=[Worker0, Worker1],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            }},
-            handle_message(bar, Worker2, Acc0#acc{
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        % Test that we don't get a quorum when R isn't met. q_reply
-        % isn't set and state remains unchanged and {stop, NewAcc}
-        % is returned. Bit subtle on the assertions here.
-
-        ?assertMatch(
-            {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
-            handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
-        ),
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            }},
-            handle_message(bar, Worker0, Acc0#acc{
-                workers=[Worker0],
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        % Check that when R is met we stop with a new state and
-        % a q_reply.
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{foo, {foo, 2}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker1, Acc0#acc{
-                workers=[Worker0, Worker1],
-                replies=[{foo, {foo, 1}}]
-            })
-        ),
-
-        ?assertEqual(
-            {stop, #acc{
-                workers=[],
-                r=1,
-                replies=[{foo, {foo, 1}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker0, Acc0#acc{r=1})
-        ),
-
-        ?assertMatch(
-            {stop, #acc{
-                workers=[],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
-                state=r_met,
-                q_reply=foo
-            }},
-            handle_message(foo, Worker0, Acc0#acc{
-                workers=[Worker0],
-                replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
-            })
-        )
-    end).
-
-
-t_store_node_revs() ->
-    W1 = #shard{node = w1, ref = erlang:make_ref()},
-    W2 = #shard{node = w2, ref = erlang:make_ref()},
-    W3 = #shard{node = w3, ref = erlang:make_ref()},
-    Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    NFM = {not_found, missing},
-
-    InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
-
-    ?_test(begin
-        meck:expect(rexi, kill_all, fun(_) -> ok end),
-
-        % Simple case
-        {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
-
-        % Make sure we only hold the head rev
-        {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
-        ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
-
-        % Make sure we don't capture anything on error
-        {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
-        ?assertEqual([], NodeRevs3),
-
-        % Make sure we accumulate node revs
-        Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
-        {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
-        ?assertEqual(
-                [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-                NodeRevs4
-            ),
-
-        % Make sure rexi_DOWN doesn't modify node_revs
-        Down = {rexi_DOWN, nil, {nil, w1}, nil},
-        {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
-
-        % Make sure rexi_EXIT doesn't modify node_revs
-        Exit = {rexi_EXIT, reason},
-        {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
-
-        % Make sure an error doesn't remove any node revs
-        {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
-        ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
-
-        % Make sure we have all of our node_revs when meeting
-        % quorum
-        {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
-        {ok, Acc3} = handle_message(Foo2, W2, Acc2),
-        {stop, Acc4} = handle_message(NFM, W3, Acc3),
-        ?assertEqual(
-                [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
-                Acc4#acc.node_revs
-            )
-    end).
-
-
-t_read_repair() ->
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    NFM = {not_found, missing},
-
-    ?_test(begin
-        meck:expect(couch_log, notice, fun(_, _) -> ok end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
-        % Test when we have actual doc data to repair
-        meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
-        Acc0 = #acc{
-            dbname = <<"name">>,
-            replies = [fabric_util:kv(Foo1,1)]
-        },
-        ?assertEqual(Foo1, read_repair(Acc0)),
-
-        meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
-        Acc1 = #acc{
-            dbname = <<"name">>,
-            replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
-        },
-        ?assertEqual(Foo2, read_repair(Acc1)),
-
-        % Test when we have nothing but errors
-        Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
-        ?assertEqual(NFM, read_repair(Acc2)),
-
-        Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
-        ?assertEqual(NFM, read_repair(Acc3)),
-
-        Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
-        ?assertEqual(bar, read_repair(Acc4))
-    end).
-
-
-t_handle_response_quorum_met() ->
-    Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
-
-    ?_test(begin
-        meck:expect(couch_log, notice, fun(_, _) -> ok end),
-        meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-
-        BasicOkAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,2)],
-            q_reply=Foo1
-        },
-        ?assertEqual(Foo1, handle_response(BasicOkAcc)),
-
-        WithAncestorsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
-            q_reply=Foo2
-        },
-        ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
-
-        % This also checks when the quorum isn't the most recent
-        % revision.
-        DeeperWinsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
-            q_reply=Foo1
-        },
-        ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
-
-        % Check that we return the proper doc based on rev
-        % (ie, pos is equal)
-        BiggerRevWinsAcc = #acc{
-            state=r_met,
-            replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
-            q_reply=Bar1
-        },
-        ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
-
-        % r_not_met is a proxy to read_repair so we rely on
-        % read_repair_test for those conditions.
-    end).
-
-
-t_get_doc_info() ->
-    ?_test(begin
-        meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
-        meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-        meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
-        meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
-        meck:expect(rexi_monitor, stop, fun(_) -> ok end),
-        meck:expect(mem3, shards, fun(_, _) -> ok end),
-        meck:expect(mem3, n, fun(_) -> 3 end),
-        meck:expect(mem3, quorum, fun(_) -> 2 end),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            {ok, #acc{state = r_not_met}}
-        end),
-        Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assertEqual({error, quorum_not_met}, Rsp1),
-
-        Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({error, quorum_not_met}, Rsp2),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            {ok, #acc{state = r_met, q_reply = not_found}}
-        end),
-        MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assertEqual({not_found, missing}, MissingRsp1),
-        MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({not_found, missing}, MissingRsp2),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            A = #doc_info{},
-            {ok, #acc{state = r_met, q_reply = {ok, A}}}
-        end),
-        {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
-        ?assert(is_record(Rec1, doc_info)),
-
-        meck:expect(fabric_util, recv, fun(_, _, _, _) ->
-            A = #full_doc_info{deleted = true},
-            {ok, #acc{state = r_met, q_reply = {ok, A}}}
-        end),
-        Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
-        ?assertEqual({not_found, deleted}, Rsp3),
-        {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
-        ?assert(is_record(Rec2, full_doc_info))
-    end).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup() ->
+%%     meck:new([
+%%         couch_log,
+%%         couch_stats,
+%%         fabric,
+%%         fabric_util,
+%%         mem3,
+%%         rexi,
+%%         rexi_monitor
+%%     ], [passthrough]).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%%
+%% open_doc_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             t_is_r_met(),
+%%             t_handle_message_down(),
+%%             t_handle_message_exit(),
+%%             t_handle_message_reply(),
+%%             t_store_node_revs(),
+%%             t_read_repair(),
+%%             t_handle_response_quorum_met(),
+%%             t_get_doc_info()
+%%         ]
+%%     }.
+%%
+%%
+%% t_is_r_met() ->
+%%     ?_test(begin
+%%         Workers0 = [],
+%%         Workers1 = [nil],
+%%         Workers2 = [nil, nil],
+%%
+%%         SuccessCases = [
+%%             {{true, foo}, [fabric_util:kv(foo, 2)], 2},
+%%             {{true, foo}, [fabric_util:kv(foo, 3)], 2},
+%%             {{true, foo}, [fabric_util:kv(foo, 1)], 1},
+%%             {{true, foo}, [fabric_util:kv(foo, 2), fabric_util:kv(bar, 1)], 2},
+%%             {{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
+%%             {{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
+%%         ],
+%%         lists:foreach(fun({Expect, Replies, Q}) ->
+%%             ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
+%%         end, SuccessCases),
+%%
+%%         WaitForMoreCases = [
+%%             {[fabric_util:kv(foo, 1)], 2},
+%%             {[fabric_util:kv(foo, 2)], 3},
+%%             {[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
+%%         ],
+%%         lists:foreach(fun({Replies, Q}) ->
+%%             ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
+%%         end, WaitForMoreCases),
+%%
+%%         FailureCases = [
+%%             {Workers0, [fabric_util:kv(foo, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
+%%             {Workers1, [fabric_util:kv(foo, 2)], 3}
+%%         ],
+%%         lists:foreach(fun({Workers, Replies, Q}) ->
+%%             ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
+%%         end, FailureCases)
+%%     end).
+%%
+%%
+%% t_handle_message_down() ->
+%%     Node0 = 'foo@localhost',
+%%     Node1 = 'bar@localhost',
+%%     Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
+%%     Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
+%%     Workers0 = [#shard{node=Node0} || _ <- [a, b]],
+%%     Worker1 = #shard{node=Node1},
+%%     Workers1 = Workers0 ++ [Worker1],
+%%
+%%     ?_test(begin
+%%         % Stop when no more workers are left
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Down0, nil, #acc{workers=Workers0})
+%%         ),
+%%
+%%         % Continue when we have more workers
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker1]}},
+%%             handle_message(Down0, nil, #acc{workers=Workers1})
+%%         ),
+%%
+%%         % A second DOWN removes the remaining workers
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Down1, nil, #acc{workers=[Worker1]})
+%%         )
+%%     end).
+%%
+%%
+%% t_handle_message_exit() ->
+%%     Exit = {rexi_EXIT, nil},
+%%     Worker0 = #shard{ref=erlang:make_ref()},
+%%     Worker1 = #shard{ref=erlang:make_ref()},
+%%
+%%     ?_test(begin
+%%         % Only removes the specified worker
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker1]}},
+%%             handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
+%%         ),
+%%
+%%         ?assertEqual(
+%%             {ok, #acc{workers=[Worker0]}},
+%%             handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
+%%         ),
+%%
+%%         % We bail if it was the last worker
+%%         ?assertEqual(
+%%             {stop, #acc{workers=[]}},
+%%             handle_message(Exit, Worker0, #acc{workers=[Worker0]})
+%%         )
+%%     end).
+%%
+%%
+%% t_handle_message_reply() ->
+%%     Worker0 = #shard{ref=erlang:make_ref()},
+%%     Worker1 = #shard{ref=erlang:make_ref()},
+%%     Worker2 = #shard{ref=erlang:make_ref()},
+%%     Workers = [Worker0, Worker1, Worker2],
+%%     Acc0 = #acc{workers=Workers, r=2, replies=[]},
+%%
+%%     ?_test(begin
+%%         meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%%         % Test that we continue when we haven't met R yet
+%%         ?assertMatch(
+%%             {ok, #acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{foo, {foo, 1}}]
+%%             }},
+%%             handle_message(foo, Worker2, Acc0)
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {ok, #acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             }},
+%%             handle_message(bar, Worker2, Acc0#acc{
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         % Test that we don't get a quorum when R isn't met. q_reply
+%%         % isn't set and state remains unchanged and {stop, NewAcc}
+%%         % is returned. Bit subtle on the assertions here.
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
+%%             handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             }},
+%%             handle_message(bar, Worker0, Acc0#acc{
+%%                 workers=[Worker0],
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         % Check that when R is met we stop with a new state and
+%%         % a q_reply.
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{foo, {foo, 2}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker1, Acc0#acc{
+%%                 workers=[Worker0, Worker1],
+%%                 replies=[{foo, {foo, 1}}]
+%%             })
+%%         ),
+%%
+%%         ?assertEqual(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 r=1,
+%%                 replies=[{foo, {foo, 1}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker0, Acc0#acc{r=1})
+%%         ),
+%%
+%%         ?assertMatch(
+%%             {stop, #acc{
+%%                 workers=[],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
+%%                 state=r_met,
+%%                 q_reply=foo
+%%             }},
+%%             handle_message(foo, Worker0, Acc0#acc{
+%%                 workers=[Worker0],
+%%                 replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+%%             })
+%%         )
+%%     end).
+%%
+%%
+%% t_store_node_revs() ->
+%%     W1 = #shard{node = w1, ref = erlang:make_ref()},
+%%     W2 = #shard{node = w2, ref = erlang:make_ref()},
+%%     W3 = #shard{node = w3, ref = erlang:make_ref()},
+%%     Foo1 = {ok, #doc{id = <<"bar">>, revs = {1, [<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{id = <<"bar">>, revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%%     NFM = {not_found, missing},
+%%
+%%     InitAcc = #acc{workers = [W1, W2, W3], replies = [], r = 2},
+%%
+%%     ?_test(begin
+%%         meck:expect(rexi, kill_all, fun(_) -> ok end),
+%%
+%%         % Simple case
+%%         {ok, #acc{node_revs = NodeRevs1}} = handle_message(Foo1, W1, InitAcc),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs1),
+%%
+%%         % Make sure we only hold the head rev
+%%         {ok, #acc{node_revs = NodeRevs2}} = handle_message(Foo2, W1, InitAcc),
+%%         ?assertEqual([{w1, [{2, <<"foo2">>}]}], NodeRevs2),
+%%
+%%         % Make sure we don't capture anything on error
+%%         {ok, #acc{node_revs = NodeRevs3}} = handle_message(NFM, W1, InitAcc),
+%%         ?assertEqual([], NodeRevs3),
+%%
+%%         % Make sure we accumulate node revs
+%%         Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
+%%         {ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
+%%         ?assertEqual(
+%%                 [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%%                 NodeRevs4
+%%             ),
+%%
+%%         % Make sure rexi_DOWN doesn't modify node_revs
+%%         Down = {rexi_DOWN, nil, {nil, w1}, nil},
+%%         {ok, #acc{node_revs = NodeRevs5}} = handle_message(Down, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs5),
+%%
+%%         % Make sure rexi_EXIT doesn't modify node_revs
+%%         Exit = {rexi_EXIT, reason},
+%%         {ok, #acc{node_revs = NodeRevs6}} = handle_message(Exit, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs6),
+%%
+%%         % Make sure an error doesn't remove any node revs
+%%         {ok, #acc{node_revs = NodeRevs7}} = handle_message(NFM, W2, Acc1),
+%%         ?assertEqual([{w1, [{1, <<"foo">>}]}], NodeRevs7),
+%%
+%%         % Make sure we have all of our node_revs when meeting
+%%         % quorum
+%%         {ok, Acc2} = handle_message(Foo1, W1, InitAcc),
+%%         {ok, Acc3} = handle_message(Foo2, W2, Acc2),
+%%         {stop, Acc4} = handle_message(NFM, W3, Acc3),
+%%         ?assertEqual(
+%%                 [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+%%                 Acc4#acc.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% t_read_repair() ->
+%%     Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%%     NFM = {not_found, missing},
+%%
+%%     ?_test(begin
+%%         meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%%         % Test when we have actual doc data to repair
+%%         meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
+%%         Acc0 = #acc{
+%%             dbname = <<"name">>,
+%%             replies = [fabric_util:kv(Foo1,1)]
+%%         },
+%%         ?assertEqual(Foo1, read_repair(Acc0)),
+%%
+%%         meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
+%%         Acc1 = #acc{
+%%             dbname = <<"name">>,
+%%             replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
+%%         },
+%%         ?assertEqual(Foo2, read_repair(Acc1)),
+%%
+%%         % Test when we have nothing but errors
+%%         Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
+%%         ?assertEqual(NFM, read_repair(Acc2)),
+%%
+%%         Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
+%%         ?assertEqual(NFM, read_repair(Acc3)),
+%%
+%%         Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
+%%         ?assertEqual(bar, read_repair(Acc4))
+%%     end).
+%%
+%%
+%% t_handle_response_quorum_met() ->
+%%     Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+%%     Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
+%%
+%%     ?_test(begin
+%%         meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%         meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%
+%%         BasicOkAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,2)],
+%%             q_reply=Foo1
+%%         },
+%%         ?assertEqual(Foo1, handle_response(BasicOkAcc)),
+%%
+%%         WithAncestorsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
+%%             q_reply=Foo2
+%%         },
+%%         ?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
+%%
+%%         % This also checks when the quorum isn't the most recent
+%%         % revision.
+%%         DeeperWinsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
+%%             q_reply=Foo1
+%%         },
+%%         ?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
+%%
+%%         % Check that we return the proper doc based on rev
+%%         % (ie, pos is equal)
+%%         BiggerRevWinsAcc = #acc{
+%%             state=r_met,
+%%             replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
+%%             q_reply=Bar1
+%%         },
+%%         ?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
+%%
+%%         % r_not_met is a proxy to read_repair so we rely on
+%%         % read_repair_test for those conditions.
+%%     end).
+%%
+%%
+%% t_get_doc_info() ->
+%%     ?_test(begin
+%%         meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
+%%         meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%         meck:expect(fabric_util, submit_jobs, fun(_, _, _) -> ok end),
+%%         meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
+%%         meck:expect(rexi_monitor, stop, fun(_) -> ok end),
+%%         meck:expect(mem3, shards, fun(_, _) -> ok end),
+%%         meck:expect(mem3, n, fun(_) -> 3 end),
+%%         meck:expect(mem3, quorum, fun(_) -> 2 end),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             {ok, #acc{state = r_not_met}}
+%%         end),
+%%         Rsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assertEqual({error, quorum_not_met}, Rsp1),
+%%
+%%         Rsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({error, quorum_not_met}, Rsp2),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             {ok, #acc{state = r_met, q_reply = not_found}}
+%%         end),
+%%         MissingRsp1 = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assertEqual({not_found, missing}, MissingRsp1),
+%%         MissingRsp2 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({not_found, missing}, MissingRsp2),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             A = #doc_info{},
+%%             {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%%         end),
+%%         {ok, Rec1} = fabric_doc_open:go("test", "one", [doc_info]),
+%%         ?assert(is_record(Rec1, doc_info)),
+%%
+%%         meck:expect(fabric_util, recv, fun(_, _, _, _) ->
+%%             A = #full_doc_info{deleted = true},
+%%             {ok, #acc{state = r_met, q_reply = {ok, A}}}
+%%         end),
+%%         Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
+%%         ?assertEqual({not_found, deleted}, Rsp3),
+%%         {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
+%%         ?assert(is_record(Rec2, full_doc_info))
+%%     end).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 8ac3f30..f5b6380 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -313,469 +313,469 @@ collapse_duplicate_revs_int([Reply | Rest]) ->
     [Reply | collapse_duplicate_revs(Rest)].
 
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-setup() ->
-    config:start_link([]),
-    meck:new([fabric, couch_stats, couch_log]),
-    meck:new(fabric_util, [passthrough]),
-    meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:expect(couch_log, notice, fun(_, _) -> ok end),
-    meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-
-
-
-teardown(_) ->
-    (catch meck:unload([fabric, couch_stats, couch_log, fabric_util])),
-    config:stop().
-
-
-state0(Revs, Latest) ->
-    #state{
-        worker_count = 3,
-        workers =
-            [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
-        r = 2,
-        revs = Revs,
-        latest = Latest
-    }.
-
-
-revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
-
-
-foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
-foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
-foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
-foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
-bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
-bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
-baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-
-
-
-open_doc_revs_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            check_empty_response_not_quorum(),
-            check_basic_response(),
-            check_finish_quorum(),
-            check_finish_quorum_newer(),
-            check_no_quorum_on_second(),
-            check_done_on_third(),
-            check_specific_revs_first_msg(),
-            check_revs_done_on_agreement(),
-            check_latest_true(),
-            check_ancestor_counted_in_quorum(),
-            check_not_found_counts_for_descendant(),
-            check_worker_error_skipped(),
-            check_quorum_only_counts_valid_responses(),
-            check_empty_list_when_no_workers_reply(),
-            check_node_rev_stored(),
-            check_node_rev_store_head_only(),
-            check_node_rev_store_multiple(),
-            check_node_rev_dont_store_errors(),
-            check_node_rev_store_non_errors(),
-            check_node_rev_store_concatenate(),
-            check_node_rev_store_concantenate_multiple(),
-            check_node_rev_unmodified_on_down_or_exit(),
-            check_not_found_replies_are_removed_when_doc_found(),
-            check_not_found_returned_when_one_of_docs_not_found(),
-            check_not_found_returned_when_doc_not_found(),
-            check_longer_rev_list_returned(),
-            check_longer_rev_list_not_combined(),
-            check_not_found_removed_and_longer_rev_list()
-        ]
-    }.
-
-
-% Tests for revs=all
-
-
-check_empty_response_not_quorum() ->
-    % Simple smoke test that we don't think we're
-    % done with a first empty response
-    W1 = #shard{node='node1'},
-    W2 = #shard{node='node2'},
-    W3 = #shard{node='node3'},
-    ?_assertMatch(
-        {ok, #state{workers = [W2, W3]}},
-        handle_message({ok, []}, W1, state0(all, false))
-    ).
-
-
-check_basic_response() ->
-    % Check that we've handle a response
-    W1 = #shard{node='node1'},
-    W2 = #shard{node='node2'},
-    W3 = #shard{node='node3'},
-    ?_assertMatch(
-        {ok, #state{reply_count = 1, workers = [W2, W3]}},
-        handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
-    ).
-
-
-check_finish_quorum() ->
-    % Two messages with the same revisions means we're done
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        Expect = {stop, [bar1(), foo1()]},
-        ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
-    end).
-
-
-check_finish_quorum_newer() ->
-    % We count a descendant of a revision for quorum so
-    % foo1 should count for foo2 which means we're finished.
-    % We also validate that read_repair was triggered.
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        Expect = {stop, [bar1(), foo2()]},
-        ok = meck:reset(fabric),
-        ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
-        ok = meck:wait(fabric, update_docs, '_', 5000),
-        ?assertMatch(
-            [{_, {fabric, update_docs, [_, _, _]}, _}],
-            meck:history(fabric)
-        )
-    end).
-
-
-check_no_quorum_on_second() ->
-    % Quorum not yet met for the foo revision so we
-    % would wait for w3
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        ?assertMatch(
-            {ok, #state{workers = [W3]}},
-            handle_message({ok, [bar1()]}, W2, S1)
-        )
-    end).
-
-
-check_done_on_third() ->
-    % The third message of three means we're done no matter
-    % what. Every revision seen in this pattern should be
-    % included.
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(all, false),
-        {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
-        {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
-        Expect = {stop, [bar1(), foo1()]},
-        ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
-    end).
-
-
-% Tests for a specific list of revs
-
-
-check_specific_revs_first_msg() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), false),
-        ?assertMatch(
-            {ok, #state{reply_count = 1, workers = [W2, W3]}},
-            handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
-        )
-    end).
-
-
-check_revs_done_on_agreement() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), false),
-        Msg = {ok, [foo1(), bar1(), bazNF()]},
-        {ok, S1} = handle_message(Msg, W1, S0),
-        Expect = {stop, [bar1(), foo1(), bazNF()]},
-        ?assertEqual(Expect, handle_message(Msg, W2, S1))
-    end).
-
-
-check_latest_true() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo2(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        Expect = {stop, [bar1(), foo2(), bazNF()]},
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1))
-    end).
-
-
-check_ancestor_counted_in_quorum() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo2(), bar1(), bazNF()]},
-        Expect = {stop, [bar1(), foo2(), bazNF()]},
-
-        % Older first
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
-        % Newer first
-        {ok, S2} = handle_message(Msg2, W2, S0),
-        ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-    end).
-
-
-check_not_found_counts_for_descendant() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), bazNF()]},
-        Msg2 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        % not_found first
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
-
-        % not_found second
-        {ok, S2} = handle_message(Msg2, W2, S0),
-        ?assertEqual(Expect, handle_message(Msg1, W1, S2))
-    end).
-
-
-check_worker_error_skipped() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {ok, [foo1(), bar1(), baz1()]},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_quorum_only_counts_valid_responses() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {rexi_EXIT, reason},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {ok, [foo1(), bar1(), baz1()]},
-        Expect = {stop, [bar1(), baz1(), foo1()]},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_empty_list_when_no_workers_reply() ->
-    ?_test(begin
-        W1 = #shard{node='node1'},
-        W2 = #shard{node='node2'},
-        W3 = #shard{node='node3'},
-        S0 = state0(revs(), true),
-        Msg1 = {rexi_EXIT, reason},
-        Msg2 = {rexi_EXIT, reason},
-        Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
-        Expect = {stop, all_workers_died},
-
-        {ok, S1} = handle_message(Msg1, W1, S0),
-        {ok, S2} = handle_message(Msg2, W2, S1),
-        ?assertEqual(Expect, handle_message(Msg3, W3, S2))
-    end).
-
-
-check_node_rev_stored() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
-        ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_head_only() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
-        ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_multiple() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
-        ?assertEqual(
-                [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
-                S1#state.node_revs
-            )
-    end).
-
-
-check_node_rev_dont_store_errors() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
-        ?assertEqual([], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_non_errors() ->
-    ?_test(begin
-        W1 = #shard{node = node1},
-        S0 = state0([], true),
-
-        {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
-        ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
-    end).
-
-
-check_node_rev_store_concatenate() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
-        ?assertEqual(
-                [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
-                S2#state.node_revs
-            )
-    end).
-
-
-check_node_rev_store_concantenate_multiple() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
-        ?assertEqual(
-                [
-                    {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
-                    {node1, [{1, <<"foo">>}]}
-                ],
-                S2#state.node_revs
-            )
-    end).
-
-
-check_node_rev_unmodified_on_down_or_exit() ->
-    ?_test(begin
-        W2 = #shard{node = node2},
-        S0 = state0([], true),
-        S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
-
-        Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
-        {ok, S2} = handle_message(Down, W2, S1),
-        ?assertEqual(
-                [{node1, [{1, <<"foo">>}]}],
-                S2#state.node_revs
-            ),
-
-        Exit = {rexi_EXIT, reason},
-        {ok, S3} = handle_message(Exit, W2, S1),
-        ?assertEqual(
-                [{node1, [{1, <<"foo">>}]}],
-                S3#state.node_revs
-            )
-    end).
-
-
-check_not_found_replies_are_removed_when_doc_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
-        Expect = [bar1(), foo1()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_returned_when_one_of_docs_not_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo1(), foo2(), barNF()]),
-        Expect = [foo1(), foo2(), barNF()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_returned_when_doc_not_found() ->
-    ?_test(begin
-        Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
-        Expect = [barNF(), bazNF(), fooNF()],
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_longer_rev_list_returned() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed()]),
-        Expect = [foo2()],
-        ?assertEqual(2, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_longer_rev_list_not_combined() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
-        Expect = [bar1(), foo2()],
-        ?assertEqual(3, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-check_not_found_removed_and_longer_rev_list() ->
-    ?_test(begin
-        Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
-        Expect = [foo2()],
-        ?assertEqual(3, length(Replies)),
-        ?assertEqual(Expect, dict_format_replies(Replies))
-    end).
-
-
-replies_to_dict(Replies) ->
-    [reply_to_element(R) || R <- Replies].
-
-reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
-    {_, [Rev | _]} = Revs,
-    {{Rev, Revs}, {Reply, 1}};
-reply_to_element(Reply) ->
-    {Reply, {Reply, 1}}.
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%%
+%% setup() ->
+%%     config:start_link([]),
+%%     meck:new([fabric, couch_stats, couch_log]),
+%%     meck:new(fabric_util, [passthrough]),
+%%     meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:expect(couch_log, notice, fun(_, _) -> ok end),
+%%     meck:expect(fabric_util, cleanup, fun(_) -> ok end).
+%%
+%%
+%%
+%% teardown(_) ->
+%%     (catch meck:unload([fabric, couch_stats, couch_log, fabric_util])),
+%%     config:stop().
+%%
+%%
+%% state0(Revs, Latest) ->
+%%     #state{
+%%         worker_count = 3,
+%%         workers =
+%%             [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
+%%         r = 2,
+%%         revs = Revs,
+%%         latest = Latest
+%%     }.
+%%
+%%
+%% revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
+%%
+%%
+%% foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
+%% foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
+%% foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
+%% fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
+%% foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
+%% bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
+%% barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
+%% bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
+%% baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
+%%
+%%
+%%
+%% open_doc_revs_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             check_empty_response_not_quorum(),
+%%             check_basic_response(),
+%%             check_finish_quorum(),
+%%             check_finish_quorum_newer(),
+%%             check_no_quorum_on_second(),
+%%             check_done_on_third(),
+%%             check_specific_revs_first_msg(),
+%%             check_revs_done_on_agreement(),
+%%             check_latest_true(),
+%%             check_ancestor_counted_in_quorum(),
+%%             check_not_found_counts_for_descendant(),
+%%             check_worker_error_skipped(),
+%%             check_quorum_only_counts_valid_responses(),
+%%             check_empty_list_when_no_workers_reply(),
+%%             check_node_rev_stored(),
+%%             check_node_rev_store_head_only(),
+%%             check_node_rev_store_multiple(),
+%%             check_node_rev_dont_store_errors(),
+%%             check_node_rev_store_non_errors(),
+%%             check_node_rev_store_concatenate(),
+%%             check_node_rev_store_concantenate_multiple(),
+%%             check_node_rev_unmodified_on_down_or_exit(),
+%%             check_not_found_replies_are_removed_when_doc_found(),
+%%             check_not_found_returned_when_one_of_docs_not_found(),
+%%             check_not_found_returned_when_doc_not_found(),
+%%             check_longer_rev_list_returned(),
+%%             check_longer_rev_list_not_combined(),
+%%             check_not_found_removed_and_longer_rev_list()
+%%         ]
+%%     }.
+%%
+%%
+%% % Tests for revs=all
+%%
+%%
+%% check_empty_response_not_quorum() ->
+%%     % Simple smoke test that we don't think we're
+%%     % done with a first empty response
+%%     W1 = #shard{node='node1'},
+%%     W2 = #shard{node='node2'},
+%%     W3 = #shard{node='node3'},
+%%     ?_assertMatch(
+%%         {ok, #state{workers = [W2, W3]}},
+%%         handle_message({ok, []}, W1, state0(all, false))
+%%     ).
+%%
+%%
+%% check_basic_response() ->
+%%     % Check that we've handle a response
+%%     W1 = #shard{node='node1'},
+%%     W2 = #shard{node='node2'},
+%%     W3 = #shard{node='node3'},
+%%     ?_assertMatch(
+%%         {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%%         handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
+%%     ).
+%%
+%%
+%% check_finish_quorum() ->
+%%     % Two messages with the same revisions means we're done
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         Expect = {stop, [bar1(), foo1()]},
+%%         ?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
+%%     end).
+%%
+%%
+%% check_finish_quorum_newer() ->
+%%     % We count a descendant of a revision for quorum so
+%%     % foo1 should count for foo2 which means we're finished.
+%%     % We also validate that read_repair was triggered.
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         Expect = {stop, [bar1(), foo2()]},
+%%         ok = meck:reset(fabric),
+%%         ?assertEqual(Expect, handle_message({ok, [foo2(), bar1()]}, W2, S1)),
+%%         ok = meck:wait(fabric, update_docs, '_', 5000),
+%%         ?assertMatch(
+%%             [{_, {fabric, update_docs, [_, _, _]}, _}],
+%%             meck:history(fabric)
+%%         )
+%%     end).
+%%
+%%
+%% check_no_quorum_on_second() ->
+%%     % Quorum not yet met for the foo revision so we
+%%     % would wait for w3
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         ?assertMatch(
+%%             {ok, #state{workers = [W3]}},
+%%             handle_message({ok, [bar1()]}, W2, S1)
+%%         )
+%%     end).
+%%
+%%
+%% check_done_on_third() ->
+%%     % The third message of three means we're done no matter
+%%     % what. Every revision seen in this pattern should be
+%%     % included.
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(all, false),
+%%         {ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
+%%         {ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
+%%         Expect = {stop, [bar1(), foo1()]},
+%%         ?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
+%%     end).
+%%
+%%
+%% % Tests for a specific list of revs
+%%
+%%
+%% check_specific_revs_first_msg() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), false),
+%%         ?assertMatch(
+%%             {ok, #state{reply_count = 1, workers = [W2, W3]}},
+%%             handle_message({ok, [foo1(), bar1(), bazNF()]}, W1, S0)
+%%         )
+%%     end).
+%%
+%%
+%% check_revs_done_on_agreement() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), false),
+%%         Msg = {ok, [foo1(), bar1(), bazNF()]},
+%%         {ok, S1} = handle_message(Msg, W1, S0),
+%%         Expect = {stop, [bar1(), foo1(), bazNF()]},
+%%         ?assertEqual(Expect, handle_message(Msg, W2, S1))
+%%     end).
+%%
+%%
+%% check_latest_true() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo2(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         Expect = {stop, [bar1(), foo2(), bazNF()]},
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1))
+%%     end).
+%%
+%%
+%% check_ancestor_counted_in_quorum() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo2(), bar1(), bazNF()]},
+%%         Expect = {stop, [bar1(), foo2(), bazNF()]},
+%%
+%%         % Older first
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%%         % Newer first
+%%         {ok, S2} = handle_message(Msg2, W2, S0),
+%%         ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%%     end).
+%%
+%%
+%% check_not_found_counts_for_descendant() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), bazNF()]},
+%%         Msg2 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         % not_found first
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         ?assertEqual(Expect, handle_message(Msg2, W2, S1)),
+%%
+%%         % not_found second
+%%         {ok, S2} = handle_message(Msg2, W2, S0),
+%%         ?assertEqual(Expect, handle_message(Msg1, W1, S2))
+%%     end).
+%%
+%%
+%% check_worker_error_skipped() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {ok, [foo1(), bar1(), baz1()]},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_quorum_only_counts_valid_responses() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {rexi_EXIT, reason},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {ok, [foo1(), bar1(), baz1()]},
+%%         Expect = {stop, [bar1(), baz1(), foo1()]},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_empty_list_when_no_workers_reply() ->
+%%     ?_test(begin
+%%         W1 = #shard{node='node1'},
+%%         W2 = #shard{node='node2'},
+%%         W3 = #shard{node='node3'},
+%%         S0 = state0(revs(), true),
+%%         Msg1 = {rexi_EXIT, reason},
+%%         Msg2 = {rexi_EXIT, reason},
+%%         Msg3 = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%%         Expect = {stop, all_workers_died},
+%%
+%%         {ok, S1} = handle_message(Msg1, W1, S0),
+%%         {ok, S2} = handle_message(Msg2, W2, S1),
+%%         ?assertEqual(Expect, handle_message(Msg3, W3, S2))
+%%     end).
+%%
+%%
+%% check_node_rev_stored() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1()]}, W1, S0),
+%%         ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_head_only() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo2()]}, W1, S0),
+%%         ?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_multiple() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
+%%         ?assertEqual(
+%%                 [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
+%%                 S1#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_dont_store_errors() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [barNF()]}, W1, S0),
+%%         ?assertEqual([], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_non_errors() ->
+%%     ?_test(begin
+%%         W1 = #shard{node = node1},
+%%         S0 = state0([], true),
+%%
+%%         {ok, S1} = handle_message({ok, [foo1(), barNF()]}, W1, S0),
+%%         ?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
+%%     end).
+%%
+%%
+%% check_node_rev_store_concatenate() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         {ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
+%%         ?assertEqual(
+%%                 [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
+%%                 S2#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_store_concantenate_multiple() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         {ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
+%%         ?assertEqual(
+%%                 [
+%%                     {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
+%%                     {node1, [{1, <<"foo">>}]}
+%%                 ],
+%%                 S2#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_node_rev_unmodified_on_down_or_exit() ->
+%%     ?_test(begin
+%%         W2 = #shard{node = node2},
+%%         S0 = state0([], true),
+%%         S1 = S0#state{node_revs = [{node1, [{1, <<"foo">>}]}]},
+%%
+%%         Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
+%%         {ok, S2} = handle_message(Down, W2, S1),
+%%         ?assertEqual(
+%%                 [{node1, [{1, <<"foo">>}]}],
+%%                 S2#state.node_revs
+%%             ),
+%%
+%%         Exit = {rexi_EXIT, reason},
+%%         {ok, S3} = handle_message(Exit, W2, S1),
+%%         ?assertEqual(
+%%                 [{node1, [{1, <<"foo">>}]}],
+%%                 S3#state.node_revs
+%%             )
+%%     end).
+%%
+%%
+%% check_not_found_replies_are_removed_when_doc_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
+%%         Expect = [bar1(), foo1()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_returned_when_one_of_docs_not_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo1(), foo2(), barNF()]),
+%%         Expect = [foo1(), foo2(), barNF()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_returned_when_doc_not_found() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
+%%         Expect = [barNF(), bazNF(), fooNF()],
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_longer_rev_list_returned() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed()]),
+%%         Expect = [foo2()],
+%%         ?assertEqual(2, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_longer_rev_list_not_combined() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
+%%         Expect = [bar1(), foo2()],
+%%         ?assertEqual(3, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%% check_not_found_removed_and_longer_rev_list() ->
+%%     ?_test(begin
+%%         Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
+%%         Expect = [foo2()],
+%%         ?assertEqual(3, length(Replies)),
+%%         ?assertEqual(Expect, dict_format_replies(Replies))
+%%     end).
+%%
+%%
+%% replies_to_dict(Replies) ->
+%%     [reply_to_element(R) || R <- Replies].
+%%
+%% reply_to_element({ok, #doc{revs = Revs}} = Reply) ->
+%%     {_, [Rev | _]} = Revs,
+%%     {{Rev, Revs}, {Reply, 1}};
+%% reply_to_element(Reply) ->
+%%     {Reply, {Reply, 1}}.
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 7e447ff..6d77fc2 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -224,349 +224,349 @@ has_quorum(Resps, Count, W) ->
     end.
 
 
--ifdef(TEST).
-
--include_lib("eunit/include/eunit.hrl").
-
-purge_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_w2_ok(),
-            t_w3_ok(),
-
-            t_w2_mixed_accepted(),
-            t_w3_mixed_accepted(),
-
-            t_w2_exit1_ok(),
-            t_w2_exit2_accepted(),
-            t_w2_exit3_error(),
-
-            t_w4_accepted(),
-
-            t_mixed_ok_accepted(),
-            t_mixed_errors()
-        ]
-    }.
-
-
-setup() ->
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_, _) -> ok end),
-    meck:expect(couch_log, notice, fun(_, _) -> ok end).
-
-
-teardown(_) ->
-    meck:unload().
-
-
-t_w2_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w3_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(3),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w2_mixed_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-        Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-            {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w3_mixed_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(3),
-        Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
-        Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
-            {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w2_exit1_ok() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(ok, resp_health(Resps))
-    end).
-
-
-t_w2_exit2_accepted() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_w2_exit3_error() ->
-    ?_test(begin
-        Acc0 = create_init_acc(2),
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [
-            {error, internal_server_error},
-            {error, internal_server_error}
-        ],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(error, resp_health(Resps))
-    end).
-
-
-t_w4_accepted() ->
-    % Make sure we return when all workers have responded
-    % rather than wait around for a timeout if a user asks
-    % for a qourum with more than the available number of
-    % shards.
-    ?_test(begin
-        Acc0 = create_init_acc(4),
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        ?assertEqual(2, length(Acc1#acc.worker_uuids)),
-        check_quorum(Acc1, false),
-
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        ?assertEqual(1, length(Acc2#acc.worker_uuids)),
-        check_quorum(Acc2, false),
-
-        {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
-        ?assertEqual(0, length(Acc3#acc.worker_uuids)),
-        check_quorum(Acc3, true),
-
-        Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_mixed_ok_accepted() ->
-    ?_test(begin
-        WorkerUUIDs = [
-            {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
-            {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-        ],
-
-        Acc0 = #acc{
-            worker_uuids = WorkerUUIDs,
-            resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-            uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-            w = 2
-        },
-
-        Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
-        Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
-        {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
-        {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-        {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-        {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(accepted, resp_health(Resps))
-    end).
-
-
-t_mixed_errors() ->
-    ?_test(begin
-        WorkerUUIDs = [
-            {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
-            {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
-
-            {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
-            {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
-        ],
-
-        Acc0 = #acc{
-            worker_uuids = WorkerUUIDs,
-            resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
-            uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
-            w = 2
-        },
-
-        Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
-        ExitMsg = {rexi_EXIT, blargh},
-
-        {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
-        {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
-        {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
-        {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
-        {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
-
-        Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
-        Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
-        ?assertEqual(Expect, Resps),
-        ?assertEqual(error, resp_health(Resps))
-    end).
-
-
-create_init_acc(W) ->
-    UUID1 = <<"uuid1">>,
-    UUID2 = <<"uuid2">>,
-
-    Nodes = [node1, node2, node3],
-    Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
-
-    % Create our worker_uuids. We're relying on the fact that
-    % we're using a fake Q=1 db so we don't have to worry
-    % about any hashing here.
-    WorkerUUIDs = lists:map(fun(Shard) ->
-        {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
-    end, Shards),
-
-    #acc{
-        worker_uuids = WorkerUUIDs,
-        resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
-        uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
-        w = W
-    }.
-
-
-worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
-    {Worker, _} = lists:nth(N, WorkerUUIDs),
-    Worker.
-
-
-check_quorum(Acc, Expect) ->
-    dict:fold(fun(_Shard, Resps, _) ->
-        ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
-    end, nil, Acc#acc.resps).
-
--endif.
+%% -ifdef(TEST).
+%%
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% purge_test_() ->
+%%     {
+%%         foreach,
+%%         fun setup/0,
+%%         fun teardown/1,
+%%         [
+%%             t_w2_ok(),
+%%             t_w3_ok(),
+%%
+%%             t_w2_mixed_accepted(),
+%%             t_w3_mixed_accepted(),
+%%
+%%             t_w2_exit1_ok(),
+%%             t_w2_exit2_accepted(),
+%%             t_w2_exit3_error(),
+%%
+%%             t_w4_accepted(),
+%%
+%%             t_mixed_ok_accepted(),
+%%             t_mixed_errors()
+%%         ]
+%%     }.
+%%
+%%
+%% setup() ->
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_, _) -> ok end),
+%%     meck:expect(couch_log, notice, fun(_, _) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%%
+%% t_w2_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {stop, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w3_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(3),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_mixed_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%%         Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg1, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%%             {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w3_mixed_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(3),
+%%         Msg1 = {ok, [{ok, [{1, <<"foo1">>}]}, {ok, [{2, <<"bar1">>}]}]},
+%%         Msg2 = {ok, [{ok, [{1, <<"foo2">>}]}, {ok, [{2, <<"bar2">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg2, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg2, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {accepted, [{1, <<"foo1">>}, {1, <<"foo2">>}]},
+%%             {accepted, [{2, <<"bar1">>}, {2, <<"bar2">>}]}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc2),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit1_ok() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(ok, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit2_accepted() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w2_exit3_error() ->
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(2),
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(ExitMsg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(ExitMsg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(ExitMsg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [
+%%             {error, internal_server_error},
+%%             {error, internal_server_error}
+%%         ],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(error, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_w4_accepted() ->
+%%     % Make sure we return when all workers have responded
+%%     % rather than wait around for a timeout if a user asks
+%%     % for a quorum with more than the available number of
+%%     % shards.
+%%     ?_test(begin
+%%         Acc0 = create_init_acc(4),
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}, {ok, [{2, <<"bar">>}]}]},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         ?assertEqual(2, length(Acc1#acc.worker_uuids)),
+%%         check_quorum(Acc1, false),
+%%
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         ?assertEqual(1, length(Acc2#acc.worker_uuids)),
+%%         check_quorum(Acc2, false),
+%%
+%%         {stop, Acc3} = handle_message(Msg, worker(3, Acc0), Acc2),
+%%         ?assertEqual(0, length(Acc3#acc.worker_uuids)),
+%%         check_quorum(Acc3, true),
+%%
+%%         Expect = [{accepted, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc3),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_mixed_ok_accepted() ->
+%%     ?_test(begin
+%%         WorkerUUIDs = [
+%%             {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%%             {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%%         ],
+%%
+%%         Acc0 = #acc{
+%%             worker_uuids = WorkerUUIDs,
+%%             resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%%             uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%%             w = 2
+%%         },
+%%
+%%         Msg1 = {ok, [{ok, [{1, <<"foo">>}]}]},
+%%         Msg2 = {ok, [{ok, [{2, <<"bar">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg1, worker(1, Acc0), Acc0),
+%%         {ok, Acc2} = handle_message(Msg1, worker(2, Acc0), Acc1),
+%%         {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%%         {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%%         {stop, Acc5} = handle_message(Msg2, worker(6, Acc0), Acc4),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {accepted, [{2, <<"bar">>}]}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(accepted, resp_health(Resps))
+%%     end).
+%%
+%%
+%% t_mixed_errors() ->
+%%     ?_test(begin
+%%         WorkerUUIDs = [
+%%             {#shard{node = a, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = b, range = [1, 2]}, [<<"uuid1">>]},
+%%             {#shard{node = c, range = [1, 2]}, [<<"uuid1">>]},
+%%
+%%             {#shard{node = a, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = b, range = [3, 4]}, [<<"uuid2">>]},
+%%             {#shard{node = c, range = [3, 4]}, [<<"uuid2">>]}
+%%         ],
+%%
+%%         Acc0 = #acc{
+%%             worker_uuids = WorkerUUIDs,
+%%             resps = dict:from_list([{<<"uuid1">>, []}, {<<"uuid2">>, []}]),
+%%             uuid_counts = dict:from_list([{<<"uuid1">>, 3}, {<<"uuid2">>, 3}]),
+%%             w = 2
+%%         },
+%%
+%%         Msg = {ok, [{ok, [{1, <<"foo">>}]}]},
+%%         ExitMsg = {rexi_EXIT, blargh},
+%%
+%%         {ok, Acc1} = handle_message(Msg, worker(1, Acc0), Acc0),
+%%         {ok, Acc2} = handle_message(Msg, worker(2, Acc0), Acc1),
+%%         {ok, Acc3} = handle_message(ExitMsg, worker(4, Acc0), Acc2),
+%%         {ok, Acc4} = handle_message(ExitMsg, worker(5, Acc0), Acc3),
+%%         {stop, Acc5} = handle_message(ExitMsg, worker(6, Acc0), Acc4),
+%%
+%%         Expect = [{ok, [{1, <<"foo">>}]}, {error, internal_server_error}],
+%%         Resps = format_resps([<<"uuid1">>, <<"uuid2">>], Acc5),
+%%         ?assertEqual(Expect, Resps),
+%%         ?assertEqual(error, resp_health(Resps))
+%%     end).
+%%
+%%
+%% create_init_acc(W) ->
+%%     UUID1 = <<"uuid1">>,
+%%     UUID2 = <<"uuid2">>,
+%%
+%%     Nodes = [node1, node2, node3],
+%%     Shards = mem3_util:create_partition_map(<<"foo">>, 3, 1, Nodes),
+%%
+%%     % Create our worker_uuids. We're relying on the fact that
+%%     % we're using a fake Q=1 db so we don't have to worry
+%%     % about any hashing here.
+%%     WorkerUUIDs = lists:map(fun(Shard) ->
+%%         {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
+%%     end, Shards),
+%%
+%%     #acc{
+%%         worker_uuids = WorkerUUIDs,
+%%         resps = dict:from_list([{UUID1, []}, {UUID2, []}]),
+%%         uuid_counts = dict:from_list([{UUID1, 3}, {UUID2, 3}]),
+%%         w = W
+%%     }.
+%%
+%%
+%% worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
+%%     {Worker, _} = lists:nth(N, WorkerUUIDs),
+%%     Worker.
+%%
+%%
+%% check_quorum(Acc, Expect) ->
+%%     dict:fold(fun(_Shard, Resps, _) ->
+%%         ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
+%%     end, nil, Acc#acc.resps).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index c108c9a..84f4bc4 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -219,144 +219,144 @@ validate_atomic_update(_DbName, AllDocs, true) ->
     end, AllDocs),
     throw({aborted, PreCommitFailures}).
 
-% eunits
-doc_update1_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_,_) -> ok end),
-
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc1],
-    Docs2 = [Doc2, Doc1],
-    Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
-    Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
-
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-
-
-    % test for W = 2
-    AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
-    ?assertEqual(WaitingCountW2_1,2),
-    {stop, FinalReplyW2 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
-
-    % test for W = 3
-    AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
-        Dict},
-
-    {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
-        handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
-    ?assertEqual(WaitingCountW3_1,2),
-
-    {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
-    ?assertEqual(WaitingCountW3_2,1),
-
-    {stop, FinalReplyW3 } =
-        handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
-    ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
-
-    % test w quorum > # shards, which should fail immediately
-
-    Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
-    GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
-
-    AccW4 =
-        {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
-    Bool =
-    case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
-        {stop, _Reply} ->
-            true;
-        _ -> false
-    end,
-    ?assertEqual(Bool,true),
-
-    % Docs with no replies should end up as {error, internal_server_error}
-    SA1 = #shard{node=a, range=1},
-    SB1 = #shard{node=b, range=1},
-    SA2 = #shard{node=a, range=2},
-    SB2 = #shard{node=b, range=2},
-    GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
-    StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
-    {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
-    {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
-    {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
-    {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
-    ?assertEqual(
-        {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
-        ReplyW5
-    ),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
-
-
-doc_update2_test() ->
-    meck:new(couch_stats),
-    meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
-    meck:new(couch_log),
-    meck:expect(couch_log, warning, fun(_,_) -> ok end),
-
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
-        Reply),
-    meck:unload(couch_log),
-    meck:unload(couch_stats).
-
-doc_update3_test() ->
-    Doc1 = #doc{revs = {1,[<<"foo">>]}},
-    Doc2 = #doc{revs = {1,[<<"bar">>]}},
-    Docs = [Doc2, Doc1],
-    Shards =
-        mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
-    GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-    Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
-        dict:from_list([{Doc,[]} || Doc <- Docs])},
-
-    {ok,{WaitingCount1,_,_,_,_}=Acc1} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
-    ?assertEqual(WaitingCount1,2),
-
-    {ok,{WaitingCount2,_,_,_,_}=Acc2} =
-        handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
-    ?assertEqual(WaitingCount2,1),
-
-    {stop, Reply} =
-        handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
-
-    ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
-
-% needed for testing to avoid having to start the mem3 application
-group_docs_by_shard_hack(_DbName, Shards, Docs) ->
-    dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
-        lists:foldl(fun(Shard, D1) ->
-            dict:append(Shard, Doc, D1)
-        end, D0, Shards)
-    end, dict:new(), Docs)).
+%% % eunits
+%% doc_update1_test() ->
+%%     meck:new(couch_stats),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_,_) -> ok end),
+%%
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc1],
+%%     Docs2 = [Doc2, Doc1],
+%%     Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
+%%     Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
+%%
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%
+%%
+%%     % test for W = 2
+%%     AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         Dict},
+%%
+%%     {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
+%%         handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
+%%     ?assertEqual(WaitingCountW2_1,2),
+%%     {stop, FinalReplyW2 } =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
+%%     ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
+%%
+%%     % test for W = 3
+%%     AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
+%%         Dict},
+%%
+%%     {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
+%%         handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
+%%     ?assertEqual(WaitingCountW3_1,2),
+%%
+%%     {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
+%%     ?assertEqual(WaitingCountW3_2,1),
+%%
+%%     {stop, FinalReplyW3 } =
+%%         handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
+%%     ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
+%%
+%%     % test w quorum > # shards, which should fail immediately
+%%
+%%     Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
+%%     GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
+%%
+%%     AccW4 =
+%%         {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
+%%     Bool =
+%%     case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
+%%         {stop, _Reply} ->
+%%             true;
+%%         _ -> false
+%%     end,
+%%     ?assertEqual(Bool,true),
+%%
+%%     % Docs with no replies should end up as {error, internal_server_error}
+%%     SA1 = #shard{node=a, range=1},
+%%     SB1 = #shard{node=b, range=1},
+%%     SA2 = #shard{node=a, range=2},
+%%     SB2 = #shard{node=b, range=2},
+%%     GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
+%%     StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
+%%     {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
+%%     {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
+%%     {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
+%%     {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
+%%     ?assertEqual(
+%%         {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
+%%         ReplyW5
+%%     ),
+%%     meck:unload(couch_log),
+%%     meck:unload(couch_stats).
+%%
+%%
+%% doc_update2_test() ->
+%%     meck:new(couch_stats),
+%%     meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
+%%     meck:new(couch_log),
+%%     meck:expect(couch_log, warning, fun(_,_) -> ok end),
+%%
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc2, Doc1],
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%     Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%%     {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%%     ?assertEqual(WaitingCount1,2),
+%%
+%%     {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%%     ?assertEqual(WaitingCount2,1),
+%%
+%%     {stop, Reply} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
+%%
+%%     ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
+%%         Reply),
+%%     meck:unload(couch_log),
+%%     meck:unload(couch_stats).
+%%
+%% doc_update3_test() ->
+%%     Doc1 = #doc{revs = {1,[<<"foo">>]}},
+%%     Doc2 = #doc{revs = {1,[<<"bar">>]}},
+%%     Docs = [Doc2, Doc1],
+%%     Shards =
+%%         mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+%%     GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+%%     Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+%%         dict:from_list([{Doc,[]} || Doc <- Docs])},
+%%
+%%     {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+%%     ?assertEqual(WaitingCount1,2),
+%%
+%%     {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+%%         handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+%%     ?assertEqual(WaitingCount2,1),
+%%
+%%     {stop, Reply} =
+%%         handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+%%
+%%     ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
+%%
+%% % needed for testing to avoid having to start the mem3 application
+%% group_docs_by_shard_hack(_DbName, Shards, Docs) ->
+%%     dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
+%%         lists:foldl(fun(Shard, D1) ->
+%%             dict:append(Shard, Doc, D1)
+%%         end, D0, Shards)
+%%     end, dict:new(), Docs)).
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 97374be..212a1da 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -643,22 +643,22 @@ uuid(Db) ->
 uuid_prefix_len() ->
     list_to_integer(config:get("fabric", "uuid_prefix_len", "7")).
 
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-maybe_filtered_json_doc_no_filter_test() ->
-    Body = {[{<<"a">>, 1}]},
-    Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-    {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
-    ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
-    ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
-
-maybe_filtered_json_doc_with_filter_test() ->
-    Body = {[{<<"a">>, 1}]},
-    Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
-    Fields = [<<"a">>, <<"nonexistent">>],
-    Filter = {selector, main_only, {some_selector, Fields}},
-    {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
-    ?assertEqual(JDocProps, [{<<"a">>, 1}]).
-
--endif.
+%% -ifdef(TEST).
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% maybe_filtered_json_doc_no_filter_test() ->
+%%     Body = {[{<<"a">>, 1}]},
+%%     Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%%     {JDocProps} = maybe_filtered_json_doc(Doc, [], x),
+%%     ExpectedProps = [{<<"_id">>, <<"1">>}, {<<"_rev">>, <<"1-r1">>}, {<<"a">>, 1}],
+%%     ?assertEqual(lists:keysort(1, JDocProps), ExpectedProps).
+%%
+%% maybe_filtered_json_doc_with_filter_test() ->
+%%     Body = {[{<<"a">>, 1}]},
+%%     Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
+%%     Fields = [<<"a">>, <<"nonexistent">>],
+%%     Filter = {selector, main_only, {some_selector, Fields}},
+%%     {JDocProps} = maybe_filtered_json_doc(Doc, [], Filter),
+%%     ?assertEqual(JDocProps, [{<<"a">>, 1}]).
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
index 59c8b8a..98e2850 100644
--- a/src/fabric/src/fabric_streams.erl
+++ b/src/fabric/src/fabric_streams.erl
@@ -192,82 +192,83 @@ add_worker_to_cleaner(CoordinatorPid, Worker) ->
 
 
 
--ifdef(TEST).
 
--include_lib("eunit/include/eunit.hrl").
-
-worker_cleaner_test_() ->
-    {
-        "Fabric spawn_worker_cleaner test", {
-            setup, fun setup/0, fun teardown/1,
-            fun(_) -> [
-                should_clean_workers(),
-                does_not_fire_if_cleanup_called(),
-                should_clean_additional_worker_too()
-            ] end
-        }
-    }.
-
-
-should_clean_workers() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()},
-            #shard{node = 'n2', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        Ref = erlang:monitor(process, Cleaner),
-        Coord ! die,
-        receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-does_not_fire_if_cleanup_called() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()},
-            #shard{node = 'n2', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        Ref = erlang:monitor(process, Cleaner),
-        cleanup(Workers),
-        Coord ! die,
-        receive {'DOWN', Ref, _, _, _} -> ok end,
-        % 2 calls would be from cleanup/1 function. If cleanup process fired
-        % too it would have been 4 calls total.
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-should_clean_additional_worker_too() ->
-    ?_test(begin
-        meck:reset(rexi),
-        erase(?WORKER_CLEANER),
-        Workers = [
-            #shard{node = 'n1', ref = make_ref()}
-        ],
-        {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
-        Cleaner = spawn_worker_cleaner(Coord, Workers),
-        add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
-        Ref = erlang:monitor(process, Cleaner),
-        Coord ! die,
-        receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
-        ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
-    end).
-
-
-setup() ->
-    ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-
-
-teardown(_) ->
-    meck:unload().
-
--endif.
+%% -ifdef(TEST).
+%%
+%% -include_lib("eunit/include/eunit.hrl").
+%%
+%% worker_cleaner_test_() ->
+%%     {
+%%         "Fabric spawn_worker_cleaner test", {
+%%             setup, fun setup/0, fun teardown/1,
+%%             fun(_) -> [
+%%                 should_clean_workers(),
+%%                 does_not_fire_if_cleanup_called(),
+%%                 should_clean_additional_worker_too()
+%%             ] end
+%%         }
+%%     }.
+%%
+%%
+%% should_clean_workers() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()},
+%%             #shard{node = 'n2', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% does_not_fire_if_cleanup_called() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()},
+%%             #shard{node = 'n2', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         cleanup(Workers),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, _, _} -> ok end,
+%%         % 2 calls would be from cleanup/1 function. If cleanup process fired
+%%         % too it would have been 4 calls total.
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% should_clean_additional_worker_too() ->
+%%     ?_test(begin
+%%         meck:reset(rexi),
+%%         erase(?WORKER_CLEANER),
+%%         Workers = [
+%%             #shard{node = 'n1', ref = make_ref()}
+%%         ],
+%%         {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+%%         Cleaner = spawn_worker_cleaner(Coord, Workers),
+%%         add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
+%%         Ref = erlang:monitor(process, Cleaner),
+%%         Coord ! die,
+%%         receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+%%         ?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
+%%     end).
+%%
+%%
+%% setup() ->
+%%     ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
+%%
+%%
+%% teardown(_) ->
+%%     meck:unload().
+%%
+%% -endif.
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index aaf0623..16f916c 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -189,30 +189,30 @@ create_monitors(Shards) ->
     ]),
     rexi_monitor:start(MonRefs).
 
-%% verify only id and rev are used in key.
-update_counter_test() ->
-    Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
-                    body = <<"body">>, atts = <<"atts">>}},
-    ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
-        update_counter(Reply, 1, [])).
-
-remove_ancestors_test() ->
-    Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
-    Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
-    Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-    Bar2 = {not_found, {1,<<"bar">>}},
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo1,1)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,1), kv(Foo2,2)],
-        remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
-    ),
-    ?assertEqual(
-        [kv(Bar1,2)],
-        remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
-    ).
+%% %% verify only id and rev are used in key.
+%% update_counter_test() ->
+%%     Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
+%%                     body = <<"body">>, atts = <<"atts">>}},
+%%     ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
+%%         update_counter(Reply, 1, [])).
+%%
+%% remove_ancestors_test() ->
+%%     Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+%%     Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+%%     Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+%%     Bar2 = {not_found, {1,<<"bar">>}},
+%%     ?assertEqual(
+%%         [kv(Bar1,1), kv(Foo1,1)],
+%%         remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
+%%     ),
+%%     ?assertEqual(
+%%         [kv(Bar1,1), kv(Foo2,2)],
+%%         remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
+%%     ),
+%%     ?assertEqual(
+%%         [kv(Bar1,2)],
+%%         remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
+%%     ).
 
 is_replicator_db(DbName) ->
     path_ends_with(DbName, <<"_replicator">>).
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 55b44e6..adde1e6 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -416,97 +416,97 @@ fix_skip_and_limit(#mrargs{} = Args) ->
 remove_finalizer(Args) ->
     couch_mrview_util:set_extra(Args, finalizer, null).
 
-% unit test
-is_progress_possible_test() ->
-    EndPoint = 2 bsl 31,
-    T1 = [[0, EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
-    T2 = [[0,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
-    % gap
-    T3 = [[0,10],[12,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
-    % outside range
-    T4 = [[1,10],[11,20],[21,EndPoint-1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
-    % outside range
-    T5 = [[0,10],[11,20],[21,EndPoint]],
-    ?assertEqual(is_progress_possible(mk_cnts(T5)),false),
-    T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, EndPoint - 1]],
-    ?assertEqual(is_progress_possible(mk_cnts(T6)), true),
-    % not possible, overlap is not exact
-    T7 = [[0, 10], [13, 20], [21, EndPoint - 1], [9, 12]],
-    ?assertEqual(is_progress_possible(mk_cnts(T7)), false).
-
-
-remove_overlapping_shards_test() ->
-    Cb = undefined,
-
-    Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
-
-    % Simple (exact) overlap
-    Shard1 = mk_shard("node-3", [11, 20]),
-    Shards1 = fabric_dict:store(Shard1, nil, Shards),
-    R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
-    ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
-        fabric_util:worker_ranges(R1)),
-    ?assert(fabric_dict:is_key(Shard1, R1)),
-
-    % Split overlap (shard overlap multiple workers)
-    Shard2 = mk_shard("node-3", [0, 20]),
-    Shards2 = fabric_dict:store(Shard2, nil, Shards),
-    R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
-    ?assertEqual([{0, 20}, {21, ?RING_END}],
-        fabric_util:worker_ranges(R2)),
-    ?assert(fabric_dict:is_key(Shard2, R2)).
-
-
-get_shard_replacements_test() ->
-    Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
-        {"n1", 11, 20}, {"n1", 21, ?RING_END},
-        {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
-        {"n3", 0, 21, ?RING_END}
-    ]],
-    Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
-        {"n2", 21, ?RING_END},
-        {"n3", 0, 10}, {"n3", 11, 20}
-    ]],
-    Res = lists:sort(get_shard_replacements_int(Unused, Used)),
-    % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
-    % 10] workers on n1
-    Expect = [
-        {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
-        {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
-        {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
-    ],
-    ?assertEqual(Expect, Res).
-
-
-mk_cnts(Ranges) ->
-    Shards = lists:map(fun mk_shard/1, Ranges),
-    orddict:from_list([{Shard,nil} || Shard <- Shards]).
-
-mk_cnts(Ranges, NoNodes) ->
-    orddict:from_list([{Shard,nil}
-                       || Shard <-
-                              lists:flatten(lists:map(
-                                 fun(Range) ->
-                                         mk_shards(NoNodes,Range,[])
-                                 end, Ranges))]
-                     ).
-
-mk_shards(0,_Range,Shards) ->
-    Shards;
-mk_shards(NoNodes,Range,Shards) ->
-    Name ="node-" ++ integer_to_list(NoNodes),
-    mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
-
-
-mk_shard([B, E]) when is_integer(B), is_integer(E) ->
-    #shard{range = [B, E]}.
-
-
-mk_shard(Name, Range) ->
-    Node = list_to_atom(Name),
-    BName = list_to_binary(Name),
-    #shard{name = BName, node = Node, range = Range}.
+%% % unit test
+%% is_progress_possible_test() ->
+%%     EndPoint = 2 bsl 31,
+%%     T1 = [[0, EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
+%%     T2 = [[0,10],[11,20],[21,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
+%%     % gap
+%%     T3 = [[0,10],[12,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
+%%     % outside range
+%%     T4 = [[1,10],[11,20],[21,EndPoint-1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
+%%     % outside range
+%%     T5 = [[0,10],[11,20],[21,EndPoint]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T5)),false),
+%%     T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, EndPoint - 1]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T6)), true),
+%%     % not possible, overlap is not exact
+%%     T7 = [[0, 10], [13, 20], [21, EndPoint - 1], [9, 12]],
+%%     ?assertEqual(is_progress_possible(mk_cnts(T7)), false).
+%%
+%%
+%% remove_overlapping_shards_test() ->
+%%     Cb = undefined,
+%%
+%%     Shards = mk_cnts([[0, 10], [11, 20], [21, ?RING_END]], 3),
+%%
+%%     % Simple (exact) overlap
+%%     Shard1 = mk_shard("node-3", [11, 20]),
+%%     Shards1 = fabric_dict:store(Shard1, nil, Shards),
+%%     R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
+%%     ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
+%%         fabric_util:worker_ranges(R1)),
+%%     ?assert(fabric_dict:is_key(Shard1, R1)),
+%%
+%%     % Split overlap (shard overlap multiple workers)
+%%     Shard2 = mk_shard("node-3", [0, 20]),
+%%     Shards2 = fabric_dict:store(Shard2, nil, Shards),
+%%     R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
+%%     ?assertEqual([{0, 20}, {21, ?RING_END}],
+%%         fabric_util:worker_ranges(R2)),
+%%     ?assert(fabric_dict:is_key(Shard2, R2)).
+%%
+%%
+%% get_shard_replacements_test() ->
+%%     Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%%         {"n1", 11, 20}, {"n1", 21, ?RING_END},
+%%         {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
+%%         {"n3", 0, 21, ?RING_END}
+%%     ]],
+%%     Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
+%%         {"n2", 21, ?RING_END},
+%%         {"n3", 0, 10}, {"n3", 11, 20}
+%%     ]],
+%%     Res = lists:sort(get_shard_replacements_int(Unused, Used)),
+%%     % Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
+%%     % 10] workers on n1
+%%     Expect = [
+%%         {[0, 10], [mk_shard("n2", [0, 4]), mk_shard("n2", [5, 10])]},
+%%         {[11, 20], [mk_shard("n1", [11, 20]), mk_shard("n2", [11, 20])]},
+%%         {[21, ?RING_END], [mk_shard("n1", [21, ?RING_END])]}
+%%     ],
+%%     ?assertEqual(Expect, Res).
+%%
+%%
+%% mk_cnts(Ranges) ->
+%%     Shards = lists:map(fun mk_shard/1, Ranges),
+%%     orddict:from_list([{Shard,nil} || Shard <- Shards]).
+%%
+%% mk_cnts(Ranges, NoNodes) ->
+%%     orddict:from_list([{Shard,nil}
+%%                        || Shard <-
+%%                               lists:flatten(lists:map(
+%%                                  fun(Range) ->
+%%                                          mk_shards(NoNodes,Range,[])
+%%                                  end, Ranges))]
+%%                      ).
+%%
+%% mk_shards(0,_Range,Shards) ->
+%%     Shards;
+%% mk_shards(NoNodes,Range,Shards) ->
+%%     Name ="node-" ++ integer_to_list(NoNodes),
+%%     mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
+%%
+%%
+%% mk_shard([B, E]) when is_integer(B), is_integer(E) ->
+%%     #shard{range = [B, E]}.
+%%
+%%
+%% mk_shard(Name, Range) ->
+%%     Node = list_to_atom(Name),
+%%     BName = list_to_binary(Name),
+%%     #shard{name = BName, node = Node, range = Range}.
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index febbd31..3f684a3 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -637,184 +637,184 @@ increment_changes_epoch() ->
     application:set_env(fabric, changes_epoch, os:timestamp()).
 
 
-unpack_seq_setup() ->
-    meck:new(mem3),
-    meck:new(fabric_view),
-    meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
-    meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
-    ok.
-
-
-unpack_seqs_test_() ->
-    {
-        setup,
-        fun unpack_seq_setup/0,
-        fun (_) -> meck:unload() end,
-        [
-            t_unpack_seqs()
-        ]
-    }.
-
-
-t_unpack_seqs() ->
-    ?_test(begin
-        % BigCouch 0.3 style.
-        assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
-
-        % BigCouch 0.4 style.
-        assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
-
-        % BigCouch 0.4 style (as string).
-        assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-        assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
-
-        % with internal hypen
-        assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-        "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-        "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
-        assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
-        "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
-        "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
-
-        % CouchDB 1.2 style
-        assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
-        "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
-        "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
-    end).
-
-
-assert_shards(Packed) ->
-    ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
-
-
-find_replacements_test() ->
-    % None of the workers are in the live list of shard but there is a
-    % replacement on n3 for the full range. It should get picked instead of
-    % the two smaller one on n2.
-    Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards1 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n3", 0, ?RING_END)
-    ],
-    {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
-    ?assertEqual([], WorkersRes1),
-    ?assertEqual(Workers1, Dead1),
-    ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
-
-    % None of the workers are in the live list of shards and there is a
-    % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
-    Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards2 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ],
-    {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
-    ?assertEqual([], WorkersRes2),
-    ?assertEqual(Workers2, Dead2),
-    ?assertEqual([
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ], lists:sort(Reps2)),
-
-    % One worker is available and one needs to be replaced. Replacement will be
-    % from two split shards
-    Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
-    AllShards3 = [
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n2", 11, ?RING_END)
-    ],
-    {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
-    ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
-    ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
-    ?assertEqual([
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10)
-    ], lists:sort(Reps3)),
-
-    % All workers are available. Make sure they are not killed even if there is
-    % a longer (single) shard to replace them.
-    Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
-    AllShards4 = [
-        mk_shard("n1", 0, 10),
-        mk_shard("n1", 11, ?RING_END),
-        mk_shard("n2", 0, 4),
-        mk_shard("n2", 5, 10),
-        mk_shard("n3", 0, ?RING_END)
-    ],
-    {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
-    ?assertEqual(Workers4, WorkersRes4),
-    ?assertEqual([], Dead4),
-    ?assertEqual([], Reps4).
-
-
-mk_workers(NodesRanges) ->
-    mk_workers(NodesRanges, nil).
-
-mk_workers(NodesRanges, Val) ->
-    orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-
-
-mk_shard(Name, B, E) ->
-    Node = list_to_atom(Name),
-    BName = list_to_binary(Name),
-    #shard{name = BName, node = Node, range = [B, E]}.
-
-
-find_split_shard_replacements_test() ->
-    % One worker is can be replaced and one can't
-    Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards1 = [
-        mk_shard("n1", 0, 4),
-        mk_shard("n1", 5, 10),
-        mk_shard("n3", 11, ?RING_END)
-    ],
-    {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
-    ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
-    ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
-
-    % All workers can be replaced - one by 1 shard, another by 3 smaller shards
-    Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards2 = [
-        mk_shard("n1", 0, 10),
-        mk_shard("n2", 11, 12),
-        mk_shard("n2", 13, 14),
-        mk_shard("n2", 15, ?RING_END)
-    ],
-    {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
-    ?assertEqual(mk_workers([
-       {"n1", 0, 10},
-       {"n2", 11, 12},
-       {"n2", 13, 14},
-       {"n2", 15, ?RING_END}
-    ], 42), Workers2),
-    ?assertEqual([], ShardsLeft2),
-
-    % No workers can be replaced. Ranges match but they are on different nodes
-    Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
-    Shards3 = [
-        mk_shard("n2", 0, 10),
-        mk_shard("n3", 11, ?RING_END)
-    ],
-    {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
-    ?assertEqual([], Workers3),
-    ?assertEqual(Shards3, ShardsLeft3).
+%% unpack_seq_setup() ->
+%%     meck:new(mem3),
+%%     meck:new(fabric_view),
+%%     meck:expect(mem3, get_shard, fun(_, _, _) -> {ok, #shard{}} end),
+%%     meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
+%%     ok.
+%%
+%%
+%% unpack_seqs_test_() ->
+%%     {
+%%         setup,
+%%         fun unpack_seq_setup/0,
+%%         fun (_) -> meck:unload() end,
+%%         [
+%%             t_unpack_seqs()
+%%         ]
+%%     }.
+%%
+%%
+%% t_unpack_seqs() ->
+%%     ?_test(begin
+%%         % BigCouch 0.3 style.
+%%         assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
+%%
+%%         % BigCouch 0.4 style.
+%%         assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
+%%
+%%         % BigCouch 0.4 style (as string).
+%%         assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%         assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+%%
+%%         % with internal hypen
+%%         assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%%         "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%%         "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
+%%         assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+%%         "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+%%         "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
+%%
+%%         % CouchDB 1.2 style
+%%         assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+%%         "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+%%         "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
+%%     end).
+%%
+%%
+%% assert_shards(Packed) ->
+%%     ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
+%%
+%%
+%% find_replacements_test() ->
+%%     % None of the workers are in the live list of shard but there is a
+%%     % replacement on n3 for the full range. It should get picked instead of
+%%     % the two smaller one on n2.
+%%     Workers1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards1 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n3", 0, ?RING_END)
+%%     ],
+%%     {WorkersRes1, Dead1, Reps1} = find_replacements(Workers1, AllShards1),
+%%     ?assertEqual([], WorkersRes1),
+%%     ?assertEqual(Workers1, Dead1),
+%%     ?assertEqual([mk_shard("n3", 0, ?RING_END)], Reps1),
+%%
+%%     % None of the workers are in the live list of shards and there is a
+%%     % split replacement from n2 (range [0, 10] replaced with [0, 4], [5, 10])
+%%     Workers2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards2 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ],
+%%     {WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
+%%     ?assertEqual([], WorkersRes2),
+%%     ?assertEqual(Workers2, Dead2),
+%%     ?assertEqual([
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ], lists:sort(Reps2)),
+%%
+%%     % One worker is available and one needs to be replaced. Replacement will be
+%%     % from two split shards
+%%     Workers3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}]),
+%%     AllShards3 = [
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n2", 11, ?RING_END)
+%%     ],
+%%     {WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
+%%     ?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
+%%     ?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
+%%     ?assertEqual([
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10)
+%%     ], lists:sort(Reps3)),
+%%
+%%     % All workers are available. Make sure they are not killed even if there is
+%%     % a longer (single) shard to replace them.
+%%     Workers4 = mk_workers([{"n1", 0, 10}, {"n1", 11, ?RING_END}]),
+%%     AllShards4 = [
+%%         mk_shard("n1", 0, 10),
+%%         mk_shard("n1", 11, ?RING_END),
+%%         mk_shard("n2", 0, 4),
+%%         mk_shard("n2", 5, 10),
+%%         mk_shard("n3", 0, ?RING_END)
+%%     ],
+%%     {WorkersRes4, Dead4, Reps4} = find_replacements(Workers4, AllShards4),
+%%     ?assertEqual(Workers4, WorkersRes4),
+%%     ?assertEqual([], Dead4),
+%%     ?assertEqual([], Reps4).
+%%
+%%
+%% mk_workers(NodesRanges) ->
+%%     mk_workers(NodesRanges, nil).
+%%
+%% mk_workers(NodesRanges, Val) ->
+%%     orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
+%%
+%%
+%% mk_shard(Name, B, E) ->
+%%     Node = list_to_atom(Name),
+%%     BName = list_to_binary(Name),
+%%     #shard{name = BName, node = Node, range = [B, E]}.
+%%
+%%
+%% find_split_shard_replacements_test() ->
+%%     % One worker is can be replaced and one can't
+%%     Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards1 = [
+%%         mk_shard("n1", 0, 4),
+%%         mk_shard("n1", 5, 10),
+%%         mk_shard("n3", 11, ?RING_END)
+%%     ],
+%%     {Workers1, ShardsLeft1} = find_split_shard_replacements(Dead1, Shards1),
+%%     ?assertEqual(mk_workers([{"n1", 0, 4}, {"n1", 5, 10}], 42), Workers1),
+%%     ?assertEqual([mk_shard("n3", 11, ?RING_END)], ShardsLeft1),
+%%
+%%     % All workers can be replaced - one by 1 shard, another by 3 smaller shards
+%%     Dead2 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards2 = [
+%%         mk_shard("n1", 0, 10),
+%%         mk_shard("n2", 11, 12),
+%%         mk_shard("n2", 13, 14),
+%%         mk_shard("n2", 15, ?RING_END)
+%%     ],
+%%     {Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
+%%     ?assertEqual(mk_workers([
+%%        {"n1", 0, 10},
+%%        {"n2", 11, 12},
+%%        {"n2", 13, 14},
+%%        {"n2", 15, ?RING_END}
+%%     ], 42), Workers2),
+%%     ?assertEqual([], ShardsLeft2),
+%%
+%%     % No workers can be replaced. Ranges match but they are on different nodes
+%%     Dead3 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
+%%     Shards3 = [
+%%         mk_shard("n2", 0, 10),
+%%         mk_shard("n3", 11, ?RING_END)
+%%     ],
+%%     {Workers3, ShardsLeft3} = find_split_shard_replacements(Dead3, Shards3),
+%%     ?assertEqual([], Workers3),
+%%     ?assertEqual(Shards3, ShardsLeft3).
diff --git a/src/fabric/test/fabric_rpc_purge_tests.erl b/src/fabric/test/fabric_rpc_purge_tests.erl
deleted file mode 100644
index 4eafb2b..0000000
--- a/src/fabric/test/fabric_rpc_purge_tests.erl
+++ /dev/null
@@ -1,307 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc_purge_tests).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
--define(TDEF(A), {A, fun A/1}).
-
-% TODO: Add tests:
-%         - filter some updates
-%         - allow for an update that was filtered by a node
-%         - ignore lagging nodes
-
-main_test_() ->
-    {
-        setup,
-        spawn,
-        fun setup_all/0,
-        fun teardown_all/1,
-        [
-            {
-                foreach,
-                fun setup_no_purge/0,
-                fun teardown_no_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_no_purge_no_filter)
-                ])
-            },
-            {
-                foreach,
-                fun setup_single_purge/0,
-                fun teardown_single_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_filter),
-                    ?TDEF(t_filter_unknown_node),
-                    ?TDEF(t_filter_local_node),
-                    ?TDEF(t_no_filter_old_node),
-                    ?TDEF(t_no_filter_different_node),
-                    ?TDEF(t_no_filter_after_repl)
-                ])
-            },
-            {
-                foreach,
-                fun setup_multi_purge/0,
-                fun teardown_multi_purge/1,
-                lists:map(fun wrap/1, [
-                    ?TDEF(t_filter),
-                    ?TDEF(t_filter_unknown_node),
-                    ?TDEF(t_filter_local_node),
-                    ?TDEF(t_no_filter_old_node),
-                    ?TDEF(t_no_filter_different_node),
-                    ?TDEF(t_no_filter_after_repl)
-                ])
-            }
-        ]
-    }.
-
-
-setup_all() ->
-    test_util:start_couch().
-
-
-teardown_all(Ctx) ->
-    test_util:stop_couch(Ctx).
-
-
-setup_no_purge() ->
-    {ok, Db} = create_db(),
-    populate_db(Db),
-    couch_db:name(Db).
-
-
-teardown_no_purge(DbName) ->
-    ok = couch_server:delete(DbName, []).
-
-
-setup_single_purge() ->
-    DbName = setup_no_purge(),
-    DocId = <<"0003">>,
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    purge_doc(DbName, DocId),
-    {DbName, DocId, OldDoc, 1}.
-
-
-teardown_single_purge({DbName, _, _, _}) ->
-    teardown_no_purge(DbName).
-
-
-setup_multi_purge() ->
-    DbName = setup_no_purge(),
-    DocId = <<"0003">>,
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    lists:foreach(fun(I) ->
-        PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
-        purge_doc(DbName, PDocId)
-    end, lists:seq(1, 5)),
-    {DbName, DocId, OldDoc, 3}.
-
-
-teardown_multi_purge(Ctx) ->
-    teardown_single_purge(Ctx).
-
-
-t_no_purge_no_filter(DbName) ->
-    DocId = <<"0003">>,
-
-    {ok, OldDoc} = open_doc(DbName, DocId),
-    NewDoc = create_update(OldDoc, 2),
-
-    rpc_update_doc(DbName, NewDoc),
-
-    {ok, CurrDoc} = open_doc(DbName, DocId),
-    ?assert(CurrDoc /= OldDoc),
-    ?assert(CurrDoc == NewDoc).
-
-
-t_filter({DbName, DocId, OldDoc, _PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, 0),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
-    % Unknown nodes are assumed to start at PurgeSeq = 0
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, 0),
-
-    {Pos, [Rev | _]} = OldDoc#doc.revs,
-    RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]},
-    rpc_update_doc(DbName, OldDoc, [RROpt]),
-
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
-
-t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % The random UUID is to generate a badarg exception when
-    % we try and convert it to an existing atom.
-    create_purge_checkpoint(DbName, 0, couch_uuids:random()),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % Create a valid purge for a different node
-    TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
-    create_purge_checkpoint(DbName, 0, TgtNode),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    % Create a valid purge for a different node
-    TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')),
-    create_purge_checkpoint(DbName, 0, TgtNode),
-
-    % Add a local node rev to the list of node revs. It should
-    % be filtered out
-    {Pos, [Rev | _]} = OldDoc#doc.revs,
-    RROpts = [{read_repair, [
-        {tgt_node(), [{Pos, Rev}]},
-        {node(), [{1, <<"123">>}]}
-    ]}],
-    rpc_update_doc(DbName, OldDoc, RROpts),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
-    ?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
-    create_purge_checkpoint(DbName, PSeq),
-
-    rpc_update_doc(DbName, OldDoc),
-
-    ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
-
-wrap({Name, Fun}) ->
-    fun(Arg) ->
-        {timeout, 60, {atom_to_list(Name), fun() ->
-            process_flag(trap_exit, true),
-            Fun(Arg)
-        end}}
-    end.
-
-
-create_db() ->
-    DbName = ?tempdb(),
-    couch_db:create(DbName, [?ADMIN_CTX]).
-
-
-populate_db(Db) ->
-    Docs = lists:map(fun(Idx) ->
-        DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
-        #doc{
-            id = list_to_binary(DocId),
-            body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
-        }
-    end, lists:seq(1, 100)),
-    {ok, _} = couch_db:update_docs(Db, Docs).
-
-
-open_doc(DbName, DocId) ->
-    couch_util:with_db(DbName, fun(Db) ->
-        couch_db:open_doc(Db, DocId, [])
-    end).
-
-
-create_update(Doc, NewVsn) ->
-    #doc{
-        id = DocId,
-        revs = {Pos, [Rev | _] = Revs},
-        body = {Props}
-    } = Doc,
-    NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}),
-    NewRev = crypto:hash(md5, term_to_binary({DocId, Rev, {NewProps}})),
-    Doc#doc{
-        revs = {Pos + 1, [NewRev | Revs]},
-        body = {NewProps}
-    }.
-
-
-purge_doc(DbName, DocId) ->
-    {ok, Doc} = open_doc(DbName, DocId),
-    {Pos, [Rev | _]} = Doc#doc.revs,
-    PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]},
-    Resp = couch_util:with_db(DbName, fun(Db) ->
-        couch_db:purge_docs(Db, [PInfo], [])
-    end),
-    ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp).
-
-
-create_purge_checkpoint(DbName, PurgeSeq) ->
-    create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()).
-
-
-create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) ->
-    Resp = couch_util:with_db(DbName, fun(Db) ->
-        SrcUUID = couch_db:get_uuid(Db),
-        TgtUUID = couch_uuids:random(),
-        CPDoc = #doc{
-            id = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
-            body = {[
-                {<<"target_node">>, TgtNode},
-                {<<"purge_seq">>, PurgeSeq}
-            ]}
-        },
-        couch_db:update_docs(Db, [CPDoc], [])
-    end),
-    ?assertMatch({ok, [_]}, Resp).
-
-
-rpc_update_doc(DbName, Doc) ->
-    {Pos, [Rev | _]} = Doc#doc.revs,
-    RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]},
-    rpc_update_doc(DbName, Doc, [RROpt]).
-
-
-rpc_update_doc(DbName, Doc, Opts) ->
-    Ref = erlang:make_ref(),
-    put(rexi_from, {self(), Ref}),
-    fabric_rpc:update_docs(DbName, [Doc], Opts),
-    Reply = test_util:wait(fun() ->
-        receive
-            {Ref, Reply} ->
-                Reply
-        after 0 ->
-            wait
-        end
-    end),
-    ?assertEqual({ok, []}, Reply).
-
-
-tgt_node() ->
-    'foo@127.0.0.1'.
-
-
-tgt_node_bin() ->
-    iolist_to_binary(atom_to_list(tgt_node())).


[couchdb] 05/06: Update ddoc_cache to use fabric2

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch prototype/fdb-layer
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit fc1ffeb3caddad8d83772bf21c05788ca0cb2a8d
Author: Paul J. Davis <pa...@gmail.com>
AuthorDate: Wed Jun 5 13:36:02 2019 -0500

    Update ddoc_cache to use fabric2
---
 src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl     | 2 +-
 src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index 5248469..7c3dc67 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -33,7 +33,7 @@ ddocid({_, DDocId}) ->
 
 
 recover({DbName, DDocId}) ->
-    fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
+    fabric2_db:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
 
 
 insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 868fa77..38445af 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -34,7 +34,7 @@ ddocid({_, DDocId, _}) ->
 
 recover({DbName, DDocId, Rev}) ->
     Opts = [ejson_body, ?ADMIN_CTX],
-    {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
+    {ok, [Resp]} = fabric2_db:open_doc_revs(DbName, DDocId, [Rev], Opts),
     Resp.